Example 1
    def test_acceleration_comparison(self):
        """
        Test that all acceleration schemes return the same and correct solution.

        """

        # Convex functions.
        y = [1, 0, 0.1, 8, -6.5, 0.2, 0.004, 0.01]
        sol = [0.75, 0, 0, 7.75, -6.25, 0, 0, 0]
        w1, w2 = .8, .4
        f1 = functions.norm_l2(y=y, lambda_=w1 / 2.)  # Smooth.
        f2 = functions.norm_l1(lambda_=w2 / 2.)       # Non-smooth.

        # Solvers.
        L = w1  # Lipschitz continuous gradient.
        step = 1. / L
        slvs = []
        slvs.append(solvers.forward_backward(accel=acceleration.dummy(),
                                             step=step))
        slvs.append(solvers.forward_backward(accel=acceleration.fista(),
                                             step=step))
        # eta sets the geometric ratio used to shrink the step while backtracking.
        slvs.append(solvers.forward_backward(
            accel=acceleration.fista_backtracking(eta=.999), step=step))

        # Compare solutions.
        params = {'rtol': 1e-14, 'verbosity': 'NONE', 'maxit': 1e4}
        niters = [2, 2, 6]
        for solver, niter in zip(slvs, niters):
            x0 = np.zeros(len(y))
            ret = solvers.solve([f1, f2], x0, solver, **params)
            nptest.assert_allclose(ret['sol'], sol)
            self.assertEqual(ret['niter'], niter)
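The expected sol is not arbitrary: the objective (w1/2)*||x - y||_2^2 + (w2/2)*||x||_1 is minimized in closed form by soft-thresholding y at t = w2/(2*w1) = 0.25. A quick standalone check (not part of the test suite):

import numpy as np

y = np.array([1, 0, 0.1, 8, -6.5, 0.2, 0.004, 0.01])
w1, w2 = .8, .4
t = w2 / (2. * w1)  # threshold = 0.25
sol = np.sign(y) * np.maximum(np.abs(y) - t, 0)
print(sol)  # [ 0.75  0.    0.    7.75 -6.25  0.    0.    0.  ]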
Example 2
    def test_backtracking(self):
        """
        Test forward-backward splitting solver with backtracking, solving
        problems with L1-norm, L2-norm, and dummy functions.

        """
        # Test constructor sanity
        a = acceleration.backtracking()
        self.assertRaises(ValueError, a.__init__, 2.)
        self.assertRaises(ValueError, a.__init__, -2.)

        y = [4., 5., 6., 7.]
        accel = acceleration.backtracking()
        step = 10  # Make sure backtracking is called
        solver = solvers.forward_backward(accel=accel, step=step)
        param = {'solver': solver, 'atol': 1e-32, 'verbosity': 'NONE'}

        # L2-norm prox and dummy gradient.
        f1 = functions.norm_l2(y=y)
        f2 = functions.dummy()
        ret = solvers.solve([f1, f2], np.zeros(len(y)), **param)
        nptest.assert_allclose(ret['sol'], y)
        self.assertEqual(ret['crit'], 'ATOL')
        self.assertEqual(ret['niter'], 13)

        # L1-norm prox and L2-norm gradient.
        f1 = functions.norm_l1(y=y, lambda_=1.0)
        f2 = functions.norm_l2(y=y, lambda_=0.8)
        ret = solvers.solve([f1, f2], np.zeros(len(y)), **param)
        nptest.assert_allclose(ret['sol'], y)
        self.assertEqual(ret['crit'], 'ATOL')
        self.assertLessEqual(ret['niter'], 4)  # win64 may take one iteration fewer.
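Intuition for the test: the solver starts from a deliberately too-large step (10) and backtracking shrinks it geometrically until a sufficient-decrease condition holds. A generic sketch of that idea, assuming the standard quadratic-bound test rather than pyunlocbox's exact rule:

import numpy as np

def backtrack_step(f, grad, x, step=10., eta=0.5):
    g = grad(x)
    # Shrink until the quadratic upper bound (sufficient decrease) holds.
    while f(x - step * g) > f(x) - 0.5 * step * g.dot(g):
        step *= eta
    return step

c = np.array([4., 5., 6., 7.])
f = lambda x: 0.5 * np.sum((x - c) ** 2)  # gradient is 1-Lipschitz
grad = lambda x: x - c
print(backtrack_step(f, grad, np.zeros(4)))  # 0.625, the first tried step <= 1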
Example 3
    def test_forward_backward_fista_backtracking(self):
        """
        Test forward-backward splitting solver with fista acceleration and
        backtracking, solving problems with L1-norm, L2-norm, and dummy
        functions.

        """
        y = [4., 5., 6., 7.]
        accel = acceleration.fista_backtracking()
        solver = solvers.forward_backward(accel=accel)
        param = {'solver': solver, 'rtol': 1e-6, 'verbosity': 'NONE'}

        # L2-norm prox and dummy gradient.
        f1 = functions.norm_l2(y=y)
        f2 = functions.dummy()
        ret = solvers.solve([f1, f2], np.zeros(len(y)), **param)
        nptest.assert_allclose(ret['sol'], y)
        self.assertEqual(ret['crit'], 'RTOL')
        self.assertEqual(ret['niter'], 60)

        # L1-norm prox and L2-norm gradient.
        f1 = functions.norm_l1(y=y, lambda_=1.0)
        f2 = functions.norm_l2(y=y, lambda_=0.8)
        ret = solvers.solve([f1, f2], np.zeros(len(y)), **param)
        nptest.assert_allclose(ret['sol'], y)
        self.assertEqual(ret['crit'], 'RTOL')
        self.assertEqual(ret['niter'], 3)
Example 4
    def test_forward_backward(self):
        """
        Test forward-backward splitting algorithm without acceleration, and
        with L1-norm, L2-norm, and dummy functions.

        """
        y = [4., 5., 6., 7.]
        solver = solvers.forward_backward(accel=acceleration.dummy())
        param = {'solver': solver, 'rtol': 1e-6, 'verbosity': 'NONE'}

        # L2-norm prox and dummy gradient.
        f1 = functions.norm_l2(y=y)
        f2 = functions.dummy()
        ret = solvers.solve([f1, f2], np.zeros(len(y)), **param)
        nptest.assert_allclose(ret['sol'], y)
        self.assertEqual(ret['crit'], 'RTOL')
        self.assertEqual(ret['niter'], 35)

        # L1-norm prox and L2-norm gradient.
        f1 = functions.norm_l1(y=y, lambda_=1.0)
        f2 = functions.norm_l2(y=y, lambda_=0.8)
        ret = solvers.solve([f1, f2], np.zeros(len(y)), **param)
        nptest.assert_allclose(ret['sol'], y)
        self.assertEqual(ret['crit'], 'RTOL')
        self.assertEqual(ret['niter'], 4)

        # Sanity check
        f3 = functions.dummy()
        x0 = np.zeros((4,))
        self.assertRaises(ValueError, solver.pre, [f1, f2, f3], x0)
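Behind the scenes, the solver runs the proximal-gradient (forward-backward) iteration x <- prox_{step*f1}(x - step*grad_f2(x)). A self-contained sketch on the same L1/L2 problem, with both operators written out by hand:

import numpy as np

y = np.array([4., 5., 6., 7.])
lam1, lam2 = 1.0, 0.8
step = 1. / (2 * lam2)  # 1/L, since grad_f2 = 2*lam2*(x - y) is 2*lam2-Lipschitz
x = np.zeros_like(y)
for _ in range(10):
    z = x - step * 2 * lam2 * (x - y)  # forward (gradient) step on lam2*||x-y||_2^2
    # Backward step: the prox of lam1*||x-y||_1 soft-thresholds around y.
    x = y + np.sign(z - y) * np.maximum(np.abs(z - y) - step * lam1, 0)
print(x)  # [4. 5. 6. 7.], the same solution the solver reaches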
Example 5
    def test_solver_comparison(self):
        """
        Test that all solvers return the same and correct solution.

        """

        # Convex functions.
        y = [1, 0, 0.1, 8, -6.5, 0.2, 0.004, 0.01]
        sol = [0.75, 0, 0, 7.75, -6.25, 0, 0, 0]
        w1, w2 = .8, .4
        f1 = functions.norm_l2(y=y, lambda_=w1 / 2.)  # Smooth.
        f2 = functions.norm_l1(lambda_=w2 / 2.)       # Non-smooth.

        # Solvers.
        L = w1  # Lipschitz continuous gradient.
        step = 1. / L
        lambda_ = 0.5
        params = {'step': step, 'lambda_': lambda_}
        slvs = []
        slvs.append(solvers.forward_backward(accel=acceleration.dummy(),
                                             step=step))
        slvs.append(solvers.douglas_rachford(**params))
        slvs.append(solvers.generalized_forward_backward(**params))

        # Compare solutions.
        params = {'rtol': 1e-14, 'verbosity': 'NONE', 'maxit': 1e4}
        niters = [2, 61, 26]
        for solver, niter in zip(slvs, niters):
            x0 = np.zeros(len(y))
            ret = solvers.solve([f1, f2], x0, solver, **params)
            nptest.assert_allclose(ret['sol'], sol)
            self.assertEqual(ret['niter'], niter)
            self.assertIs(ret['sol'], x0)  # The initial value was modified.
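Unlike forward-backward, Douglas-Rachford never evaluates a gradient: it alternates the two proximal operators. A minimal sketch of the iteration on the same problem, with the prox formulas written out by hand:

import numpy as np

y = np.array([1, 0, 0.1, 8, -6.5, 0.2, 0.004, 0.01])
w1, w2 = .8, .4
step, lam = 1. / w1, 0.5  # same parameters as above
prox_f1 = lambda z: (z + step * w1 * y) / (1. + step * w1)  # prox of (w1/2)*||x-y||_2^2
prox_f2 = lambda z: np.sign(z) * np.maximum(np.abs(z) - step * w2 / 2., 0)  # prox of (w2/2)*||x||_1
z = np.zeros_like(y)
for _ in range(200):
    x = prox_f2(z)
    z = z + lam * (prox_f1(2 * x - z) - x)
print(np.round(x, 3))  # [ 0.75  0.    0.    7.75 -6.25  0.    0.    0.  ]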
Example 6
    def test_accel(self):
        """
        Test base acceleration scheme class

        """
        funs = [functions.dummy(), functions.dummy()]
        x0 = np.zeros((4,))
        a = acceleration.accel()
        s = solvers.forward_backward()
        o = [[1., 2.], [0., 1.]]
        n = 2

        self.assertRaises(NotImplementedError, a.pre, funs, x0)
        self.assertRaises(NotImplementedError, a.update_step, s, o, n)
        self.assertRaises(NotImplementedError, a.update_sol, s, o, n)
        self.assertRaises(NotImplementedError, a.post)
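Concrete schemes subclass accel and override the private hooks behind these four methods. A sketch, inferred from the calls exercised above (the hook names are assumptions based on the library's _pre/_eval naming convention), of a do-nothing scheme:

class identity_accel(acceleration.accel):
    """Hypothetical no-op scheme: keep the step and the iterate unchanged."""

    def _pre(self, functions, x0):
        pass

    def _update_step(self, solver, objective, niter):
        return solver.step  # keep the current step size

    def _update_sol(self, solver, objective, niter):
        return solver.sol  # no extrapolation of the iterate

    def _post(self):
        pass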
Example 7
    def test_forward_backward_fista(self):
        """
        Test forward-backward splitting solver with fista acceleration,
        solving problems with L1-norm, L2-norm, and dummy functions.

        """
        y = [4., 5., 6., 7.]
        solver = solvers.forward_backward(accel=acceleration.fista())
        param = {'solver': solver, 'rtol': 1e-6, 'verbosity': 'NONE'}

        # L2-norm prox and dummy gradient.
        f1 = functions.norm_l2(y=y)
        f2 = functions.dummy()
        ret = solvers.solve([f1, f2], np.zeros(len(y)), **param)
        nptest.assert_allclose(ret['sol'], y)
        self.assertEqual(ret['crit'], 'RTOL')
        self.assertEqual(ret['niter'], 60)

        # Dummy prox and L2-norm gradient.
        f1 = functions.dummy()
        f2 = functions.norm_l2(y=y, lambda_=0.6)
        ret = solvers.solve([f1, f2], np.zeros(len(y)), **param)
        nptest.assert_allclose(ret['sol'], y)
        self.assertEqual(ret['crit'], 'RTOL')
        self.assertEqual(ret['niter'], 84)

        # L2-norm prox and L2-norm gradient.
        f1 = functions.norm_l2(y=y)
        f2 = functions.norm_l2(y=y)
        ret = solvers.solve([f1, f2], np.zeros(len(y)), **param)
        nptest.assert_allclose(ret['sol'], y, rtol=1e-2)
        self.assertEqual(ret['crit'], 'MAXIT')
        self.assertEqual(ret['niter'], 200)

        # L1-norm prox and dummy gradient.
        f1 = functions.norm_l1(y=y)
        f2 = functions.dummy()
        ret = solvers.solve([f1, f2], np.zeros(len(y)), **param)
        nptest.assert_allclose(ret['sol'], y)
        self.assertEqual(ret['crit'], 'RTOL')
        self.assertEqual(ret['niter'], 6)

        # Dummy prox and L1-norm gradient. As the L1-norm has no gradient,
        # the algorithm exchanges the functions: exact same solution.
        f1 = functions.dummy()
        f2 = functions.norm_l1(y=y)
        ret = solvers.solve([f1, f2], np.zeros(len(y)), **param)
        nptest.assert_allclose(ret['sol'], y)
        self.assertEqual(ret['crit'], 'RTOL')
        self.assertEqual(ret['niter'], 6)

        # L1-norm prox and L1-norm gradient. L1-norm possesses no gradient.
        f1 = functions.norm_l1(y=y)
        f2 = functions.norm_l1(y=y)
        self.assertRaises(ValueError, solvers.solve,
                          [f1, f2], np.zeros(len(y)), **param)

        # L1-norm prox and L2-norm gradient.
        f1 = functions.norm_l1(y=y, lambda_=1.0)
        f2 = functions.norm_l2(y=y, lambda_=0.8)
        ret = solvers.solve([f1, f2], np.zeros(len(y)), **param)
        nptest.assert_allclose(ret['sol'], y)
        self.assertEqual(ret['crit'], 'RTOL')
        self.assertEqual(ret['niter'], 10)
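acceleration.fista() follows the Beck-Teboulle momentum rule: after each forward-backward step it extrapolates from the previous iterate with a coefficient built from t_{k+1} = (1 + sqrt(1 + 4*t_k^2)) / 2. A tiny sketch of how that coefficient grows:

import numpy as np

t = 1.
for k in range(6):
    t_next = (1. + np.sqrt(1. + 4. * t ** 2)) / 2.
    print(k, round((t - 1.) / t_next, 4))  # momentum coefficient, tends to 1
    t = t_next
# The extrapolated point is x_k + coeff * (x_k - x_{k-1}).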
Example 8
def classification_tikhonov_simplex(G, y, M, tau=0.1, **kwargs):
    r"""Solve a classification problem on graph via Tikhonov minimization
    with simple constraints.

    The function first transforms :math:`y` into logits :math:`Y`, then solves

    .. math:: \operatorname*{arg\,min}_X \| M X - Y \|_2^2
              + \tau \operatorname{tr}(X^\top L X)
              \text{ s.t. } \textstyle\sum_k X_{ik} = 1 \text{ and } X \geq 0,

    where :math:`X` and :math:`Y` are logits.

    Parameters
    ----------
    G : :class:`pygsp.graphs.Graph`
    y : array, length G.n_vertices
        Measurements.
    M : array of boolean, length G.n_vertices
        Masking vector.
    tau : float
        Regularization parameter.
    kwargs : dict
        Parameters for :func:`pyunlocbox.solvers.solve`.

    Returns
    -------
    logits : array, length G.n_vertices
        The logits :math:`X`.

    Examples
    --------
    >>> from pygsp import graphs, learning
    >>> import matplotlib.pyplot as plt
    >>>
    >>> G = graphs.Logo()
    >>> G.estimate_lmax()

    Create a ground truth signal:

    >>> signal = np.zeros(G.n_vertices)
    >>> signal[G.info['idx_s']] = 1
    >>> signal[G.info['idx_p']] = 2

    Construct a measurement signal from a binary mask:

    >>> rs = np.random.RandomState(42)
    >>> mask = rs.uniform(0, 1, G.n_vertices) > 0.5
    >>> measures = signal.copy()
    >>> measures[~mask] = np.nan

    Solve the classification problem by reconstructing the signal:

    >>> recovery = learning.classification_tikhonov_simplex(
    ...     G, measures, mask, tau=0.1, verbosity='NONE')

    Plot the results.
    Note that we recover the class with ``np.argmax(recovery, axis=1)``.

    >>> prediction = np.argmax(recovery, axis=1)
    >>> fig, ax = plt.subplots(2, 3, sharey=True, figsize=(10, 6))
    >>> _ = G.plot_signal(signal, ax=ax[0, 0], title='Ground truth')
    >>> _ = G.plot_signal(measures, ax=ax[0, 1], title='Measurements')
    >>> _ = G.plot_signal(prediction, ax=ax[0, 2], title='Recovered class')
    >>> _ = G.plot_signal(recovery[:, 0], ax=ax[1, 0], title='Logit 0')
    >>> _ = G.plot_signal(recovery[:, 1], ax=ax[1, 1], title='Logit 1')
    >>> _ = G.plot_signal(recovery[:, 2], ax=ax[1, 2], title='Logit 2')
    >>> _ = fig.tight_layout()

    """

    functions, solvers = _import_pyunlocbox()

    if tau <= 0:
        raise ValueError('Tau should be greater than 0.')

    y = y.copy()  # Do not modify the caller's array.
    y[~M] = 0
    Y = _to_logits(y.astype(int))
    Y[~M, :] = 0

    def proj_simplex(y):
        d = y.shape[1]
        idx = np.argsort(y)

        def evalpL(y, k, idx):
            return np.sum(y[idx[k:]] - y[idx[k]]) - 1

        def bisectsearch(idx, y):
            idxL, idxH = 0, d-1
            L = evalpL(y, idxL, idx)
            H = evalpL(y, idxH, idx)

            if L < 0:
                return idxL

            while (idxH-idxL) > 1:
                iMid = int((idxL + idxH) / 2)
                M = evalpL(y, iMid, idx)

                if M > 0:
                    idxL, L = iMid, M
                else:
                    idxH, H = iMid, M

            return idxH

        def proj(idx, y):
            k = bisectsearch(idx, y)
            lam = (np.sum(y[idx[k:]]) - 1) / (d - k)
            return np.maximum(0, y - lam)

        x = np.empty_like(y)
        for i in range(len(y)):
            x[i] = proj(idx[i], y[i])
        # x = np.stack(map(proj, idx, y))

        return x

    def smooth_eval(x):
        xTLx = np.sum(x * (G.L.dot(x)))
        e = M * ((M * x.T) - Y.T)
        l2 = np.sum(e * e)
        return tau * xTLx + l2

    def smooth_grad(x):
        return 2 * ((M * (M * x.T - Y.T)).T + tau * G.L * x)

    f1 = functions.func()
    f1._eval = smooth_eval
    f1._grad = smooth_grad

    f2 = functions.func()
    f2._eval = lambda x: 0  # Indicator functions evaluate to zero.
    f2._prox = lambda x, step: proj_simplex(x)

    # 1/L, where L = 2 * (1 + tau * G.lmax) bounds the Lipschitz constant
    # of smooth_grad.
    step = 0.5 / (1 + tau * G.lmax)
    solver = solvers.forward_backward(step=step)
    ret = solvers.solve([f1, f2], Y.copy(), solver, **kwargs)
    return ret['sol']
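The inner proj_simplex is a row-wise Euclidean projection onto the probability simplex, implemented with a bisection search for the threshold. The same projection can be written with the classic sort-based rule; a standalone sketch (a hypothetical helper, not part of pygsp) that is easy to sanity-check:

import numpy as np

def simplex_proj_row(v):
    u = np.sort(v)[::-1]  # sort one row in decreasing order
    css = np.cumsum(u) - 1
    rho = np.nonzero(u - css / np.arange(1, len(v) + 1) > 0)[0][-1]
    return np.maximum(v - css[rho] / (rho + 1), 0)  # subtract threshold, clip

x = simplex_proj_row(np.array([0.3, 1.2, -0.5]))
print(x, x.sum())  # [0.05 0.95 0.  ] 1.0 -- nonnegative and sums to one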
Example 9
    def test_solve(self):
        """
        Test some features of the solving function.

        """

        # We have to set a seed here for the random draw if we are required
        # below to assert that the number of iterations of the solvers are
        # equal to some specific values. Otherwise, we get trivial errors when
        # x0 is a little farther away from y in a given draw.
        rs = np.random.RandomState(42)

        y = 5 - 10 * rs.uniform(size=(15, 4))

        def x0(): return np.zeros(y.shape)
        nverb = {'verbosity': 'NONE'}

        # Function verbosity.
        f = functions.dummy()
        self.assertEqual(f.verbosity, 'NONE')
        f.verbosity = 'LOW'
        solvers.solve([f], x0(), **nverb)
        self.assertEqual(f.verbosity, 'LOW')

        # Input parameters.
        self.assertRaises(ValueError, solvers.solve, [f], x0(), verbosity='??')

        # Addition of dummy function.
        self.assertRaises(ValueError, solvers.solve, [], x0(), **nverb)
        solver = solvers.forward_backward()
        solvers.solve([f], x0(), solver, **nverb)
        # self.assertIsInstance(solver.f1, functions.dummy)
        # self.assertIsInstance(solver.f2, functions.dummy)

        # Automatic solver selection.
        f0 = functions.func()
        f0._eval = lambda x: 0
        f0._grad = lambda x: x
        f1 = functions.func()
        f1._eval = lambda x: 0
        f1._grad = lambda x: x
        f1._prox = lambda x, T: x
        f2 = functions.func()
        f2._eval = lambda x: 0
        f2._prox = lambda x, T: x
        self.assertRaises(ValueError, solvers.solve, [f0, f0], x0(), **nverb)
        ret = solvers.solve([f0, f1], x0(), **nverb)
        self.assertEqual(ret['solver'], 'forward_backward')
        ret = solvers.solve([f1, f0], x0(), **nverb)
        self.assertEqual(ret['solver'], 'forward_backward')
        ret = solvers.solve([f1, f2], x0(), **nverb)
        self.assertEqual(ret['solver'], 'forward_backward')
        ret = solvers.solve([f2, f2], x0(), **nverb)
        self.assertEqual(ret['solver'], 'douglas_rachford')
        ret = solvers.solve([f1, f2, f0], x0(), **nverb)
        self.assertEqual(ret['solver'], 'generalized_forward_backward')

        # Stopping criteria.
        f = functions.norm_l2(y=y)
        tol = 1e-6
        r = solvers.solve([f], x0(), None, tol, None, None, None, None, 'NONE')
        self.assertEqual(r['crit'], 'ATOL')
        self.assertLess(np.sum(r['objective'][-1]), tol)
        self.assertEqual(r['niter'], 9)
        tol = 1e-8
        r = solvers.solve([f], x0(), None, None, tol, None, None, None, 'NONE')
        self.assertEqual(r['crit'], 'DTOL')
        err = np.abs(np.sum(r['objective'][-1]) - np.sum(r['objective'][-2]))
        self.assertLess(err, tol)
        self.assertEqual(r['niter'], 17)
        tol = .1
        r = solvers.solve([f], x0(), None, None, None, tol, None, None, 'NONE')
        self.assertEqual(r['crit'], 'RTOL')
        err = np.abs(np.sum(r['objective'][-1]) - np.sum(r['objective'][-2]))
        err /= np.sum(r['objective'][-1])
        self.assertLess(err, tol)
        self.assertEqual(r['niter'], 13)
        tol = 1e-4
        r = solvers.solve([f], x0(), None, None, None, None, tol, None, 'NONE')
        self.assertEqual(r['crit'], 'XTOL')
        r2 = solvers.solve([f], x0(), maxit=r['niter'] - 1, **nverb)
        err = np.linalg.norm(r['sol'] - r2['sol']) / np.sqrt(x0().size)
        self.assertLess(err, tol)
        self.assertEqual(r['niter'], 14)
        nit = 15
        r = solvers.solve([f], x0(), None, None, None, None, None, nit, 'NONE')
        self.assertEqual(r['crit'], 'MAXIT')
        self.assertEqual(r['niter'], nit)

        # Return values.
        f = functions.norm_l2(y=y)
        ret = solvers.solve([f], x0(), **nverb)
        self.assertEqual(len(ret), 6)
        self.assertIsInstance(ret['sol'], np.ndarray)
        self.assertIsInstance(ret['solver'], str)
        self.assertIsInstance(ret['crit'], str)
        self.assertIsInstance(ret['niter'], int)
        self.assertIsInstance(ret['time'], float)
        self.assertIsInstance(ret['objective'], list)
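For readability: the positional Nones in the stopping-criteria calls follow solve's parameter order (functions, x0, solver, atol, dtol, rtol, xtol, maxit, verbosity), as the calls themselves confirm. Written with keywords, the ATOL call reads:

r = solvers.solve([f], x0(), solver=None, atol=1e-6, dtol=None, rtol=None,
                  xtol=None, maxit=None, verbosity='NONE')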
Example 10
def classification_tikhonov_simplex(G, y, M, tau=0.1, **kwargs):
    r"""Solve a classification problem on graph via Tikhonov minimization
    with simple constraints.

    The function first transforms :math:`y` into logits :math:`Y`, then solves

    .. math:: \operatorname*{arg\,min}_X \| M X - Y \|_2^2
              + \tau \operatorname{tr}(X^\top L X)
              \text{ s.t. } \textstyle\sum_k X_{ik} = 1 \text{ and } X \geq 0,

    where :math:`X` and :math:`Y` are logits.

    Parameters
    ----------
    G : :class:`pygsp.graphs.Graph`
    y : array, length G.n_vertices
        Measurements.
    M : array of boolean, length G.n_vertices
        Masking vector.
    tau : float
        Regularization parameter.
    kwargs : dict
        Parameters for :func:`pyunlocbox.solvers.solve`.

    Returns
    -------
    logits : array, length G.n_vertices
        The logits :math:`X`.

    Examples
    --------
    >>> from pygsp import graphs, learning
    >>> import matplotlib.pyplot as plt
    >>>
    >>> G = graphs.Logo()
    >>> G.estimate_lmax()

    Create a ground truth signal:

    >>> signal = np.zeros(G.n_vertices)
    >>> signal[G.info['idx_s']] = 1
    >>> signal[G.info['idx_p']] = 2

    Construct a measurement signal from a binary mask:

    >>> rs = np.random.RandomState(42)
    >>> mask = rs.uniform(0, 1, G.n_vertices) > 0.5
    >>> measures = signal.copy()
    >>> measures[~mask] = np.nan

    Solve the classification problem by reconstructing the signal:

    >>> recovery = learning.classification_tikhonov_simplex(
    ...     G, measures, mask, tau=0.1, verbosity='NONE')

    Plot the results.
    Note that we recover the class with ``np.argmax(recovery, axis=1)``.

    >>> prediction = np.argmax(recovery, axis=1)
    >>> fig, ax = plt.subplots(2, 3, sharey=True, figsize=(10, 6))
    >>> _ = G.plot(signal, ax=ax[0, 0], title='Ground truth')
    >>> _ = G.plot(measures, ax=ax[0, 1], title='Measurements')
    >>> _ = G.plot(prediction, ax=ax[0, 2], title='Recovered class')
    >>> _ = G.plot(recovery[:, 0], ax=ax[1, 0], title='Logit 0')
    >>> _ = G.plot(recovery[:, 1], ax=ax[1, 1], title='Logit 1')
    >>> _ = G.plot(recovery[:, 2], ax=ax[1, 2], title='Logit 2')
    >>> _ = fig.tight_layout()

    """

    functions, solvers = _import_pyunlocbox()

    if tau <= 0:
        raise ValueError('Tau should be greater than 0.')

    y = y.copy()
    y[~M] = 0
    Y = _to_logits(y.astype(int))
    Y[~M, :] = 0

    def proj_simplex(y):
        d = y.shape[1]
        idx = np.argsort(y)

        def evalpL(y, k, idx):
            return np.sum(y[idx[k:]] - y[idx[k]]) - 1

        def bisectsearch(idx, y):
            idxL, idxH = 0, d - 1
            L = evalpL(y, idxL, idx)
            H = evalpL(y, idxH, idx)

            if L < 0:
                return idxL

            while (idxH - idxL) > 1:
                iMid = int((idxL + idxH) / 2)
                M = evalpL(y, iMid, idx)

                if M > 0:
                    idxL, L = iMid, M
                else:
                    idxH, H = iMid, M

            return idxH

        def proj(idx, y):
            k = bisectsearch(idx, y)
            lam = (np.sum(y[idx[k:]]) - 1) / (d - k)
            return np.maximum(0, y - lam)

        x = np.empty_like(y)
        for i in range(len(y)):
            x[i] = proj(idx[i], y[i])
        # x = np.stack(map(proj, idx, y))

        return x

    def smooth_eval(x):
        xTLx = np.sum(x * (G.L.dot(x)))
        e = M * ((M * x.T) - Y.T)
        l2 = np.sum(e * e)
        return tau * xTLx + l2

    def smooth_grad(x):
        return 2 * ((M * (M * x.T - Y.T)).T + tau * G.L * x)

    f1 = functions.func()
    f1._eval = smooth_eval
    f1._grad = smooth_grad

    f2 = functions.func()
    f2._eval = lambda x: 0  # Indicator functions evaluate to zero.
    f2._prox = lambda x, step: proj_simplex(x)

    # 1/L, where L = 2 * (1 + tau * G.lmax) bounds the Lipschitz constant
    # of smooth_grad.
    step = 0.5 / (1 + tau * G.lmax)
    solver = solvers.forward_backward(step=step)
    ret = solvers.solve([f1, f2], Y.copy(), solver, **kwargs)
    return ret['sol']
Example 11
    plt.figure(14)
    plt.title("Estimated Super resolution gaussian - Wiener filter")
    plt.imshow(H_estimated_wf_gaussian, cmap='gray')
    plt.figure(15)
    plt.title("Estimated Super resolution box - Wiener filter")
    plt.imshow(H_estimated_wf_box, cmap='gray')

    # Task 5.2
    tau = 100

    g = lambda H:  signal.convolve2d(H, K_gaussian, boundary='symm', mode='same')
    l_blurred_cpy = np.array(blurred_image_gaussian_l)
    tv_prior_f = functions.norm_tv(maxit=50, dim=2)
    norm_l2_f = functions.norm_l2(y=l_blurred_cpy, A=g, lambda_=tau)
    solver = solvers.forward_backward(step=0.0001 / tau)
    H_estimated_lms_tv_gaussian = solvers.solve([tv_prior_f, norm_l2_f], l_blurred_cpy, solver, maxit=100)

    g = lambda H:  signal.convolve2d(H, K_box, boundary='symm', mode='same')
    l_blurred_cpy = np.array(blurred_image_box_l)
    tv_prior_f = functions.norm_tv(maxit=50, dim=2)
    norm_l2_f = functions.norm_l2(y=l_blurred_cpy, A=g, lambda_=tau)
    solver = solvers.forward_backward(step=0.0001 / tau)
    H_estimated_lms_tv_box = solvers.solve([tv_prior_f, norm_l2_f], l_blurred_cpy, solver, maxit=100)

    plt.figure(16)
    plt.title("Estimated Super resolution gaussian - Least mean square with TV prior")
    plt.imshow(H_estimated_lms_tv_gaussian['sol'], cmap='gray')

    plt.figure(17)
    plt.title("Estimated Super resolution box - Least mean square with TV prior")
Example 12
def run(scale=1.99,
        sigma_blur=0.1,
        noise_level_denoiser=0.005,
        num=None,
        method='FBS',
        pretrained_weights=True):
    if not os.path.isdir('results_conv'):
        os.mkdir('results_conv')
    # declare model
    act = tf.keras.activations.relu
    num_filters = 64
    max_dim = 128
    num_layers = 8
    sizes = [None] * (num_layers)
    conv_shapes = [(num_filters, max_dim)] * num_layers
    filter_length = 5
    model = StiefelModel(sizes,
                         None,
                         convolutional=True,
                         filter_length=filter_length,
                         dim=2,
                         conv_shapes=conv_shapes,
                         activation=act,
                         scale_layer=scale)
    pred = model(tf.random.normal((10, 40, 40)))
    model.fast_execution = True

    # load weights
    if pretrained_weights:
        file_name = 'data/pretrained_weights/scale' + str(
            scale) + '_noise_level' + str(noise_level_denoiser) + '.pickle'
    else:
        if num is None:
            file_name = 'results_conv/scale' + str(
                scale) + '_noise_level' + str(
                    noise_level_denoiser) + '/adam.pickle'
        else:
            file_name = 'results_conv/scale' + str(
                scale) + '_noise_level' + str(
                    noise_level_denoiser) + '/adam' + str(num) + '.pickle'
    with open(file_name, 'rb') as f:
        trainable_vars = pickle.load(f)
    for i in range(len(model.trainable_variables)):
        model.trainable_variables[i].assign(trainable_vars[i])
    beta = 1e8
    project = True
    if project:
        # project convolution matrices on the Stiefel manifold
        for i in range(len(model.stiefel)):
            convs = model.stiefel[i].convs
            smaller = convs.shape[0] < convs.shape[1]
            if smaller:
                convs = transpose_convs(convs)
            iden = np.zeros((convs.shape[1], convs.shape[1],
                             4 * filter_length + 1, 4 * filter_length + 1),
                            dtype=np.float32)
            for j in range(convs.shape[1]):
                iden[j, j, 2 * filter_length, 2 * filter_length] = 1
            iden = tf.constant(iden)
            C = tf.identity(convs)

            def projection_objective(C):
                return 0.5 * beta * tf.reduce_sum(
                    (conv_mult(transpose_convs(C), C) - iden)**
                    2) + .5 * tf.reduce_sum((C - convs)**2)

            for iteration in range(100):
                with tf.GradientTape(persistent=True) as tape:
                    tape.watch(C)
                    val = projection_objective(C)
                    grad = tape.gradient(val, C)
                    grad_sum = tf.reduce_sum(grad * grad)
                hess = tape.gradient(grad_sum, C)
                hess *= 0.5 / tf.sqrt(grad_sum)
                C -= grad / tf.sqrt(tf.reduce_sum(hess * hess))
            if smaller:
                C = transpose_convs(C)
            model.stiefel[i].convs.assign(C)

    # load data
    test_directory = 'data/BSD68'
    fileList = os.listdir(test_directory + '/')
    fileList.sort()
    img_names = fileList
    save_path = 'results_conv/PnP_blur_' + method + str(sigma_blur)
    if not os.path.isdir(save_path):
        os.mkdir(save_path)
    if not os.path.isdir(save_path + '/blurred_data'):
        os.mkdir(save_path + '/blurred_data')
    if not os.path.isdir(save_path + '/l2tv'):
        os.mkdir(save_path + '/l2tv')
    psnr_sum = 0.
    psnr_noisy_sum = 0.
    psnr_l2tv_sum = 0.
    error_sum = 0.
    error_bm3d_sum = 0.
    counter = 0
    sig = sigma_blur
    sig_sq = sig**2
    noise_level = 0.01
    kernel_width = 9
    x_range = 1. * np.array(range(kernel_width))
    kernel_x = np.tile(x_range[:, np.newaxis],
                       (1, kernel_width)) - .5 * (kernel_width - 1)
    y_range = 1. * np.array(range(kernel_width))
    kernel_y = np.tile(y_range[np.newaxis, :],
                       (kernel_width, 1)) - .5 * (kernel_width - 1)
    kernel = np.exp(-(kernel_x**2 + kernel_y**2) / (2 * sig_sq))
    kernel /= np.sum(kernel)
    kernel = tf.constant(kernel, dtype=tf.float32)
    with open(save_path + "/psnrs.txt", "w") as myfile:
        myfile.write("PSNRs:\n")
    np.random.seed(25)
    for name in img_names:
        # load image and compute blurred version
        counter += 1
        img = Image.open(test_directory + '/' + name)
        img = img.convert('L')
        img_gray = 1.0 * np.array(img)
        img_gray /= 255.0

        img_gray_pil = Image.fromarray(img_gray * 255.0)
        img_gray_pil = img_gray_pil.convert('RGB')
        img_gray_pil.save(save_path + '/original' + name)
        one_img = tf.ones(img_gray.shape)

        img_blurred = tf.nn.conv2d(
            tf.expand_dims(
                tf.expand_dims(tf.constant(img_gray, dtype=tf.float32), 0),
                -1), tf.expand_dims(tf.expand_dims(kernel, -1), -1), 1, 'SAME')
        img_blurred = tf.squeeze(img_blurred).numpy()
        ones_blurred = tf.nn.conv2d(
            tf.expand_dims(
                tf.expand_dims(tf.constant(one_img, dtype=tf.float32), 0), -1),
            tf.expand_dims(tf.expand_dims(kernel, -1), -1), 1, 'SAME')
        ones_blurred = tf.squeeze(ones_blurred).numpy()
        img_blurred /= ones_blurred
        noise = np.random.normal(0, 1, img_blurred.shape)
        img_blurred += noise_level * noise
        pad = kernel_width // 2
        img_obs = img_blurred[pad:-pad, pad:-pad]
        img_start = np.pad(img_obs, ((pad, pad), (pad, pad)), 'edge')
        img_obs_big = np.concatenate([
            np.zeros((img_obs.shape[0], pad)), img_obs,
            np.zeros((img_obs.shape[0], pad))
        ], 1)
        img_obs_big = np.concatenate([
            np.zeros((pad, img_obs_big.shape[1])), img_obs_big,
            np.zeros((pad, img_obs_big.shape[1]))
        ], 0)
        savemat(save_path + '/blurred_data/' + name[:-4] + '_blurred.mat',
                {'img_blur': (img_blurred) * 255})
        scalar = scale
        alpha_star = 0.5
        conv_coord = 1 - scalar + 2 * alpha_star * scalar

        # declare functions for PnP
        def my_f(signal, inp_signal):
            signal_blurred = tf.nn.conv2d(
                tf.expand_dims(signal, -1),
                tf.expand_dims(tf.expand_dims(kernel, -1), -1), 1, 'VALID')
            signal_blurred = tf.reshape(signal_blurred,
                                        signal_blurred.shape[:3])
            out = .5 * tf.reduce_sum((signal_blurred - img_obs)**2)
            return out

        def prox_my_f(signal, lam, inp_signal):
            out_signal = tf.identity(signal)
            for i in range(50):
                with tf.GradientTape(persistent=True) as tape:
                    tape.watch(out_signal)
                    term1 = my_f(out_signal, inp_signal)
                    term2 = .5 * tf.reduce_sum((out_signal - signal)**2)
                    objective = term1 / lam + term2
                    grad = tape.gradient(objective, out_signal)
                    grad_sum = tf.reduce_sum(grad**2)
                hess = .5 * tape.gradient(grad_sum,
                                          out_signal) / tf.sqrt(grad_sum)
                out_signal -= grad / tf.sqrt(tf.reduce_sum(hess**2))
            return out_signal

        def grad_f(signal):
            signal_blurred = tf.nn.conv2d(
                tf.expand_dims(signal, -1),
                tf.expand_dims(tf.expand_dims(kernel, -1), -1), 1, 'SAME')
            signal_blurred_minus_inp = tf.reshape(
                signal_blurred, signal_blurred.shape[:3]) - img_blurred

            AtA = tf.nn.conv2d(tf.expand_dims(signal_blurred_minus_inp, -1),
                               tf.expand_dims(tf.expand_dims(kernel, -1), -1),
                               1, 'SAME')
            AtA = tf.reshape(AtA, signal_blurred.shape[:3])
            return AtA

        # L2-TV
        def g(signal):
            signal_blurred = tf.nn.conv2d(
                tf.expand_dims(
                    tf.expand_dims(tf.constant(signal, tf.float32), -1), 0),
                tf.expand_dims(tf.expand_dims(kernel, -1), -1), 1, 'VALID')
            signal_blurred = tf.squeeze(signal_blurred)
            signal_blurred = np.concatenate([
                np.zeros((signal_blurred.shape[0], pad)),
                signal_blurred.numpy(),
                np.zeros((signal_blurred.shape[0], pad))
            ], 1)
            signal_blurred = np.concatenate([
                np.zeros((pad, signal_blurred.shape[1])), signal_blurred,
                np.zeros((pad, signal_blurred.shape[1]))
            ], 0)
            return signal_blurred

        f1 = functions.norm_tv(maxit=50, dim=2)
        l2tv_lambda = 0.001
        f2 = functions.norm_l2(y=img_obs_big, A=g, lambda_=1 / l2tv_lambda)
        solver = solvers.forward_backward(step=0.5 * l2tv_lambda)
        img_blurred2 = tf.identity(img_start).numpy()
        l2tv = solvers.solve([f1, f2],
                             img_blurred2,
                             solver,
                             maxit=100,
                             verbosity='NONE')
        l2tv = l2tv['sol']

        def my_T(inp, model):
            my_fac = 1.
            return (1 - 1 /
                    (conv_coord)) * l2tv + 1 / (conv_coord) * (inp - model(
                        (inp - .5) * my_fac))

        # Compute PnP result
        if method == 'FBS':
            pred = PnP_FBS(model,
                           l2tv[np.newaxis, :, :],
                           tau=1.9,
                           T_fun=my_T,
                           eps=1e-3,
                           fun=my_f)
        elif method == 'ADMM':
            pred = PnP_ADMM(l2tv[np.newaxis, :, :],
                            lambda x: my_T(x, model),
                            gamma=.52,
                            prox_fun=prox_my_f)
        else:
            raise ValueError('Unknown method!')

        # save results
        noisy = (img_start) * 255
        reconstructed = (tf.reshape(
            pred, [pred.shape[1], pred.shape[2]]).numpy()) * 255.
        img_gray = (img_gray) * 255.
        l2tv *= 255
        error_sum += tf.reduce_sum(
            ((reconstructed - img_gray) / 255.)**2).numpy()
        psnr = meanPSNR(
            tf.keras.backend.flatten(reconstructed[2 * pad:-2 * pad,
                                                   2 * pad:-2 * pad]).numpy() /
            255.0,
            tf.keras.backend.flatten(
                img_gray[2 * pad:-2 * pad, 2 * pad:-2 * pad]).numpy() / 255.0,
            one_dist=True)
        psnr_l2tv = meanPSNR(
            tf.keras.backend.flatten(l2tv[2 * pad:-2 * pad,
                                          2 * pad:-2 * pad]).numpy() / 255.0,
            tf.keras.backend.flatten(
                img_gray[2 * pad:-2 * pad, 2 * pad:-2 * pad]).numpy() / 255.0,
            one_dist=True)
        psnr_noisy = meanPSNR(
            tf.keras.backend.flatten(noisy[2 * pad:-2 * pad,
                                           2 * pad:-2 * pad]).numpy() / 255.0,
            tf.keras.backend.flatten(
                img_gray[2 * pad:-2 * pad, 2 * pad:-2 * pad]).numpy() / 255.0,
            one_dist=True)
        print('PSNR of ' + name + ':                    ' + str(psnr))
        print('PSNR L2TV of ' + name + ':               ' + str(psnr_l2tv))
        print('PSNR of noisy ' + name + ':              ' + str(psnr_noisy))
        psnr_sum += psnr
        psnr_noisy_sum += psnr_noisy
        psnr_l2tv_sum += psnr_l2tv
        print('Mean PSNR PPNN:      ' + str(psnr_sum / counter))
        print('Mean PSNR L2TV:      ' + str(psnr_l2tv_sum / counter))
        print('Mean PSNR noisy:     ' + str(psnr_noisy_sum / counter))
        with open(save_path + "/psnrs.txt", "a") as myfile:
            myfile.write('PSNR of ' + name + ':                    ' +
                         str(psnr) + '\n')
            myfile.write('PSNR L2TV of ' + name + ':               ' +
                         str(psnr_l2tv) + '\n')
            myfile.write('PSNR of noisy ' + name + ':              ' +
                         str(psnr_noisy) + '\n')
        img = Image.fromarray(noisy)
        img = img.convert('RGB')
        img.save(save_path + '/noisy' + name)
        img = Image.fromarray(l2tv)
        img = img.convert('RGB')
        img.save(save_path + '/l2tv/l2tv' + name)
        img = Image.fromarray(reconstructed)
        img = img.convert('RGB')
        img.save(save_path + '/reconstructed' + name)
    print('Mean PSNR on images: ' + str(psnr_sum / len(img_names)))
    print('Mean PSNR on noisy images: ' + str(psnr_noisy_sum / len(img_names)))
Example 13
mask = np.random.uniform(size=im_original.shape)
mask = mask > 0.05

g = lambda x: mask * x
im_masked = g(im_original)

from pyunlocbox import functions
f1 = functions.norm_tv(maxit=50, dim=2)

tau = 100
f2 = functions.norm_l2(y=im_masked, A=g, lambda_=tau)

from pyunlocbox import solvers
# step = 1/L: the gradient of tau*||g(x)-y||_2^2 is 2*tau-Lipschitz (||g|| <= 1).
solver = solvers.forward_backward(step=0.5 / tau)

x0 = np.array(im_masked)  # Make a copy to preserve im_masked.
ret = solvers.solve([f1, f2], x0, solver, maxit=100)

import matplotlib.pyplot as plt
fig = plt.figure(figsize=(8, 2.5))
ax1 = fig.add_subplot(1, 3, 1)
_ = ax1.imshow(im_original, cmap='gray')
_ = ax1.axis('off')
_ = ax1.set_title('Original image')
ax2 = fig.add_subplot(1, 3, 2)
_ = ax2.imshow(im_masked, cmap='gray')
_ = ax2.axis('off')
_ = ax2.set_title('Masked image')
ax3 = fig.add_subplot(1, 3, 3)
_ = ax3.imshow(ret['sol'], cmap='gray')
_ = ax3.axis('off')
_ = ax3.set_title('Reconstructed image')
Example 14
def main_tv(hparams):

    ## === Set up === ##
    # Printer setup
    #sys.stdout = open(hparams.text_file_path, 'w')

    # Get inputs
    if hparams.image_mode == '1D':
        x_real = np.array(load_1D(hparams.path, hparams.img_name)).astype(
            np.float32)  #[4096,1]
    elif hparams.image_mode == '2D':
        x_real = np.array(load_2D(hparams.path, hparams.img_name)).astype(
            np.float32)  #[64,64]
    elif hparams.image_mode == '3D':
        x_real = np.array(
            load_img(hparams.path,
                     hparams.img_name, hparams.decoder_type)).astype(
                         np.float32)  #[178,218,3] /  [224,224,3]

    # Initialization
    #np.random.seed(7)
    sig_shape = x_real.shape[0] * x_real.shape[
        1]  #n = 4096*1 or 64*64 or 178*218 or 224*224
    random_vector = None  #initialization
    A = None  #initialization
    selection_mask = None  #initialization
    random_arr = random_flip(sig_shape)  #initialization #[n,]
    mask = None  #initialization

    # Get measurement matrix
    if hparams.model_type == 'denoising' or hparams.model_type == 'compressing':
        if hparams.type_measurements == 'random':  #compressed sensing
            if hparams.image_mode != '3D':
                A = np.random.randn(hparams.num_measurements,
                                    sig_shape).astype(np.float32)  #[m,n]
                noise_shape = [hparams.num_measurements, 1]  #[m,1]
            else:
                A = np.random.randn(int(hparams.num_measurements / 3),
                                    sig_shape).astype(np.float32)  #[m,n]
                noise_shape = [int(hparams.num_measurements / 3), 1]  #[m,1]
        elif hparams.type_measurements == 'identity':  #denoising
            A = np.identity(sig_shape).astype(np.float32)  #[n,n]
            noise_shape = [sig_shape, 1]  #[n,1]
            observ_noise = hparams.noise_level * np.random.randn(
                noise_shape[0], noise_shape[1])  #[n,1]
        elif hparams.type_measurements == 'circulant':  #compressed sensing
            if hparams.image_mode != '3D':
                random_vector = np.random.normal(size=sig_shape)  #[n,]
                selection_mask = create_A_selection(
                    sig_shape, hparams.num_measurements)  #[1,n]
            else:
                random_vector = np.random.normal(size=sig_shape)  #[n,]
                selection_mask = create_A_selection(
                    sig_shape, int(hparams.num_measurements / 3))  #[1,n]

            def circulant_np(signal_vector,
                             random_arr_p=random_arr.reshape(-1, 1),
                             random_vector_p=random_vector.reshape(-1, 1),
                             selection_mask_p=selection_mask.reshape(-1, 1)):
                #step 0: Flip
                signal_vector = signal_vector * random_arr_p  #[n,1] * [n,1] -> [n,1]
                #step 1: F^{-1} @ x
                r1 = ifft(signal_vector)  #[n,1]
                #step 2: Diag() @ F^{-1} @ x
                Ft = fft(random_vector_p)  #[n,1]
                r2 = np.multiply(r1, Ft)  #[n,1] * [n,1] -> [n,1]
                #step 3: F @ Diag() @ F^{-1} @ x
                compressive = fft(r2)  #[n,1]
                #step 4: R_{omega} @ C_{t} @ D_{epsilon}
                compressive = compressive.real  #[n,1]
                select_compressive = compressive * selection_mask_p  #[n,1] * [n,1] -> [n,1]
                return select_compressive

    elif hparams.model_type == 'inpainting':
        if hparams.image_mode == '1D':
            mask = load_mask('Masks', hparams.mask_name_1D, hparams.image_mode,
                             hparams.decoder_type)  #[n,1]
        elif hparams.image_mode == '2D' or hparams.image_mode == '3D':
            mask = load_mask('Masks', hparams.mask_name_2D, hparams.image_mode,
                             hparams.decoder_type)  #[n,n]

    ## === TV norm === ##
    if hparams.decoder_type == 'tv_norm':
        # Construct observation and perform reconstruction
        if hparams.model_type == 'inpainting':
            # measurements and observation
            g = lambda x: mask * x  #[4096,1] * [4096,1] / [178,218,3] * [178,218,3]
            y_real = g(x_real)  #[4096,1] / [178,218,3]
            # tv norm
            if hparams.image_mode == '1D':
                f1 = functions.norm_tv(dim=1)
            elif hparams.image_mode == '2D':
                f1 = functions.norm_tv(dim=2)
            elif hparams.image_mode == '3D':
                f1 = functions.norm_tv(dim=3)
            # L2 norm
            tau = hparams.tau
            f2 = functions.norm_l2(y=y_real, A=g, lambda_=tau)
            # optimisation
            solver = solvers.forward_backward(step=0.5 / tau)
            x0 = np.array(y_real)  # Make a copy to preserve im_masked.
            ret = solvers.solve([f1, f2], x0, solver,
                                maxit=3000)  #output = ret['sol']
            # output
            out_img = ret['sol']  #[4096,1] / [178,218,3]
        elif hparams.model_type == 'denoising':
            assert hparams.type_measurements == 'identity'
            if hparams.image_mode == '3D':
                out_img_list = []
                for i in range(x_real.shape[-1]):
                    # measurements and observation
                    y_real = np.matmul(A, x_real[:, :, i].reshape(
                        -1, 1)) + observ_noise  # [n,n] * [n,1] -> [n,1]
                    # tv norm
                    f1 = functions.norm_tv(dim=1)
                    # epsilon
                    N = math.sqrt(sig_shape)
                    epsilon = N * hparams.noise_level
                    # L2 ball
                    y = np.reshape(y_real, -1)  #[n,1] -> [n,]
                    f = functions.proj_b2(y=y, epsilon=epsilon)
                    f2 = functions.func()
                    # Indicator functions
                    f2._eval = lambda x: 0

                    def prox(x, step):
                        return np.reshape(f.prox(np.reshape(x, -1), 0),
                                          y_real.shape)

                    f2._prox = prox
                    # solver
                    solver = solvers.douglas_rachford(step=0.1)
                    x0 = np.array(y_real)  #[n,1]
                    ret = solvers.solve([f1, f2], x0, solver)
                    # output
                    out_img_piece = ret['sol'].reshape(
                        x_real.shape[0], x_real.shape[1])  #[178,218]
                    out_img_list.append(out_img_piece)
                out_img = np.transpose(np.array(out_img_list), (1, 2, 0))
            else:
                # measurements and observation
                y_real = np.matmul(A, x_real.reshape(
                    -1, 1)) + observ_noise  # [n,n] * [n,1] -> [n,1]
                # tv norm
                f1 = functions.norm_tv(dim=1)
                # epsilon
                N = math.sqrt(sig_shape)
                epsilon = N * hparams.noise_level
                # L2 ball
                y = np.reshape(y_real, -1)  #[n,1] -> [n,]
                f = functions.proj_b2(y=y, epsilon=epsilon)
                f2 = functions.func()
                # Indicator functions
                f2._eval = lambda x: 0

                def prox(x, step):
                    return np.reshape(f.prox(np.reshape(x, -1), 0),
                                      y_real.shape)

                f2._prox = prox
                # solver
                solver = solvers.douglas_rachford(step=0.1)
                x0 = np.array(y_real)  #[n,1]
                ret = solvers.solve([f1, f2], x0, solver)
                # output
                out_img = ret['sol']  #[n,1]
        elif hparams.model_type == 'compressing':
            assert hparams.type_measurements == 'circulant'
            if hparams.image_mode == '3D':
                out_img_list = []
                for i in range(x_real.shape[-1]):
                    # construct observation
                    g = circulant_np
                    y_real = g(x_real[:, :, i].reshape(-1, 1))  #[n,1] -> [n,1]
                    # tv norm
                    f1 = functions.norm_tv(dim=1)
                    # L2 norm
                    tau = hparams.tau
                    f2 = functions.norm_l2(y=y_real, A=g, lambda_=tau)
                    # optimisation solver
                    A_real = np.random.normal(
                        size=(int(hparams.num_measurements / 3), sig_shape))
                    step = 0.5 / np.linalg.norm(A_real, ord=2)**2
                    solver = solvers.forward_backward(
                        step=step
                    )  #solver = solvers.forward_backward(step=0.5/tau)
                    # initialisation
                    x0 = np.array(y_real)  #[n,1]
                    # output
                    ret = solvers.solve([f1, f2],
                                        x0,
                                        solver,
                                        rtol=1e-4,
                                        maxit=3000)  #output = ret['sol']
                    out_img_piece = ret['sol'].reshape(
                        x_real.shape[0], x_real.shape[1])  #[178,218]
                    out_img_list.append(out_img_piece)
                out_img = np.transpose(np.array(out_img_list), (1, 2, 0))
            else:
                # construct observation
                g = circulant_np
                y_real = g(x_real.reshape(-1, 1))  #[n,1] -> [n,1]
                # tv norm
                f1 = functions.norm_tv(dim=1)
                # L2 norm
                tau = hparams.tau
                f2 = functions.norm_l2(y=y_real, A=g, lambda_=tau)
                # optimisation solver
                A_real = np.random.normal(size=(hparams.num_measurements,
                                                sig_shape))
                step = 0.5 / np.linalg.norm(A_real, ord=2)**2
                solver = solvers.forward_backward(
                    step=step
                )  #solver = solvers.forward_backward(step=0.5/tau)
                # initialisation
                x0 = np.array(y_real)  #[n,1]
                # output
                ret = solvers.solve([f1, f2],
                                    x0,
                                    solver,
                                    rtol=1e-4,
                                    maxit=3000)  #output = ret['sol']
                out_img = ret['sol']  #[n,1]

    ## === Lasso wavelet === ##
    elif hparams.decoder_type == 'lasso_wavelet':
        # Construct lasso wavelet functions
        def solve_lasso(A_val, y_val, hparams):  #(n,m), (1,m)
            if hparams.lasso_solver == 'sklearn':
                lasso_est = Lasso(alpha=hparams.lmbd)
                lasso_est.fit(A_val.T, y_val.reshape(hparams.num_measurements))
                x_hat = lasso_est.coef_
                x_hat = np.reshape(x_hat, [-1])
            elif hparams.lasso_solver == 'cvxopt':
                A_mat = matrix(A_val.T)  #[m,n]
                y_mat = matrix(y_val.T)  ###
                x_hat_mat = l1regls(A_mat, y_mat)
                x_hat = np.asarray(x_hat_mat)
                x_hat = np.reshape(x_hat, [-1])  #[n, ]
            elif hparams.lasso_solver == 'pyunlocbox':
                tau = hparams.tau
                f1 = functions.norm_l1(lambda_=tau)
                f2 = functions.norm_l2(y=y_val.T, A=A_val.T)
                if hparams.model_type == 'compressing':
                    if hparams.image_mode == '3D':
                        A_real = np.random.normal(
                            size=(int(hparams.num_measurements / 3),
                                  sig_shape))
                    else:
                        A_real = np.random.normal(
                            size=(hparams.num_measurements, sig_shape))
                    step = 0.5 / np.linalg.norm(A_real, ord=2)**2
                else:
                    step = 0.5 / np.linalg.norm(A_val, ord=2)**2
                solver = solvers.forward_backward(step=step)
                x0 = np.zeros((sig_shape, 1))
                ret = solvers.solve([f1, f2],
                                    x0,
                                    solver,
                                    rtol=1e-4,
                                    maxit=3000)
                x_hat_mat = ret['sol']
                x_hat = np.asarray(x_hat_mat)
                x_hat = np.reshape(x_hat, [-1])  #[n, ]
            return x_hat

        #generate basis
        def generate_basis(size):
            """generate the basis"""
            x = np.zeros((size, size))
            coefs = pywt.wavedec2(x, 'db1')
            n_levels = len(coefs)
            basis = []
            for i in range(n_levels):
                coefs[i] = list(coefs[i])
                n_filters = len(coefs[i])
                for j in range(n_filters):
                    for m in range(coefs[i][j].shape[0]):
                        try:
                            for n in range(coefs[i][j].shape[1]):
                                coefs[i][j][m][n] = 1
                                temp_basis = pywt.waverec2(coefs, 'db1')
                                basis.append(temp_basis)
                                coefs[i][j][m][n] = 0
                        except IndexError:
                            coefs[i][j][m] = 1
                            temp_basis = pywt.waverec2(coefs, 'db1')
                            basis.append(temp_basis)
                            coefs[i][j][m] = 0
            basis = np.array(basis)
            return basis
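
        # generate_basis sets one wavelet coefficient to 1 at a time and
        # inverse-transforms, so each basis element is one synthesis atom.
        # Sanity check (hypothetical, since the 'db1' wavelet is orthonormal):
        #   W = generate_basis(8).reshape(64, 64)
        #   assert np.allclose(W @ W.T, np.eye(64))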

        def wavelet_basis(path_):
            if path_ == 'Ieeg_signal':
                W_ = generate_basis(32)
                W_ = W_.reshape((1024, 1024))
            elif path_ == 'Celeb_signal':
                W_ = generate_basis(128)
                W_ = W_.reshape((16384, 16384))
            else:
                W_ = generate_basis(64)
                W_ = W_.reshape((4096, 4096))
            return W_
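
        # generate_basis(s) returns s*s images of shape (s, s); the reshape
        # flattens each image into one row, giving the (n, n) synthesis
        # matrix with n = s**2 (1024, 16384, or 4096 here).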

        def lasso_wavelet_estimator(A_val, y_val, hparams):  #(n,m), (1,m)
            W = wavelet_basis(hparams.path)  #[n,n]
            if not callable(A_val):
                WA = np.dot(W, A_val)  #[n,n] * [n,m] = [n,m]
            else:
                WA = np.array([
                    A_val(W[i, :].reshape(-1, 1)).reshape(-1)
                    for i in range(len(W))
                ])  #[n,n] -> [n,n]
            z_hat = solve_lasso(WA, y_val, hparams)  # [n, ]
            x_hat = np.dot(z_hat, W)  #[n, ] * [n,n] = [n, ]
            x_hat_max = np.abs(x_hat).max()
            x_hat = x_hat / (1.0 * x_hat_max)
            return x_hat
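
        # The estimator works in coefficient space: it solves the lasso for
        # z with the composed operator WA, synthesises x_hat = z_hat @ W
        # (each row of W is one basis image), and rescales the result to
        # unit peak amplitude.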

        # Construct inpainting masks
        def get_A_inpaint(mask_p):
            mask = mask_p.reshape(1, -1)
            A = np.eye(np.prod(mask.shape)) * np.tile(mask,
                                                      [np.prod(mask.shape), 1])
            A = np.asarray([a for a in A if np.sum(a) != 0])
            # Scale so that the squared norm of each kept row equals sig_shape.
            A = np.sqrt(sig_shape) * A
            assert all(np.abs(np.sum(A**2, 1) - sig_shape) < 1e-6)
            return A.T
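
        # Worked illustration (hypothetical): with sig_shape = 4 and
        # mask = [1, 0, 1, 0], the masked identity keeps two nonzero rows,
        # so get_A_inpaint returns a (4, 2) matrix scaled by sqrt(4) = 2,
        # and y = x.reshape(1, -1) @ A_val has shape (1, 2).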

        # Perform reconstruction
        if hparams.model_type == 'inpainting':
            # measurements and observation
            A_val = get_A_inpaint(mask)  #(n,m)
            if hparams.image_mode == '3D':
                out_img_list = []
                for i in range(x_real.shape[-1]):
                    y_real = np.matmul(x_real[:, :, i].reshape(1, -1),
                                       A_val)  #(1,m)
                    out_img_piece = lasso_wavelet_estimator(
                        A_val, y_real, hparams)
                    out_img_piece = out_img_piece.reshape(
                        x_real.shape[0], x_real.shape[1])
                    out_img_list.append(out_img_piece)
                out_img = np.transpose(np.array(out_img_list), (1, 2, 0))
            elif hparams.image_mode == '1D':
                y_real = np.matmul(x_real.reshape(1, -1), A_val)  #(1,m)
                out_img = lasso_wavelet_estimator(A_val, y_real, hparams)
                out_img = out_img.reshape(-1, 1)
        elif hparams.model_type == 'denoising':
            assert hparams.type_measurements == 'identity'
            A_val = A  #(n,n)
            if hparams.image_mode == '3D':
                out_img_list = []
                for i in range(x_real.shape[-1]):
                    y_real = x_real[:, :, i].reshape(1, -1) + observ_noise.T
                    out_img_piece = lasso_wavelet_estimator(
                        A_val, y_real, hparams)
                    out_img_piece = out_img_piece.reshape(
                        x_real.shape[0], x_real.shape[1])
                    out_img_list.append(out_img_piece)
                out_img = np.transpose(np.array(out_img_list), (1, 2, 0))
            elif hparams.image_mode == '1D':
                y_real = np.matmul(x_real.reshape(1, -1),
                                   A_val) + observ_noise.T
                out_img = lasso_wavelet_estimator(A_val, y_real, hparams)
                out_img = out_img.reshape(-1, 1)
        elif hparams.model_type == 'compressing':
            assert hparams.type_measurements == 'circulant'
            A_val = circulant_np
            if hparams.image_mode == '3D':
                out_img_list = []
                for i in range(x_real.shape[-1]):
                    y_real = A_val(x_real[:, :, i].reshape(-1, 1)).reshape(
                        1, -1)  #[n,1] -> [1,n]
                    out_img_piece = lasso_wavelet_estimator(
                        A_val, y_real, hparams)
                    out_img_piece = out_img_piece.reshape(
                        x_real.shape[0], x_real.shape[1])
                    out_img_list.append(out_img_piece)
                out_img = np.transpose(np.array(out_img_list), (1, 2, 0))
            elif hparams.image_mode == '1D':
                y_real = A_val(x_real).reshape(1, -1)  #[n,1] -> [1,n]
                out_img = lasso_wavelet_estimator(A_val, y_real, hparams)
                out_img = out_img.reshape(-1, 1)

    ## === Printer === ##
    # Compute the L2 loss and the corresponding PSNR
    # if hparams.image_mode == '3D' and hparams.model_type != 'inpainting':
    #     x_real = x_real.reshape(-1,1)
    l2_losses = get_l2_loss(out_img, x_real, hparams.image_mode,
                            hparams.decoder_type)
    psnr = 10 * np.log10(1. / l2_losses)  # PSNR = 10*log10(MAX**2/MSE) with peak MAX = 1

    # Printer info
    if hparams.model_type == 'inpainting':
        if hparams.image_mode == '1D':
            mask_info = hparams.mask_name_1D[8:-4]
        elif hparams.image_mode == '2D' or hparams.image_mode == '3D':
            mask_info = hparams.mask_name_2D[8:-4]
        type_mea_info = 'NA'
        num_mea_info = 'NA'
        noise_level_info = 'NA'
    elif hparams.model_type == 'compressing':
        mask_info = 'NA'
        type_mea_info = hparams.type_measurements
        num_mea_info = str(hparams.num_measurements)
        noise_level_info = 'NA'
    elif hparams.model_type == 'denoising':
        mask_info = 'NA'
        type_mea_info = 'NA'
        num_mea_info = 'NA'
        noise_level_info = str(hparams.noise_level)

    # Print result
    print(
        'Final representation PSNR for img_name:{}, model_type:{}, type_mea:{}, num_mea:{}, mask:{}, decoder:{}, tau:{}, noise:{} is {}'
        .format(hparams.img_name, hparams.model_type, type_mea_info,
                num_mea_info, mask_info, hparams.decoder_type, hparams.tau,
                noise_level_info, psnr))
    print('END')
    print('\t')
    #sys.stdout.close()

    ## == to pd frame == ##
    if hparams.pickle == 1:
        pickle_file_path = hparams.pickle_file_path
        if not os.path.exists(pickle_file_path):
            d = {
                'img_name': [hparams.img_name],
                'model_type': [hparams.model_type],
                'type_mea': [type_mea_info],
                'num_mea': [num_mea_info],
                'mask_info': [mask_info],
                'decoder_type': [hparams.decoder_type],
                'tau': [hparams.tau],
                'noise': [noise_level_info],
                'psnr': [psnr]
            }
            df = pd.DataFrame(data=d)
            df.to_pickle(pickle_file_path)
        else:
            d = {
                'img_name': hparams.img_name,
                'model_type': hparams.model_type,
                'type_mea': type_mea_info,
                'num_mea': num_mea_info,
                'mask_info': mask_info,
                'decoder_type': hparams.decoder_type,
                'tau': hparams.tau,
                'noise': noise_level_info,
                'psnr': psnr
            }
            df = pd.read_pickle(pickle_file_path)
            # DataFrame.append was removed in pandas 2.0; concat a one-row frame.
            df = pd.concat([df, pd.DataFrame([d])], ignore_index=True)
            df.to_pickle(pickle_file_path)

    ## === Save === ##
    if hparams.save == 1:
        save_out_img(out_img, 'result/', hparams.img_name,
                     hparams.decoder_type, hparams.model_type, num_mea_info,
                     mask_info, noise_level_info, hparams.image_mode)