Example #1
    def least_squares(self,
                      iterations=1,
                      L=None,
                      tau=None,
                      sigma=None,
                      theta=None,
                      non_negativiy_constraint=False,
                      tv_norm=False,
                      verbose=True):
        """Least-squares problem with optional TV-regularisation and/or
        non-negativity constraint.

        Parameters
        ----------
        :type iterations: int (default 1)
        :param iterations: Number of iterations the optimization should
        run for.
        :type L: float (default None)
        :param L: Matrix norm of the forward projector. If None, matrix_norm
        is called with 20 iterations.
        :type tau: float (default 1/L)
        :param tau: Primal step size.
        :type sigma: float (default 1/L)
        :param sigma: Dual step size.
        :type theta: float (default 1)
        :param theta: Relaxation parameter for the extrapolation step.
        :type non_negativiy_constraint: bool (default False)
        :param non_negativiy_constraint: Add a non-negativity constraint to
        the optimization problem (via an indicator function).
        :type tv_norm: bool | float (default False)
        :param tv_norm: Unless False, the numerical value of the
        regularisation parameter lambda for TV regularisation.
        :type verbose: bool (default True)
        :param verbose: Show intermediate reconstructions and
        convergence measures during iteration.

        Returns
        -------
        :rtype: odl.Vector, odl.Vector, numpy.ndarray, numpy.ndarray
        :returns: u, p, cpd, l2_du
         u: vector of reconstructed volume
         p: vector of dual projection variable
         cpd: conditional primal-dual gap (convergence measure)
         l2_du: l2-norm of the volume update (convergence measure)
        """

        # step 1:
        if L is None:
            L = self.matrix_norm(20)
        if tau is None:
            tau = 1 / L
        if sigma is None:
            sigma = 1 / L
        if theta is None:
            theta = 1

        # print 'tau:', tau
        # print 'sigma:', sigma
        # print 'theta:', theta

        geom = self.geom
        g = self.proj  # domain: D

        # l2-norm of (volume update / tau)
        l2_du = np.zeros(iterations)
        # conditional primal-dual gap
        cpd = np.zeros(iterations)

        # step 2: initialize u and p with zeros
        u = self.recon_space.zero()  # domain: I
        p = g.space.zero()  # domain: D
        # q: spatial vector = list of ndarrays in I (not Rn vectors)
        if tv_norm:
            ndim = geom.vol_ndim
            # domain of q: V = [I, I, ...]
            q = [
                np.zeros(geom.vol_shape, dtype=u.data.dtype)
                for _ in range(ndim)
            ]

        # step 3: ub <- u
        ub = u.copy()  # domain: I

        # initialize projector
        # A = Projector(geom, u.space, p.space)
        A = Projector(geom)

        # visual output instance
        disp = DisplayIntermediates(verbose=verbose,
                                    vol=u.data.reshape(geom.vol_shape),
                                    cpd=cpd,
                                    l2_du=l2_du)

        # step 4: repeat
        for n in range(iterations):

            # step 5: p_{n+1} <- (p_n + sigma(A ub_n - g)) / (1 + sigma)
            if n >= 0:
                # p <- p + sigma * (A ub - g)
                p += sigma * (A.forward(ub) - g)
            else:
                p -= sigma * g
            # p <- p / (1 + sigma)
            p /= 1 + sigma

            # TV step 6: q_{n+1} <- lambda(q_n + sigma grad ub_n) /
            # max(lambda 1_I, |q_n + sigma grad ub_n|)
            if tv_norm:

                for dim in range(ndim):
                    # q_n <- q_n + sigma * grad ub_n
                    q[dim] += sigma * partial(
                        ub.data.reshape(self.geom.vol_shape), dim,
                        geom.voxel_width[dim])

                # |q_n|: isotropic TV
                # use div_q to save memory, q = [qi] where qi are ndarrays
                div_q = np.sqrt(reduce(add, (qi**2 for qi in q)))

                # max(lambda 1_I, |q_n + sigma diff ub_n|)
                # print 'q_mag:', div_q.min(), div_q.max()
                div_q[div_q < tv_norm] = tv_norm

                # q_n <- lambda * q_n / |q_n|
                for dim in range(ndim):
                    q[dim] /= div_q
                    q[dim] *= tv_norm

                # div q_{n+1}
                div_q = reduce(add, (partial(qi, dim, geom.voxel_width[dim])
                                     for (dim, qi) in enumerate(q)))
                div_q *= tau

            # step 6: u_{n+1} <- u_{n} - tau * A^T p_{n+1}
            # TV step 7: u_{n+1} <- u_{n} - tau * A^T p_{n+1} + div q_{n+1}
            # ub_tmp <- A^T p
            ub_tmp = A.backward(p)
            ub_tmp *= tau
            ub_tmp *= self.adj_scal_fac
            # l2-norm of the volume update ub_tmp = tau * A^T p
            l2_du[n:] = ub_tmp.norm()  # / u.data.size
            if tv_norm:
                l2_du[n:] += np.linalg.norm(div_q.ravel())  # / u.data.size
            # store current u_n temporarily in ub_n
            ub = -u.copy()
            # u <- u - tau ub_tmp
            u -= ub_tmp
            # TV: u <- u + tau div q
            if tv_norm:
                print('{0}: u - A^T p: min = {1}, max = {2}'.format(
                    n, u.data.min(), u.data.max()))
                print('{0}: div q: min = {1}, max = {2}'.format(
                    n, div_q.min(), div_q.max()))
                u.data[:] += div_q.ravel()

            # Positivity constraint
            if non_negativiy_constraint:
                u.data[u.data < 0] = 0
                # print '\nu:', u.data.min(), u.data.max()

            # conditional primal-dual gap for current u and p
            # 1/2||A u - g||_2^2 + 1/2||p||_2^2 + <p,g>_D
            # p_tmp <- A u
            # p_tmp = A.forward(u)
            # p_tmp -= g
            # cpd[n:] = (0.5 * p_tmp.norm() ** 2 +
            cpd[n:] = (0.5 * p.space.norm(A.forward(u) - g)**2 +
                       0.5 * p.norm()**2 + p.inner(g))  # / p.data.size
            if tv_norm:
                cpd[n:] += tv_norm * np.linalg.norm(reduce(
                    add, (partial(u.data.reshape(geom.vol_shape), dim,
                                  geom.voxel_width[dim])
                          for dim in range(geom.vol_ndim))).ravel(),
                                                    ord=1)  # / u.data.size

            # step 7 / TV step 8: ub_{n+1} <- u_{n+1} + theta(u_{n+1} - u_n)
            # ub <- ub + u_{n+1}, remember ub = -u_n
            ub += u
            # ub <- theta * ub
            ub *= theta
            # ub <- ub + u_{n+1}
            ub += u

            # visual output
            disp.update()

        A.clear_astra_memory()

        # Should avoid window freezing
        disp.show()

        return u, p, cpd, l2_du
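
The TV steps above (the dual update of q in TV step 6 and the divergence term used in TV step 7) are easier to see in isolation. Below is a minimal NumPy sketch of the same update under the assumption that `partial` computes a per-axis finite difference; np.gradient serves here as a simple stand-in, and all names are illustrative rather than taken from the code above.

import numpy as np

def tv_dual_update(q, ub, sigma, lam, voxel_width):
    """One isotropic-TV dual update plus the divergence of the rescaled q."""
    ndim = ub.ndim
    # q <- q + sigma * grad(ub), one partial derivative per dimension
    grads = np.gradient(ub, *voxel_width)
    for dim in range(ndim):
        q[dim] += sigma * grads[dim]
    # isotropic magnitude |q|, clipped from below at lambda
    mag = np.sqrt(sum(qi ** 2 for qi in q))
    mag[mag < lam] = lam
    # q <- lambda * q / max(lambda, |q|)
    for dim in range(ndim):
        q[dim] *= lam / mag
    # div q: sum of the partial derivatives of the components of q
    div_q = sum(np.gradient(q[dim], voxel_width[dim], axis=dim)
                for dim in range(ndim))
    return q, div_q

# tiny 2D usage example
ub = np.random.rand(8, 8)
q = [np.zeros_like(ub) for _ in range(ub.ndim)]
q, div_q = tv_dual_update(q, ub, sigma=0.5, lam=0.1, voxel_width=(1.0, 1.0))
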
Example #2
    def least_squares(self, num_iterations=1, L=None, tau=None, sigma=None,
                      theta=None, non_negativiy_constraint=False,
                      verbose=True):
        """Least-squares problem, unconstrained or with
        non-negativity constraint.

        Parameters
        ----------
        :type num_iterations: int (default 1)
        :param num_iterations: Number of iterations the optimization should
        run for.
        :type L: float (default None)
        :param L: Matrix norm of the forward projector. If None, matrix_norm
        is called with 20 iterations.
        :type tau: float (default 1/L)
        :param tau: Primal step size.
        :type sigma: float (default 1/L)
        :param sigma: Dual step size.
        :type theta: float (default 1)
        :param theta: Relaxation parameter for the extrapolation step.
        :type non_negativiy_constraint: bool (default False)
        :param non_negativiy_constraint: Add a non-negativity constraint to
        the optimization problem (via an indicator function).
        :type verbose: bool (default True)
        :param verbose: Show intermediate reconstructions and
        convergence measures during iteration.

        Returns
        -------
        :returns: u, p, cpd, l2_atp -- the reconstructed volume, the dual
        projection variable, the conditional primal-dual gap and the l2-norm
        of A^T p (the latter two as per-iteration convergence measures).

        Notes
        -----
        Consider the unconstrained least-squares problem, i.e. a
        quadratic error function only. The primal problem reads:

            min_u 1/2 || A u - g ||_2^2.    (11)

        Association with the primal problem (1) (see the class docstring):

            F(y) = 1/2  || y - g ||_2^2.    (12)
            G(x) = 0,    (13)
            x = u, y = A u,    (14)
            K = A.    (15)

        From equation (3), the convex conjugates of F and G are obtained:

            F^*(p) = 1/2 || p ||_2^2 + <p,g>_D,    (16)
            G^*(q) = max_x <q,x>_I = delta_{0_I}(q),    (17)

        where p in D and q in I denote the respective dual variables.

        The optimization problem dual to equation (11) reads:

            max_p { -1/2||p||_2^2 - <p,g>_D - delta_{0_I}(-A^T p) }.    (19)

        The proximal mappings for y in D and x in I read:

            prox_sigma[F^*](y) = arg min_{y'} { 1/2||y'||_2^2 + <y',g>_D
                                  + 1/(2sigma)||y-y'||_2^2 }     (22)

                                = (y - sigma g) / (1 + sigma) ,

            prox_tau[G](x) = x .    (23)

        The conditional primal-dual gap, i.e. the difference between the
        primal and dual objectives (ignoring the indicator function), for
        estimates u' and p' reads:

            cPD(u',p') = 1/2||A u' - g||_2^2 + 1/2||p'||_2^2 + <p',g>_D .  (21)

        cPD need not be positive, but it should tend to zero. Also monitor
        A^T p', which should tend to 0_I.
        """

        # step 0:
        g = self.y
        # l2-norm of A^T p' with intermediate result p' = p
        l2_atp = np.zeros(num_iterations)
        # conditional primal-dual gap
        cpd = np.zeros(num_iterations)

        # step 1:
        if L is None:
            L = self.matrix_norm(20)
        if tau is None:
            tau = 1 / L
        if sigma is None:
            sigma = 1 / L
        if theta is None:
            theta = 1

        # step 2:
        u = np.zeros(self.K.num_voxel, dtype=np.float32)
        p = np.zeros((self.K.det_col_count,
                      len(self.K.angles), self.K.det_row_count),
                     dtype=np.float32)

        # step 3:
        ub = np.zeros_like(u)

        u_size = u.size
        p_size = p.size

        # visual output
        disp = DisplayIntermediates(verbose=verbose, vol=u, cpd=cpd,
                                    l2_du=l2_atp)

        # step 4: repeat
        for n in range(num_iterations):
            # step 5: p_{n+1}
            if n >= 0:
                self.K.set_volume_data(ub)
                self.K.forward()
                print('p:', p.shape, 'g:', g.shape,
                      'proj:', self.K.projection_data.shape)
                p += sigma * (self.K.projection_data - g)
            else:
                p += sigma * (- g)
            p /= 1 + sigma

            # step 6:
            # A^T p_{n+1}
            self.K.set_projection_data(p)
            self.K.backward()
            # l2-norm of A^T p
            l2_atp[n:] = np.linalg.norm(np.ravel(self.K.volume_data)) / u_size
            # Use 'ub' as temporary memory for 'u_n'
            ub = u.copy()
            # u_{n+1} = u_{n} - tau * A^T p_{n+1}
            u -= tau * self.K.volume_data
            if non_negativiy_constraint:
                u[u < 0] = 0

            # conditional primal-dual gap:
            # 1/2||A u-g||_2^2 + 1/2||p||_2^2 + <p,g>_D
            self.K.set_volume_data(u)
            self.K.forward()
            cpd[n:] = (0.5 *
                       np.linalg.norm(
                           np.ravel(self.K.projection_data - g)) ** 2 +
                       0.5 * np.linalg.norm(np.ravel(p)) ** 2 +
                       np.sum(np.ravel(p * g))) / p_size

            # step 7:
            ub = u + theta * (u - ub)

            # visual output
            disp.update()

        self.K.clear()
        disp.show()

        return u, p, cpd, l2_atp
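
The iteration in this example maps one-to-one onto the docstring: step 5 is the prox of F^*, step 6 the gradient step with the optional non-negativity projection, and step 7 the over-relaxation. A self-contained toy version with a dense matrix standing in for the ASTRA projector, including the conditional primal-dual gap, might look as follows; every name here is illustrative and not taken from the code above.

import numpy as np

def cp_least_squares(A, g, iterations=200, non_negative=False):
    # step 1: L = ||A||_2, tau = sigma = 1/L, theta = 1 (the defaults above)
    L = np.linalg.norm(A, 2)
    tau = sigma = 1.0 / L
    theta = 1.0
    # steps 2/3: zero initialisation of primal, dual and extrapolated variables
    u = np.zeros(A.shape[1])
    p = np.zeros(A.shape[0])
    ub = u.copy()
    cpd = np.zeros(iterations)
    for n in range(iterations):
        # step 5: prox of F^*, p <- (p + sigma (A ub - g)) / (1 + sigma)
        p = (p + sigma * (A @ ub - g)) / (1.0 + sigma)
        # step 6: u <- u - tau A^T p, optionally projected onto u >= 0
        u_old = u.copy()
        u = u - tau * (A.T @ p)
        if non_negative:
            u = np.maximum(u, 0.0)
        # conditional primal-dual gap: 1/2||A u - g||^2 + 1/2||p||^2 + <p, g>
        cpd[n] = (0.5 * np.linalg.norm(A @ u - g) ** 2
                  + 0.5 * np.linalg.norm(p) ** 2 + p @ g)
        # step 7: over-relaxation, ub <- u + theta (u - u_old)
        ub = u + theta * (u - u_old)
    return u, p, cpd

# toy problem with consistent data: cpd should tend towards zero
rng = np.random.default_rng(0)
A = rng.standard_normal((60, 30))
g = A @ rng.random(30)
u, p, cpd = cp_least_squares(A, g)
print(cpd[0], cpd[-1])
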
Example #3
    def least_squares(self, iterations=1, L=None, tau=None, sigma=None,
                      theta=None, non_negativiy_constraint=False,
                      tv_norm=False,
                      verbose=True):
        """Least-squares problem with optional TV-regularisation and/or
        non-negativity constraint.

        Parameters
        ----------
        :type iterations: int (default 1)
        :param iterations: Number of iterations the optimization should
        run for.
        :type L: float (default None)
        :param L: Matrix norm of the forward projector. If None, matrix_norm
        is called with 20 iterations.
        :type tau: float (default 1/L)
        :param tau: Primal step size.
        :type sigma: float (default 1/L)
        :param sigma: Dual step size.
        :type theta: float (default 1)
        :param theta: Relaxation parameter for the extrapolation step.
        :type non_negativiy_constraint: bool (default False)
        :param non_negativiy_constraint: Add a non-negativity constraint to
        the optimization problem (via an indicator function).
        :type tv_norm: bool | float (default False)
        :param tv_norm: Unless False, the numerical value of the
        regularisation parameter lambda for TV regularisation.
        :type verbose: bool (default True)
        :param verbose: Show intermediate reconstructions and
        convergence measures during iteration.

        Returns
        -------
        :rtype: odl.Vector, odl.Vector, numpy.ndarray, numpy.ndarray
        :returns: u, p, cpd, l2_du
         u: vector of reconstructed volume
         p: vector of dual projection variable
         cpd: conditional primal-dual gap (convergence measure)
         l2_du: l2-norm of the volume update (convergence measure)
        """

        # step 1:
        if L is None:
            L = self.matrix_norm(20)
        if tau is None:
            tau = 1 / L
        if sigma is None:
            sigma = 1 / L
        if theta is None:
            theta = 1

        # print 'tau:', tau
        # print 'sigma:', sigma
        # print 'theta:', theta

        geom = self.geom
        g = self.proj  # domain: D

        # l2-norm of (volume update / tau)
        l2_du = np.zeros(iterations)
        # conditional primal-dual gap
        cpd = np.zeros(iterations)

        # step 2: initialize u and p with zeros
        u = self.recon_space.zero()  # domain: I
        p = g.space.zero()  # domain: D
        # q: spatial vector = list of ndarrays in I (not Rn vectors)
        if tv_norm:
            ndim = geom.vol_ndim
            # domain of q: V = [I, I, ...]
            q = [np.zeros(geom.vol_shape, dtype=u.data.dtype) for _ in range(
                ndim)]

        # step 3: ub <- u
        ub = u.copy()  # domain: I

        # initialize projector
        # A = Projector(geom, u.space, p.space)
        A = Projector(geom)

        # visual output instance
        disp = DisplayIntermediates(verbose=verbose, vol=u.data.reshape(
            geom.vol_shape), cpd=cpd, l2_du=l2_du)

        # step 4: repeat
        for n in range(iterations):

            # step 5: p_{n+1} <- (p_n + sigma(A ub_n - g)) / (1 + sigma)
            if n >= 0:
                # p <- p + sigma * (A ub - g)
                p += sigma * (A.forward(ub) - g)
            else:
                p -= sigma * g
            # p <- p / (1 + sigma)
            p /= 1 + sigma

            # TV step 6: q_{n+1} <- lambda(q_n + sigma grad ub_n) /
            # max(lambda 1_I, |q_n + sigma grad ub_n|)
            if tv_norm:

                for dim in range(ndim):
                    # q_n <- q_n + sigma * grad ub_n
                    q[dim] += sigma * partial(ub.data.reshape(
                        self.geom.vol_shape), dim, geom.voxel_width[dim])

                # |q_n|: isotropic TV
                # use div_q to save memory, q = [qi] where qi are ndarrays
                div_q = np.sqrt(reduce(add, (qi ** 2 for qi in q)))

                # max(lambda 1_I, |q_n + sigma diff ub_n|)
                # print 'q_mag:', div_q.min(), div_q.max()
                div_q[div_q < tv_norm] = tv_norm

                # q_n <- lambda * q_n / |q_n|
                for dim in range(ndim):
                    q[dim] /= div_q
                    q[dim] *= tv_norm

                # div q_{n+1}
                div_q = reduce(add, (partial(qi, dim, geom.voxel_width[dim])
                                     for (dim, qi) in enumerate(q)))
                div_q *= tau

            # step 6: u_{n+1} <- u_{n} - tau * A^T p_{n+1}
            # TV step 7: u_{n+1} <- u_{n} - tau * A^T p_{n+1} + div q_{n+1}
            # ub_tmp <- A^T p
            ub_tmp = A.backward(p)
            ub_tmp *= tau
            ub_tmp *= self.adj_scal_fac
            # l2-norm of the volume update ub_tmp = tau * A^T p
            l2_du[n:] = ub_tmp.norm()  # / u.data.size
            if tv_norm:
                l2_du[n:] += np.linalg.norm(div_q.ravel())  # / u.data.size
            # store current u_n temporarily in ub_n
            ub = -u.copy()
            # u <- u - tau ub_tmp
            u -= ub_tmp
            # TV: u <- u + tau div q
            if tv_norm:
                print('{0}: u - A^T p: min = {1}, max = {2}'.format(
                    n, u.data.min(), u.data.max()))
                print('{0}: div q: min = {1}, max = {2}'.format(
                    n, div_q.min(), div_q.max()))
                u.data[:] += div_q.ravel()

            # Positivity constraint
            if non_negativiy_constraint:
                u.data[u.data < 0] = 0
                # print '\nu:', u.data.min(), u.data.max()

            # conditional primal-dual gap for current u and p
            # 1/2||A u - g||_2^2 + 1/2||p||_2^2 + <p,g>_D
            # p_tmp <- A u
            # p_tmp = A.forward(u)
            # p_tmp -= g
            # cpd[n:] = (0.5 * p_tmp.norm() ** 2 +
            cpd[n:] = (0.5 * p.space.norm(A.forward(u) - g) ** 2 +
                       0.5 * p.norm() ** 2 +
                       p.inner(g))  # / p.data.size
            if tv_norm:
                cpd[n:] += tv_norm * np.linalg.norm(
                    reduce(add, (partial(u.data.reshape(geom.vol_shape),
                                         dim, geom.voxel_width[dim]) for dim
                                 in range(geom.vol_ndim))
                           ).ravel(), ord=1)  # / u.data.size

            # step 7 / TV step 8: ub_{n+1} <- u_{n+1} + theta(u_{n+1} - u_n)
            # ub <- ub + u_{n+1}, remember ub = -u_n
            ub += u
            # ub <- theta * ub
            ub *= theta
            # ub <- ub + u_{n+1}
            ub += u

            # visual output
            disp.update()

        A.clear_astra_memory()

        # Should avoid window freezing
        disp.show()

        return u, p, cpd, l2_du
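
All four examples default L to self.matrix_norm(20), i.e. an estimate of the operator norm of the forward projector obtained in 20 iterations. The implementation of matrix_norm is not shown; presumably it is something along the lines of a power iteration on A^T A, sketched here with a dense stand-in matrix and an illustrative function name.

import numpy as np

def matrix_norm_estimate(A, iterations=20, seed=0):
    """Power iteration on A^T A: estimates the spectral norm ||A||_2."""
    rng = np.random.default_rng(seed)
    x = rng.standard_normal(A.shape[1])
    for _ in range(iterations):
        x = A.T @ (A @ x)
        x /= np.linalg.norm(x)
    # the Rayleigh quotient of A^T A at x approximates its largest eigenvalue,
    # whose square root is ||A||_2
    return np.sqrt(x @ (A.T @ (A @ x)))

A = np.random.default_rng(1).standard_normal((50, 25))
print(matrix_norm_estimate(A), np.linalg.norm(A, 2))  # roughly equal
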
Example #4
    def least_squares(self,
                      num_iterations=1,
                      L=None,
                      tau=None,
                      sigma=None,
                      theta=None,
                      non_negativiy_constraint=False,
                      verbose=True):
        """Least-squares problem, unconstrained or with
        non-negativity constraint.

        Parameters
        ----------
        :type num_iterations: int (default 1)
        :param num_iterations: Number of iterations the optimization should
        run for.
        :type L: float (default None)
        :param L: Matrix norm of the forward projector. If None, matrix_norm
        is called with 20 iterations.
        :type tau: float (default 1/L)
        :param tau: Primal step size.
        :type sigma: float (default 1/L)
        :param sigma: Dual step size.
        :type theta: float (default 1)
        :param theta: Relaxation parameter for the extrapolation step.
        :type non_negativiy_constraint: bool (default False)
        :param non_negativiy_constraint: Add a non-negativity constraint to
        the optimization problem (via an indicator function).
        :type verbose: bool (default True)
        :param verbose: Show intermediate reconstructions and
        convergence measures during iteration.

        Returns
        -------
        :returns: u, p, cpd, l2_atp -- the reconstructed volume, the dual
        projection variable, the conditional primal-dual gap and the l2-norm
        of A^T p (the latter two as per-iteration convergence measures).

        Notes
        -----
        Consider the unconstrained least-squares problem, i.e. a
        quadratic error function only. The primal problem reads:

            min_u 1/2 || A u - g ||_2^2.    (11)

        Association with the primal problem (1) (see the class docstring):

            F(y) = 1/2  || y - g ||_2^2.    (12)
            G(x) = 0,    (13)
            x = u, y = A u,    (14)
            K = A.    (15)

        From equation (3), the convex conjugates of F and G are obtained:

            F^*(p) = 1/2 || p ||_2^2 + <p,g>_D,    (16)
            G^*(q) = max_x <q,x>_I = delta_{0_I}(q),    (17)

        where p in D and q in I denote the respective dual variables.

        The optimization problem dual to equation (11) reads:

            max_p { -1/2||p||_2^2 - <p,g>_D - delta_{0_I}(-A^T p) }.    (19)

        The proximal mappings for y in D and x in I read:

            prox_sigma[F^*](y) = arg min_{y'} { 1/2||y'||_2^2 + <y',g>_D
                                  + 1/(2sigma)||y-y'||_2^2 }     (22)

                                = (y - sigma g) / (1 + sigma) ,

            prox_tau[G](x) = x .    (23)

        The conditional primal-dual gap, i.e. the difference between the
        primal and dual objectives (ignoring the indicator function), for
        estimates u' and p' reads:

            cPD(u',p') = 1/2||A u' - g||_2^2 + 1/2||p'||_2^2 + <p',g>_D .  (21)

        cPD need not be positive, but it should tend to zero. Also monitor
        A^T p', which should tend to 0_I.
        """

        # step 0:
        g = self.y
        # l2-norm of A^T p' with intermediate result p' = p
        l2_atp = np.zeros(num_iterations)
        # conditional primal-dual gap
        cpd = np.zeros(num_iterations)

        # step 1:
        if L is None:
            L = self.matrix_norm(20)
        if tau is None:
            tau = 1 / L
        if sigma is None:
            sigma = 1 / L
        if theta is None:
            theta = 1

        # step 2:
        u = np.zeros(self.K.num_voxel, dtype=np.float32)
        p = np.zeros(
            (self.K.det_col_count, len(self.K.angles), self.K.det_row_count),
            dtype=np.float32)

        # step 3:
        ub = np.zeros_like(u)

        u_size = u.size
        p_size = p.size

        # visual output
        disp = DisplayIntermediates(verbose=verbose,
                                    vol=u,
                                    cpd=cpd,
                                    l2_du=l2_atp)

        # step 4: repeat
        for n in range(num_iterations):
            # step 5: p_{n+1}
            if n >= 0:
                self.K.set_volume_data(ub)
                self.K.forward()
                print('p:', p.shape, 'g:', g.shape,
                      'proj:', self.K.projection_data.shape)
                p += sigma * (self.K.projection_data - g)
            else:
                p += sigma * (-g)
            p /= 1 + sigma

            # step 6:
            # A^T p_{n+1}
            self.K.set_projection_data(p)
            self.K.backward()
            # l2-norm of A^T p
            l2_atp[n:] = np.linalg.norm(np.ravel(self.K.volume_data)) / u_size
            # Use 'ub' as temporary memory for 'u_n'
            ub = u.copy()
            # u_{n+1} = u_{n} - tau * A^T p_{n+1}
            u -= tau * self.K.volume_data
            if non_negativiy_constraint:
                u[u < 0] = 0

            # conditional primal-dual gap:
            # 1/2||A u-g||_2^2 + 1/2||p||_2^2 + <p,g>_D
            self.K.set_volume_data(u)
            self.K.forward()
            cpd[n:] = (
                0.5 * np.linalg.norm(np.ravel(self.K.projection_data - g))**2 +
                0.5 * np.linalg.norm(np.ravel(p))**2 +
                np.sum(np.ravel(p * g))) / p_size

            # step 7:
            ub = u + theta * (u - ub)

            # visual output
            disp.update()

        self.K.clear()
        disp.show()

        return u, p, cpd, l2_atp
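
In every example the non-negativity constraint is imposed via the indicator function of the non-negative orthant; its proximal mapping is simply the componentwise projection, which is exactly what u[u < 0] = 0 implements in the loops above. As a tiny sketch:

import numpy as np

def prox_nonneg(u):
    # prox of the indicator of {u >= 0}: Euclidean projection onto the orthant
    return np.maximum(u, 0.0)

print(prox_nonneg(np.array([-0.5, 0.0, 1.2])))  # -> [0.  0.  1.2]
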