Example 1
File: h2.py Project: pymor/pymor
class OneSidedIRKAReductor(BasicInterface):
    """One-Sided Iterative Rational Krylov Algorithm reductor.

    Parameters
    ----------
    fom
        The full-order |LTIModel| to reduce.
    version
        Version of the one-sided IRKA:

        - `'V'`: Galerkin projection using the input Krylov subspace,
        - `'W'`: Galerkin projection using the output Krylov subspace.
    """
    def __init__(self, fom, version):
        assert isinstance(fom, LTIModel)
        assert version in ('V', 'W')
        self.fom = fom
        self.version = version
        self.V = None
        self._pg_reductor = None
        self.conv_crit = None
        self.sigmas = None
        self.R = None
        self.L = None
        self.errors = None

    def reduce(self, r, sigma=None, b=None, c=None, rd0=None, tol=1e-4, maxit=100, num_prev=1,
               force_sigma_in_rhp=False, projection='orth', conv_crit='sigma',
               compute_errors=False):
        r"""Reduce using one-sided IRKA.

        Parameters
        ----------
        r
            Order of the reduced order model.
        sigma
            Initial interpolation points (closed under conjugation).

            If `None`, interpolation points are log-spaced between 0.1 and 10.
            If `sigma` is an `int`, it is used as a random seed to generate the interpolation points.
            Otherwise, it needs to be a one-dimensional array-like of length `r`.

            `sigma` and `rd0` cannot both be not `None`.
        b
            Initial right tangential directions.

            If `None`, it is chosen as all ones.
            If `b` is an `int`, it is used as a random seed to generate the directions.
            Otherwise, it needs to be a |VectorArray| of length `r` from `fom.B.source`.

            `b` and `rd0` cannot both be not `None`.
        c
            Initial left tangential directions.

            If `None`, it is chosen as all ones.
            If `c` is an `int`, it is used as a random seed to generate the directions.
            Otherwise, it needs to be a |VectorArray| of length `r` from `fom.C.range`.

            `c` and `rd0` cannot both be not `None`.
        rd0
            Initial reduced order model.

            If `None`, then `sigma`, `b`, and `c` are used.
            Otherwise, it needs to be an |LTIModel| of order `r` and it is used to construct
            `sigma`, `b`, and `c`.
        tol
            Tolerance for the largest change in interpolation points.
        maxit
            Maximum number of iterations.
        num_prev
            Number of previous iterations to compare the current iteration to.
            A larger number can avoid occasional cyclic behavior.
        force_sigma_in_rhp
            If `False`, new interpolation points are reflections of the reduced order model's poles.
            Otherwise, they are always in the right half-plane.
        projection
            Projection method:

            - `'orth'`: projection matrix is orthogonalized with respect to the Euclidean inner
              product,
            - `'Eorth'`: projection matrix is orthogonalized with respect to the E product.
        conv_crit
            Convergence criterion:

            - `'sigma'`: relative change in interpolation points,
            - `'h2'`: relative :math:`\mathcal{H}_2` distance of reduced order models.
        compute_errors
            Should the relative :math:`\mathcal{H}_2`-errors of intermediate reduced order models be
            computed.

            .. warning::
                Computing :math:`\mathcal{H}_2`-errors is expensive.
                Use this option only if necessary.

        Returns
        -------
        rom
            Reduced |LTIModel|.
        """
        fom = self.fom
        if not fom.cont_time:
            raise NotImplementedError
        assert 0 < r < fom.order
        assert isinstance(num_prev, int) and num_prev >= 1
        assert projection in ('orth', 'Eorth')
        assert conv_crit in ('sigma', 'h2')

        # initial interpolation points and tangential directions
        assert sigma is None or isinstance(sigma, int) or len(sigma) == r
        assert b is None or isinstance(b, int) or b in fom.B.source and len(b) == r
        assert c is None or isinstance(c, int) or c in fom.C.range and len(c) == r
        assert (rd0 is None
                or isinstance(rd0, LTIModel)
                and rd0.order == r and rd0.input_space == fom.input_space and rd0.output_space == fom.output_space)
        assert sigma is None or rd0 is None
        assert b is None or rd0 is None
        assert c is None or rd0 is None
        if rd0 is not None:
            poles, b, c = _poles_and_tangential_directions(rd0)
            sigma = np.abs(poles.real) + poles.imag * 1j if force_sigma_in_rhp else -poles
        else:
            if sigma is None:
                sigma = np.logspace(-1, 1, r)
            elif isinstance(sigma, int):
                np.random.seed(sigma)
                sigma = np.abs(np.random.randn(r))
            if self.version == 'V':
                if b is None:
                    b = fom.B.source.ones(r)
                elif isinstance(b, int):
                    b = fom.B.source.random(r, distribution='normal', seed=b)
            else:
                if c is None:
                    c = fom.C.range.ones(r)
                elif isinstance(c, int):
                    c = fom.C.range.random(r, distribution='normal', seed=c)

        self.logger.info('Starting one-sided IRKA')
        self.conv_crit = []
        self.sigmas = [np.array(sigma)]
        if self.version == 'V':
            self.R = [b]
        else:
            self.L = [c]
        self.errors = [] if compute_errors else None
        # main loop
        for it in range(maxit):
            # interpolatory reduced order model
            self._projection_matrix(r, sigma, b, c, projection)
            rom = self._pg_reductor.reduce()

            # new interpolation points and tangential directions
            poles, b, c = _poles_and_tangential_directions(rom)
            sigma = np.abs(poles.real) + poles.imag * 1j if force_sigma_in_rhp else -poles
            self.sigmas.append(sigma)
            if self.version == 'V':
                self.R.append(b)
            else:
                self.L.append(c)

            # compute convergence criterion
            if conv_crit == 'sigma':
                dist = _convergence_criterion(self.sigmas[:-num_prev-2:-1], conv_crit)
                self.conv_crit.append(dist)
            elif conv_crit == 'h2':
                if it == 0:
                    rom_list = (num_prev + 1) * [None]
                    rom_list[0] = rom
                    self.conv_crit.append(np.inf)
                else:
                    rom_list[1:] = rom_list[:-1]
                    rom_list[0] = rom
                    dist = _convergence_criterion(rom_list, conv_crit)
                    self.conv_crit.append(dist)

            # report convergence
            self.logger.info(f'Convergence criterion in iteration {it + 1}: {self.conv_crit[-1]:e}')
            if compute_errors:
                if np.max(rom.poles().real) < 0:
                    err = fom - rom
                    rel_H2_err = err.h2_norm() / fom.h2_norm()
                else:
                    rel_H2_err = np.inf
                self.errors.append(rel_H2_err)

                self.logger.info(f'Relative H2-error in iteration {it + 1}: {rel_H2_err:e}')

            # check if convergence criterion is satisfied
            if self.conv_crit[-1] < tol:
                break

        # final reduced order model
        self._projection_matrix(r, sigma, b, c, projection)
        rom = self._pg_reductor.reduce()
        return rom

    def _projection_matrix(self, r, sigma, b, c, projection):
        fom = self.fom
        if self.version == 'V':
            V = fom.A.source.empty(reserve=r)
        else:
            W = fom.A.source.empty(reserve=r)
        for i in range(r):
            if sigma[i].imag == 0:
                sEmA = sigma[i].real * self.fom.E - self.fom.A
                if self.version == 'V':
                    Bb = fom.B.apply(b.real[i])
                    V.append(sEmA.apply_inverse(Bb))
                else:
                    CTc = fom.C.apply_adjoint(c.real[i])
                    W.append(sEmA.apply_inverse_adjoint(CTc))
            elif sigma[i].imag > 0:
                sEmA = sigma[i] * self.fom.E - self.fom.A
                if self.version == 'V':
                    Bb = fom.B.apply(b[i])
                    v = sEmA.apply_inverse(Bb)
                    V.append(v.real)
                    V.append(v.imag)
                else:
                    CTc = fom.C.apply_adjoint(c[i].conj())
                    w = sEmA.apply_inverse_adjoint(CTc)
                    W.append(w.real)
                    W.append(w.imag)

        if self.version == 'V':
            self.V = gram_schmidt(V, atol=0, rtol=0, product=None if projection == 'orth' else fom.E)
        else:
            self.V = gram_schmidt(W, atol=0, rtol=0, product=None if projection == 'orth' else fom.E)

        self._pg_reductor = LTIPGReductor(fom, self.V, self.V, projection == 'Eorth')

    def reconstruct(self, u):
        """Reconstruct high-dimensional vector from reduced vector `u`."""
        return self._pg_reductor.reconstruct(u)
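
A minimal usage sketch for the reductor above, assuming pyMOR's `LTIModel.from_matrices` constructor and the `pymor.reductors.h2` import path (module locations differ between pyMOR versions); the toy matrices are placeholders, not part of the original example.

import numpy as np
import scipy.sparse as sps

# assumed import paths; adjust to the installed pyMOR version
from pymor.models.iosys import LTIModel
from pymor.reductors.h2 import OneSidedIRKAReductor

# toy single-input/single-output full-order model (tridiagonal A)
n = 100
A = sps.diags([np.ones(n - 1), -2 * np.ones(n), np.ones(n - 1)], [-1, 0, 1])
B = np.zeros((n, 1))
B[0, 0] = 1.0
C = np.zeros((1, n))
C[0, -1] = 1.0
fom = LTIModel.from_matrices(A, B, C)

# 'V' version: Galerkin projection onto the input Krylov subspace
reductor = OneSidedIRKAReductor(fom, 'V')
rom = reductor.reduce(10, conv_crit='sigma')    # reduced order r = 10
print(rom.order, len(reductor.conv_crit))       # 10, number of iterations run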
Example 2
File: h2.py Project: pymor/pymor
class TSIAReductor(BasicInterface):
    """Two-Sided Iteration Algorithm reductor.

    Parameters
    ----------
    fom
        The full-order |LTIModel| to reduce.
    """
    def __init__(self, fom):
        assert isinstance(fom, LTIModel)
        self.fom = fom
        self.V = None
        self.W = None
        self._pg_reductor = None
        self.conv_crit = None
        self.errors = None

    def reduce(self, rom0, tol=1e-4, maxit=100, num_prev=1, projection='orth', conv_crit='sigma',
               compute_errors=False):
        r"""Reduce using TSIA.

        See [XZ11]_ (Algorithm 1) and [BKS11]_.

        In exact arithmetic, TSIA is equivalent to IRKA (under some
        assumptions on the poles of the reduced model). The main
        difference in implementation is that TSIA computes the Schur
        decomposition of the reduced matrices, while IRKA computes the
        eigenvalue decomposition. Therefore, TSIA might behave better
        for non-normal reduced matrices.

        Parameters
        ----------
        rom0
            Initial reduced order model.
        tol
            Tolerance for the convergence criterion.
        maxit
            Maximum number of iterations.
        num_prev
            Number of previous iterations to compare the current
            iteration to. A larger number can avoid occasional cyclic
            behavior of TSIA.
        projection
            Projection method:

            - `'orth'`: projection matrices are orthogonalized with
              respect to the Euclidean inner product
            - `'biorth'`: projection matrices are biorthogonalized with
              respect to the E product
        conv_crit
            Convergence criterion:

            - `'sigma'`: relative change in interpolation points
            - `'h2'`: relative :math:`\mathcal{H}_2` distance of
              reduced-order models
        compute_errors
            Should the relative :math:`\mathcal{H}_2`-errors of
            intermediate reduced order models be computed.

            .. warning::
                Computing :math:`\mathcal{H}_2`-errors is expensive. Use
                this option only if necessary.

        Returns
        -------
        rom
            Reduced |LTIModel|.
        """
        fom = self.fom
        assert isinstance(rom0, LTIModel) and rom0.B.source == fom.B.source and rom0.C.range == fom.C.range
        r = rom0.order
        assert 0 < r < fom.order
        assert isinstance(num_prev, int) and num_prev >= 1
        assert projection in ('orth', 'biorth')
        assert conv_crit in ('sigma', 'h2')

        # begin logging
        self.logger.info('Starting TSIA')

        # find initial projection matrices
        self._projection_matrices(rom0, projection)

        data = (num_prev + 1) * [None]
        data[0] = rom0.poles() if conv_crit == 'sigma' else rom0
        self.conv_crit = []
        self.errors = [] if compute_errors else None
        # main loop
        for it in range(maxit):
            # project the full order model
            rom = self._pg_reductor.reduce()

            # compute convergence criterion
            data[1:] = data[:-1]
            data[0] = rom.poles() if conv_crit == 'sigma' else rom
            dist = _convergence_criterion(data, conv_crit)
            self.conv_crit.append(dist)

            # report convergence
            self.logger.info(f'Convergence criterion in iteration {it + 1}: {self.conv_crit[-1]:e}')
            if compute_errors:
                if np.max(rom.poles().real) < 0:
                    err = fom - rom
                    rel_H2_err = err.h2_norm() / fom.h2_norm()
                else:
                    rel_H2_err = np.inf
                self.errors.append(rel_H2_err)

                self.logger.info(f'Relative H2-error in iteration {it + 1}: {rel_H2_err:e}')

            # new projection matrices
            self._projection_matrices(rom, projection)

            # check convergence criterion
            if self.conv_crit[-1] < tol:
                break

        # final reduced order model
        rom = self._pg_reductor.reduce()
        return rom

    def _projection_matrices(self, rom, projection):
        fom = self.fom
        self.V, self.W = solve_sylv_schur(fom.A, rom.A,
                                          E=fom.E, Er=rom.E,
                                          B=fom.B, Br=rom.B,
                                          C=fom.C, Cr=rom.C)
        if projection == 'orth':
            self.V = gram_schmidt(self.V, atol=0, rtol=0)
            self.W = gram_schmidt(self.W, atol=0, rtol=0)
        elif projection == 'biorth':
            self.V, self.W = gram_schmidt_biorth(self.V, self.W, product=fom.E)

        self._pg_reductor = LTIPGReductor(fom, self.W, self.V, projection == 'biorth')

    def reconstruct(self, u):
        """Reconstruct high-dimensional vector from reduced vector `u`."""
        return self._pg_reductor.reconstruct(u)
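
TSIA needs an initial reduced-order model `rom0`. A sketch of one way to obtain it, reusing the toy `fom` from the previous sketch and assuming `BTReductor` is available under `pymor.reductors.bt` (an assumption about the installed pyMOR version).

# assumed import paths; adjust to the installed pyMOR version
from pymor.reductors.bt import BTReductor
from pymor.reductors.h2 import TSIAReductor

# initial reduced model of order 10, e.g. obtained from balanced truncation
rom0 = BTReductor(fom).reduce(10)

tsia = TSIAReductor(fom)
rom = tsia.reduce(rom0, tol=1e-4, maxit=100, projection='biorth')

print(tsia.conv_crit[-1])   # last value of the convergence criterion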
Example 3
File: h2.py Project: deneick/pymor
class TSIAReductor(BasicInterface):
    """Two-Sided Iteration Algorithm reductor.

    Parameters
    ----------
    fom
        The full-order |LTIModel| to reduce.
    """
    def __init__(self, fom):
        assert isinstance(fom, LTIModel)
        self.fom = fom
        self.V = None
        self.W = None
        self._pg_reductor = None
        self.conv_crit = None
        self.errors = None

    def reduce(self,
               rom0,
               tol=1e-4,
               maxit=100,
               num_prev=1,
               projection='orth',
               conv_crit='sigma',
               compute_errors=False):
        r"""Reduce using TSIA.

        See [XZ11]_ (Algorithm 1) and [BKS11]_.

        In exact arithmetic, TSIA is equivalent to IRKA (under some
        assumptions on the poles of the reduced model). The main
        difference in implementation is that TSIA computes the Schur
        decomposition of the reduced matrices, while IRKA computes the
        eigenvalue decomposition. Therefore, TSIA might behave better
        for non-normal reduced matrices.

        Parameters
        ----------
        rom0
            Initial reduced order model.
        tol
            Tolerance for the convergence criterion.
        maxit
            Maximum number of iterations.
        num_prev
            Number of previous iterations to compare the current
            iteration to. A larger number can avoid occasional cyclic
            behavior of TSIA.
        projection
            Projection method:

            - `'orth'`: projection matrices are orthogonalized with
              respect to the Euclidean inner product
            - `'biorth'`: projection matrices are biorthogonalized with
              respect to the E product
        conv_crit
            Convergence criterion:

            - `'sigma'`: relative change in interpolation points
            - `'h2'`: relative :math:`\mathcal{H}_2` distance of
              reduced-order models
        compute_errors
            Should the relative :math:`\mathcal{H}_2`-errors of
            intermediate reduced order models be computed.

            .. warning::
                Computing :math:`\mathcal{H}_2`-errors is expensive. Use
                this option only if necessary.

        Returns
        -------
        rom
            Reduced |LTIModel|.
        """
        fom = self.fom
        assert isinstance(
            rom0, LTIModel
        ) and rom0.B.source == fom.B.source and rom0.C.range == fom.C.range
        r = rom0.order
        assert 0 < r < fom.order
        assert isinstance(num_prev, int) and num_prev >= 1
        assert projection in ('orth', 'biorth')
        assert conv_crit in ('sigma', 'h2')

        # begin logging
        self.logger.info('Starting TSIA')

        # find initial projection matrices
        self._projection_matrices(rom0, projection)

        data = (num_prev + 1) * [None]
        data[0] = rom0.poles() if conv_crit == 'sigma' else rom0
        self.conv_crit = []
        self.errors = [] if compute_errors else None
        # main loop
        for it in range(maxit):
            # project the full order model
            rom = self._pg_reductor.reduce()

            # compute convergence criterion
            data[1:] = data[:-1]
            data[0] = rom.poles() if conv_crit == 'sigma' else rom
            dist = _convergence_criterion(data, conv_crit)
            self.conv_crit.append(dist)

            # report convergence
            self.logger.info(
                f'Convergence criterion in iteration {it + 1}: {self.conv_crit[-1]:e}'
            )
            if compute_errors:
                if np.max(rom.poles().real) < 0:
                    err = fom - rom
                    rel_H2_err = err.h2_norm() / fom.h2_norm()
                else:
                    rel_H2_err = np.inf
                self.errors.append(rel_H2_err)

                self.logger.info(
                    f'Relative H2-error in iteration {it + 1}: {rel_H2_err:e}')

            # new projection matrices
            self._projection_matrices(rom, projection)

            # check convergence criterion
            if self.conv_crit[-1] < tol:
                break

        # final reduced order model
        rom = self._pg_reductor.reduce()
        return rom

    def _projection_matrices(self, rom, projection):
        fom = self.fom
        self.V, self.W = solve_sylv_schur(fom.A,
                                          rom.A,
                                          E=fom.E,
                                          Er=rom.E,
                                          B=fom.B,
                                          Br=rom.B,
                                          C=fom.C,
                                          Cr=rom.C)
        if projection == 'orth':
            self.V = gram_schmidt(self.V, atol=0, rtol=0)
            self.W = gram_schmidt(self.W, atol=0, rtol=0)
        elif projection == 'biorth':
            self.V, self.W = gram_schmidt_biorth(self.V, self.W, product=fom.E)

        self._pg_reductor = LTIPGReductor(fom, self.W, self.V,
                                          projection == 'biorth')

    def reconstruct(self, u):
        """Reconstruct high-dimensional vector from reduced vector `u`."""
        return self._pg_reductor.reconstruct(u)
Example 4
File: h2.py Project: deneick/pymor
class OneSidedIRKAReductor(BasicInterface):
    """One-Sided Iterative Rational Krylov Algorithm reductor.

    Parameters
    ----------
    fom
        The full-order |LTIModel| to reduce.
    version
        Version of the one-sided IRKA:

        - `'V'`: Galerkin projection using the input Krylov subspace,
        - `'W'`: Galerkin projection using the output Krylov subspace.
    """
    def __init__(self, fom, version):
        assert isinstance(fom, LTIModel)
        assert version in ('V', 'W')
        self.fom = fom
        self.version = version
        self.V = None
        self._pg_reductor = None
        self.conv_crit = None
        self.sigmas = None
        self.R = None
        self.L = None
        self.errors = None

    def reduce(self,
               r,
               sigma=None,
               b=None,
               c=None,
               rd0=None,
               tol=1e-4,
               maxit=100,
               num_prev=1,
               force_sigma_in_rhp=False,
               projection='orth',
               conv_crit='sigma',
               compute_errors=False):
        r"""Reduce using one-sided IRKA.

        Parameters
        ----------
        r
            Order of the reduced order model.
        sigma
            Initial interpolation points (closed under conjugation).

            If `None`, interpolation points are log-spaced between 0.1 and 10.
            If `sigma` is an `int`, it is used as a random seed to generate the interpolation points.
            Otherwise, it needs to be a one-dimensional array-like of length `r`.

            `sigma` and `rd0` cannot both be not `None`.
        b
            Initial right tangential directions.

            If `None`, it is chosen as all ones.
            If `b` is an `int`, it is used as a random seed to generate the directions.
            Otherwise, it needs to be a |VectorArray| of length `r` from `fom.B.source`.

            `b` and `rd0` cannot both be not `None`.
        c
            Initial left tangential directions.

            If `None`, it is chosen as all ones.
            If `c` is an `int`, it is used as a random seed to generate the directions.
            Otherwise, it needs to be a |VectorArray| of length `r` from `fom.C.range`.

            `c` and `rd0` cannot both be not `None`.
        rd0
            Initial reduced order model.

            If `None`, then `sigma`, `b`, and `c` are used.
            Otherwise, it needs to be an |LTIModel| of order `r` and it is used to construct
            `sigma`, `b`, and `c`.
        tol
            Tolerance for the largest change in interpolation points.
        maxit
            Maximum number of iterations.
        num_prev
            Number of previous iterations to compare the current iteration to.
            A larger number can avoid occasional cyclic behavior.
        force_sigma_in_rhp
            If `False`, new interpolation points are reflections of the reduced order model's poles.
            Otherwise, they are always in the right half-plane.
        projection
            Projection method:

            - `'orth'`: projection matrix is orthogonalized with respect to the Euclidean inner
              product,
            - `'Eorth'`: projection matrix is orthogonalized with respect to the E product.
        conv_crit
            Convergence criterion:

            - `'sigma'`: relative change in interpolation points,
            - `'h2'`: relative :math:`\mathcal{H}_2` distance of reduced order models.
        compute_errors
            Should the relative :math:`\mathcal{H}_2`-errors of intermediate reduced order models be
            computed.

            .. warning::
                Computing :math:`\mathcal{H}_2`-errors is expensive.
                Use this option only if necessary.

        Returns
        -------
        rom
            Reduced |LTIModel|.
        """
        fom = self.fom
        if not fom.cont_time:
            raise NotImplementedError
        assert 0 < r < fom.order
        assert isinstance(num_prev, int) and num_prev >= 1
        assert projection in ('orth', 'Eorth')
        assert conv_crit in ('sigma', 'h2')

        # initial interpolation points and tangential directions
        assert sigma is None or isinstance(sigma, int) or len(sigma) == r
        assert b is None or isinstance(
            b, int) or b in fom.B.source and len(b) == r
        assert c is None or isinstance(c,
                                       int) or c in fom.C.range and len(c) == r
        assert (rd0 is None or isinstance(rd0, LTIModel) and rd0.order == r
                and rd0.input_space == fom.input_space
                and rd0.output_space == fom.output_space)
        assert sigma is None or rd0 is None
        assert b is None or rd0 is None
        assert c is None or rd0 is None
        if rd0 is not None:
            poles, b, c = _poles_and_tangential_directions(rd0)
            sigma = np.abs(
                poles.real) + poles.imag * 1j if force_sigma_in_rhp else -poles
        else:
            if sigma is None:
                sigma = np.logspace(-1, 1, r)
            elif isinstance(sigma, int):
                np.random.seed(sigma)
                sigma = np.abs(np.random.randn(r))
            if self.version == 'V':
                if b is None:
                    b = fom.B.source.ones(r)
                elif isinstance(b, int):
                    b = fom.B.source.random(r, distribution='normal', seed=b)
            else:
                if c is None:
                    c = fom.C.range.ones(r)
                elif isinstance(c, int):
                    c = fom.C.range.random(r, distribution='normal', seed=c)

        self.logger.info('Starting one-sided IRKA')
        self.conv_crit = []
        self.sigmas = [np.array(sigma)]
        if self.version == 'V':
            self.R = [b]
        else:
            self.L = [c]
        self.errors = [] if compute_errors else None
        # main loop
        for it in range(maxit):
            # interpolatory reduced order model
            self._projection_matrix(r, sigma, b, c, projection)
            rom = self._pg_reductor.reduce()

            # new interpolation points and tangential directions
            poles, b, c = _poles_and_tangential_directions(rom)
            sigma = np.abs(
                poles.real) + poles.imag * 1j if force_sigma_in_rhp else -poles
            self.sigmas.append(sigma)
            if self.version == 'V':
                self.R.append(b)
            else:
                self.L.append(c)

            # compute convergence criterion
            if conv_crit == 'sigma':
                dist = _convergence_criterion(self.sigmas[:-num_prev - 2:-1],
                                              conv_crit)
                self.conv_crit.append(dist)
            elif conv_crit == 'h2':
                if it == 0:
                    rom_list = (num_prev + 1) * [None]
                    rom_list[0] = rom
                    self.conv_crit.append(np.inf)
                else:
                    rom_list[1:] = rom_list[:-1]
                    rom_list[0] = rom
                    dist = _convergence_criterion(rom_list, conv_crit)
                    self.conv_crit.append(dist)

            # report convergence
            self.logger.info(
                f'Convergence criterion in iteration {it + 1}: {self.conv_crit[-1]:e}'
            )
            if compute_errors:
                if np.max(rom.poles().real) < 0:
                    err = fom - rom
                    rel_H2_err = err.h2_norm() / fom.h2_norm()
                else:
                    rel_H2_err = np.inf
                self.errors.append(rel_H2_err)

                self.logger.info(
                    f'Relative H2-error in iteration {it + 1}: {rel_H2_err:e}')

            # check if convergence criterion is satisfied
            if self.conv_crit[-1] < tol:
                break

        # final reduced order model
        self._projection_matrix(r, sigma, b, c, projection)
        rom = self._pg_reductor.reduce()
        return rom

    def _projection_matrix(self, r, sigma, b, c, projection):
        fom = self.fom
        if self.version == 'V':
            V = fom.A.source.empty(reserve=r)
        else:
            W = fom.A.source.empty(reserve=r)
        for i in range(r):
            if sigma[i].imag == 0:
                sEmA = sigma[i].real * self.fom.E - self.fom.A
                if self.version == 'V':
                    Bb = fom.B.apply(b.real[i])
                    V.append(sEmA.apply_inverse(Bb))
                else:
                    CTc = fom.C.apply_adjoint(c.real[i])
                    W.append(sEmA.apply_inverse_adjoint(CTc))
            elif sigma[i].imag > 0:
                sEmA = sigma[i] * self.fom.E - self.fom.A
                if self.version == 'V':
                    Bb = fom.B.apply(b[i])
                    v = sEmA.apply_inverse(Bb)
                    V.append(v.real)
                    V.append(v.imag)
                else:
                    CTc = fom.C.apply_adjoint(c[i].conj())
                    w = sEmA.apply_inverse_adjoint(CTc)
                    W.append(w.real)
                    W.append(w.imag)

        if self.version == 'V':
            self.V = gram_schmidt(
                V,
                atol=0,
                rtol=0,
                product=None if projection == 'orth' else fom.E)
        else:
            self.V = gram_schmidt(
                W,
                atol=0,
                rtol=0,
                product=None if projection == 'orth' else fom.E)

        self._pg_reductor = LTIPGReductor(fom, self.V, self.V,
                                          projection == 'Eorth')

    def reconstruct(self, u):
        """Reconstruct high-dimensional vector from reduced vector `u`."""
        return self._pg_reductor.reconstruct(u)
Example 5
class GenericBTReductor(BasicObject):
    """Generic Balanced Truncation reductor.

    Parameters
    ----------
    fom
        The full-order |LTIModel| to reduce.
    mu
        |Parameter|.
    """
    def __init__(self, fom, mu=None):
        assert isinstance(fom, LTIModel)
        self.fom = fom
        self.mu = fom.parse_parameter(mu)
        self.V = None
        self.W = None
        self._pg_reductor = None
        self._sv_U_V_cache = None

    def _gramians(self):
        """Return low-rank Cholesky factors of Gramians."""
        raise NotImplementedError

    def _sv_U_V(self):
        """Return singular values and vectors."""
        if self._sv_U_V_cache is None:
            cf, of = self._gramians()
            U, sv, Vh = spla.svd(self.fom.E.apply2(of, cf, mu=self.mu),
                                 lapack_driver='gesvd')
            self._sv_U_V_cache = (sv, U.T, Vh)
        return self._sv_U_V_cache

    def error_bounds(self):
        """Returns error bounds for all possible reduced orders."""
        raise NotImplementedError

    def reduce(self, r=None, tol=None, projection='bfsr'):
        """Generic Balanced Truncation.

        Parameters
        ----------
        r
            Order of the reduced model if `tol` is `None`, maximum order if `tol` is specified.
        tol
            Tolerance for the error bound if `r` is `None`.
        projection
            Projection method used:

            - `'sr'`: square root method
            - `'bfsr'`: balancing-free square root method (default, since it avoids scaling by
              singular values and orthogonalizes the projection matrices, which might make it more
              accurate than the square root method)
            - `'biorth'`: like the balancing-free square root method, except it biorthogonalizes the
              projection matrices (using :func:`~pymor.algorithms.gram_schmidt.gram_schmidt_biorth`)

        Returns
        -------
        rom
            Reduced-order model.
        """
        assert r is not None or tol is not None
        assert r is None or 0 < r < self.fom.order
        assert projection in ('sr', 'bfsr', 'biorth')

        cf, of = self._gramians()
        sv, sU, sV = self._sv_U_V()

        # find reduced order if tol is specified
        if tol is not None:
            error_bounds = self.error_bounds()
            r_tol = np.argmax(error_bounds <= tol) + 1
            r = r_tol if r is None else min(r, r_tol)
        if r > min(len(cf), len(of)):
            raise ValueError(
                'r needs to be smaller than the sizes of Gramian factors.')

        # compute projection matrices
        self.V = cf.lincomb(sV[:r])
        self.W = of.lincomb(sU[:r])
        if projection == 'sr':
            alpha = 1 / np.sqrt(sv[:r])
            self.V.scal(alpha)
            self.W.scal(alpha)
        elif projection == 'bfsr':
            gram_schmidt(self.V, atol=0, rtol=0, copy=False)
            gram_schmidt(self.W, atol=0, rtol=0, copy=False)
        elif projection == 'biorth':
            gram_schmidt_biorth(self.V, self.W, product=self.fom.E, copy=False)

        # find reduced-order model
        if self.fom.parametric:
            fom_mu = self.fom.with_(**{
                op: getattr(self.fom, op).assemble(mu=self.mu)
                for op in ['A', 'B', 'C', 'D', 'E']
            },
                                    parameter_space=None)
        else:
            fom_mu = self.fom
        self._pg_reductor = LTIPGReductor(fom_mu, self.W, self.V, projection
                                          in ('sr', 'biorth'))
        rom = self._pg_reductor.reduce()
        return rom

    def reconstruct(self, u):
        """Reconstruct high-dimensional vector from reduced vector `u`."""
        return self._pg_reductor.reconstruct(u)
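
`GenericBTReductor` is abstract: concrete subclasses supply `_gramians` and `error_bounds`. A sketch of the standard Lyapunov-Gramian variant, assuming `LTIModel.gramian` accepts the `'c_lrcf'`/`'o_lrcf'` keys (as in recent pyMOR releases); the class name is illustrative and the bound is the classical twice-the-truncated-tail sum of Hankel singular values.

class LyapunovBTReductor(GenericBTReductor):
    """Sketch: standard balanced truncation via low-rank Lyapunov Gramian factors."""

    def _gramians(self):
        # low-rank Cholesky factors of the controllability and observability Gramians
        return (self.fom.gramian('c_lrcf', mu=self.mu),
                self.fom.gramian('o_lrcf', mu=self.mu))

    def error_bounds(self):
        sv = self._sv_U_V()[0]
        # bound for reduced order r: 2 * sum of the discarded Hankel singular values
        return 2 * sv[1:][::-1].cumsum()[::-1]

Usage then mirrors the `reduce` call above, e.g. `LyapunovBTReductor(fom).reduce(r=10, projection='bfsr')`.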
Example 6
File: bt.py Project: pymor/pymor
class GenericBTReductor(BasicInterface):
    """Generic Balanced Truncation reductor.

    Parameters
    ----------
    fom
        The full-order |LTIModel| to reduce.
    """
    def __init__(self, fom):
        assert isinstance(fom, LTIModel)
        self.fom = fom
        self.V = None
        self.W = None
        self._pg_reductor = None
        self._sv_U_V_cache = None

    def _gramians(self):
        """Return low-rank Cholesky factors of Gramians."""
        raise NotImplementedError

    def _sv_U_V(self):
        """Return singular values and vectors."""
        if self._sv_U_V_cache is None:
            cf, of = self._gramians()
            U, sv, Vh = spla.svd(self.fom.E.apply2(of, cf), lapack_driver='gesvd')
            self._sv_U_V_cache = (sv, U.T, Vh)
        return self._sv_U_V_cache

    def error_bounds(self):
        """Returns error bounds for all possible reduced orders."""
        raise NotImplementedError

    def reduce(self, r=None, tol=None, projection='bfsr'):
        """Generic Balanced Truncation.

        Parameters
        ----------
        r
            Order of the reduced model if `tol` is `None`, maximum order if `tol` is specified.
        tol
            Tolerance for the error bound if `r` is `None`.
        projection
            Projection method used:

            - `'sr'`: square root method
            - `'bfsr'`: balancing-free square root method (default, since it avoids scaling by
              singular values and orthogonalizes the projection matrices, which might make it more
              accurate than the square root method)
            - `'biorth'`: like the balancing-free square root method, except it biorthogonalizes the
              projection matrices (using :func:`~pymor.algorithms.gram_schmidt.gram_schmidt_biorth`)

        Returns
        -------
        rom
            Reduced-order model.
        """
        assert r is not None or tol is not None
        assert r is None or 0 < r < self.fom.order
        assert projection in ('sr', 'bfsr', 'biorth')

        cf, of = self._gramians()
        sv, sU, sV = self._sv_U_V()

        # find reduced order if tol is specified
        if tol is not None:
            error_bounds = self.error_bounds()
            r_tol = np.argmax(error_bounds <= tol) + 1
            r = r_tol if r is None else min(r, r_tol)
        if r > min(len(cf), len(of)):
            raise ValueError('r needs to be smaller than the sizes of Gramian factors.')

        # compute projection matrices
        self.V = cf.lincomb(sV[:r])
        self.W = of.lincomb(sU[:r])
        if projection == 'sr':
            alpha = 1 / np.sqrt(sv[:r])
            self.V.scal(alpha)
            self.W.scal(alpha)
        elif projection == 'bfsr':
            self.V = gram_schmidt(self.V, atol=0, rtol=0)
            self.W = gram_schmidt(self.W, atol=0, rtol=0)
        elif projection == 'biorth':
            self.V, self.W = gram_schmidt_biorth(self.V, self.W, product=self.fom.E)

        # find reduced-order model
        self._pg_reductor = LTIPGReductor(self.fom, self.W, self.V, projection in ('sr', 'biorth'))
        rom = self._pg_reductor.reduce()
        return rom

    def reconstruct(self, u):
        """Reconstruct high-dimensional vector from reduced vector `u`."""
        return self._pg_reductor.reconstruct(u)
Example 7
class GenericBTReductor(BasicInterface):
    """Generic Balanced Truncation reductor.

    Parameters
    ----------
    fom
        The system which is to be reduced.
    """
    def __init__(self, fom):
        assert isinstance(fom, LTIModel)
        self.fom = fom
        self.V = None
        self.W = None
        self.sv = None
        self.sU = None
        self.sV = None

    def gramians(self):
        """Return low-rank Cholesky factors of Gramians."""
        raise NotImplementedError

    def sv_U_V(self):
        """Return singular values and vectors."""
        if self.sv is None or self.sU is None or self.sV is None:
            cf, of = self.gramians()
            U, sv, Vh = spla.svd(self.fom.E.apply2(of, cf), lapack_driver='gesvd')
            self.sv = sv
            self.sU = U.T
            self.sV = Vh
        return self.sv, self.sU, self.sV

    def error_bounds(self):
        """Returns error bounds for all possible reduced orders."""
        raise NotImplementedError

    def reduce(self, r=None, tol=None, projection='bfsr'):
        """Generic Balanced Truncation.

        Parameters
        ----------
        r
            Order of the reduced model if `tol` is `None`, maximum order
            if `tol` is specified.
        tol
            Tolerance for the error bound if `r` is `None`.
        projection
            Projection method used:

            - `'sr'`: square root method
            - `'bfsr'`: balancing-free square root method (default,
              since it avoids scaling by singular values and
              orthogonalizes the projection matrices, which might make
              it more accurate than the square root method)
            - `'biorth'`: like the balancing-free square root method,
              except it biorthogonalizes the projection matrices (using
              :func:`~pymor.algorithms.gram_schmidt.gram_schmidt_biorth`)

        Returns
        -------
        rom
            Reduced system.
        """
        assert r is not None or tol is not None
        assert r is None or 0 < r < self.fom.order
        assert projection in ('sr', 'bfsr', 'biorth')

        cf, of = self.gramians()
        sv, sU, sV = self.sv_U_V()

        # find reduced order if tol is specified
        if tol is not None:
            error_bounds = self.error_bounds()
            r_tol = np.argmax(error_bounds <= tol) + 1
            r = r_tol if r is None else min(r, r_tol)

        if r > min(len(cf), len(of)):
            raise ValueError('r needs to be smaller than the sizes of Gramian factors.')

        # compute projection matrices and find the reduced model
        self.V = cf.lincomb(sV[:r])
        self.W = of.lincomb(sU[:r])
        if projection == 'sr':
            alpha = 1 / np.sqrt(sv[:r])
            self.V.scal(alpha)
            self.W.scal(alpha)
        elif projection == 'bfsr':
            self.V = gram_schmidt(self.V, atol=0, rtol=0)
            self.W = gram_schmidt(self.W, atol=0, rtol=0)
        elif projection == 'biorth':
            self.V, self.W = gram_schmidt_biorth(self.V, self.W, product=self.fom.E)

        self.pg_reductor = LTIPGReductor(self.fom, self.W, self.V, projection in ('sr', 'biorth'))
        rom = self.pg_reductor.reduce()

        return rom

    def reconstruct(self, u):
        """Reconstruct high-dimensional vector from reduced vector `u`."""
        return self.pg_reductor.reconstruct(u)
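
A short sketch of how the order-selection logic in `reduce` behaves, assuming a concrete subclass such as the `LyapunovBTReductor` sketched after Example 5 and the toy `fom` from the first sketch; all names here are illustrative.

bt = LyapunovBTReductor(fom)

# fixed reduced order
rom_fixed = bt.reduce(r=10, projection='bfsr')

# order driven by the error bound: smallest r with bound <= tol, capped at r = 20
rom_adaptive = bt.reduce(r=20, tol=1e-5)

print(bt.error_bounds()[:5])   # a-priori bounds for reduced orders 1..5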