Example #1
 def set(self, key, value):
     key = base64.b64encode(key)
     now = datetime.datetime.now()
     filename = now.isoformat() + '.dat'
     file_path = os.path.join(self.path, filename)
     while os.path.exists(file_path):
         now = now + datetime.timedelta(microseconds=1)
         filename = now.isoformat() + '.dat'
         file_path = os.path.join(self.path, filename)
     fd = os.open(file_path, os.O_WRONLY | os.O_EXCL | os.O_CREAT)
     with os.fdopen(fd, 'wb') as f:
         dump(value, f)
         file_size = f.tell()
     conn = self.conn
     c = conn.cursor()
     try:
         c.execute("INSERT INTO entries(key, filename, size) VALUES ('{}', '{}', {})".format(key, filename, file_size))
         conn.commit()
     except sqlite3.IntegrityError:
         conn.commit()
         from pymor.core.logger import getLogger
         getLogger('pymor.core.cache.SQLiteRegion').warn('Key already present in cache region, ignoring.')
         os.unlink(file_path)
     self.bytes_written += file_size
     if self.bytes_written >= 0.1 * self.max_size:
         self.housekeeping()
Example #2
 def set(self, key, value):
     if key in self._cache:
         getLogger('pymor.core.cache.MemoryRegion').warn('Key already present in cache region, ignoring.')
         return
     if len(self._cache) == self.max_keys:
         self._cache.popitem(last=False)
     self._cache[key] = value
Example #3
    def housekeeping(self):
        self.bytes_written = 0
        conn = self.conn
        c = conn.cursor()
        c.execute('SELECT SUM(size) FROM entries')
        size = c.fetchone()
        # size[0] can apparently also be None
        try:
            size = int(size[0]) if size is not None else 0
        except TypeError:
            size = 0
        if size > self.max_size:
            bytes_to_delete = size - self.max_size + 0.75 * self.max_size
            deleted = 0
            ids_to_delete = []
            files_to_delete = []
            c.execute('SELECT id, filename, size FROM entries ORDER BY id ASC')
            while deleted < bytes_to_delete:
                id_, filename, file_size = c.fetchone()
                ids_to_delete.append(id_)
                files_to_delete.append(filename)
                deleted += file_size
            c.execute('DELETE FROM entries WHERE id in ({})'.format(','.join(map(str, ids_to_delete))))
            conn.commit()
            path = self.path
            for filename in files_to_delete:
                try:
                    os.unlink(os.path.join(path, filename))
                except OSError:
                    from pymor.core.logger import getLogger
                    getLogger('pymor.core.cache.SQLiteRegion').warn('Cannot delete cache entry ' + filename)

            from pymor.core.logger import getLogger
            getLogger('pymor.core.cache.SQLiteRegion').info('Removed {} old cache entries'.format(len(ids_to_delete)))
Example #4
    def _call_pymess_dense_nm_gmpare(A, E, B, C, R, S, trans=False, options=None, plus=False):
        """Return the solution from pymess.dense_nm_gmpare solver."""
        A = to_matrix(A, format='dense')
        E = to_matrix(E, format='dense') if E else None
        B = B.to_numpy().T
        C = C.to_numpy()
        S = S.to_numpy().T if S else None

        Q = B.dot(B.T) if not trans else C.T.dot(C)
        pymess_trans = pymess.MESS_OP_NONE if not trans else pymess.MESS_OP_TRANSPOSE
        if not trans:
            RinvC = spla.solve(R, C) if R is not None else C
            G = C.T.dot(RinvC)
            if S is not None:
                RinvST = spla.solve(R, S.T) if R is not None else S.T
                if not plus:
                    A -= S.dot(RinvC)
                    Q -= S.dot(RinvST)
                else:
                    A += S.dot(RinvC)
                    Q += S.dot(RinvST)
        else:
            RinvBT = spla.solve(R, B.T) if R is not None else B.T
            G = B.dot(RinvBT)
            if S is not None:
                RinvST = spla.solve(R, S.T) if R is not None else S.T
                if not plus:
                    A -= RinvBT.T.dot(S.T)
                    Q -= S.dot(RinvST)
                else:
                    A += RinvBT.T.dot(S.T)
                    Q += S.dot(RinvST)
        X, absres, relres = pymess.dense_nm_gmpare(None,
                                                   A, E, Q, G,
                                                   plus=plus, trans=pymess_trans,
                                                   linesearch=options['linesearch'],
                                                   maxit=options['maxit'],
                                                   absres_tol=options['absres_tol'],
                                                   relres_tol=options['relres_tol'],
                                                   nrm=options['nrm'])
        if absres > options['absres_tol']:
            logger = getLogger('pymess.dense_nm_gmpcare')
            logger.warning(f'Desired absolute residual tolerance was not achieved '
                           f'({absres:e} > {options["absres_tol"]:e}).')
        if relres > options['relres_tol']:
            logger = getLogger('pymess.dense_nm_gmpcare')
            logger.warning(f'Desired relative residual tolerance was not achieved '
                           f'({relres:e} > {options["relres_tol"]:e}).')

        return X
Example #5
 def set(self, key, value):
     key = base64.b64encode(key)
     response = self.server.set(self.secret, key)
     assert len(response) == 2 and isinstance(response[0], bool) and isinstance(response[1], str)
     if response[0]:
         with open(response[1], 'wb') as f:
             dump(value, f)
             file_size = f.tell()
         response = self.server.set_finished(self.secret, key, file_size)
         assert isinstance(response, bool) and response
     else:
         from pymor.core.logger import getLogger
         getLogger('pymor.core.network_cache.NetworkFilesystemRegion')\
             .warn('Key already present in cache region, ignoring.')
Example #6
def _setup(name='pymor'):
    root_logger = logger.getLogger(name)
    root_logger.setLevel(logging.ERROR)
    test_logger = logger.getLogger(name)
    test_logger.setLevel(logging.DEBUG)
    # config_files.append(os.path.join(os.path.dirname(pymor.__file__), '../../setup.cfg'))
    # config defaults to no plugins -> specify defaults...
    manager = nose.plugins.manager.DefaultPluginManager()
    config_files = nose.config.all_config_files()
    config = nose.config.Config(files=config_files, plugins=manager)
    config.exclude = []
    selector = PymorTestSelector(config=config)
    loader = nose.loader.defaultTestLoader(config=config, selector=selector)
    cli = [__file__, '-vv', '-d']
    return cli, loader, config
Example #7
def _load_all():
    import pymor
    ignore_playground = True
    fails = []
    for _, module_name, _ in pkgutil.walk_packages(pymor.__path__, pymor.__name__ + '.',
                                                   lambda n: fails.append((n, ''))):
        if ignore_playground and 'playground' in module_name:
            continue
        try:
            __import__(module_name, level=0)
        except (TypeError, ImportError) as t:
            fails.append((module_name, t))
    if len(fails) > 0:
        logger.getLogger(__name__).fatal('Failed imports: {}'.format(pprint.pformat(fails)))
        raise ImportError(__name__)
Example #8
File: io.py Project: pymor/pymor
def load_matrix(path, key=None):

    logger = getLogger('pymor.tools.io.load_matrix')
    logger.info('Loading matrix from file ' + path)

    path_parts = path.split('.')
    if len(path_parts[-1]) == 3:
        extension = path_parts[-1].lower()
    elif path_parts[-1].lower() == 'gz' and len(path_parts) >= 2 and len(path_parts[-2]) == 3:
        extension = '.'.join(path_parts[-2:]).lower()
    else:
        extension = None

    file_format_map = {'mat': ('MATLAB', _loadmat),
                       'mtx': ('Matrix Market', _mmread),
                       'mtx.gz': ('Matrix Market', _mmread),
                       'npy': ('NPY/NPZ', _load),
                       'npz': ('NPY/NPZ', _load),
                       'txt': ('Text', _loadtxt)}

    if extension in file_format_map:
        file_type, loader = file_format_map[extension]
        logger.info(file_type + ' file detected.')
        return loader(path, key)

    logger.warning('Could not detect file format. Trying all loaders ...')

    loaders = [_loadmat, _mmread, _loadtxt, _load]
    for loader in loaders:
        try:
            return loader(path, key)
        except IOError:
            pass

    raise IOError(f'Could not load file {path} (key = {key})')
Example #9
    def __init__(cls, name, bases, namespace):
        '''I copy my class docstring if deriving class has none. I tell base classes when I derive
        a new class from them. I publish a new contract type for each new class I create.
        '''
        doc = namespace.get("__doc__", None)
        if not doc:
            for base in cls.__mro__[1:]:
                if base.__doc__:
                    doc = base.__doc__
                    break
        cls.__doc__ = doc

        # monkey a new contract into the decorator module so checking for that type at runtime can work
        dname = (cls.__module__ + '.' + name).replace('__main__.', 'main.').replace('.', '_')
        if dname not in decorators.__dict__:
            decorators.__dict__[dname] = contracts.new_contract(dname, lambda x: isinstance(x, cls))

        # all bases except object get the derived class' name appended
        for base in [b for b in bases if b != object]:
            derived = cls
            # mangle the name to the base scope
            attribute = '_%s__implementors' % base.__name__
            if hasattr(base, attribute):
                getattr(base, attribute).append(derived)
            else:
                setattr(base, attribute, [derived])
        cls.logger = logger.getLogger('{}.{}'.format(cls.__module__.replace('__main__', 'pymor'), name))
        abc.ABCMeta.__init__(cls, name, bases, namespace)
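A minimal sketch of the bookkeeping performed above, assuming the metaclass is importable as `UberMeta` (assumed location; the class names below are made up). Each subclass gets appended to a name-mangled `__implementors` list on its bases, and a missing docstring is copied from the MRO:

from pymor.core.interfaces import UberMeta  # assumption: the metaclass lives here

class Base(metaclass=UberMeta):
    """I am documented; undocumented subclasses inherit this docstring."""

class Derived(Base):
    pass

assert Base._Base__implementors == [Derived]  # recorded by the metaclass
assert Derived.__doc__ == Base.__doc__        # docstring copied from Base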
Example #10
 def add_class(self, cls, wrapped_cls):
     self.wrapped_classes[cls] = wrapped_cls
     if hasattr(cls, 'type_this'):
         try:
             self.wrapped_classes_by_type_this[cls.type_this()] = wrapped_cls
         except TypeError:
             logger = getLogger('dune.pymor.core')
             logger.warn('Could not call type_this on {}. (Not a static method?)'.format(cls.__name__))
Example #11
 def clear(self):
     # Try to safely delete all cache entries, even if another process
     # accesses the same region.
     self.bytes_written = 0
     conn = self.conn
     c = conn.cursor()
     c.execute('SELECT id, filename FROM entries ORDER BY id ASC')
     entries = c.fetchall()
     if entries:
         ids_to_delete, files_to_delete = zip(*entries)
         c.execute('DELETE FROM entries WHERE id in ({})'.format(','.join(map(str, ids_to_delete))))
         conn.commit()
         path = self.path
         for filename in files_to_delete:
             try:
                 os.unlink(os.path.join(path, filename))
             except OSError:
                 from pymor.core.logger import getLogger
                 getLogger('pymor.core.cache.SQLiteRegion').warn('Cannot delete cache entry ' + filename)
Example #12
 def update(self, defaults, type='user'):
     assert type in ('user', 'file')
     import pymor.core.interfaces
     if pymor.core.interfaces.ImmutableMeta.sids_created:
         from pymor.core.logger import getLogger
         getLogger('pymor.core.defaults').warn(
             'Changing defaults after calculation of the first state id. '
             + '(see pymor.core.defaults for more information.)')
     for k, v in defaults.items():
         self._data[k][type] = v
         func = self._data[k].get('func', None)
         if func:
             argname = k.split('.')[-1]
             func._defaultsdict[argname] = v
             argspec = inspect.getargspec(func)
             argind = argspec.args.index(argname) - len(argspec.args)
             defaults = list(argspec.defaults)
             defaults[argind] = v
             func.__defaults__ = tuple(defaults)
     self._calc_sid()
Example #13
 def set(self, key, value):
     fd, file_path = tempfile.mkstemp('.dat', _safe_filename(datetime.datetime.now().isoformat()[:-7]) + '-', self.path)
     filename = os.path.basename(file_path)
     with os.fdopen(fd, 'wb') as f:
         dump(value, f)
         file_size = f.tell()
     conn = self.conn
     c = conn.cursor()
     try:
         c.execute("INSERT INTO entries(key, filename, size) VALUES ('{}', '{}', {})"
                   .format(key, filename, file_size))
         conn.commit()
     except sqlite3.IntegrityError:
         conn.commit()
         from pymor.core.logger import getLogger
         getLogger('pymor.core.cache.SQLiteRegion').warn('Key already present in cache region, ignoring.')
         os.unlink(file_path)
     self.bytes_written += file_size
     if self.bytes_written >= 0.1 * self.max_size:
         self.housekeeping()
Example #14
 def colormap_texture(name='viridis'):
     resolution = min(gl.GL_MAX_TEXTURE_SIZE, 1024)
     colormap_id = gl.glGenTextures(1)
     gl.glBindTexture(gl.GL_TEXTURE_1D, colormap_id)
     gl.glTexParameteri(gl.GL_TEXTURE_1D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST)
     gl.glTexParameteri(gl.GL_TEXTURE_1D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST)
     gl.glTexParameteri(gl.GL_TEXTURE_1D, gl.GL_TEXTURE_WRAP_S, gl.GL_CLAMP_TO_EDGE)
     colormap = np.empty((resolution, 4), dtype='f4')
     from matplotlib.pyplot import get_cmap
     try:
         cmap = get_cmap(name)
     except ValueError:
         from pymor.core.logger import getLogger
         # this is our default which might not exist for older matplotlib so a warning would be annoying
         if name != 'viridis':
             getLogger('pymor.gui.gl.colormap_texture').warn('Unknown colormap {}, using default colormap'.format(name))
         cmap = get_cmap()
     colormap[:] = cmap(np.linspace(0., 1., resolution))
     gl.glTexImage1D(gl.GL_TEXTURE_1D, 0, gl.GL_RGBA, resolution, 0, gl.GL_RGBA, gl.GL_FLOAT, colormap)
     gl.glBindTexture(gl.GL_TEXTURE_1D, 0)
     return colormap_id
Example #15
 def __new__(cls, classname, bases, classdict):
     c = UberMeta.__new__(cls, classname, bases, classdict)
     init_arguments = c._init_arguments
     try:
         for a in c.sid_ignore:
             if a not in init_arguments and a not in ImmutableMeta.init_arguments_never_warn:
                 raise ValueError(a)
     except ValueError as e:
         # The _logger attribute of our new class has not been initialized yet, so create
         # our own logger.
         l = logger.getLogger('{}.{}'.format(c.__module__.replace('__main__', 'pymor'), classname))
         l.warn('sid_ignore contains "{}" which is not an __init__ argument!'.format(e))
     return c
Example #16
def _import_all(package_name='pymor'):

    package = __import__(package_name)

    if hasattr(package, '__path__'):
        def onerror(name):
            from pymor.core.logger import getLogger
            logger = getLogger('pymor.core.defaults._import_all')
            logger.warn('Failed to import ' + name)

        for p in pkgutil.walk_packages(package.__path__, package_name + '.', onerror=onerror):
            try:
                __import__(p[1])
            except ImportError:
                from pymor.core.logger import getLogger
                logger = getLogger('pymor.core.defaults._import_all')
                logger.warn('Failed to import ' + p[1])
Example #17
    def check_consistency(self, delete=False):
        self.import_all()
        from pymor.core.logger import getLogger
        logger = getLogger('pymor.core.defaults')
        keys_to_delete = []

        for k, v in self._data.items():
            if ('user' in v or 'file' in v) and 'code' not in v:
                keys_to_delete.append(k)

        if delete:
            for k in keys_to_delete:
                del self._data[k]

        for k in keys_to_delete:
            logger.warn('Undefined default value: ' + k + (' (deleted)' if delete else ''))

        return len(keys_to_delete) > 0
Example #18
    def __init__(cls, name, bases, namespace):
        """Metaclass of :class:`BasicInterface`.

        I tell base classes when I derive a new class from them. I create a logger
        for each class I create. I add an `init_args` attribute to the class.
        """

        # all bases except object get the derived class' name appended
        for base in [b for b in bases if b != object]:
            derived = cls
            # mangle the name to the base scope
            attribute = '_%s__implementors' % base.__name__
            if hasattr(base, attribute):
                getattr(base, attribute).append(derived)
            else:
                setattr(base, attribute, [derived])
        cls._logger = logger.getLogger('{}.{}'.format(cls.__module__.replace('__main__', 'pymor'), name))
        abc.ABCMeta.__init__(cls, name, bases, namespace)
Example #19
def testDump(basicinterface_subclass):
    try:
        obj = basicinterface_subclass()
        assert isinstance(obj, basicinterface_subclass)
        if issubclass(basicinterface_subclass, core.Unpicklable):
            return
    except TypeError as e:
        logger = getLogger('pymortests.core.pickling')
        logger.debug('PicklingError: Not testing {} because its init failed: {}'.format(basicinterface_subclass,
                                                                                        str(e)))
        return

    with tempfile.NamedTemporaryFile(mode='wb', delete=False) as dump_file:
        core.dump(obj, dump_file)
    with open(dump_file.name, 'rb') as f:
        unpickled = core.load(f)
    assert obj.__class__ == unpickled.__class__
    os.unlink(dump_file.name)
Example #20
def _import_all(package_name="pymor"):

    package = importlib.import_module(package_name)

    if hasattr(package, "__path__"):

        def onerror(name):
            from pymor.core.logger import getLogger

            logger = getLogger("pymor.core.defaults._import_all")
            logger.warn("Failed to import " + name)

        for p in pkgutil.walk_packages(package.__path__, package_name + ".", onerror=onerror):
            try:
                importlib.import_module(p[1])
            except ImportError:
                from pymor.core.logger import getLogger

                logger = getLogger("pymor.core.defaults._import_all")
                logger.warn("Failed to import " + p[1])
Example #21
def _parse_options(options, default_options, default_solver, default_least_squares_solver, least_squares):
    if options is None:
        options = default_options[default_least_squares_solver] if least_squares else default_options[default_solver]
    elif isinstance(options, str):
        options = default_options[options]
    else:
        assert 'type' in options and options['type'] in default_options \
            and options.keys() <= default_options[options['type']].keys()
        user_options = options
        options = default_options[user_options['type']]
        options.update(user_options)

    if least_squares != ('least_squares' in options['type']):
        logger = getLogger('foo')
        if least_squares:
            logger.warning('Non-least squares solver selected for least squares problem.')
        else:
            logger.warning('Least squares solver selected for non-least squares problem.')

    return options
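A hedged usage sketch of the option resolution above, assuming default options as produced by a solver-options factory such as `lyap_lrcf_solver_options` (used in the `lradi` example further below); a user-supplied dict only needs to name a valid `type` and may override a subset of its keys:

opts = _parse_options({'type': 'lradi', 'tol': 1e-12},
                      lyap_lrcf_solver_options(), 'lradi', None, False)
assert opts['type'] == 'lradi' and opts['tol'] == 1e-12  # user value wins over default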
Example #22
def load_gmsh(gmsh_file):
    """Parse a Gmsh file and create a corresponding :class:`GmshGrid` and :class:`GmshBoundaryInfo`.

    Parameters
    ----------
    gmsh_file
        File handle of the Gmsh MSH-file.

    Returns
    -------
    grid
        The generated :class:`GmshGrid`.
    boundary_info
        The generated :class:`GmshBoundaryInfo`.
    """
    logger = getLogger("pymor.grids.gmsh.load_gmsh")

    logger.info("Parsing gmsh file ...")
    tic = time.time()
    sections = _parse_gmsh_file(gmsh_file)
    toc = time.time()
    t_parse = toc - tic

    logger.info("Create GmshGrid ...")
    tic = time.time()
    grid = GmshGrid(sections)
    toc = time.time()
    t_grid = toc - tic

    logger.info("Create GmshBoundaryInfo ...")
    tic = time.time()
    bi = GmshBoundaryInfo(grid, sections)
    toc = time.time()
    t_bi = toc - tic

    logger.info(
        "Parsing took {} s; Grid creation took {} s; BoundaryInfo creation took {} s".format(t_parse, t_grid, t_bi)
    )

    return grid, bi
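A brief usage sketch for `load_gmsh`; the filename is hypothetical and `boundary_types` is assumed from the |BoundaryInfo| interface:

with open('domain.msh') as msh:
    grid, bi = load_gmsh(msh)
print(grid)               # the generated GmshGrid
print(bi.boundary_types)  # boundary types found in the file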
Example #23
def new_parallel_pool(ipython_num_engines=None, ipython_profile=None, allow_mpi=True):
    """Creates a new default |WorkerPool|.

    If `ipython_num_engines` or `ipython_profile` is provided as an argument or set as
    a |default|, an :class:`~pymor.parallel.ipython.IPythonPool` |WorkerPool| will
    be created using the given parameters via the `ipcluster` script.

    Otherwise, when `allow_mpi` is `True` and an MPI parallel run is detected,
    an :class:`~pymor.parallel.mpi.MPIPool` |WorkerPool| will be created.

    Otherwise, a sequential run is assumed and
    :attr:`pymor.parallel.dummy.dummy_pool <pymor.parallel.dummy.DummyPool>`
    is returned.
    """

    global _pool
    if _pool:
        logger = getLogger('pymor.parallel.default.new_parallel_pool')
        logger.warn('new_parallel_pool already called; returning old pool (this might not be what you want).')
        return _pool[1]
    if ipython_num_engines or ipython_profile:
        from pymor.parallel.ipython import new_ipcluster_pool
        nip = new_ipcluster_pool(profile=ipython_profile, num_engines=ipython_num_engines)
        pool = nip.__enter__()
        _pool = ('ipython', pool, nip)
        return pool
    elif allow_mpi:
        from pymor.tools import mpi
        if mpi.parallel:
            from pymor.parallel.mpi import MPIPool
            pool = MPIPool()
            _pool = ('mpi', pool)
            return pool
        else:
            _pool = ('dummy', dummy_pool)
            return dummy_pool
    else:
        _pool = ('dummy', dummy_pool)
        return dummy_pool
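A short usage sketch: in a sequential run the call below simply returns `dummy_pool`, so the same code works unchanged under MPI or IPython parallelization; `square` is a placeholder payload (module-level, hence picklable for remote workers):

def square(x):
    return x * x

pool = new_parallel_pool()
results = pool.map(square, list(range(10)))  # [0, 1, 4, ..., 81]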
Example #24
    def __init__(cls, name, bases, namespace):
        '''Metaclass of :class:`BasicInterface`.

        I tell base classes when I derive a new class from them. I publish
        a new contract type for each new class I create. I create a logger
        for each class I create. I add an `init_args` attribute to the class.
        '''
        # monkey a new contract into the decorator module so checking for that type at runtime can work
        if HAVE_CONTRACTS:
            dname = (cls.__module__ + '.' + name).replace('__main__.', 'main.').replace('.', '_')
            if dname not in decorators.__dict__:
                decorators.__dict__[dname] = contracts.new_contract(dname, lambda x: isinstance(x, cls))

        # all bases except object get the derived class' name appended
        for base in [b for b in bases if b != object]:
            derived = cls
            # mangle the name to the base scope
            attribute = '_%s__implementors' % base.__name__
            if hasattr(base, attribute):
                getattr(base, attribute).append(derived)
            else:
                setattr(base, attribute, [derived])
        cls._logger = logger.getLogger('{}.{}'.format(cls.__module__.replace('__main__', 'pymor'), name))
        abc.ABCMeta.__init__(cls, name, bases, namespace)
Example #25
 def onerror(name):
     from pymor.core.logger import getLogger
     logger = getLogger('pymor.core.defaults._import_all')
     logger.warn('Failed to import ' + name)
Example #26
def reduction_error_analysis(reduced_discretization,
                             discretization=None,
                             reconstructor=None,
                             test_mus=10,
                             basis_sizes=0,
                             random_seed=None,
                             estimator=True,
                             condition=False,
                             error_norms=(),
                             error_norm_names=None,
                             estimator_norm_index=0,
                             custom=(),
                             plot=False,
                             plot_custom_logarithmic=True,
                             pool=dummy_pool):
    """Analyze the model reduction error.

    The maximum model reduction error is estimated by solving the reduced
    |Discretization| for given random |Parameters|.

    Parameters
    ----------
    reduced_discretization
        The reduced |Discretization|.
    discretization
        The high-dimensional |Discretization|. Must be specified if
        `error_norms` are given.
    reconstructor
        The reconstructor for `reduced_discretization`. Must be specified
        if `error_norms` are given.
    test_mus
        Either a list of |Parameters| to compute the errors for, or
        the number of parameters which are sampled randomly from
        `parameter_space` (if given) or `reduced_discretization.parameter_space`.
    basis_sizes
        Either a list of reduced basis dimensions to consider, or
        the number of dimensions (which are then selected equidistantly,
        always including the maximum reduced space dimension).
        The dimensions are input for `~pymor.reductors.basic.reduce_to_subbasis`
        to quickly obtain smaller reduced |Discretizations| from
        `reduced_discretization`.
    random_seed
        If `test_mus` is a number, use this value as random seed
        for drawing the |Parameters|.
    estimator
        If `True`, evaluate the error estimator of `reduced_discretization`
        on the test |Parameters|.
    condition
        If `True`, compute the condition of the reduced system matrix
        for the given test |Parameters|. (Can only be specified if
        `reduced_discretization` is an instance of |StationaryDiscretization|
        and `reduced_discretization.operator` is linear.)
    error_norms
        List of norms in which to compute the model reduction error.
    error_norm_names
        Names of the norms given by `error_norms`. If `None`, the
        `name` attributes of the given norms are used.
    estimator_norm_index
        When `estimator` is `True` and `error_norms` are specified,
        this is the index of the norm in `error_norms` w.r.t. which
        to compute the effectivity of the estimator.
    custom
        List of custom functions which are evaluated for each test |Parameter|
        and basis size. The function must have the signature ::

            def custom_value(reduced_discretization, discretization,
                             reconstructor, mu, dim):
                pass

    plot
        If `True`, generate a plot of the computed quantities w.r.t.
        the basis size.
    plot_custom_logarithmic
        If `True`, use a logarithmic y-axis to plot the computed custom
        values.
    pool
        If not `None`, the |WorkerPool| to use for parallelization.

    Returns
    -------
    Dict with the following fields:

        :mus:                    The test |Parameters| which have been considered.
        :basis_sizes:            The reduced basis dimensions which have been considered.
        :norms:                  |Array| of the norms of the high-dimensional solutions
                                 w.r.t. all given test |Parameters|, reduced basis
                                 dimensions and norms in `error_norms`.
                                 (Only present when `error_norms` has been specified.)
        :max_norms:              Maxima of `norms` over the given test |Parameters|.
        :max_norm_mus:           |Parameters| corresponding to `max_norms`.
        :errors:                 |Array| of the norms of the model reduction errors
                                 w.r.t. all given test |Parameters|, reduced basis
                                 dimensions and norms in `error_norms`.
                                 (Only present when `error_norms` has been specified.)
        :max_errors:             Maxima of `errors` over the given test |Parameters|.
        :max_error_mus:          |Parameters| corresponding to `max_errors`.
        :rel_errors:             `errors` divided by `norms`.
                                 (Only present when `error_norms` has been specified.)
        :max_rel_errors:         Maxima of `rel_errors` over the given test |Parameters|.
        :max_rel_error_mus:      |Parameters| corresponding to `max_rel_errors`.
        :error_norm_names:       Names of the given `error_norms`.
                                 (Only present when `error_norms` has been specified.)
        :estimates:              |Array| of the model reduction error estimates
                                 w.r.t. all given test |Parameters| and reduced basis
                                 dimensions.
                                 (Only present when `estimator` is `True`.)
        :max_estimates:          Maxima of `estimates` over the given test |Parameters|.
        :max_estimate_mus:       |Parameters| corresponding to `max_estimates`.
        :effectivities:          `errors` divided by `estimates`.
                                 (Only present when `estimator` is `True` and `error_norms`
                                 has been specified.)
        :min_effectivities:      Minima of `effectivities` over the given test |Parameters|.
        :min_effectivity_mus:    |Parameters| corresponding to `min_effectivities`.
        :max_effectivities:      Maxima of `effectivities` over the given test |Parameters|.
        :max_effectivity_mus:    |Parameters| corresponding to `max_effectivities`.
        :conditions:             |Array| of the reduced system matrix conditions
                                 w.r.t. all given test |Parameters| and reduced basis
                                 dimensions.
                                 (Only present when `condition` is `True`.)
        :max_conditions:         Maxima of `conditions` over the given test |Parameters|.
        :max_condition_mus:      |Parameters| corresponding to `max_conditions`.
        :custom_values:          |Array| of custom function evaluations
                                 w.r.t. all given test |Parameters|, reduced basis
                                 dimensions and functions in `custom`.
                                 (Only present when `custom` has been specified.)
        :max_custom_values:      Maxima of `custom_values` over the given test |Parameters|.
        :max_custom_values_mus:  |Parameters| corresponding to `max_custom_values`.
        :time:                   Time (in seconds) needed for the error analysis.
        :summary:                String containing a summary of all computed quantities for
                                 the largest (last) considered basis size.
        :figure:                 The figure containing the generated plots.
                                 (Only present when `plot` is `True`.)
    """

    assert not error_norms or (discretization and reconstructor)
    assert error_norm_names is None or len(error_norm_names) == len(error_norms)
    assert not condition \
        or isinstance(reduced_discretization, StationaryDiscretization) and reduced_discretization.operator.linear

    logger = getLogger('pymor.algorithms.error')
    if pool is None or pool is dummy_pool:
        pool = dummy_pool
    else:
        logger.info('Using pool of {} workers for error analysis'.format(
            len(pool)))

    tic = time.time()

    d, rd, rc = discretization, reduced_discretization, reconstructor

    if isinstance(test_mus, Number):
        test_mus = reduced_discretization.parameter_space.sample_randomly(
            test_mus, seed=random_seed)
    if isinstance(basis_sizes, Number):
        if basis_sizes == 1:
            basis_sizes = [rd.solution_space.dim]
        else:
            if basis_sizes == 0:
                basis_sizes = rd.solution_space.dim + 1
            basis_sizes = min(rd.solution_space.dim + 1, basis_sizes)
            basis_sizes = np.linspace(0, rd.solution_space.dim,
                                      basis_sizes).astype(int)
    if error_norm_names is None:
        error_norm_names = tuple(norm.name for norm in error_norms)

    norms, estimates, errors, conditions, custom_values = \
        list(zip(*pool.map(_compute_errors, test_mus, d=d, rd=rd, rc=rc,
                           estimator=estimator, error_norms=error_norms,
                           condition=condition, custom=custom, basis_sizes=basis_sizes)))
    print()

    result = {}
    result['mus'] = test_mus = np.array(test_mus)
    result['basis_sizes'] = basis_sizes

    summary = []
    summary.append(('number of samples', str(len(test_mus))))

    if error_norms:
        result['norms'] = norms = np.array(norms)
        result['max_norms'] = max_norms = np.max(norms, axis=0)
        result['max_norm_mus'] = max_norm_mus = test_mus[np.argmax(norms,
                                                                   axis=0)]
        result['errors'] = errors = np.array(errors)
        result['max_errors'] = max_errors = np.max(errors, axis=0)
        result['max_error_mus'] = max_error_mus = test_mus[np.argmax(errors,
                                                                     axis=0)]
        result['rel_errors'] = rel_errors = errors / norms[:, :, np.newaxis]
        result['max_rel_errors'] = np.max(rel_errors, axis=0)
        result['max_rel_error_mus'] = test_mus[np.argmax(rel_errors, axis=0)]
        for name, norm, norm_mu, error, error_mu in zip(
                error_norm_names, max_norms, max_norm_mus, max_errors[:, -1],
                max_error_mus[:, -1]):
            summary.append(('maximum {}-norm'.format(name),
                            '{:.7e} (mu = {})'.format(norm, norm_mu)))
            summary.append(('maximum {}-error'.format(name),
                            '{:.7e} (mu = {})'.format(error, error_mu)))
        result['error_norm_names'] = error_norm_names

    if estimator:
        result['estimates'] = estimates = np.array(estimates)
        result['max_estimates'] = max_estimates = np.max(estimates, axis=0)
        result['max_estimate_mus'] = max_estimate_mus = test_mus[np.argmax(
            estimates, axis=0)]
        summary.append(('maximum estimated error',
                        '{:.7e} (mu = {})'.format(max_estimates[-1],
                                                  max_estimate_mus[-1])))

    if estimator and error_norms:
        result['effectivities'] = effectivities = \
            errors[:, estimator_norm_index, :] / estimates
        result['max_effectivities'] = max_effectivities = np.max(effectivities,
                                                                 axis=0)
        result['max_effectivity_mus'] = max_effectivity_mus = test_mus[
            np.argmax(effectivities, axis=0)]
        result['min_effectivities'] = min_effectivities = np.min(effectivities,
                                                                 axis=0)
        result['min_effectivity_mus'] = min_effectivity_mus = test_mus[
            np.argmin(effectivities, axis=0)]
        summary.append(('minimum estimator effectivity',
                        '{:.7e} (mu = {})'.format(min_effectivities[-1],
                                                  min_effectivity_mus[-1])))
        summary.append(('maximum estimator effectivity',
                        '{:.7e} (mu = {})'.format(max_effectivities[-1],
                                                  max_effectivity_mus[-1])))

    if condition:
        result['conditions'] = conditions = np.array(conditions)
        result['max_conditions'] = max_conditions = np.max(conditions, axis=0)
        result['max_condition_mus'] = max_condition_mus = test_mus[np.argmax(
            conditions, axis=0)]
        summary.append(('maximum system matrix condition',
                        '{:.7e} (mu = {})'.format(max_conditions[-1],
                                                  max_condition_mus[-1])))

    if custom:
        result['custom_values'] = custom_values = np.array(custom_values)
        result['max_custom_values'] = max_custom_values = np.max(custom_values,
                                                                 axis=0)
        result['max_custom_values_mus'] = max_custom_values_mus = test_mus[
            np.argmax(custom_values, axis=0)]
        for i, (value, mu) in enumerate(
                zip(max_custom_values[:, -1], max_custom_values_mus[:, -1])):
            summary.append(('maximum custom value {}'.format(i),
                            '{:.7e} (mu = {})'.format(value, mu)))

    toc = time.time()
    result['time'] = toc - tic
    summary.append(('elapsed time', str(toc - tic)))

    summary_fields, summary_values = list(zip(*summary))
    summary_field_width = np.max(list(map(len, summary_fields))) + 2
    summary_lines = [
        '    {:{}} {}'.format(field + ':', summary_field_width, value)
        for field, value in zip(summary_fields, summary_values)
    ]
    summary = 'Stochastic error estimation:\n' + '\n'.join(summary_lines)
    result['summary'] = summary

    if plot:
        import matplotlib.pyplot as plt
        fig = plt.figure()
        num_plots = (int(bool(error_norms) or estimator) +
                     int(bool(error_norms) and estimator) + int(condition) +
                     int(bool(custom)))
        current_plot = 1

        if bool(error_norms) or estimator:
            ax = fig.add_subplot(1, num_plots, current_plot)
            legend = []
            if error_norms:
                for name, errors in zip(error_norm_names, max_errors):
                    ax.semilogy(basis_sizes, errors)
                    legend.append(name)
            if estimator:
                ax.semilogy(basis_sizes, max_estimates)
                legend.append('estimator')
            ax.legend(legend)
            ax.set_title('maximum errors')
            current_plot += 1

        if bool(error_norms) and estimator:
            ax = fig.add_subplot(1, num_plots, current_plot)
            ax.semilogy(basis_sizes, min_effectivities)
            ax.semilogy(basis_sizes, max_effectivities)
            ax.legend(('min', 'max'))
            ax.set_title('estimator effectivities')
            current_plot += 1

        if condition:
            ax = fig.add_subplot(1, num_plots, current_plot)
            ax.semilogy(basis_sizes, max_conditions)
            ax.set_title('maximum condition')
            current_plot += 1

        if custom:
            ax = fig.add_subplot(1, num_plots, current_plot)
            legend = []
            for i, values in enumerate(custom_values):
                if plot_custom_logarithmic:
                    ax.semilogy(basis_sizes, values)
                else:
                    ax.plot(basis_sizes, values)
                legend.append('value ' + str(i))
            ax.legend(legend)
            ax.set_title('maximum custom values')
            current_plot += 1

        result['figure'] = fig

    return result
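A hedged usage sketch of the analysis above; `rd`, `d` and `rc` stand for a reduced |Discretization|, its high-dimensional counterpart and the corresponding reconstructor from some earlier reduction step, and `d.h1_norm` is assumed to exist (all names hypothetical):

results = reduction_error_analysis(rd, discretization=d, reconstructor=rc,
                                   test_mus=20, basis_sizes=5,
                                   error_norms=(d.h1_norm,), estimator=True,
                                   plot=True)
print(results['summary'])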
Example #27
    def solve_ricc_lrcf(A,
                        E,
                        B,
                        C,
                        R=None,
                        S=None,
                        trans=False,
                        options=None,
                        default_solver=None):
        """Compute an approximate low-rank solution of a Riccati equation.

        See :func:`pymor.algorithms.riccati.solve_ricc_lrcf` for a
        general description.

        This function uses `pymess.dense_nm_gmpcare` and `pymess.lrnm`.
        For both methods,
        :meth:`~pymor.vectorarrays.interfaces.VectorArrayInterface.to_numpy`
        and
        :meth:`~pymor.vectorarrays.interfaces.VectorSpaceInterface.from_numpy`
        need to be implemented for `A.source`.
        Additionally, since `dense_nm_gmpcare` is a dense solver, it
        expects :func:`~pymor.algorithms.to_matrix.to_matrix` to work
        for A and E.

        If the solver is not specified using the options or
        default_solver arguments, `dense_nm_gmpcare` is used for small
        problems (smaller than defined with
        :func:`~pymor.algorithms.lyapunov.mat_eqn_sparse_min_size`) and
        `lrnm` for large problems.

        Parameters
        ----------
        A
            The non-parametric |Operator| A.
        E
            The non-parametric |Operator| E or `None`.
        B
            The operator B as a |VectorArray| from `A.source`.
        C
            The operator C as a |VectorArray| from `A.source`.
        R
            The operator R as a 2D |NumPy array| or `None`.
        S
            The operator S as a |VectorArray| from `A.source` or `None`.
        trans
            Whether the first |Operator| in the Riccati equation is
            transposed.
        options
            The solver options to use (see
            :func:`ricc_lrcf_solver_options`).
        default_solver
            Default solver to use (`pymess_lrnm`,
            `pymess_dense_nm_gmpcare`).
            If `None`, choose the solver depending on the dimension of `A`.

        Returns
        -------
        Z
            Low-rank Cholesky factor of the Riccati equation solution,
            |VectorArray| from `A.source`.
        """

        _solve_ricc_check_args(A, E, B, C, R, S, trans)
        if default_solver is None:
            default_solver = ('pymess_lrnm' if A.source.dim >= mat_eqn_sparse_min_size()
                              else 'pymess_dense_nm_gmpcare')
        options = _parse_options(options, ricc_lrcf_solver_options(),
                                 default_solver, None, False)

        if options['type'] == 'pymess_dense_nm_gmpcare':
            X = _call_pymess_dense_nm_gmpare(A,
                                             E,
                                             B,
                                             C,
                                             R,
                                             S,
                                             trans=trans,
                                             options=options['opts'],
                                             plus=False,
                                             method_name='solve_ricc_lrcf')
            Z = _chol(X)
        elif options['type'] == 'pymess_lrnm':
            if S is not None:
                raise NotImplementedError
            if R is not None:
                import scipy.linalg as spla
                Rc = spla.cholesky(R)  # R = Rc^T * Rc
                Rci = spla.solve_triangular(Rc, np.eye(Rc.shape[0]))  # R^{-1} = Rci * Rci^T
                if not trans:
                    C = C.lincomb(Rci.T)  # C <- Rci^T * C = (C^T * Rci)^T
                else:
                    B = B.lincomb(Rci.T)  # B <- B * Rci
            opts = options['opts']
            opts.type = pymess.MESS_OP_NONE if not trans else pymess.MESS_OP_TRANSPOSE
            eqn = RiccatiEquation(opts, A, E, B, C)
            Z, status = pymess.lrnm(eqn, opts)
            relres = status.res2_norm / status.res2_0
            if relres > opts.adi.res2_tol:
                logger = getLogger('pymor.bindings.pymess.solve_ricc_lrcf')
                logger.warning(
                    f'Desired relative residual tolerance was not achieved '
                    f'({relres:e} > {opts.adi.res2_tol:e}).')
        else:
            raise ValueError(
                f'Unexpected Riccati equation solver ({options["type"]}).')

        return A.source.from_numpy(Z.T)
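A hedged usage sketch; `A`, `E`, `B`, `C` are the operators/|VectorArrays| described in the docstring (hypothetical here). Forcing the dense solver via `options`:

Z = solve_ricc_lrcf(A, E, B, C, trans=True,
                    options={'type': 'pymess_dense_nm_gmpcare'})
# Z is a low-rank factor, i.e. the Riccati solution is approximated by Z Z^T.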
Example #28
def solve_lyap_lrcf(A, E, B, trans=False, options=None):
    """Compute an approximate low-rank solution of a Lyapunov equation.

    See :func:`pymor.algorithms.lyapunov.solve_lyap_lrcf` for a
    general description.

    This function uses the low-rank ADI iteration as described in
    Algorithm 4.3 in [PK16]_.

    Parameters
    ----------
    A
        The non-parametric |Operator| A.
    E
        The non-parametric |Operator| E or `None`.
    B
        The operator B as a |VectorArray| from `A.source`.
    trans
        Whether the first |Operator| in the Lyapunov equation is
        transposed.
    options
        The solver options to use (see
        :func:`lyap_lrcf_solver_options`).

    Returns
    -------
    Z
        Low-rank Cholesky factor of the Lyapunov equation solution,
        |VectorArray| from `A.source`.
    """

    _solve_lyap_lrcf_check_args(A, E, B, trans)
    options = _parse_options(options, lyap_lrcf_solver_options(), 'lradi',
                             None, False)
    logger = getLogger('pymor.algorithms.lradi.solve_lyap_lrcf')

    shift_options = options['shift_options'][options['shifts']]
    if shift_options['type'] == 'projection_shifts':
        init_shifts = projection_shifts_init
        iteration_shifts = projection_shifts
    else:
        raise ValueError('Unknown lradi shift strategy.')

    if E is None:
        E = IdentityOperator(A.source)

    Z = A.source.empty(reserve=len(B) * options['maxiter'])
    W = B.copy()

    j = 0
    j_shift = 0
    shifts = init_shifts(A, E, W, shift_options)
    res = np.linalg.norm(W.gramian(), ord=2)
    init_res = res
    Btol = res * options['tol']

    while res > Btol and j < options['maxiter']:
        if shifts[j_shift].imag == 0:
            AaE = A + shifts[j_shift].real * E
            if not trans:
                V = AaE.apply_inverse(W)
                W -= E.apply(V) * (2 * shifts[j_shift].real)
            else:
                V = AaE.apply_inverse_adjoint(W)
                W -= E.apply_adjoint(V) * (2 * shifts[j_shift].real)
            Z.append(V * np.sqrt(-2 * shifts[j_shift].real))
            j += 1
        else:
            AaE = A + shifts[j_shift] * E
            gs = -4 * shifts[j_shift].real
            d = shifts[j_shift].real / shifts[j_shift].imag
            if not trans:
                V = AaE.apply_inverse(W)
                W += E.apply(V.real + V.imag * d) * gs
            else:
                V = AaE.apply_inverse_adjoint(W).conj()
                W += E.apply_adjoint(V.real + V.imag * d) * gs
            g = np.sqrt(gs)
            Z.append((V.real + V.imag * d) * g)
            Z.append(V.imag * (g * np.sqrt(d**2 + 1)))
            j += 2
        j_shift += 1
        res = np.linalg.norm(W.gramian(), ord=2)
        logger.info(f'Relative residual at step {j}: {res/init_res:.5e}')
        if j_shift >= shifts.size:
            shifts = iteration_shifts(A, E, V, shifts)
            j_shift = 0

    if res > Btol:
        logger.warning(
            f'Prescribed relative residual tolerance was not achieved '
            f'({res/init_res:e} > {options["tol"]:e}) after '
            f'{options["maxiter"]} ADI steps.')

    return Z
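A hedged usage sketch (`A`, `E`, `B` as in the docstring; names hypothetical). Since the solution is returned as a low-rank factor Z with X ≈ Z Z^T, the spectral norm of X can be estimated from the Gramian of Z:

Z = solve_lyap_lrcf(A, E, B, trans=False)
X_norm = np.linalg.norm(Z.gramian(), ord=2)  # ||Z Z^T||_2 == ||Z^T Z||_2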
Example #29
def visualize_patch(grid, U, bounding_box=([0, 0], [1, 1]), codim=2, title=None, legend=None,
                    separate_colorbars=False, rescale_colorbars=False, backend='gl', block=False, columns=2):
    """Visualize scalar data associated to a two-dimensional |Grid| as a patch plot.

    The grid's |ReferenceElement| must be the triangle or square. The data can either
    be attached to the faces or vertices of the grid.

    Parameters
    ----------
    grid
        The underlying |Grid|.
    U
        |VectorArray| of the data to visualize. If `len(U) > 1`, the data is visualized
        as a time series of plots. Alternatively, a tuple of |VectorArrays| can be
        provided, in which case a subplot is created for each entry of the tuple. The
        lengths of all arrays have to agree.
    bounding_box
        A bounding box in which the grid is contained.
    codim
        The codimension of the entities the data in `U` is attached to (either 0 or 2).
    title
        Title of the plot.
    legend
        Description of the data that is plotted. Most useful if `U` is a tuple in which
        case `legend` has to be a tuple of strings of the same length.
    separate_colorbars
        If `True`, use separate colorbars for each subplot.
    rescale_colorbars
        If `True`, rescale colorbars to data in each frame.
    backend
        Plot backend to use ('gl' or 'matplotlib').
    block
        If `True`, block execution until the plot window is closed.
    columns
        The number of columns in the visualizer GUI in case multiple plots are displayed
        at the same time.
    """
    if not config.HAVE_QT:
        raise QtMissing()

    assert backend in {'gl', 'matplotlib'}

    if backend == 'gl':
        if not config.HAVE_GL:
            logger = getLogger('pymor.discretizers.builtin.gui.qt.visualize_patch')
            logger.warning('import of PyOpenGL failed, falling back to matplotlib; rendering will be slow')
            backend = 'matplotlib'
        elif not config.HAVE_QTOPENGL:
            logger = getLogger('pymor.discretizers.builtin.gui.qt.visualize_patch')
            logger.warning('import of Qt.QtOpenGL failed, falling back to matplotlib; rendering will be slow')
            backend = 'matplotlib'
        if backend == 'matplotlib' and not config.HAVE_MATPLOTLIB:
            raise ImportError('cannot visualize: import of matplotlib failed')
    else:
        if not config.HAVE_MATPLOTLIB:
            raise ImportError('cannot visualize: import of matplotlib failed')

    # TODO extract class
    class MainWindow(PlotMainWindow):
        def __init__(self, grid, U, bounding_box, codim, title, legend, separate_colorbars, rescale_colorbars, backend):

            assert isinstance(U, VectorArray) \
                or (isinstance(U, tuple) and all(isinstance(u, VectorArray) for u in U)
                    and all(len(u) == len(U[0]) for u in U))
            U = (U.to_numpy().astype(np.float64, copy=False),) if isinstance(U, VectorArray) else \
                tuple(u.to_numpy().astype(np.float64, copy=False) for u in U)
            if isinstance(legend, str):
                legend = (legend,)
            assert legend is None or isinstance(legend, tuple) and len(legend) == len(U)
            if backend == 'gl':
                widget = GLPatchWidget
                cbar_widget = ColorBarWidget
            else:
                widget = MatplotlibPatchWidget
                cbar_widget = None
                if not separate_colorbars and len(U) > 1:
                    l = getLogger('pymor.discretizers.builtin.gui.qt.visualize_patch')
                    l.warning('separate_colorbars=False not supported for matplotlib backend')
                separate_colorbars = True

            class PlotWidget(QWidget):
                def __init__(self):
                    super().__init__()
                    if separate_colorbars:
                        if rescale_colorbars:
                            self.vmins = tuple(np.min(u[0]) for u in U)
                            self.vmaxs = tuple(np.max(u[0]) for u in U)
                        else:
                            self.vmins = tuple(np.min(u) for u in U)
                            self.vmaxs = tuple(np.max(u) for u in U)
                    else:
                        if rescale_colorbars:
                            self.vmins = (min(np.min(u[0]) for u in U),) * len(U)
                            self.vmaxs = (max(np.max(u[0]) for u in U),) * len(U)
                        else:
                            self.vmins = (min(np.min(u) for u in U),) * len(U)
                            self.vmaxs = (max(np.max(u) for u in U),) * len(U)

                    layout = QHBoxLayout()
                    plot_layout = QGridLayout()
                    self.colorbarwidgets = [cbar_widget(self, vmin=vmin, vmax=vmax) if cbar_widget else None
                                            for vmin, vmax in zip(self.vmins, self.vmaxs)]
                    plots = [widget(self, grid, vmin=vmin, vmax=vmax, bounding_box=bounding_box, codim=codim)
                             for vmin, vmax in zip(self.vmins, self.vmaxs)]
                    if legend:
                        for i, plot, colorbar, l in zip(range(len(plots)), plots, self.colorbarwidgets, legend):
                            subplot_layout = QVBoxLayout()
                            caption = QLabel(l)
                            caption.setAlignment(Qt.AlignHCenter)
                            subplot_layout.addWidget(caption)
                            if not separate_colorbars or backend == 'matplotlib':
                                subplot_layout.addWidget(plot)
                            else:
                                hlayout = QHBoxLayout()
                                hlayout.addWidget(plot)
                                if colorbar:
                                    hlayout.addWidget(colorbar)
                                subplot_layout.addLayout(hlayout)
                            plot_layout.addLayout(subplot_layout, int(i/columns), (i % columns), 1, 1)
                    else:
                        for i, plot, colorbar in zip(range(len(plots)), plots, self.colorbarwidgets):
                            if not separate_colorbars or backend == 'matplotlib':
                                plot_layout.addWidget(plot, int(i/columns), (i % columns), 1, 1)
                            else:
                                hlayout = QHBoxLayout()
                                hlayout.addWidget(plot)
                                if colorbar:
                                    hlayout.addWidget(colorbar)
                                plot_layout.addLayout(hlayout, int(i/columns), (i % columns), 1, 1)
                    layout.addLayout(plot_layout)
                    if not separate_colorbars:
                        layout.addWidget(self.colorbarwidgets[0])
                        for w in self.colorbarwidgets[1:]:
                            w.setVisible(False)
                    self.setLayout(layout)
                    self.plots = plots

                def set(self, U, ind):
                    if rescale_colorbars:
                        if separate_colorbars:
                            self.vmins = tuple(np.min(u[ind]) for u in U)
                            self.vmaxs = tuple(np.max(u[ind]) for u in U)
                        else:
                            self.vmins = (min(np.min(u[ind]) for u in U),) * len(U)
                            self.vmaxs = (max(np.max(u[ind]) for u in U),) * len(U)

                    for u, plot, colorbar, vmin, vmax in zip(U, self.plots, self.colorbarwidgets, self.vmins,
                                                              self.vmaxs):
                        plot.set(u[ind], vmin=vmin, vmax=vmax)
                        if colorbar:
                            colorbar.set(vmin=vmin, vmax=vmax)

            super().__init__(U, PlotWidget(), title=title, length=len(U[0]))
            self.grid = grid
            self.codim = codim

        def save(self):
            if not config.HAVE_PYEVTK:
                msg = QMessageBox(QMessageBox.Critical, 'Error', 'VTK output disabled. Please install pyevtk.')
                msg.exec_()
                return
            filename = QFileDialog.getSaveFileName(self, 'Save as vtk file')[0]
            base_name = filename.split('.vtu')[0].split('.vtk')[0].split('.pvd')[0]
            if base_name:
                if len(self.U) == 1:
                    write_vtk(self.grid, NumpyVectorSpace.make_array(self.U[0]), base_name, codim=self.codim)
                else:
                    for i, u in enumerate(self.U):
                        write_vtk(self.grid, NumpyVectorSpace.make_array(u), f'{base_name}-{i}',
                                  codim=self.codim)

    _launch_qt_app(lambda: MainWindow(grid, U, bounding_box, codim, title=title, legend=legend,
                                      separate_colorbars=separate_colorbars, rescale_colorbars=rescale_colorbars,
                                      backend=backend),
                   block)
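A short usage sketch; `grid` and `U` come from an earlier discretization and solve (hypothetical here):

visualize_patch(grid, U, codim=2, title='solution', legend='u',
                backend='matplotlib', block=True)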
Example #30
class TestInterface(object):
    logger = logger.getLogger(__name__)
Example #31
def solve_ricc_lrcf(A, E, B, C, R=None, S=None, trans=False, options=None):
    """Compute an approximate low-rank solution of a Riccati equation.

    See :func:`pymor.algorithms.riccati.solve_ricc_lrcf` for a
    general description.

    This function is an implementation of Algorithm 2 in [BBKS18]_.

    Parameters
    ----------
    A
        The |Operator| A.
    E
        The |Operator| E or `None`.
    B
        The operator B as a |VectorArray| from `A.source`.
    C
        The operator C as a |VectorArray| from `A.source`.
    R
        The operator R as a 2D |NumPy array| or `None`.
    S
        The operator S as a |VectorArray| from `A.source` or `None`.
    trans
        Whether the first |Operator| in the Riccati equation is
        transposed.
    options
        The solver options to use. (see
        :func:`ricc_lrcf_solver_options`)

    Returns
    -------
    Z
        Low-rank Cholesky factor of the Riccati equation solution,
        |VectorArray| from `A.source`.
    """

    _solve_ricc_check_args(A, E, B, C, None, None, trans)
    options = _parse_options(options, ricc_lrcf_solver_options(), 'lrradi',
                             None, False)
    logger = getLogger('pymor.algorithms.lrradi.solve_ricc_lrcf')

    shift_options = options['shift_options'][options['shifts']]
    if shift_options['type'] == 'hamiltonian_shifts':
        init_shifts = hamiltonian_shifts_init
        iteration_shifts = hamiltonian_shifts
    else:
        raise ValueError('Unknown lrradi shift strategy.')

    if E is None:
        E = IdentityOperator(A.source)

    if S is not None:
        raise NotImplementedError

    if R is not None:
        Rc = spla.cholesky(R)  # R = Rc^T * Rc
        Rci = spla.solve_triangular(Rc, np.eye(Rc.shape[0]))  # R^{-1} = Rci * Rci^T
        if not trans:
            C = C.lincomb(Rci.T)  # C <- Rci^T * C = (C^T * Rci)^T
        else:
            B = B.lincomb(Rci.T)  # B <- B * Rci

    if not trans:
        B, C = C, B

    Z = A.source.empty(reserve=len(C) * options['maxiter'])
    Y = np.empty((0, 0))

    K = A.source.zeros(len(B))
    RF = C.copy()

    j = 0
    j_shift = 0
    shifts = init_shifts(A, E, B, C, shift_options)

    res = np.linalg.norm(RF.gramian(), ord=2)
    init_res = res
    Ctol = res * options['tol']

    while res > Ctol and j < options['maxiter']:
        if not trans:
            AsE = A + shifts[j_shift] * E
        else:
            AsE = A + np.conj(shifts[j_shift]) * E
        if j == 0:
            if not trans:
                V = AsE.apply_inverse(RF) * np.sqrt(-2 * shifts[j_shift].real)
            else:
                V = AsE.apply_inverse_adjoint(RF) * np.sqrt(
                    -2 * shifts[j_shift].real)
        else:
            if not trans:
                LN = AsE.apply_inverse(cat_arrays([RF, K]))
            else:
                LN = AsE.apply_inverse_adjoint(cat_arrays([RF, K]))
            L = LN[:len(RF)]
            N = LN[-len(K):]
            ImBN = np.eye(len(K)) - B.dot(N)
            ImBNKL = spla.solve(ImBN, B.dot(L))
            V = (L + N.lincomb(ImBNKL.T)) * np.sqrt(-2 * shifts[j_shift].real)

        if np.imag(shifts[j_shift]) == 0:
            Z.append(V)
            VB = V.dot(B)
            Yt = np.eye(len(C)) - (VB @ VB.T) / (2 * shifts[j_shift].real)
            Y = spla.block_diag(Y, Yt)
            if not trans:
                EVYt = E.apply(V).lincomb(np.linalg.inv(Yt))
            else:
                EVYt = E.apply_adjoint(V).lincomb(np.linalg.inv(Yt))
            RF.axpy(np.sqrt(-2 * shifts[j_shift].real), EVYt)
            K += EVYt.lincomb(VB.T)
            j += 1
        else:
            Z.append(V.real)
            Z.append(V.imag)
            Vr = V.real.dot(B)
            Vi = V.imag.dot(B)
            sa = np.abs(shifts[j_shift])
            F1 = np.vstack((-shifts[j_shift].real / sa * Vr -
                            shifts[j_shift].imag / sa * Vi,
                            shifts[j_shift].imag / sa * Vr -
                            shifts[j_shift].real / sa * Vi))
            F2 = np.vstack((Vr, Vi))
            F3 = np.vstack((shifts[j_shift].imag / sa * np.eye(len(C)),
                            shifts[j_shift].real / sa * np.eye(len(C))))
            Yt = spla.block_diag(np.eye(len(C)), 0.5 * np.eye(len(C))) \
                - (F1 @ F1.T) / (4 * shifts[j_shift].real)  \
                - (F2 @ F2.T) / (4 * shifts[j_shift].real)  \
                - (F3 @ F3.T) / 2
            Y = spla.block_diag(Y, Yt)
            EVYt = E.apply(cat_arrays([V.real,
                                       V.imag])).lincomb(np.linalg.inv(Yt))
            RF.axpy(np.sqrt(-2 * shifts[j_shift].real), EVYt[:len(C)])
            K += EVYt.lincomb(F2.T)
            j += 2
        j_shift += 1
        res = np.linalg.norm(RF.gramian(), ord=2)
        logger.info(f'Relative residual at step {j}: {res/init_res:.5e}')
        if j_shift >= shifts.size:
            shifts = iteration_shifts(A, E, B, RF, K, Z, shift_options)
            j_shift = 0
    # transform solution to lrcf
    cf = spla.cholesky(Y)
    Z_cf = Z.lincomb(spla.solve_triangular(cf, np.eye(len(Z))).T)
    return Z_cf
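
A hypothetical usage sketch (not part of the original example): calling the solver above for a small stable system, assuming pyMOR's NumPy-based operator and vector array classes; the matrix, dimensions, and import path are illustrative only.

import numpy as np
from pymor.operators.numpy import NumpyMatrixOperator

n = 100
A = NumpyMatrixOperator(-2. * np.eye(n) + np.diag(np.ones(n - 1), 1))  # stable (upper triangular, eigenvalues -2)
B = A.source.make_array(np.random.randn(2, n))  # two inputs
C = A.source.make_array(np.random.randn(1, n))  # one output

Z = solve_ricc_lrcf(A, None, B, C, trans=True)  # X ~= Z Z^T approximately solves the Riccati equation
print(len(Z), Z.dim)                            # columns and dimension of the low-rank factor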
Example #32
    def apply_inverse(self, V, mu=None, initial_guess=None, least_squares=False,
                      check_finite=True, default_sparse_solver_backend='scipy'):
        """Apply the inverse operator.

        Parameters
        ----------
        V
            |VectorArray| of vectors to which the inverse operator is applied.
        mu
            The |parameter values| for which to evaluate the inverse operator.
        initial_guess
            |VectorArray| with the same length as `V` containing initial guesses
            for the solution.  Some implementations of `apply_inverse` may
            ignore this parameter.  If `None` a solver-dependent default is used.
        least_squares
            If `True`, solve the least squares problem::

                u = argmin ||op(u) - v||_2.

            Since for an invertible operator the least squares solution agrees
            with the result of the application of the inverse operator,
            setting this option should, in general, have no effect on the result
            for those operators. However, note that when no appropriate
            |solver_options| are set for the operator, most implementations
            will choose a least squares solver by default which may be
            undesirable.
        check_finite
            Test if solution only contains finite values.
        default_sparse_solver_backend
            Default sparse solver backend to use (scipy, pyamg, generic).

        Returns
        -------
        |VectorArray| of the inverse operator evaluations.

        Raises
        ------
        InversionError
            The operator could not be inverted.
        """
        assert V in self.range
        assert initial_guess is None or initial_guess in self.source and len(initial_guess) == len(V)

        if V.dim == 0:
            if self.source.dim == 0 or least_squares:
                return self.source.make_array(np.zeros((len(V), self.source.dim)))
            else:
                raise InversionError

        if self.source.dim != self.range.dim and not least_squares:
            raise InversionError

        options = self.solver_options.get('inverse') if self.solver_options else None
        assert self.sparse or not options

        if self.sparse:
            if options:
                solver = options if isinstance(options, str) else options['type']
                backend = solver.split('_')[0]
            else:
                backend = default_sparse_solver_backend

            if backend == 'scipy':
                from pymor.bindings.scipy import apply_inverse as apply_inverse_impl
            elif backend == 'pyamg':
                if not config.HAVE_PYAMG:
                    raise RuntimeError('PyAMG support not enabled.')
                from pymor.bindings.pyamg import apply_inverse as apply_inverse_impl
            elif backend == 'generic':
                logger = getLogger('pymor.bindings.scipy.scipy_apply_inverse')
                logger.warning('You have selected a (potentially slow) generic solver for a NumPy matrix operator!')
                from pymor.algorithms.genericsolvers import apply_inverse as apply_inverse_impl
            else:
                raise NotImplementedError

            return apply_inverse_impl(self, V, initial_guess=initial_guess, options=options,
                                      least_squares=least_squares, check_finite=check_finite)

        else:
            if least_squares:
                try:
                    R, _, _, _ = np.linalg.lstsq(self.matrix, V.to_numpy().T)
                except np.linalg.LinAlgError as e:
                    raise InversionError(f'{str(type(e))}: {str(e)}')
                R = R.T
            else:
                try:
                    R = solve(self.matrix, V.to_numpy().T).T
                except np.linalg.LinAlgError as e:
                    raise InversionError(f'{str(type(e))}: {str(e)}')

            if check_finite:
                if not np.isfinite(np.sum(R)):
                    raise InversionError('Result contains non-finite values')

            return self.source.make_array(R)
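
A hypothetical usage sketch for the method above, assuming a dense NumpyMatrixOperator; names and values are illustrative.

import numpy as np
from pymor.operators.numpy import NumpyMatrixOperator

op = NumpyMatrixOperator(np.array([[2., 0.], [0., 4.]]))
V = op.range.make_array(np.array([[2., 8.]]))  # one right-hand side
U = op.apply_inverse(V)                        # solves op(U) = V via a direct dense solve
print(U.to_numpy())                            # -> [[1. 2.]]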
Example #33
def visualize_patch(grid, U, bounding_box=([0, 0], [1, 1]), codim=2, title=None, legend=None,
                    separate_colorbars=False, rescale_colorbars=False, backend='gl', block=False, columns=2):
    """Visualize scalar data associated to a two-dimensional |Grid| as a patch plot.

    The grid's |ReferenceElement| must be the triangle or square. The data can either
    be attached to the faces or vertices of the grid.

    Parameters
    ----------
    grid
        The underlying |Grid|.
    U
        |VectorArray| of the data to visualize. If `len(U) > 1`, the data is visualized
        as a time series of plots. Alternatively, a tuple of |VectorArrays| can be
        provided, in which case a subplot is created for each entry of the tuple. The
        lengths of all arrays have to agree.
    bounding_box
        A bounding box in which the grid is contained.
    codim
        The codimension of the entities the data in `U` is attached to (either 0 or 2).
    title
        Title of the plot.
    legend
        Description of the data that is plotted. Most useful if `U` is a tuple in which
        case `legend` has to be a tuple of strings of the same length.
    separate_colorbars
        If `True`, use separate colorbars for each subplot.
    rescale_colorbars
        If `True`, rescale colorbars to data in each frame.
    backend
        Plot backend to use ('gl' or 'matplotlib').
    block
        If `True`, block execution until the plot window is closed.
    columns
        The number of columns in the visualizer GUI in case multiple plots are displayed
        at the same time.
    """
    if not config.HAVE_QT:
        raise QtMissing()

    assert backend in {'gl', 'matplotlib'}

    if backend == 'gl':
        if not config.HAVE_GL:
            logger = getLogger('pymor.gui.qt.visualize_patch')
            logger.warning('import of PyOpenGL failed, falling back to matplotlib; rendering will be slow')
            backend = 'matplotlib'
        elif not config.HAVE_QTOPENGL:
            logger = getLogger('pymor.gui.qt.visualize_patch')
            logger.warning('import of Qt.QtOpenGL failed, falling back to matplotlib; rendering will be slow')
            backend = 'matplotlib'
        if backend == 'matplotlib' and not config.HAVE_MATPLOTLIB:
            raise ImportError('cannot visualize: import of matplotlib failed')
    else:
        if not config.HAVE_MATPLOTLIB:
            raise ImportError('cannot visualize: import of matplotlib failed')

    # TODO extract class
    class MainWindow(PlotMainWindow):
        def __init__(self, grid, U, bounding_box, codim, title, legend, separate_colorbars, rescale_colorbars, backend):

            assert isinstance(U, VectorArrayInterface) and hasattr(U, 'data') \
                or (isinstance(U, tuple) and all(isinstance(u, VectorArrayInterface) and hasattr(u, 'data') for u in U)
                    and all(len(u) == len(U[0]) for u in U))
            U = (U.data.astype(np.float64, copy=False),) if hasattr(U, 'data') else \
                tuple(u.data.astype(np.float64, copy=False) for u in U)
            if isinstance(legend, str):
                legend = (legend,)
            assert legend is None or isinstance(legend, tuple) and len(legend) == len(U)
            if backend == 'gl':
                widget = GLPatchWidget
                cbar_widget = ColorBarWidget
            else:
                widget = MatplotlibPatchWidget
                cbar_widget = None
                if not separate_colorbars and len(U) > 1:
                    l = getLogger('pymor.gui.qt.visualize_patch')
                    l.warning('separate_colorbars=False not supported for matplotlib backend')
                separate_colorbars = True

            class PlotWidget(QWidget):
                def __init__(self):
                    super().__init__()
                    if separate_colorbars:
                        if rescale_colorbars:
                            self.vmins = tuple(np.min(u[0]) for u in U)
                            self.vmaxs = tuple(np.max(u[0]) for u in U)
                        else:
                            self.vmins = tuple(np.min(u) for u in U)
                            self.vmaxs = tuple(np.max(u) for u in U)
                    else:
                        if rescale_colorbars:
                            self.vmins = (min(np.min(u[0]) for u in U),) * len(U)
                            self.vmaxs = (max(np.max(u[0]) for u in U),) * len(U)
                        else:
                            self.vmins = (min(np.min(u) for u in U),) * len(U)
                            self.vmaxs = (max(np.max(u) for u in U),) * len(U)

                    layout = QHBoxLayout()
                    plot_layout = QGridLayout()
                    self.colorbarwidgets = [cbar_widget(self, vmin=vmin, vmax=vmax) if cbar_widget else None
                                            for vmin, vmax in zip(self.vmins, self.vmaxs)]
                    plots = [widget(self, grid, vmin=vmin, vmax=vmax, bounding_box=bounding_box, codim=codim)
                             for vmin, vmax in zip(self.vmins, self.vmaxs)]
                    if legend:
                        for i, plot, colorbar, l in zip(range(len(plots)), plots, self.colorbarwidgets, legend):
                            subplot_layout = QVBoxLayout()
                            caption = QLabel(l)
                            caption.setAlignment(Qt.AlignHCenter)
                            subplot_layout.addWidget(caption)
                            if not separate_colorbars or backend == 'matplotlib':
                                subplot_layout.addWidget(plot)
                            else:
                                hlayout = QHBoxLayout()
                                hlayout.addWidget(plot)
                                if colorbar:
                                    hlayout.addWidget(colorbar)
                                subplot_layout.addLayout(hlayout)
                            plot_layout.addLayout(subplot_layout, int(i/columns), (i % columns), 1, 1)
                    else:
                        for i, plot, colorbar in zip(range(len(plots)), plots, self.colorbarwidgets):
                            if not separate_colorbars or backend == 'matplotlib':
                                plot_layout.addWidget(plot, int(i/columns), (i % columns), 1, 1)
                            else:
                                hlayout = QHBoxLayout()
                                hlayout.addWidget(plot)
                                if colorbar:
                                    hlayout.addWidget(colorbar)
                                plot_layout.addLayout(hlayout, int(i/columns), (i % columns), 1, 1)
                    layout.addLayout(plot_layout)
                    if not separate_colorbars:
                        layout.addWidget(self.colorbarwidgets[0])
                        for w in self.colorbarwidgets[1:]:
                            w.setVisible(False)
                    self.setLayout(layout)
                    self.plots = plots

                def set(self, U, ind):
                    if rescale_colorbars:
                        if separate_colorbars:
                            self.vmins = tuple(np.min(u[ind]) for u in U)
                            self.vmaxs = tuple(np.max(u[ind]) for u in U)
                        else:
                            self.vmins = (min(np.min(u[ind]) for u in U),) * len(U)
                            self.vmaxs = (max(np.max(u[ind]) for u in U),) * len(U)

                    for u, plot, colorbar, vmin, vmax in zip(U, self.plots, self.colorbarwidgets, self.vmins,
                                                              self.vmaxs):
                        plot.set(u[ind], vmin=vmin, vmax=vmax)
                        if colorbar:
                            colorbar.set(vmin=vmin, vmax=vmax)

            super().__init__(U, PlotWidget(), title=title, length=len(U[0]))
            self.grid = grid
            self.codim = codim

        def save(self):
            if not config.HAVE_PYVTK:
                msg = QMessageBox(QMessageBox.Critical, 'Error', 'VTK output disabled. Please install pyvtk.')
                msg.exec_()
                return
            filename = QFileDialog.getSaveFileName(self, 'Save as vtk file')[0]
            base_name = filename.split('.vtu')[0].split('.vtk')[0].split('.pvd')[0]
            if base_name:
                if len(self.U) == 1:
                    write_vtk(self.grid, NumpyVectorSpace.make_array(self.U[0]), base_name, codim=self.codim)
                else:
                    for i, u in enumerate(self.U):
                        write_vtk(self.grid, NumpyVectorSpace.make_array(u), '{}-{}'.format(base_name, i),
                                  codim=self.codim)

    _launch_qt_app(lambda: MainWindow(grid, U, bounding_box, codim, title=title, legend=legend,
                                      separate_colorbars=separate_colorbars, rescale_colorbars=rescale_colorbars,
                                      backend=backend),
                   block)
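
A hypothetical usage sketch: plotting a single snapshot of vertex data on a rectangular grid. The import paths follow older pyMOR layouts and may differ between versions.

import numpy as np
from pymor.grids.rect import RectGrid
from pymor.vectorarrays.numpy import NumpyVectorSpace

grid = RectGrid(num_intervals=(50, 50))
xs = grid.centers(2)                                  # vertex coordinates (codim 2)
data = np.sin(2 * np.pi * xs[:, 0]) * np.cos(2 * np.pi * xs[:, 1])
U = NumpyVectorSpace.make_array(data.reshape(1, -1))  # a single snapshot
visualize_patch(grid, U, codim=2, title='example', backend='matplotlib')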
Example #34
def apply_inverse(op, rhs, options=None):
    """Solve linear equation system.

    Applies the inverse of `op` to the vectors in `rhs`.

    Parameters
    ----------
    op
        The linear, non-parametric |Operator| to invert.
    rhs
        |VectorArray| of right-hand sides for the equation system.
    options
        |invert_options| to use. (See :func:`invert_options`.)

    Returns
    -------
    |VectorArray| of the solution vectors.
    """

    default_options = invert_options()

    if options is None:
        options = next(iter(default_options.values()))
    elif isinstance(options, str):
        if options == 'least_squares':
            for k, v in default_options.items():
                if k.startswith('least_squares'):
                    options = v
                    break
            assert not isinstance(options, str)
        else:
            options = default_options[options]
    else:
        assert 'type' in options and options['type'] in default_options \
            and options.keys() <= default_options[options['type']].keys()
        user_options = options
        options = default_options[user_options['type']]
        options.update(user_options)

    R = op.source.empty(reserve=len(rhs))

    if options['type'] == 'generic_lgmres':
        for i in range(len(rhs)):
            r, info = lgmres(op, rhs.copy(i),
                             tol=options['tol'],
                             maxiter=options['maxiter'],
                             inner_m=options['inner_m'],
                             outer_k=options['outer_k'])
            if info > 0:
                raise InversionError('lgmres failed to converge after {} iterations'.format(info))
            assert info == 0
            R.append(r)
    elif options['type'] == 'least_squares_generic_lsmr':
        for i in range(len(rhs)):
            r, info, itn, _, _, _, _, _ = lsmr(op, rhs.copy(i),
                                               damp=options['damp'],
                                               atol=options['atol'],
                                               btol=options['btol'],
                                               conlim=options['conlim'],
                                               maxiter=options['maxiter'],
                                               show=options['show'])
            assert 0 <= info <= 7
            if info == 7:
                raise InversionError('lsmr failed to converge after {} iterations'.format(itn))
            getLogger('pymor.algorithms.genericsolvers.lsmr').info('Converged after {} iterations'.format(itn))
            R.append(r)
    elif options['type'] == 'least_squares_generic_lsqr':
        for i in range(len(rhs)):
            r, info, itn, _, _, _, _, _, _ = lsqr(op, rhs.copy(i),
                                                  damp=options['damp'],
                                                  atol=options['atol'],
                                                  btol=options['btol'],
                                                  conlim=options['conlim'],
                                                  iter_lim=options['iter_lim'],
                                                  show=options['show'])
            assert 0 <= info <= 7
            if info == 7:
                raise InversionError('lsqr failed to converge after {} iterations'.format(itn))
            getLogger('pymor.algorithms.genericsolvers.lsqr').info('Converged after {} iterations'.format(itn))
            R.append(r)
    else:
        raise ValueError('Unknown solver type')

    return R
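
A hypothetical usage sketch: selecting and tweaking a solver explicitly. `op` and `rhs` are placeholders for a linear operator and a matching vector array, and the option key is assumed to match the `type` strings checked above.

opts = invert_options()['generic_lgmres']  # start from the default LGMRES options
opts['tol'] = 1e-10                        # tighten the tolerance
U = apply_inverse(op, rhs, options=opts)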
Example #35
def estimate_image_hierarchical(operators=(),
                                vectors=(),
                                domain=None,
                                extends=None,
                                orthonormalize=True,
                                product=None,
                                riesz_representatives=False):
    """Estimate the image of given |Operators| for all mu.

    This is an extended version of :func:`estimate_image`, which calls
    :func:`estimate_image` individually for each vector of `domain`.

    As a result, the vectors in the returned `image` |VectorArray| will
    be ordered by the `domain` vector they correspond to (starting with
    vectors which correspond to the `functionals` and to |Operators| for
    which the image is estimated independently from `domain`).

    This function also returns an `image_dims` list, such that the first
    `image_dims[i+1]` vectors of `image` correspond to the first `i`
    vectors of `domain` (the first `image_dims[0]` vectors correspond
    to `vectors` and to |Operators| with fixed image estimate).

    Parameters
    ----------
    operators
        See :func:`estimate_image`.
    vectors
        See :func:`estimate_image`.
    domain
        See :func:`estimate_image`.
    extends
        When additional vectors have been appended to the `domain` |VectorArray|
        after :func:`estimate_image_hierarchical` has been called, and
        :func:`estimate_image_hierarchical` shall be called again for the extended
        `domain` array, `extends` can be set to `(image, image_dims)`, where
        `image`, `image_dims` are the return values of the last
        :func:`estimate_image_hierarchical` call. The old `domain` vectors will
        then be skipped during computation and `image`, `image_dims` will be
        modified in-place.
    orthonormalize
        See :func:`estimate_image`.
    product
        See :func:`estimate_image`.
    riesz_representatives
        See :func:`estimate_image`.

    Returns
    -------
    image
        See above.
    image_dims
        See above.

    Raises
    ------
    ImageCollectionError
        Is raised when for a given |Operator| no image estimate is possible.
    """
    assert operators or vectors
    domain_space = operators[0].source if operators else None
    image_space = operators[0].range if operators \
        else vectors[0].space if isinstance(vectors[0], VectorArrayInterface) \
        else vectors[0].range
    assert all(op.source == domain_space and op.range == image_space
               for op in operators)
    assert all(
        isinstance(v, VectorArrayInterface) and (v in image_space)
        or isinstance(v, OperatorInterface) and
        (v.range == image_space and isinstance(v.source, NumpyVectorSpace)
         and v.linear) for v in vectors)
    assert domain is None or domain_space is None or domain in domain_space
    assert product is None or product.source == product.range == image_space
    assert extends is None or len(extends) == 2

    logger = getLogger('pymor.algorithms.image.estimate_image_hierarchical')

    if operators and domain is None:
        domain = domain_space.empty()

    if extends:
        image = extends[0]
        image_dims = extends[1]
        ind_range = range(len(image_dims) - 1, len(domain)) if operators \
            else range(len(image_dims) - 1, 0)
    else:
        image = image_space.empty()
        image_dims = []
        ind_range = range(-1, len(domain)) if operators else [-1]

    for i in ind_range:
        logger.info('Estimating image for basis vector {} ...'.format(i))
        if i == -1:
            new_image = estimate_image(
                operators,
                vectors,
                None,
                extends=False,
                orthonormalize=False,
                product=product,
                riesz_representatives=riesz_representatives)
        else:
            new_image = estimate_image(
                operators, [],
                domain[i],
                extends=True,
                orthonormalize=False,
                product=product,
                riesz_representatives=riesz_representatives)

        gram_schmidt_offset = len(image)
        image.append(new_image, remove_from_other=True)
        if orthonormalize:
            with logger.block('Orthonormalizing ...'):
                gram_schmidt(image,
                             offset=gram_schmidt_offset,
                             product=product,
                             copy=False)
            image_dims.append(len(image))

    return image, image_dims
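
A hypothetical usage sketch of the hierarchical variant: `op`, `basis`, `product` and `new_snapshots` are placeholders. After the domain array has grown, the previous result is passed back via `extends` so only the new vectors are processed.

image, image_dims = estimate_image_hierarchical([op], domain=basis, product=product)
basis.append(new_snapshots)  # later: the domain array is extended in place
image, image_dims = estimate_image_hierarchical([op], domain=basis, product=product,
                                                extends=(image, image_dims))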
Example #36
 def set(self, key, value):
     has_key = key in self._cache
     if has_key:
         getLogger('pymor.core.cache.DiskRegion').warning('Key already present in cache region, ignoring.')
         return
     self._cache.set(key, value)
Example #37
    # parse size strings like '2G' into bytes ('K'/'M' branches truncated in this excerpt)
    parse_size_string = lambda size: \
        int(size[:-1]) * 1024 ** 3 if size[-1] == 'G' else \
        int(size)

    if isinstance(disk_max_size, str):
        disk_max_size = parse_size_string(disk_max_size)

    cache_regions['disk'] = DiskRegion(path=disk_path, max_size=disk_max_size, persistent=False)
    cache_regions['persistent'] = DiskRegion(path=persistent_path, max_size=persistent_max_size, persistent=True)
    cache_regions['memory'] = MemoryRegion(memory_max_keys)


cache_regions = {}

_caching_disabled = int(os.environ.get('PYMOR_CACHE_DISABLE', 0)) == 1
if _caching_disabled:
    getLogger('pymor.core.cache').warning('caching globally disabled by environment')


def enable_caching():
    """Globally enable caching."""
    global _caching_disabled
    _caching_disabled = int(os.environ.get('PYMOR_CACHE_DISABLE', 0)) == 1


def disable_caching():
    """Globally disable caching."""
    global _caching_disabled
    _caching_disabled = True


def clear_caches():
    """Clear all cache regions."""
    for region in cache_regions.values():
        region.clear()
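
A hypothetical usage sketch: toggling the global cache switch defined above.

disable_caching()  # nothing is cached from here on
enable_caching()   # re-enabled, unless PYMOR_CACHE_DISABLE=1 is set in the environment
print(cache_regions.keys())  # e.g. 'disk', 'persistent', 'memory'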
Example #38
 def _solve_check(dtype, solver, sep, ferr):
     if ferr > 1e-1:
         logger = getLogger(solver)
         logger.warning(
             f'Estimated forward relative error bound is large (ferr={ferr:e}, sep={sep:e}). '
             f'Result may not be accurate.')
Example #39
 def __init__(self, section, log=getLogger(__name__)):
     self._section = section
     self._log = log
     self._start = 0
Example #40
def newton(operator,
           rhs,
           initial_guess=None,
           mu=None,
           error_norm=None,
           least_squares=False,
           miniter=0,
           maxiter=100,
           rtol=-1.,
           atol=-1.,
           stagnation_window=3,
           stagnation_threshold=0.9,
           return_stages=False,
           return_residuals=False):
    """Basic Newton algorithm.

    This method solves the nonlinear equation ::

        A(U, mu) = V

    for `U` using the Newton method.

    Parameters
    ----------
    operator
        The |Operator| `A`. `A` must implement the
        :meth:`~pymor.operators.interfaces.OperatorInterface.jacobian` interface method.
    rhs
        |VectorArray| of length 1 containing the vector `V`.
    initial_guess
        If not `None`, a |VectorArray| of length 1 containing an initial guess for the
        solution `U`.
    mu
        The |Parameter| for which to solve the equation.
    error_norm
        The norm with which the norm of the residual is computed. If `None`, the
        Euclidean norm is used.
    least_squares
        If `True`, use a least squares linear solver (e.g. for residual minimization).
    miniter
        Minimum amount of iterations to perform.
    maxiter
        Fail if the iteration count reaches this value without converging.
    rtol
        Finish when the residual norm has been reduced by this factor relative to the
        norm of the initial residual.
    atol
        Finish when the residual norm is below this threshold.
    stagnation_window
        Finish when the residual norm has not been reduced by a factor of
        `stagnation_threshold` during the last `stagnation_window` iterations.
    stagnation_threshold
        See `stagnation_window`.
    return_stages
        If `True`, return a |VectorArray| of the intermediate approximations of `U`
        after each iteration.
    return_residuals
        If `True`, return a |VectorArray| of all residual vectors which have been computed
        during the Newton iterations.

    Returns
    -------
    U
        |VectorArray| of length 1 containing the computed solution
    data
        Dict containing the following fields:

            :error_sequence:  |NumPy array| containing the residual norms after each iteration.
            :stages:          See `return_stages`.
            :residuals:       See `return_residuals`.

    Raises
    ------
    NewtonError
        Raised if the Newton algorithm failed to converge.
    """
    logger = getLogger('pymor.algorithms.newton')

    data = {}

    if initial_guess is None:
        initial_guess = operator.source.zeros()

    if return_stages:
        data['stages'] = operator.source.empty()

    if return_residuals:
        data['residuals'] = operator.range.empty()

    U = initial_guess.copy()
    residual = rhs - operator.apply(U, mu=mu)

    err = residual.l2_norm()[0] if error_norm is None else error_norm(residual)[0]
    logger.info(f'      Initial Residual: {err:5e}')

    iteration = 0
    error_sequence = [err]
    while True:
        if iteration >= miniter:
            if err <= atol:
                logger.info(f'Absolute limit of {atol} reached. Stopping.')
                break
            if err / error_sequence[0] <= rtol:
                logger.info(
                    f'Prescribed total reduction of {rtol} reached. Stopping.')
                break
            if (len(error_sequence) >= stagnation_window + 1
                    and err / max(error_sequence[-stagnation_window - 1:]) >=
                    stagnation_threshold):
                logger.info(
                    f'Error is stagnating (threshold: {stagnation_threshold:5e}, window: {stagnation_window}). '
                    f'Stopping.')
                break
            if iteration >= maxiter:
                raise NewtonError('Failed to converge')
        if iteration > 0 and return_stages:
            data['stages'].append(U)
        if return_residuals:
            data['residuals'].append(residual)
        iteration += 1
        jacobian = operator.jacobian(U, mu=mu)
        try:
            correction = jacobian.apply_inverse(residual,
                                                least_squares=least_squares)
        except InversionError:
            raise NewtonError('Could not invert jacobian')
        U += correction
        residual = rhs - operator.apply(U, mu=mu)

        err = residual.l2_norm()[0] if error_norm is None else error_norm(residual)[0]
        logger.info(
            f'Iteration {iteration:2}: Residual: {err:5e},  '
            f'Reduction: {err / error_sequence[-1]:5e}, Total Reduction: {err / error_sequence[0]:5e}'
        )
        error_sequence.append(err)
        if not np.isfinite(err):
            raise NewtonError('Failed to converge')

    data['error_sequence'] = np.array(error_sequence)

    return U, data
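
A hypothetical usage sketch: `op` is a placeholder for a nonlinear |Operator| implementing `jacobian`, and `rhs` a |VectorArray| of length 1 from `op.range`.

U, data = newton(op, rhs, atol=1e-12, maxiter=20, return_residuals=True)
print(data['error_sequence'])  # residual norm after each iteration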
Example #41
def armijo(f,
           starting_point,
           direction,
           grad=None,
           initial_value=None,
           alpha_init=1.0,
           tau=0.5,
           beta=0.0001,
           maxiter=10):
    """Armijo line search algorithm.

    This method computes a step size such that the Armijo condition (see [NW06]_, p. 33)
    is fulfilled.

    Parameters
    ----------
    f
        Real-valued function that can be evaluated for its value.
    starting_point
        A |VectorArray| of length 1 containing the starting point of the line search.
    direction
        Descent direction along which the line search is performed.
    grad
        Gradient of `f` in the point `starting_point`.
    initial_value
        Value of `f` in the point `starting_point`.
    alpha_init
        Initial step size that is gradually reduced.
    tau
        The fraction by which the step size is reduced in each iteration.
    beta
        Control parameter to adjust the required decrease of the function value of `f`.
    maxiter
        Use `alpha_init` as default if the iteration count reaches this value without
        finding a point fulfilling the Armijo condition.

    Returns
    -------
    alpha
        Step size computed according to the Armijo condition.
    """
    assert alpha_init > 0
    assert 0 < tau < 1
    assert maxiter > 0

    # Start line search with step size of alpha_init
    alpha = alpha_init

    # Compute initial function value
    if initial_value is None:
        initial_value = f(starting_point)

    iteration = 0
    slope = 0.0

    # Compute slope if gradient is provided
    if grad:
        slope = min(grad.dot(direction), 0.0)

    while True:
        # Compute new function value
        current_value = f(starting_point + alpha * direction)
        # Check the Armijo condition
        if current_value < initial_value + alpha * beta * slope:
            break
        # Check if maxiter is reached
        if iteration >= maxiter:
            # Use default value as step size
            alpha = alpha_init
            # Log warning
            logger = getLogger('pymor.algorithms.line_search.armijo')
            logger.warning(
                f'Reached maximum number of line search steps; using initial step size of {alpha_init} instead'
            )
            break
        iteration += 1
        # Adjust step size
        alpha *= tau

    return alpha
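
A hypothetical, self-contained sketch: a steepest-descent line search for f(u) = ||u||^2, using pyMOR's NumPy vector arrays.

import numpy as np
from pymor.vectorarrays.numpy import NumpyVectorSpace

space = NumpyVectorSpace(2)
u = space.make_array(np.array([[1., 1.]]))

f = lambda v: v.l2_norm()[0] ** 2  # f(u) = ||u||^2
grad = u * 2.                      # gradient of f at u
alpha = armijo(f, u, -grad, grad=grad, initial_value=f(u))
print(alpha)                       # accepted step size (0.5 here)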
Example #42
 def __init__(self):
     self.memo = {}
     self.logger = logger.getLogger('pymor.core.interfaces')
Example #43
def adaptive_greedy(discretization,
                    reductor,
                    parameter_space=None,
                    initial_basis=None,
                    use_estimator=True,
                    error_norm=None,
                    extension_algorithm=gram_schmidt_basis_extension,
                    target_error=None,
                    max_extensions=None,
                    validation_mus=0,
                    rho=1.1,
                    gamma=0.2,
                    theta=0.,
                    visualize=False,
                    visualize_vertex_size=80,
                    pool=dummy_pool):
    """Greedy basis generation algorithm with adaptively refined training set.

    This method extends pyMOR's default :func:`~pymor.algorithms.greedy.greedy`
    greedy basis generation algorithm by adaptive refinement of the
    parameter training set according to [HDO11]_ to prevent overfitting
    of the reduced basis to the training set. This is achieved by
    estimating the reduction error on an additional validation set of
    parameters. If the ratio between the estimated errors on the validation
    set and the training set is larger than `rho`, the training set
    is refined using standard grid refinement techniques.

    .. [HDO11] Haasdonk, B.; Dihlmann, M. & Ohlberger, M.,
               A training set and multiple bases generation approach for
               parameterized model reduction based on adaptive grids in
               parameter space,
               Math. Comput. Model. Dyn. Syst., 2011, 17, 423-442

    Parameters
    ----------
    discretization
        See :func:`~pymor.algorithms.greedy.greedy`.
    reductor
        See :func:`~pymor.algorithms.greedy.greedy`.
    parameter_space
        The |ParameterSpace| for which to compute the reduced model. If `None`
        the parameter space of the `discretization` is used.
    initial_basis
        See :func:`~pymor.algorithms.greedy.greedy`.
    use_estimator
        See :func:`~pymor.algorithms.greedy.greedy`.
    error_norm
        See :func:`~pymor.algorithms.greedy.greedy`.
    extension_algorithm
        See :func:`~pymor.algorithms.greedy.greedy`.
    target_error
        See :func:`~pymor.algorithms.greedy.greedy`.
    max_extensions
        See :func:`~pymor.algorithms.greedy.greedy`.
    validation_mus
        One of the following:
          - a list of |Parameters| to use as validation set,
          - a positive number indicating the number of random parameters
            to use as validation set,
          - a non-positive number, indicating the negative number of random
            parameters to use as validation set in addition to the centers
            of the elements of the adaptive training set.
    rho
        Maximum allowed ratio between maximum estimated error on validation
        set vs. maximum estimated error on training set. If the ratio is
        larger, the training set is refined.
    gamma
        Weight of the age penalty term of the training set refinement
        indicators.
    theta
        Ratio of training set elements to select for refinement.
        (One element is always refined.)
    visualize
        If `True`, visualize the refinement indicators. (Only available
        for 2 and 3 dimensional parameter spaces.)
    visualize_vertex_size
        Size of the vertices in the visualization.
    pool
        See :func:`~pymor.algorithms.greedy.greedy`.

    Returns
    -------
    Dict with the following fields:

        :basis:                  The reduced basis.
        :reduced_discretization: The reduced |Discretization| obtained for the
                                 computed basis.
        :reconstructor:          Reconstructor for `reduced_discretization`.
        :max_errs:               Sequence of maximum errors during the greedy run.
        :max_err_mus:            The parameters corresponding to `max_errs`.
        :max_val_errs:           Sequence of maximum errors on the validation set.
        :max_val_err_mus:        The parameters corresponding to `max_val_errs`.
        :refinements:            Number of refinements made in each extension step.
        :training_set_sizes:     The final size of the training set in each extension step.
    """
    def estimate(mus):
        if use_estimator:
            errors = pool.map(_estimate, mus, rd=rd)
        else:
            errors = pool.map(_estimate,
                              mus,
                              rd=rd,
                              d=d,
                              rc=rc,
                              error_norm=error_norm)
        # most error_norms will return an array of length 1 instead of a number, so we extract the numbers
        # if necessary
        return np.array([x[0] if hasattr(x, '__len__') else x for x in errors])

    logger = getLogger('pymor.algorithms.adaptivegreedy.adaptive_greedy')

    if pool is None or pool is dummy_pool:
        pool = dummy_pool
    else:
        logger.info(
            'Using pool of {} workers for parallel greedy search'.format(
                len(pool)))

    with RemoteObjectManager() as rom:
        # Push everything we need during the greedy search to the workers.
        if not use_estimator:
            rom.manage(pool.push(discretization))
            if error_norm:
                rom.manage(pool.push(error_norm))

        tic = time.time()

        # initial setup for main loop
        d = discretization
        basis = initial_basis
        rd, rc, reduction_data = None, None, None
        hierarchic = False

        # setup training and validation sets
        parameter_space = parameter_space or d.parameter_space
        sample_set = AdaptiveSampleSet(parameter_space)
        if validation_mus <= 0:
            validation_set = sample_set.center_mus + parameter_space.sample_randomly(
                -validation_mus)
        else:
            validation_set = parameter_space.sample_randomly(validation_mus)
        if visualize and sample_set.dim not in (2, 3):
            raise NotImplementedError
        logger.info('Training set size: {}. Validation set size: {}'.format(
            len(sample_set.vertex_mus), len(validation_set)))

        extensions = 0
        max_errs = []
        max_err_mus = []
        max_val_errs = []
        max_val_err_mus = []
        refinements = []
        training_set_sizes = []

        while True:  # main loop
            with logger.block('Reducing ...'):
                rd, rc, reduction_data = reductor(discretization, basis) if not hierarchic \
                    else reductor(discretization, basis, extends=(rd, rc, reduction_data))

            current_refinements = 0
            while True:  # estimate reduction errors and refine training set until no overfitting is detected

                # estimate on training set
                with logger.block('Estimating errors ...'):
                    errors = estimate(sample_set.vertex_mus)
                max_err_ind = np.argmax(errors)
                max_err, max_err_mu = errors[
                    max_err_ind], sample_set.vertex_mus[max_err_ind]
                logger.info(
                    'Maximum error after {} extensions: {} (mu = {})'.format(
                        extensions, max_err, max_err_mu))

                # estimate on validation set
                val_errors = estimate(validation_set)
                max_val_err_ind = np.argmax(val_errors)
                max_val_err, max_val_err_mu = val_errors[
                    max_val_err_ind], validation_set[max_val_err_ind]
                logger.info('Maximum validation error: {}'.format(max_val_err))
                logger.info(
                    'Validation error to training error ratio: {:.3e}'.format(
                        max_val_err / max_err))

                if max_val_err >= max_err * rho:  # overfitting?

                    # compute element indicators for training set refinement
                    if current_refinements == 0:
                        logger.info2(
                            'Overfitting detected. Computing element indicators ...'
                        )
                    else:
                        logger.info3(
                            'Overfitting detected after refinement. Computing element indicators ...'
                        )
                    vertex_errors = np.max(errors[sample_set.vertex_ids],
                                           axis=1)
                    center_errors = estimate(sample_set.center_mus)
                    indicators_age_part = (gamma * sample_set.volumes /
                                           sample_set.total_volume *
                                           (sample_set.refinement_count -
                                            sample_set.creation_times))
                    indicators_error_part = np.max(
                        [vertex_errors, center_errors], axis=0) / max_err
                    indicators = indicators_age_part + indicators_error_part

                    # select elements
                    sorted_indicators_inds = np.argsort(indicators)[::-1]
                    refinement_elements = sorted_indicators_inds[:max(
                        int(len(sorted_indicators_inds) * theta), 1)]
                    logger.info('Refining {} elements: {}'.format(
                        len(refinement_elements), refinement_elements))

                    # visualization
                    if visualize:
                        from mpl_toolkits.mplot3d import Axes3D  # NOQA
                        import matplotlib.pyplot as plt
                        plt.figure()
                        plt.subplot(
                            2,
                            2,
                            1,
                            projection=None if sample_set.dim == 2 else '3d')
                        plt.title('estimated errors')
                        sample_set.visualize(vertex_data=errors,
                                             center_data=center_errors,
                                             new_figure=False)
                        plt.subplot(
                            2,
                            2,
                            2,
                            projection=None if sample_set.dim == 2 else '3d')
                        plt.title('indicators_error_part')
                        vmax = np.max([
                            indicators_error_part, indicators_age_part,
                            indicators
                        ])
                        data = {
                            ('volume_data' if sample_set.dim == 2 else 'center_data'):
                            indicators_error_part
                        }
                        sample_set.visualize(vertex_size=visualize_vertex_size,
                                             vmin=0,
                                             vmax=vmax,
                                             new_figure=False,
                                             **data)
                        plt.subplot(
                            2,
                            2,
                            3,
                            projection=None if sample_set.dim == 2 else '3d')
                        plt.title('indicators_age_part')
                        data = {
                            ('volume_data' if sample_set.dim == 2 else 'center_data'):
                            indicators_age_part
                        }
                        sample_set.visualize(vertex_size=visualize_vertex_size,
                                             vmin=0,
                                             vmax=vmax,
                                             new_figure=False,
                                             **data)
                        plt.subplot(
                            2,
                            2,
                            4,
                            projection=None if sample_set.dim == 2 else '3d')
                        if sample_set.dim == 2:
                            plt.title('indicators')
                            sample_set.visualize(
                                volume_data=indicators,
                                center_data=np.zeros(len(refinement_elements)),
                                center_inds=refinement_elements,
                                vertex_size=visualize_vertex_size,
                                vmin=0,
                                vmax=vmax,
                                new_figure=False)
                        else:
                            plt.title('selected cells')
                            sample_set.visualize(
                                center_data=np.zeros(len(refinement_elements)),
                                center_inds=refinement_elements,
                                vertex_size=visualize_vertex_size,
                                vmin=0,
                                vmax=vmax,
                                new_figure=False)
                        plt.show()

                    # refine training set
                    sample_set.refine(refinement_elements)
                    current_refinements += 1

                    # update validation set if needed
                    if validation_mus <= 0:
                        validation_set = sample_set.center_mus + parameter_space.sample_randomly(
                            -validation_mus)

                    logger.info(
                        'New training set size: {}. New validation set size: {}'
                        .format(len(sample_set.vertex_mus),
                                len(validation_set)))
                    logger.info('Number of refinements: {}'.format(
                        sample_set.refinement_count))
                    logger.info('')
                else:
                    break  # no overfitting, leave the refinement loop

            max_errs.append(max_err)
            max_err_mus.append(max_err_mu)
            max_val_errs.append(max_val_err)
            max_val_err_mus.append(max_val_err_mu)
            refinements.append(current_refinements)
            training_set_sizes.append(len(sample_set.vertex_mus))

            # break if target error reached
            if target_error is not None and max_err <= target_error:
                logger.info(
                    'Reached maximal error on snapshots of {} <= {}'.format(
                        max_err, target_error))
                break

            # basis extension
            with logger.block(
                    'Computing solution snapshot for mu = {} ...'.format(
                        max_err_mu)):
                U = discretization.solve(max_err_mu)
            with logger.block('Extending basis with solution snapshot ...'):
                try:
                    basis, extension_data = extension_algorithm(basis, U)
                except ExtensionError:
                    logger.info('Extension failed. Stopping now.')
                    break
            extensions += 1
            if 'hierarchic' not in extension_data:
                logger.warning(
                    'Extension algorithm does not report if extension was hierarchic. Assuming it wasn\'t.'
                )
                hierarchic = False
            else:
                hierarchic = extension_data['hierarchic']

            logger.info('')

            # break if prescribed basis size reached
            if max_extensions is not None and extensions >= max_extensions:
                logger.info('Maximum number of {} extensions reached.'.format(
                    max_extensions))
                with logger.block('Reducing once more ...'):
                    rd, rc, reduction_data = reductor(discretization, basis) if not hierarchic \
                        else reductor(discretization, basis, extends=(rd, rc, reduction_data))
                break

    tictoc = time.time() - tic
    logger.info('Greedy search took {} seconds'.format(tictoc))
    return {
        'basis': basis,
        'reduced_discretization': rd,
        'reconstructor': rc,
        'max_errs': max_errs,
        'max_err_mus': max_err_mus,
        'extensions': extensions,
        'max_val_errs': max_val_errs,
        'max_val_err_mus': max_val_err_mus,
        'refinements': refinements,
        'training_set_sizes': training_set_sizes,
        'time': tictoc,
        'reduction_data': reduction_data
    }
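
A hypothetical usage sketch: `d` and `reductor` are placeholders for a parametric discretization and a compatible reductor callable.

result = adaptive_greedy(d, reductor, validation_mus=10, rho=1.1,
                         target_error=1e-5, max_extensions=25)
rd = result['reduced_discretization']
print(result['training_set_sizes'], result['refinements'])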
Example #44
def samdp(A,
          E,
          B,
          C,
          nwanted,
          init_shifts=None,
          which='LR',
          tol=1e-10,
          imagtol=1e-6,
          conjtol=1e-8,
          dorqitol=1e-4,
          rqitol=1e-10,
          maxrestart=100,
          krestart=20,
          rqi_maxiter=10,
          seed=0):
    """Compute the dominant pole triplets and residues of the transfer function of an LTI system.

    This function uses the subspace accelerated dominant pole (SAMDP) algorithm as described in
    [RM06]_ in Algorithm 2 in order to compute dominant pole triplets and residues of the transfer
    function

    .. math::
        H(s) = C (s E - A)^{-1} B

    of an LTI system. It is possible to take advantage of prior knowledge about the poles
    by specifying shift parameters, which are injected after a new pole has been found.

    Parameters
    ----------
    A
        The |Operator| A.
    E
        The |Operator| E or `None`.
    B
        The operator B as a |VectorArray| from `A.source`.
    C
        The operator C as a |VectorArray| from `A.source`.
    nwanted
        The number of dominant poles that should be computed.
    init_shifts
        A |NumPy array| containing shifts which are injected after a new pole has been found.
    which
        A string specifying the strategy by which the dominant poles and residues are selected.
        Possible values are:

        - `'LR'`: select poles with largest norm(residual) / abs(Re(pole))
        - `'LS'`: select poles with largest norm(residual) / abs(pole)
        - `'LM'`: select poles with largest norm(residual)
    tol
        Tolerance for the residual of the poles.
    imagtol
        Relative tolerance for imaginary parts of pairs of complex conjugate eigenvalues.
    conjtol
        Tolerance for the residual of the complex conjugate of a pole.
    dorqitol
        If the residual is smaller than dorqitol the two-sided Rayleigh quotient iteration
        is executed.
    rqitol
        Tolerance for the relative change of a pole in the two-sided Rayleigh quotient
        iteration.
    maxrestart
        The maximum number of restarts.
    krestart
        Maximum dimension of search space before performing a restart.
    rqi_maxiter
        Maximum number of iterations for the two-sided Rayleigh quotient iteration.
    seed
        Random seed which is used for computing the initial shift and random restarts.

    Returns
    -------
    poles
        A 1D |NumPy array| containing the computed dominant poles.
    residues
        A 3D |NumPy array| of shape `(len(poles), len(C), len(B))` containing the computed residues.
    rightev
        A |VectorArray| containing the right eigenvectors of the computed poles.
    leftev
        A |VectorArray| containing the left eigenvectors of the computed poles.
    """

    logger = getLogger('pymor.algorithms.samdp.samdp')

    if E is None:
        E = IdentityOperator(A.source)

    assert isinstance(A, Operator) and A.linear
    assert not A.parametric
    assert A.source == A.range
    if E is not None:
        assert isinstance(E, Operator) and E.linear
        assert not E.parametric
        assert E.source == E.range
        assert E.source == A.source
    assert B in A.source
    assert C in A.source

    B_defl = B.copy()
    C_defl = C.copy()

    k = 0
    nrestart = 0
    nr_converged = 0
    np.random.seed(seed)

    X = A.source.empty()
    Q = A.source.empty()
    Qt = A.source.empty()
    Qs = A.source.empty()
    Qts = A.source.empty()
    AX = A.source.empty()
    V = A.source.empty()

    H = np.empty((0, 1))
    G = np.empty((0, 1))
    poles = np.empty(0)

    if init_shifts is None:
        st = np.random.uniform() * 10.j
        shift_nr = 0
        nr_shifts = 0
    else:
        st = init_shifts[0]
        shift_nr = 1
        nr_shifts = len(init_shifts)

    shifts = init_shifts

    while nrestart < maxrestart:
        k += 1

        sEmA = st * E - A
        sEmAB = sEmA.apply_inverse(B_defl)
        Hs = C_defl.inner(sEmAB)

        y_all, _, u_all = spla.svd(Hs)

        u = u_all.conj()[0]
        y = y_all[:, 0]

        x = sEmAB.lincomb(u)
        v = sEmA.apply_inverse_adjoint(C_defl.lincomb(y.T))

        X.append(x)
        V.append(v)
        gram_schmidt(V, atol=0, rtol=0, copy=False)
        gram_schmidt(X, atol=0, rtol=0, copy=False)

        AX.append(A.apply(X[k - 1]))

        if k > 1:
            H = np.hstack((H, V[0:k - 1].inner(AX[k - 1])))
        H = np.vstack((H, V[k - 1].inner(AX)))
        EX = E.apply(X)
        if k > 1:
            G = np.hstack((G, V[0:k - 1].inner(EX[k - 1])))
        G = np.vstack((G, V[k - 1].inner(EX)))

        SH, UR, URt, res = _select_max_eig(H, G, X, V, B_defl, C_defl, which)

        if np.all(res < np.finfo(float).eps):
            st = np.random.uniform() * 10.j
            found = False
        else:
            found = True

        do_rqi = True
        while found:
            theta = SH[0, 0]
            schurvec = X.lincomb(UR[:, 0])
            schurvec.scal(1 / schurvec.norm())
            lschurvec = V.lincomb(URt[:, 0])
            lschurvec.scal(1 / lschurvec.norm())

            st = theta

            nres = (A.apply(schurvec) - (E.apply(schurvec) * theta)).norm()[0]

            logger.info(f'Step: {k}, Theta: {theta:.5e}, Residual: {nres:.5e}')

            if np.abs(np.imag(theta)) / np.abs(theta) < imagtol:
                rres = A.apply(schurvec.real) - E.apply(schurvec.real) * np.real(theta)
                nrr = rres.norm() / np.abs(np.real(theta))
                if nrr - nres < np.finfo(float).eps:
                    schurvec = schurvec.real
                    lschurvec = lschurvec.real
                    theta = np.real(theta)
                    nres = nrr

            if nres < dorqitol and do_rqi and nres >= tol:
                schurvec, lschurvec, theta, nres = _twosided_rqi(
                    A, E, schurvec, lschurvec, theta, nres, imagtol, rqitol,
                    rqi_maxiter)
                do_rqi = False
                if np.abs(np.imag(theta)) / np.abs(theta) < imagtol:
                    rres = A.apply(schurvec.real) - E.apply(schurvec.real) * np.real(theta)
                    nrr = rres.norm() / np.abs(np.real(theta))
                    if nrr - nres < np.finfo(float).eps:
                        schurvec = schurvec.real
                        lschurvec = lschurvec.real
                        theta = np.real(theta)
                        nres = nrr
                if nres >= tol:
                    logger.warning(
                        'Two-sided RQI did not reach desired tolerance.')

            found = nr_converged < nwanted and nres < tol

            if found:
                poles = np.append(poles, theta)
                logger.info(f'Pole: {theta:.5e}')

                Q.append(schurvec)
                Qt.append(lschurvec)
                Esch = E.apply(schurvec)
                Qs.append(Esch)
                Qts.append(E.apply_adjoint(lschurvec))

                nqqt = lschurvec.inner(Esch)[0][0]
                Q[-1].scal(1 / nqqt)
                Qs[-1].scal(1 / nqqt)

                nr_converged += 1

                if k > 1:
                    X = X.lincomb(UR[:, 1:k].T)
                    V = V.lincomb(URt[:, 1:k].T)
                else:
                    X = A.source.empty()
                    V = A.source.empty()

                if np.abs(np.imag(theta)) / np.abs(theta) < imagtol:
                    gram_schmidt(V, atol=0, rtol=0, copy=False)
                    gram_schmidt(X, atol=0, rtol=0, copy=False)

                B_defl -= E.apply(Q[-1].lincomb(Qt[-1].inner(B_defl).T))
                C_defl -= E.apply_adjoint(Qt[-1].lincomb(Q[-1].inner(C_defl).T))

                k -= 1

                cce = theta.conj()
                if np.abs(np.imag(cce)) / np.abs(cce) >= imagtol:

                    ccv = schurvec.conj()
                    ccv.scal(1 / ccv.norm())

                    r = A.apply(ccv) - E.apply(ccv) * cce

                    if r.norm() / np.abs(cce) < conjtol:
                        logger.info(f'Conjugate Pole: {cce:.5e}')
                        poles = np.append(poles, cce)

                        Q.append(ccv)
                        ccvt = lschurvec.conj()
                        Qt.append(ccvt)

                        Esch = E.apply(ccv)
                        Qs.append(Esch)
                        Qts.append(E.apply_adjoint(ccvt))

                        nqqt = ccvt.inner(E.apply(ccv))[0][0]
                        Q[-1].scal(1 / nqqt)
                        Qs[-1].scal(1 / nqqt)

                        gram_schmidt(V, atol=0, rtol=0, copy=False)
                        gram_schmidt(X, atol=0, rtol=0, copy=False)

                        B_defl -= E.apply(Q[-1].lincomb(Qt[-1].inner(B_defl).T))
                        C_defl -= E.apply_adjoint(Qt[-1].lincomb(Q[-1].inner(C_defl).T))

                AX = A.apply(X)
                if k > 0:
                    G = V.inner(E.apply(X))
                    H = V.inner(AX)
                    SH, UR, URt, res = _select_max_eig(H, G, X, V, B_defl, C_defl, which)
                    found = np.any(res >= np.finfo(float).eps)
                else:
                    G = np.empty((0, 1))
                    H = np.empty((0, 1))
                    found = False

                if nr_converged < nwanted:
                    if found:
                        st = SH[0, 0]
                    else:
                        st = np.random.uniform() * 10.j

                    if shift_nr < nr_shifts:
                        st = shifts[shift_nr]
                        shift_nr += 1
            elif k >= krestart:
                logger.info('Perform restart...')
                EX = E.apply(X)
                RR = AX.lincomb(UR.T) - EX.lincomb(UR.T).lincomb(SH.T)

                minidx = RR.norm().argmin()
                k = 1

                X = X.lincomb(UR[:, minidx])
                V = V.lincomb(URt[:, minidx])

                gram_schmidt(V, atol=0, rtol=0, copy=False)
                gram_schmidt(X, atol=0, rtol=0, copy=False)

                G = V.inner(E.apply(X))
                AX = A.apply(X)
                H = V.inner(AX)
                nrestart += 1

        if k >= krestart:
            logger.info('Perform restart...')
            EX = E.apply(X)
            RR = AX.lincomb(UR.T) - EX.lincomb(UR.T).lincomb(SH.T)

            minidx = RR.norm().argmin()
            k = 1

            X = X.lincomb(UR[:, minidx])
            V = V.lincomb(URt[:, minidx])

            gram_schmidt(V, atol=0, rtol=0, copy=False)
            gram_schmidt(X, atol=0, rtol=0, copy=False)

            G = V.inner(E.apply(X))
            AX = A.apply(X)
            H = V.inner(AX)
            nrestart += 1

        if nr_converged == nwanted or nrestart == maxrestart:
            rightev = Q
            leftev = Qt
            absres = np.empty(len(poles))
            residues = []
            for i in range(len(poles)):
                leftev[i].scal(1 / leftev[i].inner(E.apply(rightev[i]))[0][0])
                residues.append(C.inner(rightev[i]) @ leftev[i].inner(B))
                absres[i] = spla.norm(residues[-1], ord=2)
            residues = np.array(residues)

            if which == 'LR':
                idx = np.argsort(-absres / np.abs(np.real(poles)))
            elif which == 'LS':
                idx = np.argsort(-absres / np.abs(poles))
            elif which == 'LM':
                idx = np.argsort(-absres)
            else:
                raise ValueError('Unknown SAMDP selection strategy.')

            residues = residues[idx]
            poles = poles[idx]
            rightev = rightev[idx]
            leftev = leftev[idx]
            if nr_converged < nwanted:
                logger.warning(
                    'The specified number of poles could not be computed.')
            break

    return poles, residues, rightev, leftev
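
A minimal usage sketch (my addition, not part of the original listing): the test matrix, sizes and seeds below are illustrative assumptions; only the samdp call itself follows the signature documented above.

# Hypothetical usage sketch for samdp, assuming a pyMOR version matching this listing.
import numpy as np
import scipy.sparse as sps

from pymor.algorithms.samdp import samdp
from pymor.operators.numpy import NumpyMatrixOperator

n = 100
A = NumpyMatrixOperator(sps.diags(np.arange(-1., -n - 1., -1.)))  # stable diagonal test matrix
B = A.source.random(2, seed=42)   # input directions as a VectorArray
C = A.source.random(3, seed=43)   # output directions as a VectorArray

poles, residues, rightev, leftev = samdp(A, None, B, C, nwanted=4, which='LR')
print(poles)            # dominant poles, sorted by the 'LR' criterion
print(residues.shape)   # (len(poles), len(C), len(B))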
Beispiel #45
0
def eigs(A,
         E=None,
         k=3,
         which='LM',
         b=None,
         l=None,
         maxiter=1000,
         tol=1e-13,
         imag_tol=1e-12,
         complex_pair_tol=1e-12,
         seed=0):
    """Approximate a few eigenvalues of a linear |Operator|.

    Computes `k` eigenvalues `w` with corresponding eigenvectors `v` which solve
    the eigenvalue problem

    .. math::
        A v_i = w_i v_i

    or the generalized eigenvalue problem

    .. math::
        A v_i = w_i E v_i

    if `E` is not `None`.

    The implementation is based on Algorithm 4.2 in [RL95]_.

    Parameters
    ----------
    A
        The real linear |Operator| for which the eigenvalues are to be computed.
    E
        The real linear |Operator| which defines the generalized eigenvalue problem.
    k
        The number of eigenvalues and eigenvectors which are to be computed.
    which
        A string specifying which `k` eigenvalues and eigenvectors to compute:

        - `'LM'`: select eigenvalues with largest magnitude
        - `'SM'`: select eigenvalues with smallest magnitude
        - `'LR'`: select eigenvalues with largest real part
        - `'SR'`: select eigenvalues with smallest real part
        - `'LI'`: select eigenvalues with largest imaginary part
        - `'SI'`: select eigenvalues with smallest imaginary part
    b
        Initial vector for Arnoldi iteration. Default is a random vector.
    l
        The size of the Arnoldi factorization. Default is `min(n - 1, max(2*k + 1, 20))`.
    maxiter
        The maximum number of iterations.
    tol
        The relative error tolerance for the Ritz estimates.
    imag_tol
        Relative imaginary parts below this tolerance are set to 0.
    complex_pair_tol
        Tolerance for detecting pairs of complex conjugate eigenvalues.
    seed
        Random seed which is used for computing the initial vector for the Arnoldi
        iteration.

    Returns
    -------
    w
        A 1D |NumPy array| which contains the computed eigenvalues.
    v
        A |VectorArray| which contains the computed eigenvectors.
    """

    logger = getLogger('pymor.algorithms.eigs.eigs')

    assert isinstance(A, Operator) and A.linear
    assert not A.parametric
    assert A.source == A.range

    if E is None:
        E = IdentityOperator(A.source)
    else:
        assert isinstance(E, Operator) and E.linear
        assert not E.parametric
        assert E.source == E.range
        assert E.source == A.source

    if b is None:
        b = A.source.random(seed=seed)
    else:
        assert b in A.source

    n = A.source.dim
    l_min = 20

    if l is None:
        l = min(n - 1, max(2 * k + 1, l_min))

    assert k < n
    assert l > k

    V, H, f = _arnoldi(A, E, k, b)
    k0 = k
    i = 0

    while True:
        i += 1

        V, H, f = _extend_arnoldi(A, E, V, H, f, l - k)

        ew, ev = spla.eig(H)

        # truncate small imaginary parts
        ew.imag[np.abs(ew.imag) / np.abs(ew) < imag_tol] = 0

        if which == 'LM':
            idx = np.argsort(-np.abs(ew))
        elif which == 'SM':
            idx = np.argsort(np.abs(ew))
        elif which == 'LR':
            idx = np.argsort(-ew.real)
        elif which == 'SR':
            idx = np.argsort(ew.real)
        elif which == 'LI':
            idx = np.argsort(-np.abs(ew.imag))
        elif which == 'SI':
            idx = np.argsort(np.abs(ew.imag))

        k = k0
        ews = ew[idx]
        evs = ev[:, idx]

        rres = f.l2_norm()[0] * np.abs(evs[l - 1]) / np.abs(ews)

        # increase k by one in order to keep complex conjugate pairs together
        if ews[k - 1].imag != 0 and ews[k - 1].imag + ews[k].imag < complex_pair_tol:
            k += 1

        logger.info(
            f'Maximum of relative Ritz estimates at step {i}: {rres[:k].max():.5e}'
        )

        if np.all(rres[:k] <= tol) or i >= maxiter:
            break

        # increase k in order to prevent stagnation
        k = min(l - 1, k + min(np.count_nonzero(rres[:k] <= tol),
                               (l - k) // 2))

        # sort shifts for QR iteration based on their residual
        shifts = ews[k:l]
        srres = rres[k:l]
        idx = np.argsort(-srres)
        srres = srres[idx]
        shifts = shifts[idx]

        # don't use converged unwanted Ritz values as shifts
        shifts = shifts[srres != 0]
        k += np.count_nonzero(srres == 0)
        if shifts[0].imag != 0 and shifts[0].imag + shifts[1].imag >= complex_pair_tol:
            shifts = shifts[1:]
            k += 1

        H, Qs = _qr_iteration(H, shifts)

        V = V.lincomb(Qs.T)
        f = V[k] * H[k, k - 1] + f * Qs[l - 1, k - 1]
        V = V[:k]
        H = H[:k, :k]

    return ews[:k0], V.lincomb(evs[:, :k0].T)
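
A minimal usage sketch (my addition, not part of the original listing); the diagonal test matrix is an illustrative assumption.

# Hypothetical usage sketch for eigs.
import numpy as np
import scipy.sparse as sps

from pymor.algorithms.eigs import eigs
from pymor.operators.numpy import NumpyMatrixOperator

n = 1000
A = NumpyMatrixOperator(sps.diags(np.arange(1., n + 1.)))  # spectrum 1, 2, ..., 1000

w, v = eigs(A, k=3, which='LM')  # three eigenvalues of largest magnitude
print(w.real)                    # approximately [1000., 999., 998.]
print(len(v))                    # 3 eigenvectors, returned as a VectorArray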
Beispiel #46
0
def apply_inverse(op, V, initial_guess=None, options=None, least_squares=False, check_finite=True,
                  default_solver='scipy_spsolve', default_least_squares_solver='scipy_least_squares_lsmr'):
    """Solve linear equation system.

    Applies the inverse of `op` to the vectors in `V` using SciPy.

    Parameters
    ----------
    op
        The linear, non-parametric |Operator| to invert.
    V
        |VectorArray| of right-hand sides for the equation system.
    initial_guess
        |VectorArray| with the same length as `V` containing initial guesses
        for the solution.  Some implementations of `apply_inverse` may
        ignore this parameter.  If `None` a solver-dependent default is used.
    options
        The |solver_options| to use (see :func:`solver_options`).
    least_squares
        If `True`, return least squares solution.
    check_finite
        Test if solution only contains finite values.
    default_solver
        Default solver to use (scipy_spsolve, scipy_bicgstab, scipy_bicgstab_spilu,
        scipy_lgmres, scipy_least_squares_lsmr, scipy_least_squares_lsqr).
    default_least_squares_solver
        Default solver to use for least squares problems (scipy_least_squares_lsmr,
        scipy_least_squares_lsqr).

    Returns
    -------
    |VectorArray| of the solution vectors.
    """

    assert V in op.range
    assert initial_guess is None or initial_guess in op.source and len(initial_guess) == len(V)

    if isinstance(op, NumpyMatrixOperator):
        matrix = op.matrix
    else:
        from pymor.algorithms.to_matrix import to_matrix
        matrix = to_matrix(op)

    options = _parse_options(options, solver_options(), default_solver, default_least_squares_solver, least_squares)

    V = V.to_numpy()
    initial_guess = initial_guess.to_numpy() if initial_guess is not None else None
    promoted_type = np.promote_types(matrix.dtype, V.dtype)
    R = np.empty((len(V), matrix.shape[1]), dtype=promoted_type)

    if options['type'] == 'scipy_bicgstab':
        for i, VV in enumerate(V):
            R[i], info = bicgstab(matrix, VV, initial_guess[i] if initial_guess is not None else None,
                                  tol=options['tol'], maxiter=options['maxiter'])
            if info != 0:
                if info > 0:
                    raise InversionError(f'bicgstab failed to converge after {info} iterations')
                else:
                    raise InversionError('bicgstab failed with error code {} (illegal input or breakdown)'.
                                         format(info))
    elif options['type'] == 'scipy_bicgstab_spilu':
        if Version(scipy.version.version) >= Version('0.19'):
            ilu = spilu(matrix, drop_tol=options['spilu_drop_tol'], fill_factor=options['spilu_fill_factor'],
                        drop_rule=options['spilu_drop_rule'], permc_spec=options['spilu_permc_spec'])
        else:
            if options['spilu_drop_rule']:
                logger = getLogger('pymor.operators.numpy._apply_inverse')
                logger.error("ignoring drop_rule in ilu factorization due to old SciPy")
            ilu = spilu(matrix, drop_tol=options['spilu_drop_tol'], fill_factor=options['spilu_fill_factor'],
                        permc_spec=options['spilu_permc_spec'])
        precond = LinearOperator(matrix.shape, ilu.solve)
        for i, VV in enumerate(V):
            R[i], info = bicgstab(matrix, VV, initial_guess[i] if initial_guess is not None else None,
                                  tol=options['tol'], maxiter=options['maxiter'], M=precond)
            if info != 0:
                if info > 0:
                    raise InversionError(f'bicgstab failed to converge after {info} iterations')
                else:
                    raise InversionError('bicgstab failed with error code {} (illegal input or breakdown)'.
                                         format(info))
    elif options['type'] == 'scipy_spsolve':
        try:
            # maybe remove unusable factorization:
            if hasattr(matrix, 'factorization'):
                fdtype = matrix.factorizationdtype
                if not np.can_cast(V.dtype, fdtype, casting='safe'):
                    del matrix.factorization

            if Version(scipy.version.version) >= Version('0.14'):
                if hasattr(matrix, 'factorization'):
                    # we may use a complex factorization of a real matrix to
                    # apply it to a real vector. In that case, we downcast
                    # the result here, removing the imaginary part,
                    # which should be zero.
                    R = matrix.factorization.solve(V.T).T.astype(promoted_type, copy=False)
                elif options['keep_factorization']:
                    # the matrix is always converted to the promoted type.
                    # if matrix.dtype == promoted_type, this is a no_op
                    matrix.factorization = splu(matrix_astype_nocopy(matrix.tocsc(), promoted_type),
                                                permc_spec=options['permc_spec'])
                    matrix.factorizationdtype = promoted_type
                    R = matrix.factorization.solve(V.T).T
                else:
                    # the matrix is always converted to the promoted type.
                    # if matrix.dtype == promoted_type, this is a no_op
                    R = spsolve(matrix_astype_nocopy(matrix, promoted_type), V.T, permc_spec=options['permc_spec']).T
            else:
                # see if-part for documentation
                if hasattr(matrix, 'factorization'):
                    for i, VV in enumerate(V):
                        R[i] = matrix.factorization.solve(VV).astype(promoted_type, copy=False)
                elif options['keep_factorization']:
                    matrix.factorization = splu(matrix_astype_nocopy(matrix.tocsc(), promoted_type),
                                                permc_spec=options['permc_spec'])
                    matrix.factorizationdtype = promoted_type
                    for i, VV in enumerate(V):
                        R[i] = matrix.factorization.solve(VV)
                elif len(V) > 1:
                    factorization = splu(matrix_astype_nocopy(matrix.tocsc(), promoted_type),
                                         permc_spec=options['permc_spec'])
                    for i, VV in enumerate(V):
                        R[i] = factorization.solve(VV)
                else:
                    R = spsolve(matrix_astype_nocopy(matrix, promoted_type), V.T,
                                permc_spec=options['permc_spec']).reshape((1, -1))
        except RuntimeError as e:
            raise InversionError(e)
    elif options['type'] == 'scipy_lgmres':
        for i, VV in enumerate(V):
            R[i], info = lgmres(matrix, VV, initial_guess[i] if initial_guess is not None else None,
                                tol=options['tol'],
                                atol=options['tol'],
                                maxiter=options['maxiter'],
                                inner_m=options['inner_m'],
                                outer_k=options['outer_k'])
            if info > 0:
                raise InversionError(f'lgmres failed to converge after {info} iterations')
            assert info == 0
    elif options['type'] == 'scipy_least_squares_lsmr':
        from scipy.sparse.linalg import lsmr
        for i, VV in enumerate(V):
            R[i], info, itn, _, _, _, _, _ = lsmr(matrix, VV,
                                                  damp=options['damp'],
                                                  atol=options['atol'],
                                                  btol=options['btol'],
                                                  conlim=options['conlim'],
                                                  maxiter=options['maxiter'],
                                                  show=options['show'],
                                                  x0=initial_guess[i] if initial_guess is not None else None)
            assert 0 <= info <= 7
            if info == 7:
                raise InversionError(f'lsmr failed to converge after {itn} iterations')
    elif options['type'] == 'scipy_least_squares_lsqr':
        for i, VV in enumerate(V):
            R[i], info, itn, _, _, _, _, _, _, _ = lsqr(matrix, VV,
                                                        damp=options['damp'],
                                                        atol=options['atol'],
                                                        btol=options['btol'],
                                                        conlim=options['conlim'],
                                                        iter_lim=options['iter_lim'],
                                                        show=options['show'],
                                                        x0=initial_guess[i] if initial_guess is not None else None)
            assert 0 <= info <= 7
            if info == 7:
                raise InversionError(f'lsqr failed to converge after {itn} iterations')
    else:
        raise ValueError('Unknown solver type')

    if check_finite:
        if not np.isfinite(np.sum(R)):
            raise InversionError('Result contains non-finite values')

    return op.source.from_numpy(R)
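
A minimal usage sketch (my addition, not part of the original listing). It drives the function through the operator interface with the default scipy_spsolve backend; the tridiagonal test matrix is an illustrative assumption.

# Hypothetical usage sketch.
import numpy as np
import scipy.sparse as sps

from pymor.operators.numpy import NumpyMatrixOperator

n = 100
diagonals = [np.full(n - 1, -1.), np.full(n, 2.), np.full(n - 1, -1.)]
op = NumpyMatrixOperator(sps.diags(diagonals, [-1, 0, 1]).tocsc())
rhs = op.range.from_numpy(np.ones(n))

U = op.apply_inverse(rhs)              # dispatches to the SciPy-based solver shown above
print((op.apply(U) - rhs).l2_norm())   # residual norm, close to zero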
Beispiel #47
0
    def solve_lyap_lrcf(A,
                        E,
                        B,
                        trans=False,
                        options=None,
                        default_solver=None):
        """Compute an approximate low-rank solution of a Lyapunov equation.

        See :func:`pymor.algorithms.lyapunov.solve_lyap_lrcf` for a
        general description.

        This function uses `pymess.glyap` and `pymess.lradi`.
        For both methods,
        :meth:`~pymor.vectorarrays.interfaces.VectorArrayInterface.to_numpy`
        and
        :meth:`~pymor.vectorarrays.interfaces.VectorSpaceInterface.from_numpy`
        need to be implemented for `A.source`.
        Additionally, since `glyap` is a dense solver, it expects
        :func:`~pymor.algorithms.to_matrix.to_matrix` to work for A and
        E.

        If the solver is not specified using the options or
        default_solver arguments, `glyap` is used for small problems
        (smaller than defined with
        :func:`~pymor.algorithms.lyapunov.mat_eqn_sparse_min_size`) and
        `lradi` for large problems.

        Parameters
        ----------
        A
            The non-parametric |Operator| A.
        E
            The non-parametric |Operator| E or `None`.
        B
            The operator B as a |VectorArray| from `A.source`.
        trans
            Whether the first |Operator| in the Lyapunov equation is
            transposed.
        options
            The solver options to use (see
            :func:`lyap_lrcf_solver_options`).
        default_solver
            Default solver to use (pymess_lradi, pymess_glyap).
            If `None`, choose solver depending on the dimension of A.

        Returns
        -------
        Z
            Low-rank Cholesky factor of the Lyapunov equation solution,
            |VectorArray| from `A.source`.
        """

        _solve_lyap_lrcf_check_args(A, E, B, trans)
        if default_solver is None:
            default_solver = 'pymess_lradi' if A.source.dim >= mat_eqn_sparse_min_size() else 'pymess_glyap'
        options = _parse_options(options, lyap_lrcf_solver_options(),
                                 default_solver, None, False)

        if options['type'] == 'pymess_glyap':
            X = solve_lyap_dense(to_matrix(A, format='dense'),
                                 to_matrix(E, format='dense') if E else None,
                                 B.to_numpy().T if not trans else B.to_numpy(),
                                 trans=trans,
                                 options=options)
            Z = _chol(X)
        elif options['type'] == 'pymess_lradi':
            opts = options['opts']
            opts.type = pymess.MESS_OP_NONE if not trans else pymess.MESS_OP_TRANSPOSE
            eqn = LyapunovEquation(opts, A, E, B)
            Z, status = pymess.lradi(eqn, opts)
            relres = status.res2_norm / status.res2_0
            if relres > opts.adi.res2_tol:
                logger = getLogger('pymor.bindings.pymess.solve_lyap_lrcf')
                logger.warning(
                    f'Desired relative residual tolerance was not achieved '
                    f'({relres:e} > {opts.adi.res2_tol:e}).')
        else:
            raise ValueError(
                f'Unexpected Lyapunov equation solver ({options["type"]}).')

        return A.source.from_numpy(Z.T)
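
A minimal usage sketch (my addition, not part of the original listing); it assumes the pymess backend is installed, that the import path matches pyMOR's pymess bindings, and uses an illustrative stable test matrix.

# Hypothetical usage sketch.
import numpy as np
import scipy.sparse as sps

from pymor.bindings.pymess import solve_lyap_lrcf
from pymor.operators.numpy import NumpyMatrixOperator

n = 100
A = NumpyMatrixOperator(sps.diags(np.arange(-1., -n - 1., -1.)))
B = A.source.random(2, seed=0)

# With trans=False and E=None this computes Z with A Z Z^T + Z Z^T A^T + B B^T ~ 0.
Z = solve_lyap_lrcf(A, None, B)
print(len(Z))   # number of columns of the low-rank Cholesky factor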
Beispiel #48
0
        def __init__(self, grid, U, bounding_box, codim, title, legend,
                     separate_colorbars, rescale_colorbars, backend):

            assert isinstance(U, VectorArrayInterface) and hasattr(U, 'data') \
                or (isinstance(U, tuple) and all(isinstance(u, VectorArrayInterface) and hasattr(u, 'data') for u in U)
                    and all(len(u) == len(U[0]) for u in U))
            U = (U.data, ) if hasattr(U, 'data') else tuple(u.data for u in U)
            if isinstance(legend, str):
                legend = (legend, )
            assert legend is None or isinstance(
                legend, tuple) and len(legend) == len(U)
            if backend == 'gl':
                widget = GLPatchWidget
            else:
                widget = MatplotlibPatchWidget
                if not separate_colorbars and len(U) > 1:
                    l = getLogger('pymor.gui.qt.visualize_patch')
                    l.warn(
                        'separate_colorbars=False not supported for matplotlib backend'
                    )
                separate_colorbars = True

            class PlotWidget(QWidget):
                def __init__(self):
                    super(PlotWidget, self).__init__()
                    if separate_colorbars:
                        if rescale_colorbars:
                            self.vmins = tuple(np.min(u[0]) for u in U)
                            self.vmaxs = tuple(np.max(u[0]) for u in U)
                        else:
                            self.vmins = tuple(np.min(u) for u in U)
                            self.vmaxs = tuple(np.max(u) for u in U)
                    else:
                        if rescale_colorbars:
                            self.vmins = (min(np.min(u[0])
                                              for u in U), ) * len(U)
                            self.vmaxs = (max(np.max(u[0])
                                              for u in U), ) * len(U)
                        else:
                            self.vmins = (min(np.min(u) for u in U), ) * len(U)
                            self.vmaxs = (max(np.max(u) for u in U), ) * len(U)

                    layout = QHBoxLayout()
                    plot_layout = QGridLayout()
                    self.colorbarwidgets = [
                        ColorBarWidget(self, vmin=vmin, vmax=vmax)
                        for vmin, vmax in izip(self.vmins, self.vmaxs)
                    ]
                    plots = [
                        widget(self,
                               grid,
                               vmin=vmin,
                               vmax=vmax,
                               bounding_box=bounding_box,
                               codim=codim)
                        for vmin, vmax in izip(self.vmins, self.vmaxs)
                    ]
                    if legend:
                        for i, plot, colorbar, l in izip(
                                xrange(len(plots)), plots,
                                self.colorbarwidgets, legend):
                            subplot_layout = QVBoxLayout()
                            caption = QLabel(l)
                            caption.setAlignment(Qt.AlignHCenter)
                            subplot_layout.addWidget(caption)
                            if not separate_colorbars or backend == 'matplotlib':
                                subplot_layout.addWidget(plot)
                            else:
                                hlayout = QHBoxLayout()
                                hlayout.addWidget(plot)
                                hlayout.addWidget(colorbar)
                                subplot_layout.addLayout(hlayout)
                            plot_layout.addLayout(subplot_layout,
                                                  int(i / columns),
                                                  (i % columns), 1, 1)
                    else:
                        for i, plot, colorbar in izip(xrange(len(plots)),
                                                      plots,
                                                      self.colorbarwidgets):
                            if not separate_colorbars or backend == 'matplotlib':
                                plot_layout.addWidget(plot, int(i / columns),
                                                      (i % columns), 1, 1)
                            else:
                                hlayout = QHBoxLayout()
                                hlayout.addWidget(plot)
                                hlayout.addWidget(colorbar)
                                plot_layout.addLayout(hlayout,
                                                      int(i / columns),
                                                      (i % columns), 1, 1)
                    layout.addLayout(plot_layout)
                    if not separate_colorbars:
                        layout.addWidget(self.colorbarwidgets[0])
                        for w in self.colorbarwidgets[1:]:
                            w.setVisible(False)
                    self.setLayout(layout)
                    self.plots = plots

                def set(self, U, ind):
                    if rescale_colorbars:
                        if separate_colorbars:
                            self.vmins = tuple(np.min(u[ind]) for u in U)
                            self.vmaxs = tuple(np.max(u[ind]) for u in U)
                        else:
                            self.vmins = (min(np.min(u[ind])
                                              for u in U), ) * len(U)
                            self.vmaxs = (max(np.max(u[ind])
                                              for u in U), ) * len(U)

                    for u, plot, colorbar, vmin, vmax in izip(
                            U, self.plots, self.colorbarwidgets, self.vmins,
                            self.vmaxs):
                        plot.set(u[ind], vmin=vmin, vmax=vmax)
                        colorbar.set(vmin=vmin, vmax=vmax)

            super(MainWindow, self).__init__(U,
                                             PlotWidget(),
                                             title=title,
                                             length=len(U[0]))
            self.grid = grid
            self.codim = codim
Beispiel #49
0
    def parse_size_string(size):
        # NOTE: reconstructed opening of this truncated helper (an assumption
        # based on the surviving lines below): parse strings like '500K', '2M'
        # or '1G' into byte counts.
        return int(size[:-1]) * 1024 if size[-1] == 'K' else \
            int(size[:-1]) * 1024 ** 2 if size[-1] == 'M' else \
            int(size[:-1]) * 1024 ** 3 if size[-1] == 'G' else \
            int(size)

    if isinstance(disk_max_size, str):
        disk_max_size = parse_size_string(disk_max_size)

    cache_regions['disk'] = SQLiteRegion(path=disk_path, max_size=disk_max_size, persistent=False)
    cache_regions['persistent'] = SQLiteRegion(path=persistent_path, max_size=persistent_max_size, persistent=True)
    cache_regions['memory'] = MemoryRegion(memory_max_keys)

cache_regions = {}

_caching_disabled = int(os.environ.get('PYMOR_CACHE_DISABLE', 0)) == 1
if _caching_disabled:
    from pymor.core.logger import getLogger
    getLogger('pymor.core.cache').warn('caching globally disabled by environment')


def enable_caching():
    """Globally enable caching."""
    global _caching_disabled
    _caching_disabled = int(os.environ.get('PYMOR_CACHE_DISABLE', 0)) == 1


def disable_caching():
    """Globally disable caching."""
    global _caching_disabled
    _caching_disabled = True


def clear_caches():
Beispiel #50
0
def _ricc_rcond_check(solver, rcond):
    if rcond < np.finfo(np.float64).eps:
        logger = getLogger(solver)
        logger.warning(
            f'Estimated reciprocal condition number is small (rcond={rcond:e}). '
            f'Result may not be accurate.')
Beispiel #51
0
    def _call_pymess_dense_nm_gmpare(A,
                                     E,
                                     B,
                                     C,
                                     R,
                                     S,
                                     trans=False,
                                     options=None,
                                     plus=False,
                                     method_name=''):
        """Return the solution from pymess.dense_nm_gmpare solver."""
        A = to_matrix(A, format='dense')
        E = to_matrix(E, format='dense') if E else None
        B = B.to_numpy().T
        C = C.to_numpy()
        S = S.to_numpy().T if S else None

        Q = B.dot(B.T) if not trans else C.T.dot(C)
        pymess_trans = pymess.MESS_OP_NONE if not trans else pymess.MESS_OP_TRANSPOSE
        if not trans:
            RinvC = spla.solve(R, C) if R is not None else C
            G = C.T.dot(RinvC)
            if S is not None:
                RinvST = spla.solve(R, S.T) if R is not None else S.T
                if not plus:
                    A -= S.dot(RinvC)
                    Q -= S.dot(RinvST)
                else:
                    A += S.dot(RinvC)
                    Q += S.dot(RinvST)
        else:
            RinvBT = spla.solve(R, B.T) if R is not None else B.T
            G = B.dot(RinvBT)
            if S is not None:
                RinvST = spla.solve(R, S.T) if R is not None else S.T
                if not plus:
                    A -= RinvBT.T.dot(S.T)
                    Q -= S.dot(RinvST)
                else:
                    A += RinvBT.T.dot(S.T)
                    Q += S.dot(RinvST)
        X, absres, relres = pymess.dense_nm_gmpare(
            None,
            A,
            E,
            Q,
            G,
            plus=plus,
            trans=pymess_trans,
            linesearch=options['linesearch'],
            maxit=options['maxit'],
            absres_tol=options['absres_tol'],
            relres_tol=options['relres_tol'],
            nrm=options['nrm'])
        if absres > options['absres_tol']:
            logger = getLogger('pymor.bindings.pymess.' + method_name)
            logger.warning(
                f'Desired absolute residual tolerance was not achieved '
                f'({absres:e} > {options["absres_tol"]:e}).')
        if relres > options['relres_tol']:
            logger = getLogger('pymor.bindings.pymess.' + method_name)
            logger.warning(
                f'Desired relative residual tolerance was not achieved '
                f'({relres:e} > {options["relres_tol"]:e}).')

        return X
Beispiel #52
0
from pymor.playground.operators.block import BlockOperator
from pymor.playground.reductors import GenericBlockRBReconstructor
from pymor.reductors.basic import reduce_generic_rb
from pymor.vectorarrays.block import BlockVectorArray
from pymor.vectorarrays.list import ListVectorArray
from pymor.vectorarrays.numpy import NumpyVectorArray
import pymor.core.logger

from dune.pymor.la.container import make_listvectorarray

from simdb.run import new_dataset, add_values, add_logfile

logfile = NamedTemporaryFile(delete=False).name
pymor.core.logger.FILENAME = logfile

logger = getLogger('.ORS2016__3_3.main')
logger.setLevel('INFO')


class InstationaryDuneVisualizer(object):
    def __init__(self, disc, prefix):
        self.disc = disc
        self.prefix = prefix

    def visualize(self, U, *args, **kwargs):
        import numpy as np
        dune_disc = self.disc._impl
        assert isinstance(U, ListVectorArray)
        filename = kwargs['filename'] if 'filename' in kwargs else self.prefix
        size = len(U)
        pad = len(str(size))
Beispiel #53
0
def gram_schmidt(A,
                 product=None,
                 atol=1e-13,
                 rtol=1e-13,
                 offset=0,
                 find_duplicates=True,
                 reiterate=True,
                 reiteration_threshold=1e-1,
                 check=True,
                 check_tol=1e-3,
                 copy=True):
    """Orthonormalize a |VectorArray| using the stabilized Gram-Schmidt algorithm.

    Parameters
    ----------
    A
        The |VectorArray| which is to be orthonormalized.
    product
        The inner product |Operator| w.r.t. which to orthonormalize.
        If `None`, the Euclidean product is used.
    atol
        Vectors of norm smaller than `atol` are removed from the array.
    rtol
        Relative tolerance used to detect linear dependent vectors
        (which are then removed from the array).
    offset
        Assume that the first `offset` vectors are already orthonormal and start the
        algorithm at the `offset + 1`-th vector.
    reiterate
        If `True`, orthonormalize again if the norm of the orthogonalized vector is
        much smaller than the norm of the original vector.
    reiteration_threshold
        If `reiterate` is `True`, re-orthonormalize if the ratio between the norms of
        the orthogonalized vector and the original vector is smaller than this value.
    check
        If `True`, check if the resulting |VectorArray| is really orthonormal.
    check_tol
        Tolerance for the check.
    copy
        If `True`, create a copy of `A` instead of modifying `A` in-place.
    find_duplicates
        Unused argument, retained for backwards compatibility.


    Returns
    -------
    The orthonormalized |VectorArray|.
    """

    logger = getLogger('pymor.algorithms.gram_schmidt.gram_schmidt')

    if copy:
        A = A.copy()

    # main loop
    remove = []
    for i in range(offset, len(A)):
        # first calculate norm
        if product is None:
            initial_norm = A[i].l2_norm()[0]
        else:
            initial_norm = np.sqrt(product.pairwise_apply2(A[i], A[i]))[0]

        if initial_norm < atol:
            logger.info("Removing vector {} of norm {}".format(
                i, initial_norm))
            remove.append(i)
            continue

        if i == 0:
            A[0].scal(1 / initial_norm)

        else:
            first_iteration = True
            norm = initial_norm
            # If reiterate is True, reiterate as long as the norm of the vector changes
            # strongly during orthonormalization (due to Andreas Buhr).
            while first_iteration or reiterate and norm / old_norm < reiteration_threshold:

                if first_iteration:
                    first_iteration = False
                else:
                    logger.info('Orthonormalizing vector {} again'.format(i))

                # orthogonalize to all vectors left
                for j in range(i):
                    if j in remove:
                        continue
                    if product is None:
                        p = A[i].pairwise_dot(A[j])[0]
                    else:
                        p = product.pairwise_apply2(A[i], A[j])[0]
                    A[i].axpy(-p, A[j])

                # calculate new norm
                if product is None:
                    old_norm, norm = norm, A[i].l2_norm()[0]
                else:
                    old_norm, norm = norm, np.sqrt(
                        product.pairwise_apply2(A[i], A[i])[0])

                # remove vector if it got too small:
                if norm / initial_norm < rtol:
                    logger.info(
                        "Removing linear dependent vector {}".format(i))
                    remove.append(i)
                    break

            if norm > 0:
                A[i].scal(1 / norm)

    if remove:
        del A[remove]

    if check:
        if product:
            error_matrix = product.apply2(A[offset:len(A)], A)
        else:
            error_matrix = A[offset:len(A)].dot(A)
        error_matrix[:len(A) - offset, offset:len(A)] -= np.eye(len(A) - offset)
        if error_matrix.size > 0:
            err = np.max(np.abs(error_matrix))
            if err >= check_tol:
                raise AccuracyError(
                    'result not orthogonal (max err={})'.format(err))

    return A
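
A minimal usage sketch (my addition, not part of the original listing); the space dimension and seed are illustrative assumptions.

# Hypothetical usage sketch for gram_schmidt.
import numpy as np

from pymor.algorithms.gram_schmidt import gram_schmidt
from pymor.vectorarrays.numpy import NumpyVectorSpace

space = NumpyVectorSpace(5)
A = space.random(3, seed=7)

Q = gram_schmidt(A)   # copy=True by default, so A itself is left unchanged
print(np.allclose(Q.dot(Q), np.eye(len(Q))))   # Gramian equals the identity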
Beispiel #54
0
    'estimator_return': 'eta_red',
    'num_test_samples': 10,
    'estimate_some_errors': True,
    'uniform_enrichment_factor': 10,
    'local_indicators': 'eta_red',
    'marking_strategy': 'doerfler_and_age',
    'marking_max_age': 4,
    'doerfler_marking_theta': 0.33,
    'local_boundary_values': 'dirichlet',
    'online_target_error': 0.05,
    'online_max_extensions': 20
}
DATASET_ID = 'OS2015_SISC__6_2__academic_example'

pymor.core.logger.MAX_HIERACHY_LEVEL = 2
getLogger('pymor.WrappedDiscretization').setLevel('WARN')
getLogger('pymor.algorithms').setLevel('INFO')
getLogger('dune.pymor.discretizations').setLevel('WARN')

if __name__ == '__main__':

    logfile = NamedTemporaryFile(delete=False).name
    pymor.core.logger.FILENAME = logfile
    new_dataset(DATASET_ID, **config)

    detailed_data = prepare(config)
    print('')
    offline_data = offline_phase(config, detailed_data)
    print('')
    _ = online_phase(config, detailed_data, offline_data)
Beispiel #55
0
def lgmres(A, b, x0=None, tol=1e-5, maxiter=1000, M=None, callback=None,
           inner_m=30, outer_k=3, outer_v=None, store_outer_Av=True):
    if A.source != A.range:
        raise InversionError
    from scipy.linalg import lstsq
    x = A.source.zeros() if x0 is None else x0.copy()

    # psolve = M.matvec

    if outer_v is None:
        outer_v = []

    b_norm = b.l2_norm()[0]
    if b_norm == 0:
        b_norm = 1

    for k_outer in range(maxiter):
        r_outer = A.apply(x) - b

        # -- callback
        if callback is not None:
            callback(x)

        # -- check stopping condition
        r_norm = r_outer.l2_norm()[0]
        if r_norm < tol * b_norm or r_norm < tol:
            break

        # -- inner LGMRES iteration
        vs0 = -r_outer   # -psolve(r_outer)
        inner_res_0 = vs0.l2_norm()[0]

        if inner_res_0 == 0:
            rnorm = r_outer.l2_norm()[0]
            raise RuntimeError("Preconditioner returned a zero vector; "
                               "|v| ~ %.1g, |M v| = 0" % rnorm)

        vs0.scal(1.0/inner_res_0)
        hs = []
        vs = [vs0]
        ws = []
        y = None

        for j in range(1, 1 + inner_m + len(outer_v)):
            # -- Arnoldi process:
            #
            #    Build an orthonormal basis V and matrices W and H such that
            #        A W = V H
            #    Columns of W, V, and H are stored in `ws`, `vs` and `hs`.
            #
            #    The first column of V is always the residual vector, `vs0`;
            #    V has *one more column* than the other of the three matrices.
            #
            #    The other columns in V are built by feeding in, one
            #    by one, some vectors `z` and orthonormalizing them
            #    against the basis so far. The trick here is to
            #    feed in first some augmentation vectors, before
            #    starting to construct the Krylov basis on `v0`.
            #
            #    It was shown in [BJM]_ that a good choice (the LGMRES choice)
            #    for these augmentation vectors are the `dx` vectors obtained
            #    from a couple of the previous restart cycles.
            #
            #    Note especially that while `vs0` is always the first
            #    column in V, there is no reason why it should also be
            #    the first column in W. (In fact, below `vs0` comes in
            #    W only after the augmentation vectors.)
            #
            #    The rest of the algorithm then goes as in GMRES, one
            #    solves a minimization problem in the smaller subspace
            #    spanned by W (range) and V (image).
            #
            #    XXX: Below, I'm lazy and use `lstsq` to solve the
            #    small least squares problem. Performance-wise, this
            #    is in practice acceptable, but it could be nice to do
            #    it on the fly with Givens etc.
            #

            #     ++ evaluate
            v_new = None
            if j < len(outer_v) + 1:
                z, v_new = outer_v[j-1]
            elif j == len(outer_v) + 1:
                z = vs0
            else:
                z = vs[-1]

            if v_new is None:
                v_new = A.apply(z)  # psolve(matvec(z))
            else:
                # Note: v_new is modified in-place below. Must make a
                # copy to ensure that the outer_v vectors are not
                # clobbered.
                v_new = v_new.copy()

            #     ++ orthogonalize
            hcur = []
            for v in vs:
                alpha = v.dot(v_new)[0, 0]
                hcur.append(alpha)
                v_new.axpy(-alpha, v)  # v_new -= alpha*v
            hcur.append(v_new.l2_norm()[0])

            if hcur[-1] == 0:
                # Exact solution found; bail out.
                # Zero basis vector (v_new) in the least-squares problem
                # does no harm, so we can just use the same code as usually;
                # it will give zero (inner) residual as a result.
                bailout = True
            else:
                bailout = False
                v_new.scal(1.0/hcur[-1])

            vs.append(v_new)
            hs.append(hcur)
            ws.append(z)

            # XXX: Ugly: should implement the GMRES iteration properly,
            #      with Givens rotations and not using lstsq. Instead, we
            #      spare some work by solving the LSQ problem only every 5
            #      iterations.
            if not bailout and j % 5 != 1 and j < inner_m + len(outer_v) - 1:
                continue

            # -- GMRES optimization problem
            hess = np.zeros((j+1, j))
            e1 = np.zeros((j+1,))
            e1[0] = inner_res_0
            for q in range(j):
                hess[:(q+2), q] = hs[q]

            y, resids, rank, s = lstsq(hess, e1)
            inner_res = np.linalg.norm(np.dot(hess, y) - e1)

            # -- check for termination
            if inner_res < tol * inner_res_0:
                break

        # -- GMRES terminated: eval solution
        dx = ws[0]*y[0]
        for w, yc in zip(ws[1:], y[1:]):
            dx.axpy(yc, w)  # dx += w*yc

        # -- Store LGMRES augmentation vectors
        nx = dx.l2_norm()[0]
        if store_outer_Av:
            q = np.dot(hess, y)
            ax = vs[0]*q[0]
            for v, qc in zip(vs[1:], q[1:]):
                ax.axpy(qc, v)
            outer_v.append((dx * (1./nx), ax * (1./nx)))
        else:
            outer_v.append((dx * (1./nx), None))

        # -- Retain only a finite number of augmentation vectors
        while len(outer_v) > outer_k:
            del outer_v[0]

        # -- Apply step
        x += dx
    else:
        # didn't converge ...
        return x, maxiter

    getLogger('pymor.algorithms.genericsolvers.lgmres').info('Converged after {} iterations'.format(k_outer + 1))

    return x, 0
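
A minimal usage sketch (my addition, not part of the original listing); the import path follows the logger name used above, and the test operator is an illustrative assumption.

# Hypothetical usage sketch.
import numpy as np

from pymor.algorithms.genericsolvers import lgmres
from pymor.operators.numpy import NumpyMatrixOperator

op = NumpyMatrixOperator(np.diag(np.arange(1., 11.)))
b = op.range.from_numpy(np.ones(10))

x, info = lgmres(op, b, tol=1e-10)   # info == 0 signals convergence
print((op.apply(x) - b).l2_norm())   # residual norm, close to zero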
Beispiel #56
0
def gram_schmidt_biorth(V,
                        W,
                        product=None,
                        reiterate=True,
                        reiteration_threshold=1e-1,
                        check=True,
                        check_tol=1e-3,
                        copy=True):
    """Biorthonormalize a pair of |VectorArrays| using the biorthonormal Gram-Schmidt process.

    See Algorithm 1 in [BKS11]_.

    .. [BKS11]  P. Benner, M. Köhler, J. Saak,
                Sparse-Dense Sylvester Equations in :math:`\mathcal{H}_2`-Model Order Reduction,
                Max Planck Institute Magdeburg Preprint, available from http://www.mpi-magdeburg.mpg.de/preprints/,
                2011.

    Parameters
    ----------
    V, W
        The |VectorArrays| which are to be biorthonormalized.
    product
        The inner product |Operator| w.r.t. which to biorthonormalize.
        If `None`, the Euclidean product is used.
    reiterate
        If `True`, orthonormalize again if the norm of the orthogonalized vector is
        much smaller than the norm of the original vector.
    reiteration_threshold
        If `reiterate` is `True`, re-orthonormalize if the ratio between the norms of
        the orthogonalized vector and the original vector is smaller than this value.
    check
        If `True`, check if the resulting |VectorArrays| are really biorthonormal.
    check_tol
        Tolerance for the check.
    copy
        If `True`, create a copy of `V` and `W` instead of modifying `V` and `W` in-place.


    Returns
    -------
    The biorthonormalized |VectorArrays|.
    """
    assert V.space == W.space
    assert len(V) == len(W)

    logger = getLogger('pymor.algorithms.gram_schmidt.gram_schmidt_biorth')

    if copy:
        V = V.copy()
        W = W.copy()

    # main loop
    for i in range(len(V)):
        # calculate norm of V[i]
        if product is None:
            initial_norm = V[i].l2_norm()[0]
        else:
            initial_norm = np.sqrt(product.pairwise_apply2(V[i], V[i]))[0]

        # project V[i]
        if i == 0:
            V[0].scal(1 / initial_norm)
        else:
            first_iteration = True
            norm = initial_norm
            # If reiterate is True, reiterate as long as the norm of the vector changes
            # strongly during projection.
            while first_iteration or reiterate and norm / old_norm < reiteration_threshold:
                if first_iteration:
                    first_iteration = False
                else:
                    logger.info('Projecting vector V[{}] again'.format(i))

                for j in range(i):
                    # project by (I - V[j] * W[j]^T * E)
                    if product is None:
                        p = W[j].pairwise_dot(V[i])[0]
                    else:
                        p = product.pairwise_apply2(W[j], V[i])[0]
                    V[i].axpy(-p, V[j])

                # calculate new norm
                if product is None:
                    old_norm, norm = norm, V[i].l2_norm()[0]
                else:
                    old_norm, norm = norm, np.sqrt(
                        product.pairwise_apply2(V[i], V[i])[0])

            if norm > 0:
                V[i].scal(1 / norm)

        # calculate norm of W[i]
        if product is None:
            initial_norm = W[i].l2_norm()[0]
        else:
            initial_norm = np.sqrt(product.pairwise_apply2(W[i], W[i]))[0]

        # project W[i]
        if i == 0:
            W[0].scal(1 / initial_norm)
        else:
            first_iteration = True
            norm = initial_norm
            # If reiterate is True, reiterate as long as the norm of the vector changes
            # strongly during projection.
            while first_iteration or reiterate and norm / old_norm < reiteration_threshold:
                if first_iteration:
                    first_iteration = False
                else:
                    logger.info('Projecting vector W[{}] again'.format(i))

                for j in range(i):
                    # project by (I - W[j] * V[j]^T * E)
                    if product is None:
                        p = V[j].pairwise_dot(W[i])[0]
                    else:
                        p = product.pairwise_apply2(V[j], W[i])[0]
                    W[i].axpy(-p, W[j])

                # calculate new norm
                if product is None:
                    old_norm, norm = norm, W[i].l2_norm()[0]
                else:
                    old_norm, norm = norm, np.sqrt(
                        product.pairwise_apply2(W[i], W[i])[0])

            if norm > 0:
                W[i].scal(1 / norm)

        # rescale V[i]
        if product is None:
            p = W[i].pairwise_dot(V[i])[0]
        else:
            p = product.pairwise_apply2(W[i], V[i])[0]
        V[i].scal(1 / p)

    if check:
        if product:
            error_matrix = product.apply2(W, V)
        else:
            error_matrix = W.dot(V)
        error_matrix -= np.eye(len(V))
        if error_matrix.size > 0:
            err = np.max(np.abs(error_matrix))
            if err >= check_tol:
                raise AccuracyError(
                    'Result not biorthogonal (max err={})'.format(err))

    return V, W
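
A minimal usage sketch (my addition, not part of the original listing); the space dimension and seeds are illustrative assumptions.

# Hypothetical usage sketch for gram_schmidt_biorth.
import numpy as np

from pymor.algorithms.gram_schmidt import gram_schmidt_biorth
from pymor.vectorarrays.numpy import NumpyVectorSpace

space = NumpyVectorSpace(6)
V = space.random(3, seed=1)
W = space.random(3, seed=2)

V, W = gram_schmidt_biorth(V, W)
print(np.allclose(W.dot(V), np.eye(3)))   # biorthogonality: W^T V == I up to check_tol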
Beispiel #57
0
        def __init__(self, grid, U, bounding_box, codim, title, legend, separate_colorbars, rescale_colorbars, backend):

            assert isinstance(U, VectorArrayInterface) and hasattr(U, 'data') \
                or (isinstance(U, tuple) and all(isinstance(u, VectorArrayInterface) and hasattr(u, 'data') for u in U)
                    and all(len(u) == len(U[0]) for u in U))
            U = (U.data.astype(np.float64, copy=False),) if hasattr(U, 'data') else \
                tuple(u.data.astype(np.float64, copy=False) for u in U)
            if isinstance(legend, str):
                legend = (legend,)
            assert legend is None or isinstance(legend, tuple) and len(legend) == len(U)
            if backend == 'gl':
                widget = GLPatchWidget
                cbar_widget = ColorBarWidget
            else:
                widget = MatplotlibPatchWidget
                cbar_widget = None
                if not separate_colorbars and len(U) > 1:
                    l = getLogger('pymor.gui.qt.visualize_patch')
                    l.warn('separate_colorbars=False not supported for matplotlib backend')
                separate_colorbars = True

            class PlotWidget(QWidget):
                def __init__(self):
                    super().__init__()
                    if separate_colorbars:
                        if rescale_colorbars:
                            self.vmins = tuple(np.min(u[0]) for u in U)
                            self.vmaxs = tuple(np.max(u[0]) for u in U)
                        else:
                            self.vmins = tuple(np.min(u) for u in U)
                            self.vmaxs = tuple(np.max(u) for u in U)
                    else:
                        if rescale_colorbars:
                            self.vmins = (min(np.min(u[0]) for u in U),) * len(U)
                            self.vmaxs = (max(np.max(u[0]) for u in U),) * len(U)
                        else:
                            self.vmins = (min(np.min(u) for u in U),) * len(U)
                            self.vmaxs = (max(np.max(u) for u in U),) * len(U)

                    layout = QHBoxLayout()
                    plot_layout = QGridLayout()
                    self.colorbarwidgets = [cbar_widget(self, vmin=vmin, vmax=vmax) if cbar_widget else None
                                            for vmin, vmax in zip(self.vmins, self.vmaxs)]
                    plots = [widget(self, grid, vmin=vmin, vmax=vmax, bounding_box=bounding_box, codim=codim)
                             for vmin, vmax in zip(self.vmins, self.vmaxs)]
                    if legend:
                        for i, plot, colorbar, l in zip(range(len(plots)), plots, self.colorbarwidgets, legend):
                            subplot_layout = QVBoxLayout()
                            caption = QLabel(l)
                            caption.setAlignment(Qt.AlignHCenter)
                            subplot_layout.addWidget(caption)
                            if not separate_colorbars or backend == 'matplotlib':
                                subplot_layout.addWidget(plot)
                            else:
                                hlayout = QHBoxLayout()
                                hlayout.addWidget(plot)
                                if colorbar:
                                    hlayout.addWidget(colorbar)
                                subplot_layout.addLayout(hlayout)
                            plot_layout.addLayout(subplot_layout, int(i/columns), (i % columns), 1, 1)
                    else:
                        for i, plot, colorbar in zip(range(len(plots)), plots, self.colorbarwidgets):
                            if not separate_colorbars or backend == 'matplotlib':
                                plot_layout.addWidget(plot, i // columns, i % columns, 1, 1)
                            else:
                                hlayout = QHBoxLayout()
                                hlayout.addWidget(plot)
                                if colorbar:
                                    hlayout.addWidget(colorbar)
                                plot_layout.addLayout(hlayout, i // columns, i % columns, 1, 1)
                    layout.addLayout(plot_layout)
                    if not separate_colorbars:
                        layout.addWidget(self.colorbarwidgets[0])
                        for w in self.colorbarwidgets[1:]:
                            w.setVisible(False)
                    self.setLayout(layout)
                    self.plots = plots

                def set(self, U, ind):
                    if rescale_colorbars:
                        if separate_colorbars:
                            self.vmins = tuple(np.min(u[ind]) for u in U)
                            self.vmaxs = tuple(np.max(u[ind]) for u in U)
                        else:
                            self.vmins = (min(np.min(u[ind]) for u in U),) * len(U)
                            self.vmaxs = (max(np.max(u[ind]) for u in U),) * len(U)

                    for u, plot, colorbar, vmin, vmax in zip(U, self.plots, self.colorbarwidgets, self.vmins,
                                                              self.vmaxs):
                        plot.set(u[ind], vmin=vmin, vmax=vmax)
                        if colorbar:
                            colorbar.set(vmin=vmin, vmax=vmax)

            super().__init__(U, PlotWidget(), title=title, length=len(U[0]))
            self.grid = grid
            self.codim = codim
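
For reference, a self-contained sketch of the colorbar-range logic used in `PlotWidget.__init__` and `set` above, with plain NumPy arrays standing in for the |VectorArray| data (the function name and test data are illustrative, not part of pyMOR):

import numpy as np

def colorbar_limits(U, separate_colorbars, rescale_colorbars, ind=0):
    # U: tuple of 2D arrays, one per plot; axis 0 indexes time steps.
    if rescale_colorbars:
        # Limits follow the currently displayed time step `ind`.
        vmins = tuple(np.min(u[ind]) for u in U)
        vmaxs = tuple(np.max(u[ind]) for u in U)
    else:
        # Limits cover all time steps at once.
        vmins = tuple(np.min(u) for u in U)
        vmaxs = tuple(np.max(u) for u in U)
    if not separate_colorbars:
        # One shared range, replicated so every plot gets the same pair.
        vmins = (min(vmins),) * len(U)
        vmaxs = (max(vmaxs),) * len(U)
    return vmins, vmaxs

U = (np.random.rand(3, 100), 2 * np.random.rand(3, 100) - 1)
print(colorbar_limits(U, separate_colorbars=False, rescale_colorbars=True))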
Beispiel #58
0
# This file is part of the pyMOR project (http://www.pymor.org).
# Copyright Holders: Rene Milk, Stephan Rave, Felix Schindler
# License: BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)

from __future__ import absolute_import, division, print_function

import numpy as np

from pymor.core.logger import getLogger
logger = getLogger(__name__)


def inverse_relation(R, size_rhs=None, with_indices=False):
    """Computes the inverse relation of a relation.

    If `r` is a relation, then the inverse relation `ri` is defined by

        x ri y  <=>  y r x

    Parameters
    ----------
    R
        2D |NumPy array| of integers representing a relation r on the
        natural numbers via ::

            x r y <=> (x < R.shape[0] and y in R[x]).

        Rows of `R` which are too short are padded with -1.
    size_rhs
        Can be provided for speedup. Has to be greater than `R.max()`.
    with_indices
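
The example above is cut off by the source page after the `with_indices` parameter. As a hedged, standalone sketch (illustrative only, not pyMOR's implementation), an inverse relation under the padded `-1` representation described in the docstring could be computed like this:

import numpy as np

def inverse_relation_sketch(R, size_rhs=None):
    # Build the inverse relation for the padded representation above.
    R = np.asarray(R)
    size_rhs = size_rhs if size_rhs is not None else int(R.max()) + 1
    buckets = [[] for _ in range(size_rhs)]
    for x, row in enumerate(R):
        for y in row:
            if y >= 0:                      # skip -1 padding entries
                buckets[int(y)].append(x)
    width = max([len(b) for b in buckets] or [1])
    RI = np.full((size_rhs, width), -1, dtype=R.dtype)
    for y, b in enumerate(buckets):
        RI[y, :len(b)] = b
    return RI

R = np.array([[1, 2], [2, -1]])
print(inverse_relation_sketch(R))
# [[-1 -1]
#  [ 0 -1]
#  [ 0  1]]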
Beispiel #59
0
# Imports reconstructed for this excerpt; module paths follow the
# contemporaneous pyMOR version and may differ in newer releases.
# (_estimate is a module-private helper defined alongside greedy.)
import time

import numpy as np

from pymor.algorithms.basisextension import gram_schmidt_basis_extension
from pymor.core.exceptions import ExtensionError
from pymor.core.logger import getLogger
from pymor.parallel.dummy import dummy_pool
from pymor.parallel.manager import RemoteObjectManager


def greedy(discretization, reductor, samples, initial_basis=None, use_estimator=True, error_norm=None,
           extension_algorithm=gram_schmidt_basis_extension, atol=None, rtol=None, max_extensions=None,
           pool=None):
    """Greedy basis generation algorithm.

    This algorithm generates a reduced basis by iteratively adding the
    worst approximated solution snapshot for a given training set to the
    reduced basis. The approximation error is computed either by directly
    comparing the reduced solution to the detailed solution or by using
    an error estimator (`use_estimator == True`). The reduction and basis
    extension steps are performed by calling the methods provided by the
    `reductor` and `extension_algorithm` arguments.

    Parameters
    ----------
    discretization
        The |Discretization| to reduce.
    reductor
        Reductor for reducing the given |Discretization|. This has to be a
        function of the form `reductor(discretization, basis, extends=None)`.
        If your reductor takes more arguments, use, e.g., :func:`functools.partial`.
        The method has to return a tuple
        `(reduced_discretization, reconstructor, reduction_data)`.
        In case the last basis extension was `hierarchic` (see
        `extension_algorithm`), the extends argument is set to
        `(last_reduced_discretization, last_reconstructor, last_reduction_data)`
        which can be used by the reductor to speed up the reduction
        process. For an example see
        :func:`~pymor.reductors.coercive.reduce_coercive`.
    samples
        The set of |Parameter| samples on which to perform the greedy search.
    initial_basis
        The initial reduced basis with which the algorithm starts. If `None`,
        an empty basis is used as initial basis.
    use_estimator
        If `True`, use `reduced_discretization.estimate()` to estimate the
        errors on the sample set. Otherwise a detailed simulation is
        performed to calculate the error.
    error_norm
        If `use_estimator == False`, use this function to calculate the
        norm of the error. If `None`, the Euclidean norm is used.
    extension_algorithm
        The extension algorithm to be used to extend the current reduced
        basis with the maximum error snapshot. This has to be a function
        of the form `extension_algorithm(old_basis, new_vector)`, which
        returns a tuple `(new_basis, extension_data)`, where
        `extension_data` is a dict at least containing the key
        `hierarchic`. `hierarchic` should be set to `True` if `new_basis`
        contains `old_basis` as its first vectors.
    atol
        If not `None`, stop the algorithm if the maximum (estimated) error
        on the sample set drops below this value.
    rtol
        If not `None`, stop the algorithm if the maximum (estimated)
        relative error on the sample set drops below this value.
    max_extensions
        If not `None`, stop the algorithm after `max_extensions` extension
        steps.
    pool
        If not `None`, the |WorkerPool| to use for parallelization.

    Returns
    -------
    Dict with the following fields:

        :basis:                  The reduced basis.
        :reduced_discretization: The reduced |Discretization| obtained for the
                                 computed basis.
        :reconstructor:          Reconstructor for `reduced_discretization`.
        :max_errs:               Sequence of maximum errors during the greedy run.
        :max_err_mus:            The parameters corresponding to `max_errs`.
    """

    logger = getLogger('pymor.algorithms.greedy.greedy')
    samples = list(samples)
    sample_count = len(samples)
    logger.info('Started greedy search on {} samples'.format(sample_count))
    if pool is None or pool is dummy_pool:
        pool = dummy_pool
    else:
        logger.info('Using pool of {} workers for parallel greedy search'.format(len(pool)))

    with RemoteObjectManager() as rom:
        # Push everything we need during the greedy search to the workers.
        # Distribute the training set evenly among the workers.
        if not use_estimator:
            rom.manage(pool.push(discretization))
            if error_norm:
                rom.manage(pool.push(error_norm))
        samples = rom.manage(pool.scatter_list(samples))

        basis = initial_basis

        tic = time.time()
        extensions = 0
        max_errs = []
        max_err_mus = []
        hierarchic = False

        rd, rc, reduction_data = None, None, None
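        # rd/rc/reduction_data hold the results of the previous reduction step;
        # when the last basis extension was hierarchic, they are passed back to
        # the reductor via `extends` so it can reuse already computed data.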
        while True:
            with logger.block('Reducing ...'):
                rd, rc, reduction_data = reductor(discretization, basis) if not hierarchic \
                    else reductor(discretization, basis, extends=(rd, rc, reduction_data))

            if sample_count == 0:
                logger.info('Nothing to do: the sample set is empty.')
                return {'basis': basis, 'reduced_discretization': rd, 'reconstructor': rc,
                        'max_errs': [], 'max_err_mus': [], 'extensions': 0,
                        'time': time.time() - tic, 'reduction_data': reduction_data}

            with logger.block('Estimating errors ...'):
                if use_estimator:
                    errors, mus = list(zip(*pool.apply(_estimate, rd=rd, d=None, rc=None, samples=samples, error_norm=None)))
                else:
                    # FIXME: Always communicating rc may become a bottleneck in some use cases.
                    #        Add special treatment for GenericRBReconstructor?
                    errors, mus = list(zip(*pool.apply(_estimate, rd=rd, d=discretization, rc=rc,
                                                       samples=samples, error_norm=error_norm)))
            max_err_ind = np.argmax(errors)
            max_err, max_err_mu = errors[max_err_ind], mus[max_err_ind]

            max_errs.append(max_err)
            max_err_mus.append(max_err_mu)
            logger.info('Maximum error after {} extensions: {} (mu = {})'.format(extensions, max_err, max_err_mu))

            if atol is not None and max_err <= atol:
                logger.info('Absolute error tolerance ({}) reached! Stopping extension loop.'.format(atol))
                break

            if rtol is not None and max_err / max_errs[0] <= rtol:
                logger.info('Relative error tolerance ({}) reached! Stopping extension loop.'.format(rtol))
                break

            with logger.block('Computing solution snapshot for mu = {} ...'.format(max_err_mu)):
                U = discretization.solve(max_err_mu)
            with logger.block('Extending basis with solution snapshot ...'):
                try:
                    basis, extension_data = extension_algorithm(basis, U)
                except ExtensionError:
                    logger.info('Extension failed. Stopping now.')
                    break
            extensions += 1
            if 'hierarchic' not in extension_data:
                logger.warn('Extension algorithm does not report whether the extension was hierarchic. Assuming it wasn\'t ...')
                hierarchic = False
            else:
                hierarchic = extension_data['hierarchic']

            logger.info('')

            if max_extensions is not None and extensions >= max_extensions:
                logger.info('Maximum number of {} extensions reached.'.format(max_extensions))
                with logger.block('Reducing once more ...'):
                    rd, rc, reduction_data = reductor(discretization, basis) if not hierarchic \
                        else reductor(discretization, basis, extends=(rd, rc, reduction_data))
                break

        tictoc = time.time() - tic
        logger.info('Greedy search took {} seconds'.format(tictoc))
        return {'basis': basis, 'reduced_discretization': rd, 'reconstructor': rc,
                'max_errs': max_errs, 'max_err_mus': max_err_mus, 'extensions': extensions,
                'time': tictoc, 'reduction_data': reduction_data}
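
A hedged usage sketch for `greedy`; `d`, `my_reductor`, and the bound keyword argument are placeholders, and `sample_uniformly` assumes the usual pyMOR parameter-space API:

from functools import partial

# `d` is some |Discretization|; `my_reductor` is a hypothetical reductor with
# the signature documented above (extra arguments bound via functools.partial).
samples = d.parameter_space.sample_uniformly(10)
greedy_data = greedy(d, partial(my_reductor, extra_option=42), samples,
                     use_estimator=True, rtol=1e-5, max_extensions=25)
rd, rc = greedy_data['reduced_discretization'], greedy_data['reconstructor']
basis = greedy_data['basis']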
Beispiel #60
0
def init_grid_and_problem(config, mu_bar=1, mu_hat=1, mpi_comm=MPI.COMM_WORLD):
    # assert mpi_comm.Get_size() < MPI.COMM_WORLD.Get_size() or mpi_comm.Get_size() == 1
    logger = getLogger('OS2015_academic_problem.OS2015_academic_problem')
    logger.info('initializing grid and problem ... ')

    lower_left = [-1, -1]
    upper_right = [1, 1]
    inner_boundary_id = 18446744073709551573
    grid = make_grid((lower_left, upper_right),
                     config['num_subdomains'],
                     config['half_num_fine_elements_per_subdomain_and_dim'],
                     inner_boundary_id,
                     mpi_comm=mpi_comm)
    grid_info(logger.error, grid, mpi_comm)
    all_dirichlet_boundary_info = make_boundary_info(
        grid, {'type': 'xt.grid.boundaryinfo.alldirichlet'})

    cos = '(cos(0.5*pi*x[0])*cos(0.5*pi*x[1]))'
    diffusion_functions = [
        make_expression_function_1x1(grid,
                                     'x',
                                     '1+{}'.format(cos),
                                     order=2,
                                     name='lambda_0'),
        make_expression_function_1x1(grid,
                                     'x',
                                     '-1*{}'.format(cos),
                                     order=2,
                                     name='lambda_1')
    ]
    # diffusion_functions = [make_expression_function_1x1(
    #     grid, 'x', '1', order=2, name='lambda_0'),
    #     make_expression_function_1x1(grid, 'x', 'x[0]', order=2, name='lambda_1')]
    parameter_type = {'diffusion': (1, )}
    coefficients = [
        ExpressionParameterFunctional('1.', parameter_type),
        ExpressionParameterFunctional('diffusion', parameter_type)
    ]
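    # Affine decomposition of the diffusion: 1*lambda_0 + mu*lambda_1
    #   = (1 + cos) - mu*cos = 1 + (1 - mu)*cos,
    # which for mu = mu_bar is exactly the `mbc` expression defined below.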

    kappa = make_constant_function_2x2(grid, [[1., 0.], [0., 1.]],
                                       name='kappa')
    f = make_expression_function_1x1(grid,
                                     'x',
                                     '0.5*pi*pi*{}'.format(cos),
                                     order=2,
                                     name='f')
    mbc = '1+(1-{})*{}'.format(mu_bar, cos)
    lambda_bar = make_expression_function_1x1(grid,
                                              'x',
                                              mbc,
                                              order=2,
                                              name='lambda_bar')
    lambda_hat = make_expression_function_1x1(grid,
                                              'x',
                                              mbc,
                                              order=2,
                                              name='lambda_hat')

    return {
        'grid': grid,
        'mpi_comm': mpi_comm,
        'boundary_info': all_dirichlet_boundary_info,
        'inner_boundary_id': inner_boundary_id,
        'lambda': {
            'functions': diffusion_functions,
            'coefficients': coefficients
        },
        'lambda_bar': lambda_bar,
        'lambda_hat': lambda_hat,
        'kappa': kappa,
        'f': f,
        'parameter_type': parameter_type,
        'mu_bar': (mu_bar, ),
        'mu_hat': (mu_hat, ),
        'mu_min': (min(0.1, mu_bar, mu_hat), ),
        'mu_max': (max(1, mu_bar, mu_hat), ),
        'parameter_range': (min(0.1, mu_bar, mu_hat), max(1, mu_bar, mu_hat))
    }
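
A quick NumPy sanity check (stand-ins for the dune-xt expression functions; illustrative only) that the affine decomposition above reproduces the closed form used for `lambda_bar` and `lambda_hat`:

import numpy as np

mu = 0.3  # any value of the 'diffusion' parameter
x0, x1 = np.meshgrid(np.linspace(-1, 1, 5), np.linspace(-1, 1, 5))
c = np.cos(0.5 * np.pi * x0) * np.cos(0.5 * np.pi * x1)

lambda_0 = 1 + c                           # '1+{cos}' above
lambda_1 = -c                              # '-1*{cos}' above
affine = 1.0 * lambda_0 + mu * lambda_1    # coefficients ('1.', 'diffusion')
closed_form = 1 + (1 - mu) * c             # `mbc` with mu_bar = mu
assert np.allclose(affine, closed_form)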