Example #1
def setup_multi_level_advection_diffusion_benchmark(
        nvars, corr_len, max_eval_concurrency=1):
    r"""
    Compute functionals of the transient advection-diffusion model (with 1 configure variable which controls the two spatial mesh resolutions and the timestep). An integer increase in the value of the configure variable will increase the 3 numerical discretization parameters by the same integer.

    See :func:`pyapprox_dev.advection_diffusion_wrappers.setup_advection_diffusion_benchmark` for details on function arguments and output.
    """
    from scipy import stats
    from pyapprox.models.wrappers import TimerModelWrapper, PoolModel, \
        WorkTrackingModel
    from pyapprox.variables import IndependentMultivariateRandomVariable
    from pyapprox.benchmarks.benchmarks import Benchmark
    from pyapprox.models.wrappers import MultiLevelWrapper
    univariate_variables = [stats.uniform(-np.sqrt(3), 2*np.sqrt(3))]*nvars
    variable = IndependentMultivariateRandomVariable(univariate_variables)
    final_time, degree = 1.0, 1
    options = {'corr_len': corr_len}
    base_model = AdvectionDiffusionModel(
        final_time, degree, qoi_functional_misc,
        second_order_timestepping=False, options=options)
    multilevel_model = MultiLevelWrapper(
        base_model, base_model.num_config_vars)
    # add wrapper to allow execution times to be captured
    timer_model = TimerModelWrapper(multilevel_model, base_model)
    pool_model = PoolModel(
        timer_model, max_eval_concurrency, base_model=base_model)
    model = WorkTrackingModel(
        pool_model, base_model, multilevel_model.num_config_vars)
    attributes = {'fun': model, 'variable': variable,
                  'multi_level_model': multilevel_model}
    return Benchmark(attributes)
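
A minimal usage sketch (hypothetical values, assuming the module-level names used above): the wrapped model expects each sample column to contain the random variables followed by the single multi-level configure variable.

import numpy as np

# hypothetical usage of the multi-level benchmark defined above
benchmark = setup_multi_level_advection_diffusion_benchmark(
    nvars=3, corr_len=0.1, max_eval_concurrency=1)
nsamples = 4
# random variables are uniform on [-sqrt(3), sqrt(3)]
random_samples = np.random.uniform(-np.sqrt(3), np.sqrt(3), (3, nsamples))
config_samples = np.full((1, nsamples), 2)  # single multi-level fidelity index
samples = np.vstack([random_samples, config_samples])
values = benchmark.fun(samples)  # one row of QoI values per sample
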
def setup_model(num_vars, max_eval_concurrency):
    corr_len = 1 / 10
    second_order_timestepping = False

    qoi_functional = qoi_functional_misc
    degree = 1
    base_model = AdvectionDiffusionModel(
        num_vars,
        corr_len,
        1.0e-0,
        degree,
        qoi_functional,
        add_work_to_qoi=False,
        boundary_condition_type=None,
        second_order_timestepping=second_order_timestepping)
    timer_model = TimerModelWrapper(base_model, base_model)
    model = PoolModel(timer_model, max_eval_concurrency, base_model=base_model)
    model = WorkTrackingModel(model, model.base_model)
    return model
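
The wrapper stack used above (timer, pool, work tracker) can be understood with a rough sketch: a timing wrapper appends wall-clock time as an extra output column, which a tracking wrapper later strips off and records as the cost of each evaluation. The class below only illustrates that idea; it is not pyapprox's implementation.

import time
import numpy as np

class IllustrativeTimerWrapper:
    """Append per-sample wall time as the last output column (sketch only)."""

    def __init__(self, fun):
        self.fun = fun

    def __call__(self, samples):
        values, times = [], []
        for sample in samples.T:
            t0 = time.time()
            values.append(self.fun(sample[:, None]))
            times.append(time.time() - t0)
        # a work-tracking wrapper would remove this last column and store it
        return np.hstack([np.vstack(values), np.array(times)[:, None]])
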
def setup_advection_diffusion_benchmark(nvars,
                                        corr_len,
                                        max_eval_concurrency=1):
    r"""
    Compute functionals of the following model of transient advection-diffusion

    .. math::

       \frac{\partial u}{\partial t}(x,t,\rv) + \nabla u(x,t,\rv)-\nabla\cdot\left[k(x,\rv) \nabla u(x,t,\rv)\right] &=g(x,t) \qquad (x,t,\rv)\in D\times [0,1]\times\rvdom\\
       \mathcal{B}(x,t,\rv)&=0 \qquad\qquad (x,t,\rv)\in \partial D\times[0,1]\times\rvdom\\
       u(x,t,\rv)&=u_0(x,\rv) \qquad (x,t,\rv)\in D\times\{t=0\}\times\rvdom

    Following [NTWSIAMNA2008]_, [JEGGIJNME2020]_ we set 

    .. math:: g(x,t)=(1.5+\cos(2\pi t))\cos(x_1),

    the initial condition to :math:`u(x,z)=0`, and :math:`B(x,t,z)` to be zero Dirichlet boundary conditions.

    We model the diffusivity :math:`k` as a random field represented by the
    Karhunen-Loeve (like) expansion (KLE)

    .. math::

       \log(k(x,\rv)-0.5)=1+\rv_1\left(\frac{\sqrt{\pi L}}{2}\right)^{1/2}+\sum_{k=2}^d \lambda_k\phi(x)\rv_k,

    with

    .. math::

       \lambda_k=\left(\sqrt{\pi L}\right)^{1/2}\exp\left(-\frac{(\lfloor\frac{k}{2}\rfloor\pi L)^2}{4}\right) k>1,  \qquad\qquad  \phi(x)=
       \begin{cases}
       \sin\left(\frac{(\lfloor\frac{k}{2}\rfloor\pi x_1)}{L_p}\right) & k \text{ even}\,,\\
       \cos\left(\frac{(\lfloor\frac{k}{2}\rfloor\pi x_1)}{L_p}\right) & k \text{ odd}\,.
       \end{cases}

    where :math:`L_p=\max(1,2L_c)`, :math:`L=\frac{L_c}{L_p}`.

    Parameters
    ----------
    nvars : integer
        The number of variables of the KLE

    corr_len : float
        The correlation length :math:`L_c` of the covariance kernel

    max_eval_concurrency : integer
        The maximum number of simulations that can be run in parallel. Should
        be no more than the maximum number of cores on the computer being used

    Returns
    -------
    benchmark : pya.Benchmark
       Object containing the benchmark attributes
    
    """

    from scipy import stats
    from pyapprox.models.wrappers import TimerModelWrapper, PoolModel, \
        WorkTrackingModel
    from pyapprox.variables import IndependentMultivariateRandomVariable
    from pyapprox.benchmarks.benchmarks import Benchmark
    univariate_variables = [stats.uniform(-np.sqrt(3), 2 * np.sqrt(3))] * nvars
    variable = IndependentMultivariateRandomVariable(univariate_variables)
    final_time, degree = 1.0, 1
    options = {'corr_len': corr_len}
    base_model = AdvectionDiffusionModel(final_time,
                                         degree,
                                         qoi_functional_misc,
                                         second_order_timestepping=False,
                                         options=options)
    # add wrapper to allow execution times to be captured
    timer_model = TimerModelWrapper(base_model, base_model)
    pool_model = PoolModel(timer_model,
                           max_eval_concurrency,
                           base_model=base_model)
    # add wrapper that tracks execution times.
    model = WorkTrackingModel(pool_model, base_model)
    attributes = {'fun': model, 'variable': variable}
    return Benchmark(attributes)
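
A hypothetical call sequence, following the docstring above: each sample column contains the ``nvars`` random variables followed by the three configure levels ``(l_x1, l_x2, l_t)``.

import numpy as np

benchmark = setup_advection_diffusion_benchmark(nvars=2, corr_len=0.1)
nsamples = 3
random_samples = np.random.uniform(-np.sqrt(3), np.sqrt(3), (2, nsamples))
config_samples = np.tile(np.array([[3], [3], [3]]), (1, nsamples))  # l_x1, l_x2, l_t
samples = np.vstack([random_samples, config_samples])
values = benchmark.fun(samples)  # shape (nsamples, 1)
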
Example #4
print(worktracking_fun_ensemble.work_tracker(query_fun_ids))

#%%
#As expected there are 5 samples tracked for each model and the median evaluation time of the second function is about twice as large as for the first function.

#%%
#Evaluating functions at multiple samples in parallel
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#For expensive models it is often useful to evaluate a model at multiple samples concurrently. This can be achieved using :class:`pyapprox.models.wrappers.PoolModel`. Note this class is not intended for use with distributed-memory systems; rather it is intended to use all the threads of a personal computer or compute node. See :class:`pyapprox.models.async_model.AsynchronousEvaluationModel` if you are interested in running multiple simulations in parallel on a distributed-memory system.
#
#PoolModel cannot be used to wrap WorkTrackingModel. However, it can still
#be used together with WorkTrackingModel by applying the wrappers in the order shown below.

max_eval_concurrency = 1  # increase to exploit multiple cores
pool_model = PoolModel(
    timer_fun_ensemble, max_eval_concurrency, assert_omp=False)
# clear the WorkTracker counters before re-wrapping with the new pool model
worktracking_fun_ensemble.work_tracker.costs = dict()
worktracking_fun_ensemble = WorkTrackingModel(
    pool_model, num_config_vars=1)

# create more samples to notice improvement in wall time
nsamples = 10
samples = pya.generate_independent_random_samples(variable, nsamples)
fun_ids = np.ones(nsamples)
fun_ids[:nsamples//2] = 0
ensemble_samples = np.vstack([samples, fun_ids])

t0 = time.time()
values = worktracking_fun_ensemble(ensemble_samples)
t1 = time.time()
print(f'With {max_eval_concurrency} threads that took {t1-t0} seconds')
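
#%%
#The work tracker can be queried again after the parallel run to confirm that
#the additional evaluations were recorded per model id (this reuses
#``query_fun_ids`` from the earlier snippet).

print(worktracking_fun_ensemble.work_tracker(query_fun_ids))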
def setup_advection_diffusion_source_inversion_benchmark(
        measurement_times=np.array([0.05, 0.15]),
        source_strength=0.5,
        source_width=0.1,
        true_sample=np.array([[0.25, 0.75, 4, 4, 4]]).T,
        noise_stdev=0.4,
        max_eval_concurrency=1):
    r"""
    Compute functionals of the following model of transient diffusion of 
    a contaminant

    .. math::

       \frac{\partial u}{\partial t}(x,t,\rv) + \nabla u(x,t,\rv)-\nabla\cdot\left[k(x,\rv) \nabla u(x,t,\rv)\right] &=g(x,t) \qquad (x,t,\rv)\in D\times [0,1]\times\rvdom\\
       \mathcal{B}(x,t,\rv)&=0 \qquad\qquad (x,t,\rv)\in \partial D\times[0,1]\times\rvdom\\
       u(x,t,\rv)&=u_0(x,\rv) \qquad (x,t,\rv)\in D\times\{t=0\}\times\rvdom

    Following [MNRJCP2006]_, [LMSISC2014]_ we set 

    .. math:: g(x,t)=\frac{s}{2\pi h^2}\exp\left(-\frac{\lvert x-x_\mathrm{src}\rvert^2}{2h^2}\right)

    the initial condition to :math:`u(x,z)=0` and :math:`B(x,t,z)` to be zero Neumann boundary conditions, i.e.

    .. math:: \nabla u\cdot n = 0 \quad\mathrm{on} \quad\partial D

    and we take the diffusivity to be the constant :math:`k=1`.

    The quantities of interest are point observations :math:`u(x_l)` 
    taken at :math:`P` points in time :math:`\{t_p\}_{p=1}^P` at :math:`L` 
    locations :math:`\{x_l\}_{l=1}^L`. The final time :math:`T` is the last 
    observation time.

    These functionals can be used to define the posterior distribution 

    .. math::  \pi_{\text{post}}(\rv)=\frac{\pi(\V{y}|\rv)\pi(\rv)}{\int_{\rvdom} \pi(\V{y}|\rv)\pi(\rv)d\rv}

    where the prior is the tensor product of independent and identically 
    distributed uniform variables on :math:`[0,1]` i.e. 
    :math:`\pi(\rv)=1`, and the likelihood is given by

    .. math:: \pi(\V{y}|\rv)=\frac{1}{(2\pi)^{d/2}\sigma}\exp\left(-\frac{1}{2}\frac{(y-f(\rv))^T(y-f(\rv))}{\sigma^2}\right)

    and :math:`y` are noisy observations of the solution `u` at the 9 
    points of a uniform :math:`3\times 3` grid covering the physical domain 
    :math:`D` at successive times :math:`\{t_p\}_{p=1}^P`. Here the noise is independent and normally distributed with mean 
    zero and variance :math:`\sigma^2`.

    Parameters
    ----------
    measurement_times : np.ndarray (P)
        The times :math:`\{t_p\}_{p=1}^P` at which measurements of the 
        contaminant concentration are taken

    source_strength : float
        The source strength :math:`s`

    source_width : float
        The source width :math:`h`

    true_sample : np.ndarray (5, 1)
        The true random sample (source location) concatenated with the
        configure sample, used to generate the observations used in the
        likelihood function

    noise_stdev : float
        The standard deviation :math:`\sigma` of the observational noise

    max_eval_concurrency : integer
        The maximum number of simulations that can be run in parallel. Should 
        be no more than the maximum number of cores on the computer being used

    Returns
    -------
    benchmark : pya.Benchmark
       Object containing the benchmark attributes documented below

    fun : callable

        The quantity of interest :math:`f(w)` with signature

        ``fun(w) -> np.ndarray``

        where ``w`` is a 2D np.ndarray with shape (nvars+3,nsamples) and the
        output is a 2D np.ndarray with shape (nsamples,1). The first ``nvars`` 
        rows of ``w`` are realizations of the random variables. The last 3 rows
        are configuration variables specifying the numerical discretization of 
        the PDE model. Specifically the first and second configuration variables
        specify the levels :math:`l_{x_1}` and :math:`l_{x_2}` which dictate
        the resolution of the FEM mesh in the directions :math:`{x_1}` and 
        :math:`{x_2}` respectively. The number of cells in the :math:`{x_i}` 
        direction is given by :math:`2^{l_{x_i}+2}`. The third configuration 
        variable specifies the level :math:`l_t` of the temporal discretization.
        The number of timesteps is :math:`2^{l_{t}+2}`, so the timestep
        size is :math:`T/2^{l_{t}+2}`.

    variable : pya.IndependentMultivariateRandomVariable
        Object containing information of the joint density of the inputs z
        which is the tensor product of independent and identically distributed 
        uniform variables on :math:`[0,1]`.

    Examples
    --------
    >>> from pyapprox_dev.benchmarks.benchmarks import setup_benchmark
    >>> benchmark=setup_benchmark('advection-diffusion',nvars=2)
    >>> print(benchmark.keys())
    dict_keys(['fun', 'variable'])

    References
    ----------
    .. [MNRJCP2006] `Youssef M. Marzouk, Habib N. Najm, Larry A. Rahn, Stochastic spectral methods for efficient Bayesian solution of inverse problems, Journal of Computational Physics, Volume 224, Issue 2, 2007, Pages 560-586, <https://doi.org/10.1016/j.jcp.2006.10.010>`_

    .. [LMSISC2014] `Jinglai Li and Youssef M. Marzouk. Adaptive Construction of Surrogates for the Bayesian Solution of Inverse Problems, SIAM Journal on Scientific Computing 2014 36:3, A1163-A1186 <https://doi.org/10.1137/130938189>`_

    Notes
    -----
    The example from [MNRJCP2006]_ can be obtained by setting `s=0.5`, `h=0.1`,
    `measurement_times=np.array([0.05,0.15])` and `noise_stdev=0.1`

    The example from [LMSISC2014]_ can be obtained by setting `s=2`, `h=0.05`,
    `measurement_times=np.array([0.1,0.2])` and `noise_stdev=0.1`
    """

    from scipy import stats
    from pyapprox.models.wrappers import TimerModelWrapper, PoolModel, \
        WorkTrackingModel
    from pyapprox.variables import IndependentMultivariateRandomVariable
    from pyapprox.benchmarks.benchmarks import Benchmark
    univariate_variables = [stats.uniform(0, 1)] * 2
    variable = IndependentMultivariateRandomVariable(univariate_variables)
    final_time, degree = measurement_times.max(), 2
    options = {
        'intermediate_times': measurement_times[:-1],
        'source_strength': source_strength,
        'source_width': source_width
    }
    base_model = AdvectionDiffusionSourceInversionModel(
        final_time,
        degree,
        qoi_functional_source_inversion,
        second_order_timestepping=False,
        options=options)
    # add wrapper to allow execution times to be captured
    timer_model = TimerModelWrapper(base_model, base_model)
    pool_model = PoolModel(timer_model,
                           max_eval_concurrency,
                           base_model=base_model)

    # add wrapper that tracks execution times.
    model = WorkTrackingModel(pool_model, base_model)

    from pyapprox.bayesian_inference.markov_chain_monte_carlo import \
        GaussianLogLike
    if true_sample.shape != (5, 1):
        msg = 'true_sample must be the concatenation of a random sample and '
        msg += 'a configure sample'
        raise Exception(msg)
    noiseless_data = model(true_sample)[0, :]
    noise = np.random.normal(0, noise_stdev, (noiseless_data.shape[0]))
    data = noiseless_data + noise
    loglike = GaussianLogLike(model, data, noise_stdev)

    attributes = {'fun': model, 'variable': variable, 'loglike': loglike}
    return Benchmark(attributes)
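
A hypothetical evaluation of the source-inversion benchmark at its default true sample (two random variables for the source location plus three configure variables):

import numpy as np

benchmark = setup_advection_diffusion_source_inversion_benchmark(
    max_eval_concurrency=1)
sample = np.array([[0.25, 0.75, 4, 4, 4]]).T  # (5, 1): random + configure values
observations = benchmark.fun(sample)          # one row of point observations
print(benchmark.keys())                       # 'fun', 'variable' and 'loglike'
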
def setup_advection_diffusion_benchmark(nvars,
                                        corr_len,
                                        max_eval_concurrency=1):
    r"""
    Compute functionals of the following model of transient advection-diffusion (with 3 configure variables which control the two spatial mesh resolutions and the timestep)

    .. math::

       \frac{\partial u}{\partial t}(x,t,\rv) + \nabla u(x,t,\rv)-\nabla\cdot\left[k(x,\rv) \nabla u(x,t,\rv)\right] &=g(x,t) \qquad (x,t,\rv)\in D\times [0,1]\times\rvdom\\
       \mathcal{B}(x,t,\rv)&=0 \qquad\qquad (x,t,\rv)\in \partial D\times[0,1]\times\rvdom\\
       u(x,t,\rv)&=u_0(x,\rv) \qquad (x,t,\rv)\in D\times\{t=0\}\times\rvdom

    Following [NTWSIAMNA2008]_, [JEGGIJNME2020]_ we set 

    .. math:: g(x,t)=(1.5+\cos(2\pi t))\cos(x_1),

    the initial condition to :math:`u(x,z)=0`, and :math:`B(x,t,z)` to be zero Dirichlet boundary conditions.

    We model the diffusivity :math:`k` as a random field represented by the
    Karhunen-Loeve (like) expansion (KLE)

    .. math::

       \log(k(x,\rv)-0.5)=1+\rv_1\left(\frac{\sqrt{\pi L}}{2}\right)^{1/2}+\sum_{k=2}^d \lambda_k\phi(x)\rv_k,

    with

    .. math::

       \lambda_k=\left(\sqrt{\pi L}\right)^{1/2}\exp\left(-\frac{(\lfloor\frac{k}{2}\rfloor\pi L)^2}{4}\right) k>1,  \qquad\qquad  \phi(x)=
       \begin{cases}
       \sin\left(\frac{(\lfloor\frac{k}{2}\rfloor\pi x_1)}{L_p}\right) & k \text{ even}\,,\\
       \cos\left(\frac{(\lfloor\frac{k}{2}\rfloor\pi x_1)}{L_p}\right) & k \text{ odd}\,.
       \end{cases}

    where :math:`L_p=\max(1,2L_c)`, :math:`L=\frac{L_c}{L_p}`.

    The quantity of interest :math:`f(z)` is the measurement of the solution at a location :math:`x_k` at the final time :math:`T=1` obtained via the linear functional

    .. math:: f(z)=\int_D u(x,T,z)\frac{1}{2\pi\sigma^2}\exp\left(-\frac{\lVert x-x_k \rVert^2_2}{\sigma^2}\right) dx


    Parameters
    ----------
    nvars : integer
        The number of variables of the KLE

    corr_len : float
        The correlation length :math:`L_c` of the covariance kernel

    max_eval_concurrency : integer
        The maximum number of simulations that can be run in parallel. Should
        be no more than the maximum number of cores on the computer being used

    Returns
    -------
    benchmark : pya.Benchmark
       Object containing the benchmark attributes documented below

    fun : callable

        The quantity of interest :math:`f(w)` with signature

        ``fun(w) -> np.ndarray``

        where ``w`` is a 2D np.ndarray with shape (nvars+3,nsamples) and the
        output is a 2D np.ndarray with shape (nsamples,1). The first ``nvars`` 
        rows of ``w`` are realizations of the random variables. The last 3 rows
        are configuration variables specifying the numerical discretization of 
        the PDE model. Specifically the first and second configuration variables
        specify the levels :math:`l_{x_1}` and :math:`l_{x_2}` which dictate
        the resolution of the FEM mesh in the directions :math:`{x_1}` and 
        :math:`{x_2}` respectively. The number of cells in the :math:`{x_i}` 
        direction is given by :math:`2^{l_{x_i}+2}`. The third configuration 
        variable specifies the level :math:`l_t` of the temporal discretization.
        The number of timesteps is :math:`2^{l_{t}+2}`, so the timestep
        size is :math:`T/2^{l_{t}+2}`.

    variable : pya.IndependentMultivariateRandomVariable
        Object containing information of the joint density of the inputs z
        which is the tensor product of independent and identically distributed 
        uniform variables on :math:`[-\sqrt{3},\sqrt{3}]`.

    Examples
    --------
    >>> from pyapprox_dev.benchmarks.benchmarks import setup_benchmark
    >>> benchmark=setup_benchmark('advection-diffusion',nvars=2)
    >>> print(benchmark.keys())
    dict_keys(['fun', 'variable'])
    """

    from scipy import stats
    from pyapprox.models.wrappers import TimerModelWrapper, PoolModel, \
        WorkTrackingModel
    from pyapprox.variables import IndependentMultivariateRandomVariable
    from pyapprox.benchmarks.benchmarks import Benchmark
    univariate_variables = [stats.uniform(-np.sqrt(3), 2 * np.sqrt(3))] * nvars
    variable = IndependentMultivariateRandomVariable(univariate_variables)
    final_time, degree = 1.0, 1
    options = {'corr_len': corr_len}
    base_model = AdvectionDiffusionModel(
        final_time,
        degree,
        qoi_functional_misc,
        second_order_timestepping=False,
        options=options,
        qoi_functional_grad=qoi_functional_grad_misc)
    # add wrapper to allow execution times to be captured
    timer_model = TimerModelWrapper(base_model, base_model)
    pool_model = PoolModel(timer_model,
                           max_eval_concurrency,
                           base_model=base_model)

    # add wrapper that tracks execution times.
    model = WorkTrackingModel(pool_model, base_model,
                              base_model.num_config_vars)
    attributes = {'fun': model, 'variable': variable}
    return Benchmark(attributes)
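
The level-to-discretization mapping described in the docstring (``2^{l+2}`` cells per spatial direction and ``2^{l_t+2}`` timesteps) can be summarized by a small helper; the function below is illustrative only and not part of pyapprox.

def discretization_from_levels(l_x1, l_x2, l_t, final_time=1.0):
    # number of mesh cells in each spatial direction and the timestep size
    ncells_x1 = 2 ** (l_x1 + 2)
    ncells_x2 = 2 ** (l_x2 + 2)
    ntimesteps = 2 ** (l_t + 2)
    return ncells_x1, ncells_x2, final_time / ntimesteps

print(discretization_from_levels(3, 3, 3))  # (32, 32, 0.03125)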