Example #1
def test_akmcs_expected_feasibility():
    from UQpy.surrogates.kriging.regression_models.LinearRegression import LinearRegression
    from UQpy.surrogates.kriging.correlation_models.ExponentialCorrelation import ExponentialCorrelation

    marginals = [Normal(loc=0., scale=4.), Normal(loc=0., scale=4.)]
    x = MonteCarloSampling(distributions=marginals,
                           nsamples=20,
                           random_state=1)
    model = PythonModel(model_script='series.py', model_object_name="series")
    rmodel = RunModel(model=model)
    regression_model = LinearRegression()
    correlation_model = ExponentialCorrelation()
    K = Kriging(
        regression_model=regression_model,
        correlation_model=correlation_model,
        optimizations_number=10,
        correlation_model_parameters=[1, 1],
        optimizer=MinimizeOptimizer('l-bfgs-b'),
    )
    # OPTIONS: 'U', 'EFF', 'Weighted-U'
    learning_function = ExpectedFeasibility(eff_a=0,
                                            eff_epsilon=2,
                                            eff_stop=0.001)
    a = AdaptiveKriging(distributions=marginals,
                        runmodel_object=rmodel,
                        surrogate=K,
                        learning_nsamples=10**3,
                        n_add=1,
                        learning_function=learning_function,
                        random_state=2)
    a.run(nsamples=25, samples=x.samples)

    assert a.samples[23, 0] == 1.366058523912817
    assert a.samples[20, 1] == -12.914668932772358
Example #2
def test_akmcs_expected_improvement_global_fit():
    from UQpy.surrogates.kriging.regression_models.LinearRegression import LinearRegression
    from UQpy.surrogates.kriging.correlation_models.ExponentialCorrelation import ExponentialCorrelation

    marginals = [Normal(loc=0., scale=4.), Normal(loc=0., scale=4.)]
    x = MonteCarloSampling(distributions=marginals,
                           nsamples=20,
                           random_state=1)
    model = PythonModel(model_script='series.py', model_object_name="series")
    rmodel = RunModel(model=model)
    regression_model = LinearRegression()
    correlation_model = ExponentialCorrelation()
    K = Kriging(
        regression_model=regression_model,
        correlation_model=correlation_model,
        optimizations_number=10,
        correlation_model_parameters=[1, 1],
        optimizer=MinimizeOptimizer('l-bfgs-b'),
    )
    # OPTIONS: 'U', 'EFF', 'Weighted-U'
    learning_function = ExpectedImprovementGlobalFit()
    a = AdaptiveKriging(distributions=marginals,
                        runmodel_object=rmodel,
                        surrogate=K,
                        learning_nsamples=10**3,
                        n_add=1,
                        learning_function=learning_function,
                        random_state=2)
    a.run(nsamples=25, samples=x.samples)

    assert a.samples[23, 0] == 11.939859785098493
    assert a.samples[20, 1] == -8.429899469300118
Example #3
def test_akmcs_u():
    from UQpy.surrogates.kriging.regression_models.LinearRegression import LinearRegression
    from UQpy.surrogates.kriging.correlation_models.ExponentialCorrelation import ExponentialCorrelation

    marginals = [Normal(loc=0., scale=4.), Normal(loc=0., scale=4.)]
    x = MonteCarloSampling(distributions=marginals,
                           nsamples=20,
                           random_state=1)
    model = PythonModel(model_script='series.py', model_object_name="series")
    rmodel = RunModel(model=model)
    regression_model = LinearRegression()
    correlation_model = ExponentialCorrelation()
    K = Kriging(regression_model=regression_model,
                correlation_model=correlation_model,
                optimizer=MinimizeOptimizer('l-bfgs-b'),
                optimizations_number=10,
                correlation_model_parameters=[1, 1],
                random_state=0)
    # OPTIONS: 'U', 'EFF', 'Weighted-U'
    learning_function = UFunction(u_stop=2)
    a = AdaptiveKriging(distributions=marginals,
                        runmodel_object=rmodel,
                        surrogate=K,
                        learning_nsamples=10**3,
                        n_add=1,
                        learning_function=learning_function,
                        random_state=2)
    a.run(nsamples=25, samples=x.samples)

    assert a.samples[23, 0] == -4.141979058326188
    assert a.samples[20, 1] == -1.6476534435429009
Example #4
def test_akmcs_samples_error():
    from UQpy.surrogates.kriging.regression_models.LinearRegression import LinearRegression
    from UQpy.surrogates.kriging.correlation_models.ExponentialCorrelation import ExponentialCorrelation

    marginals = [Normal(loc=0., scale=4.), Normal(loc=0., scale=4.)]
    x = MonteCarloSampling(distributions=marginals,
                           nsamples=20,
                           random_state=0)
    model = PythonModel(model_script='series.py', model_object_name="series")
    rmodel = RunModel(model=model)
    regression_model = LinearRegression()
    correlation_model = ExponentialCorrelation()
    K = Kriging(regression_model=regression_model,
                correlation_model=correlation_model,
                optimizer=MinimizeOptimizer('l-bfgs-b'),
                optimizations_number=10,
                correlation_model_parameters=[1, 1],
                random_state=1)
    # OPTIONS: 'U', 'EFF', 'Weighted-U'
    learning_function = WeightedUFunction(weighted_u_stop=2)
    with pytest.raises(NotImplementedError):
        a = AdaptiveKriging(distributions=[Normal(loc=0., scale=4.)] * 3,
                            runmodel_object=rmodel,
                            surrogate=K,
                            learning_nsamples=10**3,
                            n_add=1,
                            learning_function=learning_function,
                            random_state=2,
                            samples=x.samples)
Example #5
def test_dist_object():
    """Validate dist_object, when dist_object is a distribution object."""
    with pytest.raises(BeartypeCallHintPepParamException):
        MonteCarloSampling(distributions='abc', random_state=np.random.RandomState(123))
Example #6
def test_run_random_state():
    """Check if random_state attribute is not an integer, np.random.RandomState object or None, when when 'run' method
    is called."""
    with pytest.raises(BeartypeCallHintPepParamException):
        MonteCarloSampling(distributions=dist1).run(nsamples=5, random_state='abc')
Example #7
def test_random_state2():
    """Check if random_state attribute is not an integer, np.random.RandomState object or None, when dist_object is a
    list of multiple distribution class object."""
    with pytest.raises(BeartypeCallHintPepParamException):
        MonteCarloSampling(distributions=[dist1, dist2], random_state='abc')
Example #8
def test_nsamples_not_integer():
    """Validate error check, when nsamples is not an integer, while calling 'run' method."""
    with pytest.raises(BeartypeCallHintPepParamException):
        MonteCarloSampling(distributions=dist1, random_state=np.random.RandomState(123)).run(nsamples='abc')
Example #9
import numpy as np
import pytest
from beartype.roar import BeartypeCallHintPepParamException

from UQpy.distributions import Normal, MultivariateNormal
from UQpy.sampling import MonteCarloSampling

dist1 = Normal(loc=0., scale=1.)
dist2 = Normal(loc=0., scale=1.)

x = MonteCarloSampling(distributions=dist1, nsamples=5, random_state=np.random.RandomState(123))
x.transform_u01()
y = MonteCarloSampling(distributions=[dist1, dist2])
y.run(nsamples=5, random_state=123)
y.transform_u01()

# Call the run method multiple times, to cover lines where samples are appended to existing ones
z1 = MonteCarloSampling(distributions=dist1, nsamples=2, random_state=123)
z1.run(nsamples=2)
z2 = MonteCarloSampling(distributions=[dist1, dist2], nsamples=2, random_state=np.random.RandomState(123))
z2.run(nsamples=2)
# Same configuration as z2, just to cover lines where random_state is an integer
z3 = MonteCarloSampling(distributions=[dist1, dist2], nsamples=2, random_state=123)

z4 = MonteCarloSampling(distributions=[MultivariateNormal([0, 0])], nsamples=2,
                        random_state=np.random.RandomState(123))
z4.run(nsamples=2)
z4.transform_u01()
dist3 = Normal(loc=0., scale=1.)
del dist3.rvs
z5 = MonteCarloSampling(distributions=[dist3], random_state=np.random.RandomState(123))
Example #10
from UQpy.run_model.model_execution.PythonModel import PythonModel
from UQpy.sampling import MonteCarloSampling, AdaptiveKriging
from UQpy.run_model.RunModel import RunModel
from UQpy.distributions import Uniform
from local_BraninHoo import function
import time
from UQpy.utilities.MinimizeOptimizer import MinimizeOptimizer

# %% md
#
# Using UQpy :class:`MonteCarloSampling` class to generate samples for two random variables, which are uniformly
# distributed

# %%

marginals = [Uniform(loc=-5, scale=15), Uniform(loc=0, scale=15)]
x = MonteCarloSampling(distributions=marginals, nsamples=20)

# %% md
#
# :class:`.RunModel` class is used to define an object to evaluate the model at sample points.

# %%

model = PythonModel(model_script='local_BraninHoo.py',
                    model_object_name='function')
rmodel = RunModel(model=model)

# %% md
#
# :class:`.Kriging` class defines an object to generate a surrogate model for a given set of data.
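
# %%

# A minimal sketch of the Kriging construction this example sets up next, mirroring the test snippets above;
# the Kriging import path below is an assumption, not shown in this excerpt.
from UQpy.surrogates.kriging.regression_models.LinearRegression import LinearRegression
from UQpy.surrogates.kriging.correlation_models.ExponentialCorrelation import ExponentialCorrelation
from UQpy.surrogates.kriging.Kriging import Kriging  # import path assumed

# Linear trend with an exponential correlation kernel; hyperparameters tuned by L-BFGS-B over 10 restarts
K = Kriging(regression_model=LinearRegression(),
            correlation_model=ExponentialCorrelation(),
            optimizer=MinimizeOptimizer('l-bfgs-b'),
            optimizations_number=10,
            correlation_model_parameters=[1, 1])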
Example #11
# %%

normal1 = normal2 = Normal()

# %% md
#
# Next, we'll construct a :code:`MonteCarloSampling` object :code:`mc` to generate random samples following those
# distributions. Here, we specify an optional initial number of samples, :code:`nsamples` to be generated at the
# object's construction. For the purposes of this demonstration, we also supply a random seed :code:`random_state`.
#
# We access the generated samples via the :code:`samples` attribute.

# %%

mc = MonteCarloSampling(distributions=[normal1, normal2],
                        nsamples=5,
                        random_state=RandomState(123))

mc.samples

# %% md
#
# To generate more samples on :code:`mc` after construction, we call :code:`mc.run` and once again specify
# :code:`nsamples`.

# %%

mc.run(nsamples=2, random_state=RandomState(23))

mc.samples
Example #12
import shutil

from beartype.roar import BeartypeCallHintPepParamException

from UQpy.run_model.model_execution.PythonModel import PythonModel
from UQpy.run_model import ThirdPartyModel, RunModel
from UQpy.sampling import MonteCarloSampling
from UQpy.run_model.RunModel import RunModel
from UQpy.distributions import Normal
import pytest
import os
import numpy as np

d = Normal(loc=0, scale=1)
x_mcs = MonteCarloSampling(distributions=[d, d, d],
                           nsamples=5,
                           random_state=1234)
x_mcs_new = MonteCarloSampling(distributions=[d, d, d],
                               nsamples=5,
                               random_state=2345)
verbose_parameter = True

# def test_div_zero():
#     with pytest.raises(TypeError):
#         model = PythonModel(model_script='python_model.py', model_object_name='SumRVs', fmt=20,
#                             delete_files=True)
#         runmodel_object = RunModel_New(model=model)
#
#
# def test_fmt_1():
#     with pytest.raises(TypeError):
Example #13
# Define the distribution objects.

# %%

dist1 = Uniform(loc=15000, scale=10000)
dist2 = Uniform(loc=450000, scale=80000)
dist3 = Uniform(loc=2.0e8, scale=0.5e8)

# %% md
#
# Draw the samples using MCS.

# %%

x = MonteCarloSampling(distributions=[dist1, dist2, dist3] * 6,
                       nsamples=5,
                       random_state=938475)
samples = np.array(x.samples).round(2)

# %% md
#
# Run the model.

# %%

names_ = [
    'fc1', 'fy1', 'Es1', 'fc2', 'fy2', 'Es2', 'fc3', 'fy3', 'Es3', 'fc4',
    'fy4', 'Es4', 'fc5', 'fy5', 'Es5', 'fc6', 'fy6', 'Es6'
]

opensees_rc6_model = RunModel(samples=samples,
Example #14
# %%

m = PythonModel(model_script='local_eigenvalue_model.py',
                model_object_name="RunPythonModel")
model = RunModel(model=m)
# model = RunModel(model_script='local_eigenvalue_model.py')
model.run(samples=y.samples)
r_srom = model.qoi_list

# %% md
#
# :class:`MonteCarloSampling` class is used to generate 1000 samples.

# %%

x_mcs = MonteCarloSampling(distributions=marginals, nsamples=1000)

# %% md
#
# Run the model 'local_eigenvalue_model.py' for each sample generated through the :class:`.MonteCarloSampling` class.

# %%

model.run(samples=x_mcs.samples, append_samples=False)
r_mcs = model.qoi_list

# %% md
#
# Plot the distribution of each eigenvalue, estimated using :class:`.SROM` and :class:`.MonteCarloSampling` weights.

# %%
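# A minimal plotting sketch, assuming each entry of qoi_list holds the eigenvalues of one sample and that the
# object `y` exposes the SROM weights as `y.sample_weights`; both assumptions are not confirmed by this excerpt.
import numpy as np
import matplotlib.pyplot as plt

eig_srom = np.asarray(r_srom)[:, 0]          # first eigenvalue for each SROM sample
eig_mcs = np.sort(np.asarray(r_mcs)[:, 0])   # first eigenvalue for each MCS sample

# Weighted empirical CDF from the SROM samples
order = np.argsort(eig_srom)
cdf_srom = np.cumsum(np.asarray(y.sample_weights)[order])

# Unweighted empirical CDF from the MCS samples
cdf_mcs = np.arange(1, eig_mcs.size + 1) / eig_mcs.size

plt.plot(eig_srom[order], cdf_srom, 'o-', label='SROM')
plt.plot(eig_mcs, cdf_mcs, label='MCS')
plt.xlabel('First eigenvalue')
plt.ylabel('CDF')
plt.legend()
plt.show()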
Example #15
def test_dist_object3():
    """Create a MCS object using DistributionND class object. Validate dist_object, when dist_object is not a
    list of distribution object."""
    with pytest.raises(BeartypeCallHintPepParamException):
        MonteCarloSampling(distributions=['abc'], random_state=np.random.RandomState(123))
Example #16
d2 = Uniform(loc=0.02, scale=0.01)
d3 = Uniform(loc=0.02, scale=0.01)
d4 = Uniform(loc=0.0025, scale=0.0075)
d5 = Uniform(loc=0.02, scale=0.06)
d6 = Uniform(loc=0.02, scale=0.01)
d7 = Uniform(loc=0.02, scale=0.01)
d8 = Uniform(loc=0.0025, scale=0.0075)

# %% md
#
# Draw the samples using MCS.

# %%

x = MonteCarloSampling(distributions=[d1, d2, d3, d4, d5, d6, d7, d8],
                       nsamples=12,
                       random_state=349875)

# %% md
#
# Run the model.

# %%

run_ = RunModel(samples=x.samples,
                ntasks=6,
                model_script='dyna_script.py',
                input_template='dyna_input.k',
                var_names=['x0', 'y0', 'z0', 'R0', 'x1', 'y1', 'z1', 'R1'],
                model_dir='dyna_test',
                cluster=True,
Example #17
from UQpy.run_model.model_execution.PythonModel import PythonModel
from UQpy.run_model.RunModel import RunModel
from UQpy.sampling import MonteCarloSampling
from UQpy.distributions import Normal
from local_series import series
import matplotlib.pyplot as plt
import time
from UQpy.utilities.MinimizeOptimizer import MinimizeOptimizer

# %% md
#
# Using the UQpy :class:`.MonteCarloSampling` class to generate samples for two random variables, which are normally
# distributed with mean :math:`0` and standard deviation :math:`4`.

# %%

marginals = [Normal(loc=0., scale=4.), Normal(loc=0., scale=4.)]
x = MonteCarloSampling(distributions=marginals, nsamples=20, random_state=1)

# %% md
#
# :class:`.RunModel` class is used to define an object to evaluate the model at sample points.

# %%

model = PythonModel(model_script='local_series.py', model_object_name='series')
rmodel = RunModel(model=model)

# %% md
#
# :class:`.Kriging` class defines an object to generate a surrogate model for a given set of data.

# %%
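# A minimal sketch, assembled from the test snippets above, of the surrogate and adaptive learning step this example
# sets up; the Kriging and UFunction import paths are assumptions, not shown in this excerpt.
from UQpy.sampling import AdaptiveKriging
from UQpy.surrogates.kriging.regression_models.LinearRegression import LinearRegression
from UQpy.surrogates.kriging.correlation_models.ExponentialCorrelation import ExponentialCorrelation
from UQpy.surrogates.kriging.Kriging import Kriging  # import path assumed
from UQpy.sampling.adaptive_kriging_functions.UFunction import UFunction  # import path assumed

K = Kriging(regression_model=LinearRegression(),
            correlation_model=ExponentialCorrelation(),
            optimizer=MinimizeOptimizer('l-bfgs-b'),
            optimizations_number=10,
            correlation_model_parameters=[1, 1])

# Adaptive Kriging with the U learning function, enriching the initial 20-sample design up to 25 samples
a = AdaptiveKriging(distributions=marginals,
                    runmodel_object=rmodel,
                    surrogate=K,
                    learning_nsamples=10**3,
                    n_add=1,
                    learning_function=UFunction(u_stop=2),
                    random_state=2)
a.run(nsamples=25, samples=x.samples)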
Example #18
# %% md
#
# Example 1: Three scalar random variables
# ----------------------------------------------------
# In this example, we pass three scalar random variables. Note that this is different from assigning a single variable
# with three components, which will be handled in the following example.
#
# Here we will pass the samples both as an ndarray and as a list. Recall that UQpy converts all samples into an ndarray
# of at least two dimensions internally.

# %%

if pick_model in {'scalar', 'vector', 'all'}:
    d = Normal(loc=0, scale=1)
    x_mcs = MonteCarloSampling(distributions=[d, d, d],
                               nsamples=5,
                               random_state=987979)
    names = ['var1', 'var11', 'var111']

    # UQpy returns samples as an ndarray. Convert them to a list for part 1.2
    x_mcs_list = list(x_mcs.samples)
    print(
        "Monte Carlo samples of three random variables from a standard normal distribution."
    )
    print('Samples stored as an array:')
    print('Data type:', type(x_mcs.samples))
    print('Number of samples:', len(x_mcs.samples))
    print('Dimensions of samples:', np.shape(x_mcs.samples))
    print('Samples')
    print(x_mcs.samples)
    print()
Example #19
# %% md
#
# Towards defining the sampling scheme
# The fire load density is assumed to be uniformly distributed between 50 :math:`MJ/m^2` and 450 :math:`MJ/m^2`.
# The yield strength is assumed to be normally distributed, with a mean of 250 :math:`MPa` and a coefficient of
# variation of :math:`7\%`.
#
# Creating samples using MCS.

# %%

d_fire = Uniform(loc=50, scale=400)          # fire load density ~ Uniform(50, 450) MJ/m^2
d_yield = Normal(loc=2.50e8, scale=1.75e7)   # yield strength ~ Normal(mean 250 MPa, CoV 7%)
x_mcs = MonteCarloSampling(distributions=[d_fire, d_yield],
                           nsamples=100,
                           random_state=987979)

# %% md
#
# Running simulations using the previously defined model object and samples

# %%

sample_points = x_mcs.samples
abaqus_sfe_model.run(samples=sample_points)

# %% md
#
# The outputs from the analysis are the values of the performance function.
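
# %%

# A minimal sketch of collecting those outputs; qoi_list is the standard RunModel output attribute, and treating a
# negative performance function value as failure is an assumption of this sketch.
import numpy as np

qois = np.asarray(abaqus_sfe_model.qoi_list)
pf_estimate = np.mean(qois < 0)   # Monte Carlo estimate of the failure probability
print(pf_estimate)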
Example #20
plt.ylabel(r'Stiffness ($k$)')
plt.grid(True)
plt.tight_layout()
plt.show()

m = PythonModel(model_script='local_Resonance_pfn.py',
                model_object_name="RunPythonModel")
model = RunModel(model=m)

# %% md
#
# Monte Carlo Simulation

# %%

x_mcs = MonteCarloSampling(distributions=[d1, d2])
x_mcs.run(nsamples=1000000)

model.run(samples=x_mcs.samples)

A = np.asarray(model.qoi_list) < 0
pf = np.sum(A) / 1000000
print(pf)

ntrials = 1
pf_stretch = np.zeros((ntrials, 1))
cov1_stretch = np.zeros((ntrials, 1))
cov2_stretch = np.zeros((ntrials, 1))
m = np.ones(2)
m[0] = 5