Example #1
import numpy as np
from UQpy.distributions import Normal
from UQpy.inference import ComputationalModel, LogLikelihoodModel, MLE
from UQpy.run_model.RunModel import RunModel
from UQpy.utilities.MinimizeOptimizer import MinimizeOptimizer

def test_regression_model():
    param_true = np.array([1.0, 2.0]).reshape((1, -1))
    from UQpy.run_model.model_execution.PythonModel import PythonModel
    model = PythonModel(model_script='pfn_models.py',
                        model_object_name='model_quadratic',
                        var_names=['theta_0', 'theta_1'])
    h_func = RunModel(model=model)
    h_func.run(samples=param_true)

    # Add noise
    error_covariance = 1.
    data_clean = np.array(h_func.qoi_list[0])
    noise = Normal(loc=0., scale=np.sqrt(error_covariance)).rvs(
        nsamples=4, random_state=1).reshape((4, ))
    data_3 = data_clean + noise

    candidate_model = ComputationalModel(n_parameters=2,
                                         runmodel_object=h_func,
                                         error_covariance=error_covariance)

    optimizer = MinimizeOptimizer(method='nelder-mead')
    ml_estimator = MLE(inference_model=candidate_model,
                       data=data_3,
                       n_optimizations=1,
                       random_state=1,
                       optimizer=optimizer)

    assert ml_estimator.mle[0] == 0.8689097631871134
    assert ml_estimator.mle[1] == 2.0030767805841143

def test_mle_optimizer(setup):
    h_func = setup
    candidate_model = ComputationalModel(n_parameters=2,
                                         runmodel_object=h_func,
                                         error_covariance=np.ones(4))
    ml_estimator = MLE(inference_model=candidate_model,
                       data=data,
                       initial_parameters=[0., 0.],
                       random_state=123)
    assert round(ml_estimator.mle[0], 3) == -0.039

def test_user_loglike_uniform():
    candidate_model = LogLikelihoodModel(
        n_parameters=2, log_likelihood=user_log_likelihood_uniform)
    optimizer = MinimizeOptimizer(method='nelder-mead',
                                  bounds=((-2, 2), (-2, 2)))
    ml_estimator = MLE(inference_model=candidate_model,
                       data=data,
                       n_optimizations=2,
                       optimizer=optimizer,
                       random_state=123)
    assert round(ml_estimator.mle[0], 3) == 0.786

def test_user_loglike(setup):
    h_func = setup
    candidate_model = ComputationalModel(n_parameters=2,
                                         runmodel_object=h_func,
                                         log_likelihood=user_log_likelihood)
    optimizer = MinimizeOptimizer(method='nelder-mead',
                                  bounds=((-2, 2), (-2, 2)))
    ml_estimator = MLE(inference_model=candidate_model,
                       optimizer=optimizer,
                       data=data,
                       n_optimizations=2,
                       random_state=123)
    assert round(ml_estimator.mle[0], 3) == -0.039
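
The two tests above call user-supplied log-likelihood functions that are not shown in this excerpt. The sketch below illustrates what such callables might look like; the keyword signatures (data, params) and (data, model_outputs, params) are assumptions about how UQpy forwards arguments to these callables, so check the UQpy documentation before reusing them.

from scipy.stats import norm, uniform

def user_log_likelihood_uniform(data, params):
    # Hypothetical stand-in: one log-likelihood value per candidate parameter
    # vector, treating the two parameters as the loc and scale of a uniform.
    return np.array([np.sum(uniform.logpdf(data, loc=p[0], scale=p[1]))
                     for p in np.atleast_2d(params)])

def user_log_likelihood(data, model_outputs, params=None):
    # Hypothetical stand-in: Gaussian log-likelihood of the misfit between the
    # data and each model output returned by the RunModel object.
    return np.array([np.sum(norm.logpdf(np.asarray(data) - np.asarray(out)))
                     for out in model_outputs])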
Example #5
import numpy as np
from UQpy.distributions import Normal
from UQpy.inference import DistributionModel, MLE

def test_simple_probability_model():
    np.random.seed(1)
    mu, sigma = 0, 0.1  # true mean and standard deviation
    data_1 = np.random.normal(mu, sigma, 1000).reshape((-1, 1))
    # set parameters to be learnt as None
    dist = Normal(loc=None, scale=None)
    candidate_model = DistributionModel(distributions=dist, n_parameters=2)

    ml_estimator = MLE(inference_model=candidate_model,
                       data=data_1,
                       n_optimizations=3,
                       random_state=1)

    assert ml_estimator.mle[0] == 0.003881247615960185
    assert ml_estimator.mle[1] == 0.09810041339322118
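
    # Sanity check (added sketch, not part of the original test): for a normal
    # distribution the MLE has a closed form -- the sample mean and the biased
    # sample standard deviation -- so the fit can be verified against numpy.
    # The tolerance below is a guess.
    assert np.isclose(ml_estimator.mle[0], np.mean(data_1), atol=1e-3)
    assert np.isclose(ml_estimator.mle[1], np.std(data_1), atol=1e-3)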
Example #6
import numpy as np
from UQpy.distributions import ChiSquare, Exponential, Gamma, Normal
from UQpy.inference import BIC, ComputationalModel, DistributionModel, InformationModelSelection, MLE
from UQpy.run_model.RunModel import RunModel
from UQpy.run_model.model_execution.PythonModel import PythonModel
from UQpy.utilities.MinimizeOptimizer import MinimizeOptimizer

param_true = np.array([1.0, 2.0]).reshape((1, -1))
print('Shape of true parameter vector: {}'.format(param_true.shape))

model = PythonModel(model_script='local_pfn_models.py', model_object_name='model_quadratic', delete_files=True,
                    var_names=['theta_0', 'theta_1'])
h_func = RunModel(model=model)
h_func.run(samples=param_true)

# Add noise
error_covariance = 1.
data_clean = np.array(h_func.qoi_list[0])
noise = Normal(loc=0., scale=np.sqrt(error_covariance)).rvs(nsamples=50).reshape((50,))
data_3 = data_clean + noise
print('Shape of data: {}'.format(data_3.shape))


#%% md
#
# Then we create an instance of the :class:`.ComputationalModel` class and perform maximum likelihood estimation
# of the two parameters.

#%%

candidate_model = ComputationalModel(n_parameters=2, runmodel_object=h_func, error_covariance=error_covariance)

optimizer = MinimizeOptimizer(method='nelder-mead')
ml_estimator = MLE(inference_model=candidate_model, data=data_3, optimizer=optimizer, n_optimizations=1)
print('fitted parameters: theta_0={0:.3f} (true=1.), and theta_1={1:.3f} (true=2.)'.format(
    ml_estimator.mle[0], ml_estimator.mle[1]))
#%%

names = ['model_linear', 'model_quadratic', 'model_cubic']
estimators = []

for i in range(3):
    model = PythonModel(model_script='pfn_models.py',
                        model_object_name=names[i],
                        var_names=['theta_{}'.format(j) for j in range(i + 1)])
    h_func = RunModel(model=model)
    M = ComputationalModel(runmodel_object=h_func,
                           n_parameters=i + 1,
                           name=names[i],
                           error_covariance=error_covariance)
    estimators.append(MLE(inference_model=M, data=data_1))

#%% md
#
# In addition to the data, the candidate models, and the information criterion (:class:`.BIC`, :class:`.AIC`, ...),
# :class:`.InformationModelSelection` also accepts inputs that are passed through to the maximum likelihood
# estimators. Those inputs must be lists of length len(candidate_models); see the sketch after the next cell for
# how the resulting selector can be inspected.

#%%

from UQpy.utilities.MinimizeOptimizer import MinimizeOptimizer
optimizer = MinimizeOptimizer(method='nelder-mead')
selector = InformationModelSelection(parameter_estimators=estimators,
                                     criterion=BIC(),
                                     n_optimizations=[1] * 3)
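
#%% md
#
# The selector can then be sorted and inspected. The following is a minimal sketch: the sort_models() method and
# the candidate_models / criterion_values attributes are assumptions about the
# :class:`.InformationModelSelection` API, so consult the UQpy documentation before relying on them.

#%%

selector.sort_models()  # sort the candidate models by decreasing model probability
print([model.name for model in selector.candidate_models])
print(selector.criterion_values)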
candidate_model = DistributionModel(n_parameters=5, distributions=d_guess)
print(candidate_model.list_params)

#%% md
#
# When calling :class:`.MLE`, the minimize function from the scipy.optimize package is used by default. The user can
# define bounds for the optimization, fix a seed, choose the algorithm to be used, and have the algorithm perform
# several optimization runs, each starting from a different random initial guess.

#%%

optimizer = MinimizeOptimizer(bounds=[[-5, 5], [0, 10], [-5, 5], [0, 10],
                                      [1.1, 4]],
                              method="SLSQP")
# First run: no initial parameters are given, so the optimization starts from a random initial guess
ml_estimator = MLE(inference_model=candidate_model,
                   data=data_2,
                   optimizer=optimizer)

# Second run: the optimization starts from the user-supplied initial parameters
ml_estimator = MLE(inference_model=candidate_model,
                   data=data_2,
                   optimizer=optimizer,
                   initial_parameters=[1., 1., 1., 1., 4.])

print(
    'ML estimates of the mean={0:.3f} and std. dev={1:.3f} of 1st marginal (true: 0.0, 1.0)'
    .format(ml_estimator.mle[0], ml_estimator.mle[1]))
print(
    'ML estimates of the mean={0:.3f} and std. dev={1:.3f} of 2nd marginal (true: 0.0, 1.0)'
    .format(ml_estimator.mle[2], ml_estimator.mle[3]))
print('ML estimates of the copula parameter={0:.3f} (true: 2.0)'.format(
    ml_estimator.mle[4]))
# Define the models to be compared; for each model, create an instance of the DistributionModel class

m0 = DistributionModel(distributions=Gamma(a=None, loc=None, scale=None),
                       n_parameters=3,
                       name='gamma')
m1 = DistributionModel(distributions=Exponential(loc=None, scale=None),
                       n_parameters=2,
                       name='exponential')
m2 = DistributionModel(distributions=ChiSquare(df=None, loc=None, scale=None),
                       n_parameters=3,
                       name='chi-square')

candidate_models = [m0, m1, m2]

mle1 = MLE(inference_model=m0, random_state=0, data=data)
mle2 = MLE(inference_model=m1, random_state=0, data=data)
mle3 = MLE(inference_model=m2, random_state=0, data=data)

#%% md
#
# Perform model selection using different information criteria

#%%

from UQpy.inference import BIC, AIC, AICc

criteria = [BIC(), AIC(), AICc()]
sorted_names = []
criterion_value = []
param = []
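
#%% md
#
# A possible completion of the loop, given as a sketch rather than the original script: the sort_models() method
# and the candidate_models / criterion_values / parameter_estimators attributes are again assumptions about the
# :class:`.InformationModelSelection` API.

#%%

for criterion in criteria:
    selector = InformationModelSelection(parameter_estimators=[mle1, mle2, mle3],
                                         criterion=criterion,
                                         n_optimizations=[1] * 3)
    selector.sort_models()  # sort the candidate models by decreasing model probability
    sorted_names.append([model.name for model in selector.candidate_models])
    criterion_value.append(selector.criterion_values)
    param.append([estimator.mle for estimator in selector.parameter_estimators])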