Example 1
    def runUQ(self, uqData, simulationData, randomVarsData, demandParams,
              workingDir, runType, localAppDir, remoteAppDir):
        """
        This function configures and runs a UQ simulation using UQpy based on the
        input UQ configuration, simulation configuration, random variables,
        and requested demand parameters.
        
        Input:
        uqData:         JsonObject that contains the UQ options as input in the quoFEM GUI
        simulationData: JsonObject that contains information on the analysis package to run and its
                    configuration as input in the quoFEM GUI
        randomVarsData: JsonObject that specifies the input random variables, their distributions,
                    and associated parameters as input in the quoFEM GUI
        demandParams:   JsonObject that specifies the demand parameters as input in the quoFEM GUI
        workingDir:     Directory in which to run simulations and store temporary results
        runType:        Specifies whether computations are being run locally or on an HPC cluster
        localAppDir:    Directory containing apps for local run
        remoteAppDir:   Directory containing apps for remote run
        """

        # There is still plenty of configuration that can and should be added here. This currently does LHS
        # sampling with Uniform distributions only, though this is easily expanded (see the commented sketch in
        # the random-variable loop below).

        # Copy required python files to template directory
        shutil.copyfile(
            os.path.join(localAppDir,
                         'applications/performUQ/other/runWorkflowDriver.py'),
            os.path.join(workingDir, 'runWorkflowDriver.py'))
        shutil.copyfile(
            os.path.join(localAppDir,
                         'applications/performUQ/other/createTemplate.py'),
            os.path.join(workingDir, 'createTemplate.py'))
        shutil.copyfile(
            os.path.join(localAppDir,
                         'applications/performUQ/other/processUQpyOutput.py'),
            os.path.join(workingDir, 'processUQpyOutput.py'))

        # Parse configuration for UQ
        distributionNames = []
        distributionParams = []
        variableNames = []
        distributionObjects = []
        samples = []
        samplingMethod = ""
        numberOfSamples = 0
        modelScript = 'runWorkflowDriver.py'
        inputTemplate = 'params.template'
        outputObjectName = 'OutputProcessor'
        outputScript = 'processUQpyOutput.py'
        numberOfTasks = 1
        numberOfNodes = 1
        coresPerTask = 1
        clusterRun = False
        resumeRun = False

        # If computations are being executed on HPC, enable UQpy to start computations using srun
        if runType == "runningRemote":
            clusterRun = True

        for val in randomVarsData:
            if val["distribution"] == "Uniform":
                distributionNames.append('Uniform')
                variableNames.append(val["name"])
                distributionParams.append(
                    [val["lowerbound"], val["upperbound"]])
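            # One possible extension (hedged sketch only; the JSON keys "mean" and
            # "standardDeviation" are assumptions, check the quoFEM RV schema before use):
            # elif val["distribution"] == "Normal":
            #     distributionNames.append('Normal')
            #     variableNames.append(val["name"])
            #     distributionParams.append([val["mean"], val["standardDeviation"]])
            # A corresponding distribution object (e.g. Distributions.Normal) would then
            # need to be created in the loop below.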
            else:
                raise IOError("ERROR: You'll need to update UQpyRunner.py to run your" +\
                              " specified RV distribution!")

        for val in uqData["Parameters"]:
            if val["name"] == "Sampling Method":
                samplingMethod = val["value"]

            if val["name"] == "Number of Samples":
                numberOfSamples = val["value"]

            if val["name"] == "Number of Concurrent Tasks":
                numberOfTasks = val["value"]

            if val["name"] == "Number of Nodes":
                numberOfNodes = val["value"]

            if val["name"] == "Cores per Task":
                coresPerTask = val["value"]

        # Create distribution objects
        for index, val in enumerate(distributionNames, 0):
            distributionObjects.append(
                Distributions.Uniform(distributionParams[index][0],
                                      distributionParams[index][1]))

        createTemplate(variableNames, inputTemplate)

        # Generate samples
        if samplingMethod == "LHS":
            samples = LHS(dist_object=distributionObjects, lhs_criterion='random',\
                          lhs_iter=None, nsamples=numberOfSamples, var_names=variableNames)
            # samples = LHS(dist_name=distributionNames, dist_params=distributionParams, lhs_criterion='random',\
            #               lhs_iter=None, nsamples=numberOfSamples, var_names=variableNames)
        else:
            raise IOError("ERROR: You'll need to update UQpyRunner.py to run your specified" +\
                          " sampling method!")

        # Change workdir to the template directory
        os.chdir(workingDir)

        # Run model based on input config
        startTime = time.time()
        model = RunModel(samples=samples.samples,
                         model_script=modelScript,
                         input_template=inputTemplate,
                         var_names=variableNames,
                         output_script=outputScript,
                         output_object_name=outputObjectName,
                         verbose=True,
                         ntasks=numberOfTasks,
                         nodes=numberOfNodes,
                         cores_per_task=coresPerTask,
                         cluster=clusterRun,
                         resume=resumeRun)

        runTime = time.time() - startTime
        print("\nTotal time for all experiments: ", runTime)

        with open(os.path.join(workingDir, '..', 'tabularResults.out'),
                  'w') as f:
            f.write("%eval_id\t interface\t")

            for val in variableNames:
                f.write("%s\t" % val)

            for val in demandParams:
                f.write("%s\t" % val["name"])

            f.write("\n")

            for index, experiment in enumerate(model.qoi_list, 0):
                if len(experiment) != 0:
                    for item in experiment:
                        f.write("%s\t custom\t" % (index + 1))
                        for sample in samples.samples[index]:
                            f.write("%s\t" % sample)

                        for result in item:
                            f.write("%s\t" % result)

                        f.write("\n")
Example 2
import time

import numpy as np

from UQpy.SampleMethods import MCMC
from UQpy.RunModel import RunModel


def Rosenbrock(x, params):
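    # Unnormalized Rosenbrock-shaped target density; params[0] acts as a temperature / scale factor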
    return np.exp(-(100 * (x[1] - x[0]**2)**2 + (1 - x[0])**2) / params[0])


t = time.time()
x = MCMC(dimension=2,
         pdf_proposal_type='Normal',
         pdf_target_type='joint_pdf',
         pdf_target=Rosenbrock,
         pdf_target_params=[20],
         algorithm='MMH',
         jump=100,
         nsamples=15,
         seed=None)
t_MCMC = time.time() - t
print(t_MCMC)

np.savetxt('UQpy_Samples.txt', x.samples, fmt='%0.5f')

t = time.time()
z = RunModel(cpu=1,
             model_type=None,
             model_script='UQpy_Model.sh',
             input_script='UQpy_Input.sh',
             output_script='UQpy_Output.sh',
             dimension=2)
t_run = time.time() - t
print(t_run)

print('Samples', z.model_eval.samples)
print('Solutions', z.model_eval.QOI)
Example 3
import glob
import math
import os
import pickle
import time

import numpy as np

from UQpy.Distributions import MVNormal
from UQpy.Reliability import SubsetSimulation
from UQpy.RunModel import RunModel
from UQpy.SampleMethods import MMH

calling_directory = os.getcwd()
t = time.time()

var_names = ['qtd', 'fy']

abaqus_sfe_model = RunModel(
    model_script='abaqus_subset_sfe_model_script.py',
    input_template='abaqus_input_subset_sfe.py',
    output_script='extract_abaqus_output_subset_sfe.py',
    var_names=['qtd', 'fy'],
    model_dir='Subset_SFE',
    ntasks=24)
print('Example: Created the model object.')

# Specify the target distribution. This is standard normal for use with subset simulation in UQpy.
dist = MVNormal(mean=np.zeros(2), cov=np.eye(2))

# Define the initial samples from the distribution
x = dist.rvs(nsamples=1000, random_state=834765)

# Run Subset Simulation
x_ss = SubsetSimulation(mcmc_class=MMH,
                        runmodel_object=abaqus_sfe_model,
                        samples_init=x,
Example 4
    def form_hl(self):
        n = self.dimension  # number of random variables (dimension)
        # initialization
        max_iter = self.n_iter
        tol = 1e-5
        u = np.zeros([max_iter + 1, n])
        if self.seed is not None:
            u[0, :] = Nataf(dimension=self.dimension,
                            input_samples=self.seed.reshape(1, -1),
                            dist_name=self.dist_name,
                            dist_params=self.dist_params,
                            corr=self.corr).samples
        x = np.zeros_like(u)
        beta = np.zeros(max_iter)
        converge_ = False

        for k in range(max_iter):
            # transform the initial point in the original space:  U to X
            u_x = InvNataf(dimension=self.dimension,
                           input_samples=u[k, :].reshape(1, -1),
                           dist_name=self.dist_name,
                           dist_params=self.dist_params,
                           corr_norm=self.corr)

            x[k, :] = u_x.samples
            jacobian = u_x.jacobian[0]
            # 1. evaluate Limit State Function at point

            g = RunModel(samples=x[k, :].reshape(1, -1),
                         model_script=self.model_script,
                         model_object_name=self.model_object_name,
                         input_template=self.input_template,
                         var_names=self.var_names,
                         output_script=self.output_script,
                         output_object_name=self.output_object_name,
                         ntasks=self.n_tasks,
                         cores_per_task=self.cores_per_task,
                         nodes=self.nodes,
                         resume=self.resume,
                         verbose=self.verbose,
                         model_dir=self.model_dir,
                         cluster=self.cluster)

            # 2. evaluate Limit State Function gradient at point u_k and direction cosines
            dg = gradient(sample=x[k, :].reshape(1, -1),
                          dimension=self.dimension,
                          eps=0.1,
                          model_script=self.model_script,
                          model_object_name=self.model_object_name,
                          input_template=self.input_template,
                          var_names=self.var_names,
                          output_script=self.output_script,
                          output_object_name=self.output_object_name,
                          ntasks=self.n_tasks,
                          cores_per_task=self.cores_per_task,
                          nodes=self.nodes,
                          resume=self.resume,
                          verbose=self.verbose,
                          model_dir=self.model_dir,
                          cluster=self.cluster,
                          order='second')
            try:
                p = np.linalg.solve(jacobian, dg[0, :])
            except:
                print('Bad transformation')
                if self.method == 'FORM':
                    u_star = np.inf
                    x_star = np.inf
                    beta = np.inf
                    pf = np.inf

                    return u_star, x_star, beta, pf, [], k

                elif self.method == 'SORM':
                    u_star = np.inf
                    x_star = np.inf
                    beta = np.inf
                    pf = np.inf
                    pf_srom = np.inf

                    return u_star, x_star, beta, pf, pf_srom, k

            if np.isnan(p).any():
                print('Bad transformation')
                if self.method == 'FORM':
                    u_star = np.inf
                    x_star = np.inf
                    beta = np.inf
                    pf = np.inf

                    return u_star, x_star, beta, pf, [], k

                elif self.method == 'SORM':
                    u_star = np.inf
                    x_star = np.inf
                    beta = np.inf
                    pf = np.inf
                    pf_srom = np.inf

                    return u_star, x_star, beta, pf, pf_srom, k

            norm_grad = np.linalg.norm(p)
            alpha = p / norm_grad
            alpha = alpha.squeeze()
            # 3. calculate first order beta
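            # HL-RF update: beta_{k+1} = -u_k . alpha + g(x_k) / ||grad g||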
            beta[k + 1] = -np.inner(u[k, :].T, alpha) + g.qoi_list[0] / norm_grad
            # 4. calculate u_{k+1}
            u[k + 1, :] = -beta[k + 1] * alpha
            # next iteration
            if np.linalg.norm(u[k + 1, :] - u[k, :]) <= tol:
                converge_ = True
                # delete unnecessary data
                u = u[:k + 1, :]
                # compute design point, reliability index and Pf
                u_star = u[-1, :]
                # transform points in the original space
                u_x = InvNataf(dimension=self.dimension,
                               input_samples=u_star.reshape(1, -1),
                               dist_name=self.dist_name,
                               dist_params=self.dist_params,
                               corr_norm=self.corr)
                x_star = u_x.samples
                beta = beta[k]
                pf = stats.norm.cdf(-beta)
                if self.method == 'SORM':
                    k = 3 * (k + 1) + 5
                    der_ = dg[1, :]
                    mixed_der = gradient(
                        sample=x_star.reshape(1, -1),
                        eps=0.1,
                        dimension=self.dimension,
                        model_script=self.model_script,
                        model_object_name=self.model_object_name,
                        input_template=self.input_template,
                        var_names=self.var_names,
                        output_script=self.output_script,
                        output_object_name=self.output_object_name,
                        ntasks=self.n_tasks,
                        cores_per_task=self.cores_per_task,
                        nodes=self.nodes,
                        resume=self.resume,
                        verbose=self.verbose,
                        model_dir=self.model_dir,
                        cluster=self.cluster,
                        order='mixed')

                    hessian = eval_hessian(self.dimension, mixed_der, der_)
                    q = np.eye(self.dimension)
                    q[:, 0] = u_star.T
                    q_, r_ = np.linalg.qr(q)
                    q0 = np.fliplr(q_)
                    a = np.dot(np.dot(q0.T, hessian), q0)
                    if self.dimension > 1:
                        jay = (np.eye(self.dimension - 1) +
                               beta * a[:self.dimension - 1, :self.dimension - 1] / norm_grad)
                    elif self.dimension == 1:
                        jay = (np.eye(self.dimension) +
                               beta * a[:self.dimension, :self.dimension] / norm_grad)
                    correction = 1 / np.sqrt(np.linalg.det(jay))
                    pf_srom = pf * correction

                    return u_star, x_star, beta, pf, pf_srom, k

                elif self.method == 'FORM':
                    k = 3 * (k + 1)
                    return u_star, x_star[0], beta, pf, [], k
            else:
                continue

        if converge_ is False:
            print("{0} did not converge".format(self.method))

            if self.method == 'FORM':
                u_star = np.inf
                x_star = np.inf
                beta = np.inf
                pf = np.inf

                return u_star, x_star, beta, pf, [], k

            elif self.method == 'SORM':
                u_star = np.inf
                x_star = np.inf
                beta = np.inf
                pf = np.inf
                pf_srom = np.inf

                return u_star, x_star, beta, pf, pf_srom, k
Example 5
# prediction_results=K.predict(prediction_sampling.samples.reshape([1000, 1]), return_std=False)

#Solution 2.2
from UQpy.RunModel import RunModel
from UQpy.Distributions import *
from UQpy.SampleMethods import LHS, MCS
import numpy as np
import matplotlib.pyplot as plt
from UQpy.Surrogates import *
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter

model_serial_third_party = RunModel(
    model_script='PythonAsThirdParty_model.py',
    input_template='elastic_contact_sphere.py',
    var_names=['k', 'f0'],
    output_script='process_3rd_party_output.py',
    model_object_name='read_output',
    delete_files=True)

distribution_k_case_1 = Lognormal(loc=1e5, scale=2e4)
distribution_f0_case_1 = Uniform(loc=1e-2, scale=9e-2)
joint = JointInd(marginals=[distribution_k_case_1, distribution_f0_case_1])
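# JointInd combines the independent marginals so LHS can sample 'k' and 'f0' jointly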
sampling_1 = LHS(dist_object=joint, nsamples=100, verbose=True)
samples = sampling_1.samples

model_serial_third_party.run(samples=samples)
qoi = model_serial_third_party.qoi_list

maximum_indentation = list()
for result in qoi:
Example 6
def gradient(sample=None,
             dimension=None,
             eps=None,
             model_script=None,
             model_object_name=None,
             input_template=None,
             var_names=None,
             output_script=None,
             output_object_name=None,
             ntasks=None,
             cores_per_task=None,
             nodes=None,
             resume=None,
             verbose=None,
             model_dir=None,
             cluster=None,
             order=None):
    """
         Description: A function to estimate the gradients (first-order, second-order, mixed) of a model using finite differences


         Input:
             :param sample: The sample values at which the gradient of the model will be evaluated. Samples can be
             passed directly as an array or through the text file 'UQpy_Samples.txt'.
             If passing samples via the text file, set sample = None or do not set the sample input.
             :type sample: ndarray

             :param order: The type of derivatives to calculate (1st order, second order, mixed).
             :type order: str

             :param dimension: Number of random variables.
             :type dimension: int

             :param eps: Step size for the finite differences. A float applies the same step to every dimension;
              a list gives one step per dimension.
             :type eps: float or list

             :param model_script: The filename of the Python script which contains commands to execute the model

             :param model_object_name: The name of the function or class which executes the model

             :param input_template: The name of the template input file which will be used to generate input files for
              each run of the model. Refer to the documentation for more details.

             :param var_names: A list containing the names of the variables which are present in the template input
              files

             :param output_script: The filename of the Python script which contains the commands to process the output

             :param output_object_name: The name of the function or class which has the output values. If the object
              is a class named cls, the output must be saved as cls.qoi. If it is a function, it should return the output
              quantity of interest

             :param ntasks: Number of tasks to be run in parallel. RunModel uses GNU parallel to execute models which
              require an input template

             :param cores_per_task: Number of cores to be used by each task

             :param nodes: Number of compute nodes to use (on MARCC, for example, each node has 24 cores). Specify
              the number of nodes if more than one node is required.

             :param resume: This option can be set to True if a parallel execution of a model with input template
              failed to finish running all jobs. GNU parallel will then run only the jobs which failed to execute.

             :param verbose: This option can be set to False if you do not want RunModel to print status messages to
              the screen during execution. It is True by default.

             :param model_dir: The directory that contains the Python script with the commands to execute the
             model

             :param cluster: This option defines whether the model is executed on a cluster

         Output:
             :return du_dj: vector of first-order gradients
             :rtype: ndarray
             :return d2u_dj: vector of second-order gradients
             :rtype: ndarray
             :return d2u_dij: vector of mixed gradients
             :rtype: ndarray
     """

    import numpy as np
    from UQpy.RunModel import RunModel

    if order is None:
        raise ValueError(
            'Exit code: Provide type of derivatives: first, second or mixed.')

    if dimension is None:
        raise ValueError('Error: Dimension must be defined')

    if eps is None:
        eps = [0.1] * dimension
    elif isinstance(eps, float):
        eps = [eps] * dimension
    elif isinstance(eps, list):
        if len(eps) != 1 and len(eps) != dimension:
            raise ValueError('Exit code: Inconsistent dimensions.')
        if len(eps) == 1:
            eps = [eps[0]] * dimension

    if order == 'first' or order == 'second':
        du_dj = np.zeros(dimension)
        d2u_dj = np.zeros(dimension)
        for i in range(dimension):
            x_i1_j = np.array(sample)
            x_i1_j[0, i] += eps[i]
            x_1i_j = np.array(sample)
            x_1i_j[0, i] -= eps[i]

            g0 = RunModel(samples=x_i1_j,
                          model_script=model_script,
                          model_object_name=model_object_name,
                          input_template=input_template,
                          var_names=var_names,
                          output_script=output_script,
                          output_object_name=output_object_name,
                          ntasks=ntasks,
                          cores_per_task=cores_per_task,
                          nodes=nodes,
                          resume=resume,
                          verbose=verbose,
                          model_dir=model_dir,
                          cluster=cluster)

            g1 = RunModel(samples=x_1i_j,
                          model_script=model_script,
                          model_object_name=model_object_name,
                          input_template=input_template,
                          var_names=var_names,
                          output_script=output_script,
                          output_object_name=output_object_name,
                          ntasks=ntasks,
                          cores_per_task=cores_per_task,
                          nodes=nodes,
                          resume=resume,
                          verbose=verbose,
                          model_dir=model_dir,
                          cluster=cluster)

            du_dj[i] = (g0.qoi_list[0] - g1.qoi_list[0]) / (2 * eps[i])

            if order == 'second':
                g = RunModel(samples=sample,
                             model_script=model_script,
                             model_object_name=model_object_name,
                             input_template=input_template,
                             var_names=var_names,
                             output_script=output_script,
                             output_object_name=output_object_name,
                             ntasks=ntasks,
                             cores_per_task=cores_per_task,
                             nodes=nodes,
                             resume=resume,
                             verbose=verbose,
                             model_dir=model_dir,
                             cluster=cluster)

                d2u_dj[i] = (g0.qoi_list[0] - 2 * g.qoi_list[0] +
                             g1.qoi_list[0]) / (eps[i]**2)

        return np.vstack([du_dj, d2u_dj])

    elif order == 'mixed':
        import itertools
        range_ = list(range(dimension))
        d2u_dij = list()
        for i in itertools.combinations(range_, 2):
            x_i1_j1 = np.array(sample)
            x_i1_1j = np.array(sample)
            x_1i_j1 = np.array(sample)
            x_1i_1j = np.array(sample)

            x_i1_j1[0, i[0]] += eps[i[0]]
            x_i1_j1[0, i[1]] += eps[i[1]]

            x_i1_1j[0, i[0]] += eps[i[0]]
            x_i1_1j[0, i[1]] -= eps[i[1]]

            x_1i_j1[0, i[0]] -= eps[i[0]]
            x_1i_j1[0, i[1]] += eps[i[1]]

            x_1i_1j[0, i[0]] -= eps[i[0]]
            x_1i_1j[0, i[1]] -= eps[i[1]]

            g0 = RunModel(samples=x_i1_j1,
                          model_script=model_script,
                          model_object_name=model_object_name,
                          input_template=input_template,
                          var_names=var_names,
                          output_script=output_script,
                          output_object_name=output_object_name,
                          ntasks=ntasks,
                          cores_per_task=cores_per_task,
                          nodes=nodes,
                          resume=resume,
                          verbose=verbose,
                          model_dir=model_dir,
                          cluster=cluster)

            g1 = RunModel(samples=x_i1_1j,
                          model_script=model_script,
                          model_object_name=model_object_name,
                          input_template=input_template,
                          var_names=var_names,
                          output_script=output_script,
                          output_object_name=output_object_name,
                          ntasks=ntasks,
                          cores_per_task=cores_per_task,
                          nodes=nodes,
                          resume=resume,
                          verbose=verbose,
                          model_dir=model_dir,
                          cluster=cluster)

            g2 = RunModel(samples=x_1i_j1,
                          model_script=model_script,
                          model_object_name=model_object_name,
                          input_template=input_template,
                          var_names=var_names,
                          output_script=output_script,
                          output_object_name=output_object_name,
                          ntasks=ntasks,
                          cores_per_task=cores_per_task,
                          nodes=nodes,
                          resume=resume,
                          verbose=verbose,
                          model_dir=model_dir,
                          cluster=cluster)

            g3 = RunModel(samples=x_1i_1j,
                          model_script=model_script,
                          model_object_name=model_object_name,
                          input_template=input_template,
                          var_names=var_names,
                          output_script=output_script,
                          output_object_name=output_object_name,
                          ntasks=ntasks,
                          cores_per_task=cores_per_task,
                          nodes=nodes,
                          resume=resume,
                          verbose=verbose,
                          model_dir=model_dir,
                          cluster=cluster)

            d2u_dij.append((g0.qoi_list[0] - g1.qoi_list[0] - g2.qoi_list[0] +
                            g3.qoi_list[0]) / (4 * eps[i[0]] * eps[i[1]]))

        return np.array(d2u_dij)
Example 7
    def run_subsim_stretch(self):
        step = 0
        n_keep = int(self.p_cond * self.nsamples_ss)

        # Generate the initial samples - Level 0
        if self.samples_init is None:
            x_init = MCMC(
                dimension=self.dimension,
                pdf_proposal_type=self.pdf_proposal_type,
                pdf_proposal_scale=self.pdf_proposal_scale,
                pdf_target=self.pdf_target,
                log_pdf_target=self.log_pdf_target,
                pdf_target_params=self.pdf_target_params,
                pdf_target_copula=self.pdf_target_copula,
                pdf_target_copula_params=self.pdf_target_copula_params,
                pdf_target_type=self.pdf_target_type,
                algorithm='MMH',
                jump=self.jump,
                nsamples=self.nsamples_ss,
                seed=self.seed,
                nburn=self.nburn,
                verbose=self.verbose)
            self.samples.append(x_init.samples)
        else:
            self.samples.append(self.samples_init)

        g_init = RunModel(samples=self.samples[step],
                          model_script=self.model_script,
                          model_object_name=self.model_object_name,
                          input_template=self.input_template,
                          var_names=self.var_names,
                          output_script=self.output_script,
                          output_object_name=self.output_object_name,
                          ntasks=self.n_tasks,
                          cores_per_task=self.cores_per_task,
                          nodes=self.nodes,
                          resume=self.resume,
                          verbose=self.verbose,
                          model_dir=self.model_dir,
                          cluster=self.cluster)

        self.g.append(np.asarray(g_init.qoi_list))
        g_ind = np.argsort(self.g[step])
        self.g_level.append(self.g[step][g_ind[n_keep]])

        # Estimate coefficient of variation of conditional probability of first level
        d1, d2 = self.cov_sus(step)
        self.d12.append(d1**2)
        self.d22.append(d2**2)

        while self.g_level[step] > 0:

            step = step + 1
            self.samples.append(self.samples[step - 1][g_ind[0:n_keep]])
            self.g.append(self.g[step - 1][g_ind[:n_keep]])

            for i in range(self.nsamples_ss - n_keep):

                x0 = self.samples[step][i:i + n_keep]

                x_mcmc = MCMC(
                    dimension=self.dimension,
                    pdf_proposal_type=self.pdf_proposal_type,
                    pdf_proposal_scale=self.pdf_proposal_scale,
                    pdf_target=self.pdf_target,
                    log_pdf_target=self.log_pdf_target,
                    pdf_target_params=self.pdf_target_params,
                    pdf_target_copula=self.pdf_target_copula,
                    pdf_target_copula_params=self.pdf_target_copula_params,
                    pdf_target_type=self.pdf_target_type,
                    algorithm=self.algorithm,
                    jump=self.jump,
                    nsamples=n_keep + 1,
                    seed=x0,
                    nburn=self.nburn,
                    verbose=self.verbose)

                x_temp = x_mcmc.samples[n_keep].reshape((1, self.dimension))
                g_model = RunModel(samples=x_temp,
                                   model_script=self.model_script,
                                   model_object_name=self.model_object_name,
                                   input_template=self.input_template,
                                   var_names=self.var_names,
                                   output_script=self.output_script,
                                   output_object_name=self.output_object_name,
                                   ntasks=self.n_tasks,
                                   cores_per_task=self.cores_per_task,
                                   nodes=self.nodes,
                                   resume=self.resume,
                                   verbose=self.verbose,
                                   model_dir=self.model_dir,
                                   cluster=self.cluster)

                g_temp = g_model.qoi_list

                # Accept or reject the sample
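                # The candidate is accepted only if it falls below the previous conditional
                # threshold, i.e. lies inside the intermediate failure domain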
                if g_temp < self.g_level[step - 1]:
                    self.samples[step] = np.vstack(
                        (self.samples[step], x_temp))
                    self.g[step] = np.hstack((self.g[step], g_temp[0]))
                else:
                    self.samples[step] = np.vstack(
                        (self.samples[step], self.samples[step][i]))
                    self.g[step] = np.hstack((self.g[step], self.g[step][i]))

            g_ind = np.argsort(self.g[step])
            self.g_level.append(self.g[step][g_ind[n_keep]])
            d1, d2 = self.cov_sus(step)
            self.d12.append(d1**2)
            self.d22.append(d2**2)

        n_fail = len([value for value in self.g[step] if value < 0])
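        # Subset simulation estimator: pf = p_cond**step * (n_fail / nsamples_ss), where
        # step is the number of completed conditional levels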
        pf = self.p_cond**step * n_fail / self.nsamples_ss
        cov1 = np.sqrt(np.sum(self.d12))
        cov2 = np.sqrt(np.sum(self.d22))

        return pf, cov1, cov2
Example 8
    def run_subsim_mmh(self):
        step = 0
        n_keep = int(self.p_cond * self.nsamples_ss)

        # Generate the initial samples - Level 0
        if self.samples_init is None:
            x_init = MCMC(dimension=self.dimension,
                          pdf_proposal_type=self.pdf_proposal_type,
                          pdf_proposal_scale=self.pdf_proposal_scale,
                          pdf_target=self.pdf_target,
                          pdf_target_params=self.pdf_target_params,
                          algorithm=self.algorithm,
                          nsamples=self.nsamples_ss,
                          seed=self.seed)
            self.samples.append(x_init.samples)
        else:
            self.samples.append(self.samples_init)

        g_init = RunModel(samples=self.samples[step],
                          model_script=self.model_script,
                          ntasks=self.ntasks,
                          model_object_name=self.model_object_name)

        self.g.append(np.asarray(g_init.qoi_list).reshape((-1, )))
        g_ind = np.argsort(self.g[step])
        self.g_level.append(self.g[step][g_ind[n_keep]])

        # Estimate coefficient of variation of conditional probability of first level
        d1, d2 = self.cov_sus(step)
        self.d12.append(d1**2)
        self.d22.append(d2**2)

        while self.g_level[step] > 0 and step < self.max_level:

            step = step + 1
            self.samples.append(self.samples[step - 1][g_ind[0:n_keep]])
            self.g.append(self.g[step - 1][g_ind[:n_keep]])

            for i in range(self.nsamples_ss - n_keep):
                x0 = self.samples[step][i].reshape((-1, self.dimension))

                x_mcmc = MCMC(dimension=self.dimension,
                              pdf_proposal_type=self.pdf_proposal_type,
                              pdf_proposal_scale=self.pdf_proposal_scale,
                              pdf_target=self.pdf_target,
                              pdf_target_params=self.pdf_target_params,
                              algorithm=self.algorithm,
                              nsamples=2,
                              seed=x0)

                x_temp = x_mcmc.samples[1].reshape((1, self.dimension))
                # x_temp = x_mcmc.samples[1]
                g_model = RunModel(samples=x_temp,
                                   model_script=self.model_script,
                                   ntasks=self.ntasks,
                                   model_object_name=self.model_object_name)

                g_temp = g_model.qoi_list

                # Accept or reject the sample
                if g_temp < self.g_level[step - 1]:
                    self.samples[step] = np.vstack(
                        (self.samples[step], x_temp))
                    self.g[step] = np.hstack((self.g[step], g_temp[0]))
                else:
                    self.samples[step] = np.vstack(
                        (self.samples[step], self.samples[step][i]))
                    self.g[step] = np.hstack((self.g[step], self.g[step][i]))

            g_ind = np.argsort(self.g[step])
            self.g_level.append(self.g[step][g_ind[n_keep]])
            # Estimate coefficient of variation of conditional probability for the current level
            d1, d2 = self.cov_sus(step)
            self.d12.append(d1**2)
            self.d22.append(d2**2)

        n_fail = len([value for value in self.g[step] if value < 0])

        pf = self.p_cond**step * n_fail / self.nsamples_ss
        cov1 = np.sqrt(np.sum(self.d12))
        cov2 = np.sqrt(np.sum(self.d22))

        return pf, cov1, cov2
Example 9
import numpy as np

from UQpy.Distributions import Uniform
from UQpy.RunModel import RunModel
from UQpy.SampleMethods import MCS

dist1 = Uniform(loc=15000, scale=10000)
dist2 = Uniform(loc=450000, scale=80000)
dist3 = Uniform(loc=2.0e8, scale=0.5e8)

names_ = [
    'fc1', 'fy1', 'Es1', 'fc2', 'fy2', 'Es2', 'fc3', 'fy3', 'Es3', 'fc4',
    'fy4', 'Es4', 'fc5', 'fy5', 'Es5', 'fc6', 'fy6', 'Es6'
]
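# Three marginals repeated six times give 18 independent random variables, one per name above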

x = MCS(dist_object=[dist1, dist2, dist3] * 6, nsamples=5, random_state=938475)
samples = np.array(x.samples).round(2)

opensees_rc6_model = RunModel(samples=samples,
                              ntasks=5,
                              model_script='opensees_model.py',
                              input_template='import_variables.tcl',
                              var_names=names_,
                              model_object_name="opensees_run",
                              output_script='process_opensees_output.py',
                              output_object_name='read_output')

outputs = opensees_rc6_model.qoi_list
print(outputs)
Example 10
    def form_hl(self):

        # Hasofer-Lind (HL) algorithm
        import scipy as sp
        n = self.dimension  # number of random variables (dimension)

        # initialization
        max_iter = int(1e3)
        tol = 1e-5
        # Correlation matrix of the random variables in the original space
        u = np.zeros([max_iter+1, n])
        beta = np.zeros(max_iter)

        # HL method
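        # Iterate u_{k+1} = -beta_k * alpha_k until ||u_{k+1} - u_k|| <= tol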
        for k in range(max_iter):

            if k == 0:
                u[k, :] = np.array(self.init_design_point)

            from UQpy.SampleMethods import InvNataf
            dist = InvNataf(samples=u[k, :], dimension=self.dimension, dist_name=self.dist_name,
                            corr=self.corr, dist_params=self.dist_params)

            # 1. evaluate Limit State Function at point
            g = RunModel(samples=dist.samples, model_type=self.model_type, model_script=self.model_script,
                         input_script=self.input_script, output_script=self.output_script,
                         dimension=self.dimension)

            # 2. evaluate Limit State Function gradient at point u_k and direction cosines
            if self.deriv_script is None:
                raise RuntimeError('A python script that provides the derivatives of the limit state function'
                                   'is required for the Hasofer-Lind method.')
            else:
                dg = RunModel(samples=dist.samples_z.reshape(self.dimension), model_type=self.model_type,
                              model_script=self.deriv_script, input_script=self.input_script,
                              output_script=self.output_script, dimension=self.dimension)

            A = np.linalg.solve(dist.Jacobian[0], dg.model_eval.Grad)
            norm_g = sp.linalg.norm(A)
            alpha = A / norm_g
            alpha = alpha.squeeze()

            if self.method == 'FORM':
                # 3. calculate first order beta
                beta[k] = -np.inner(u[k, :].T, alpha) + g.model_eval.QOI[0] / norm_g
                # 4. calculate u_{k+1}
                u[k + 1, :] = -beta[k] * alpha
                # next iteration
                if np.linalg.norm(u[k + 1, :] - u[k, :]) <= tol:
                    break

            if self.method == 'SORM':

                # 3. calculate first order beta
                beta_ = -np.inner(u[k, :].T, alpha) + g.model_eval.QOI[0] / norm_g
                Q = np.identity(n=self.dimension)
                Q[:, -1] = u[k, :].T
                [Q, R] = np.linalg.qr(Q)
                Q = np.fliplr(Q)
                B = np.dot(np.dot(Q.T, dg.model_eval.Hessian), Q)
                J = np.identity(n=self.dimension-1) + beta_*B[:self.dimension-1, :self.dimension-1]/norm_g
                correction = 1/np.sqrt(np.linalg.det(J))
                pf = sp.stats.norm.cdf(-beta_)*correction
                beta[k] = -sp.stats.norm.ppf(pf)  # corrected index for second-order

                # 4. calculate u_{k+1}
                u[k + 1, :] = -beta[k] * alpha
                # next iteration
                if np.linalg.norm(u[k + 1, :] - u[k, :]) <= tol:
                    break

        # delete unnecessary data
        u = u[:k + 1, :]

        # compute design point, reliability index and Pf
        u_star = u[-1, :]
        from UQpy.SampleMethods import Nataf
        dist_star = Nataf(samples=u_star, dist_name=self.dist_name,
                          dist_params=self.dist_params, corr_norm=dist.corr_norm)

        x_star = dist_star.samples_x
        beta = beta[k]
        pf = sp.stats.norm.cdf(-beta)

        return u_star, x_star[0], beta, pf, k
Example 11
    def run_subsim_mmh(self):
        step = 0
        n_keep = int(self.p_cond * self.nsamples_ss)

        # Generate the initial samples - Level 0
        if self.samples_init is None:
            x_init = MCMC(dimension=self.dimension, pdf_proposal_type=self.pdf_proposal_type,
                          pdf_proposal_scale=self.pdf_proposal_scale, pdf_target_type=self.pdf_target_type,
                          pdf_target=self.pdf_target, pdf_target_params=self.pdf_target_params,
                          algorithm=self.algorithm, nsamples=self.nsamples_ss, seed=np.zeros(self.dimension))
            self.samples.append(x_init.samples)
        else:
            self.samples.append(self.samples_init)

        g_init = RunModel(samples=self.samples[step], model_type=self.model_type, model_script=self.model_script,
                          input_script=self.input_script, output_script=self.output_script, dimension=self.dimension)

        self.g.append(np.asarray(g_init.model_eval.QOI))
        g_ind = np.argsort(self.g[step])
        self.g_level.append(self.g[step][g_ind[n_keep]])

        # Estimate coefficient of variation of conditional probability of first level
        self.delta2.append(self.cov_sus(step)**2)

        while self.g_level[step] > 0 and step < self.max_level:

            step = step + 1
            self.samples.append(self.samples[step - 1][g_ind[0:n_keep]])
            self.g.append(self.g[step - 1][g_ind[:n_keep]])

            for i in range(self.nsamples_ss-n_keep):
                seed = self.samples[step][i]

                x_mcmc = MCMC(dimension=self.dimension, pdf_proposal_type=self.pdf_proposal_type,
                              pdf_proposal_scale=self.pdf_proposal_scale, pdf_target_type=self.pdf_target_type,
                              pdf_target=self.pdf_target, pdf_target_params=self.pdf_target_params,
                              algorithm=self.algorithm, nsamples=2, seed=seed)

                x_temp = x_mcmc.samples[1].reshape((1, self.dimension))
                g_model = RunModel(samples=x_temp, cpu=1, model_type=self.model_type, model_script=self.model_script,
                                   input_script=self.input_script, output_script=self.output_script,
                                   dimension=self.dimension)

                g_temp = g_model.model_eval.QOI

                # Accept or reject the sample
                if g_temp < self.g_level[step - 1]:
                    self.samples[step] = np.vstack((self.samples[step], x_temp))
                    self.g[step] = np.hstack((self.g[step], g_temp[0]))
                else:
                    self.samples[step] = np.vstack((self.samples[step], self.samples[step][i]))
                    self.g[step] = np.hstack((self.g[step], self.g[step][i]))

            g_ind = np.argsort(self.g[step])
            self.g_level.append(self.g[step][g_ind[n_keep]])
            # Estimate coefficient of variation of conditional probability for the current level
            self.delta2.append(self.cov_sus(step)**2)

        n_fail = len([value for value in self.g[step] if value < 0])
        pf = self.p_cond**step*n_fail/self.nsamples_ss
        cov = np.sum(self.delta2)

        return pf, cov
Example 12
from UQpy.Distributions import Uniform
from UQpy.RunModel import RunModel
from UQpy.SampleMethods import MCS

# Define the distribution objects
d1 = Uniform(loc=0.02, scale=0.06)
d2 = Uniform(loc=0.02, scale=0.01)
d3 = Uniform(loc=0.02, scale=0.01)
d4 = Uniform(loc=0.0025, scale=0.0075)
d5 = Uniform(loc=0.02, scale=0.06)
d6 = Uniform(loc=0.02, scale=0.01)
d7 = Uniform(loc=0.02, scale=0.01)
d8 = Uniform(loc=0.0025, scale=0.0075)

# Draw the samples using MCS
x = MCS(dist_object=[d1, d2, d3, d4, d5, d6, d7, d8],
        nsamples=12,
        random_state=349875)

# Run the model
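# cluster=True hands the runs to the HPC workload manager (UQpy uses srun, per the note in
# Example 1); fmt sets the format of the sample values written into the input template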
run_ = RunModel(samples=x.samples,
                ntasks=6,
                model_script='dyna_script.py',
                input_template='dyna_input.k',
                var_names=['x0', 'y0', 'z0', 'R0', 'x1', 'y1', 'z1', 'R1'],
                model_dir='dyna_test',
                cluster=True,
                verbose=False,
                fmt='{:>10.4f}',
                cores_per_task=12)
Example 13
import numpy as np
import matplotlib.pyplot as plt
from UQpy.RunModel import RunModel
from UQpy.SampleMethods import MCS
from UQpy.Distributions import Uniform

dist1 = Uniform(loc=0.9 * 1e5, scale=0.2 * 1e5)
dist2 = Uniform(loc=1e-2, scale=1e-1)

x = MCS(dist_object=[dist1, dist2],
        nsamples=2,
        random_state=np.random.RandomState(1821),
        verbose=True)
samples = x.samples

model_serial_third_party = RunModel(
    samples=samples,
    model_script='PythonAsThirdParty_model.py',
    input_template='elastic_contact_sphere.py',
    var_names=['k', 'f0'],
    output_script='process_3rd_party_output.py',
    model_object_name='read_output')

qoi = model_serial_third_party.qoi_list
a = 1
Example 14
import time

import numpy as np

from UQpy.SampleMethods import MCMC
from UQpy.RunModel import RunModel


def Rosenbrock(x, params):
    return np.exp(-(100 * (x[1] - x[0]**2)**2 + (1 - x[0])**2) / params[0])


t = time.time()
x = MCMC(dimension=2,
         pdf_proposal_type='Normal',
         pdf_target_type='joint_pdf',
         pdf_target=Rosenbrock,
         pdf_target_params=[20],
         algorithm='MMH',
         jump=100000,
         nsamples=15,
         seed=None)
t_MCMC = time.time() - t
print(t_MCMC)

t = time.time()
z = RunModel(cpu=1,
             model_type='python',
             model_script='python_model.py',
             dimension=2,
             samples=x.samples)
t_run = time.time() - t
print(t_run)

print('Samples', z.model_eval.samples)
print('Solutions', z.model_eval.QOI)