Example #1
import openturns as ot
from scipy.stats import norm  # norm.ppf is used by the QMC branch

def GenerateSample(vectX, N, method="MC"):
    """
    """
    if method == "MC":
        X = vectX.getNumericalSample(N)
    elif method == "QMC":
        dim_in = vectX.getDimension()
        # Uniform quasi-random sample over the unit hypercube
        mySobolSeq = ot.SobolSequence(dim_in)
        U = mySobolSeq.generate(N)
        # Isoprobabilistic transform for each marginal
        #
        # Lengthy calculations: compute quantiles using a double loop
        # distrib = vectX.getDistribution()
        # X = empty((N, dim_in))
        # for i in range(dim_in):
        #     margin_i = distrib.getMarginal(i)
        #     for n in range(N):
        #         u = array(U)[n, i]
        #         Xi = margin_i.computeQuantile(u)
        #         X[n, i] = array(Xi)
        #
        # Alternative: use the inverse isoprobabilistic transform from
        # OpenTURNS object "myDistribution"
        # Xi is normally distributed (N(0,1))
        Xi = norm.ppf(U)
        myDistribution = vectX.getDistribution()
        transfo_inv = myDistribution.getInverseIsoProbabilisticTransformation()
        X = transfo_inv(ot.Sample(Xi))
    return X
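# A minimal usage sketch (illustrative, not from the original project): the
# two-dimensional normal distribution and sample size below are assumptions,
# and the call relies on the same OpenTURNS 1.x API (getNumericalSample) as
# the function above.
myDistribution = ot.Normal([0.0, 0.0], [1.0, 2.0], ot.CorrelationMatrix(2))
vectX = ot.RandomVector(myDistribution)
X_mc = GenerateSample(vectX, 1000, method="MC")    # crude Monte Carlo
X_qmc = GenerateSample(vectX, 1000, method="QMC")  # Sobol' sequence + inverse transform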
Example #2
    def _estimKrigingTheta(self, algoKriging, lowerBound, upperBound, size):
        """
        Estimate the kriging theta values with an initial random search using
        a Sobol sequence of size samples.
        """

        if size > 0:
            # create uniform distribution of the parameters bounds
            dim = len(lowerBound)
            distBoundCol = []
            for i in range(dim):
                distBoundCol += [ot.Uniform(lowerBound[i], upperBound[i])]
            distBound = ot.ComposedDistribution(distBoundCol)

            # set the bounds
            searchInterval = ot.Interval(lowerBound, upperBound)
            algoKriging.setOptimizationBounds(searchInterval)
            # Generate starting points with a low discrepancy sequence
            startingPoint = ot.LowDiscrepancyExperiment(
                ot.SobolSequence(), distBound, size).generate()

            algoKriging.setOptimizationAlgorithm(
                ot.MultiStart(ot.TNC(), startingPoint))
        else:
            algoKriging.setOptimizeParameters(False)

        return algoKriging
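# The same start-point strategy can be reproduced outside the class; a minimal
# sketch, assuming a two-dimensional parameter space with illustrative bounds:
import openturns as ot

lowerBound = [0.1, 0.1]
upperBound = [10.0, 10.0]
distBound = ot.ComposedDistribution(
    [ot.Uniform(lowerBound[i], upperBound[i]) for i in range(len(lowerBound))])
# 16 quasi-random starting points spread over the bounds
startingPoint = ot.LowDiscrepancyExperiment(
    ot.SobolSequence(), distBound, 16).generate()
solver = ot.MultiStart(ot.TNC(), startingPoint)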
Example #3
def compute_roughness_sampling(distribution, size=500000):
    """
    Sampling method for computing Roughness
    This allows comparing sampling & integrating methods
    """
    dimension = distribution.getDimension()
    # note: uniformNd is built here but never used in this function
    uniformNd = ot.ComposedDistribution([ot.Uniform(0, 1) for i in range(dimension)])
    sequence = ot.SobolSequence(dimension)
    experiment = ot.LowDiscrepancyExperiment(sequence, distribution, size, False)
    sample = experiment.generate()
    pdf = distribution.computePDF(sample)
    return pdf.computeMean()[0]
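# Sanity check (illustrative): the roughness of a d-dimensional standard
# normal is (2*sqrt(pi))**(-d), so for d = 2 the estimate should be close to
# 1/(4*pi), about 0.0796.
import math
print(compute_roughness_sampling(ot.Normal(2), size=100000),
      1.0 / (4.0 * math.pi))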
Example #4
def computeCrossingProbability_QMC(b, t, mu_S, covariance, R, delta_t, n_block,
                                   n_iter, CoV):
    X, event = getXEvent(b, t, mu_S, covariance, R, delta_t)
    algo = ot.ProbabilitySimulationAlgorithm(
        event,
        ot.LowDiscrepancyExperiment(ot.SobolSequence(X.getDimension()),
                                    n_block, False))
    algo.setBlockSize(n_block)
    algo.setMaximumOuterSampling(n_iter)
    algo.setMaximumCoefficientOfVariation(CoV)
    algo.run()
    return algo.getResult().getProbabilityEstimate() / delta_t
Example #5
    def scrambled_sobol_generate(self):
        """Scrambled Sobol.

        Scramble function as in Owen (1997).
        """
        # Generate sobol sequence
        samples = ot.LowDiscrepancyExperiment(ot.SobolSequence(),
                                              self.distribution,
                                              self.n_samples)
        r = np.array(samples.generate())

        # Scramble the sequence
        for col in range(self.dim):
            r[:, col] = self.scramble(r[:, col])

        return r
Example #6
    def generate_design(self):
        self.design = pd.DataFrame(index=np.arange(self.n_samples))
        if self.n_factors > 0:
            seq = ot.SobolSequence(self.n_factors)
            self.design = pd.DataFrame(np.array(seq.generate(self.n_samples)))
            self.design.columns = self.factor_map.keys()

            # transforming design into the ranges specified by factor_map
            for k in self.design.columns.values:
                self.design[k] = (self.design[k]
                                  * (self.factor_map[k][1] - self.factor_map[k][0])
                                  + self.factor_map[k][0])
        
        # adding constant components
        for k in self.constant_comps.keys():
            self.design[k] = self.constant_comps[k]*np.ones(len(self.design.index))
        return
Example #7
def generateByLowDiscrepancy(distribution, size, computeSecondOrder=False):
    '''
    Generates the input DOE for the estimator of the Sobol' sensitivity 
    indices.
    Uses a Low Discrepancy sequence.
    '''
    dimension = distribution.getDimension()
    # Create a doubled distribution
    marginalList = [distribution.getMarginal(p) for p in range(dimension)]
    twiceDistribution = ot.ComposedDistribution(marginalList*2)
    # Generates a low discrepancy sequence in twice the dimension
    sequence = ot.SobolSequence(2*dimension)
    experiment = ot.LowDiscrepancyExperiment(sequence, twiceDistribution, size)
    fullDesign = experiment.generate()
    # Split the A and B designs
    A = fullDesign[:,0:dimension] # A
    B = fullDesign[:,dimension:2*dimension] # B
    # Uses the kernel to generate the sample
    design = generateSampleKernel(A,B,computeSecondOrder)
    return design
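# The doubled-dimension A/B construction can be inspected on its own; a
# minimal sketch with an illustrative three-dimensional uniform distribution
# (the generateSampleKernel helper from the original project is not needed):
import openturns as ot

dist = ot.ComposedDistribution([ot.Uniform(0.0, 1.0)] * 3)
dim = dist.getDimension()
twiceDist = ot.ComposedDistribution(
    [dist.getMarginal(p) for p in range(dim)] * 2)
fullDesign = ot.LowDiscrepancyExperiment(
    ot.SobolSequence(2 * dim), twiceDist, 8).generate()
A = fullDesign[:, 0:dim]        # first dim columns
B = fullDesign[:, dim:2 * dim]  # last dim columns
print(A.getDimension(), B.getDimension())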
Example #8
import numpy as np
import openturns as ot
# KdeSampler and Space come from the batman package (their imports are
# elided in the original snippet)

dim = 2
n_sample = 10
sigma = 0.5
sampler = KdeSampler(sample=[[0.5, 0.7]], dim=dim, bw=sigma)
sample_kde = sampler.generate(n_sample)

dists = [ot.Uniform(0, 1) for _ in range(dim)]
dists = ot.ComposedDistribution(dists)
lhs = ot.LHSExperiment(dists, n_sample)
lhs_opt = ot.SimulatedAnnealingLHS(lhs, ot.GeometricProfile(),
                                   ot.SpaceFillingC2())

sample_lhs = np.array(lhs.generate())
sample_lhs_opt = np.array(lhs_opt.generate())
sample_sobol = np.array(ot.SobolSequence(dim).generate(n_sample))

print(f'Discrepancy CD:\n'
      f'-> KDE: {ot.SpaceFillingC2().evaluate(sample_kde)}\n'
      f'-> LHS opt: {ot.SpaceFillingC2().evaluate(sample_lhs_opt)}\n'
      f'-> LHS: {ot.SpaceFillingC2().evaluate(sample_lhs)}\n'
      f'-> Sobol: {ot.SpaceFillingC2().evaluate(sample_sobol)}\n')

print(f'Discrepancy WD:\n'
      f"-> KDE: {Space.discrepancy(sample_kde, method='WD')}\n"
      f"-> LHS opt: {Space.discrepancy(sample_lhs_opt, method='WD')}\n"
      f"-> LHS: {Space.discrepancy(sample_lhs, method='WD')}\n"
      f"-> Sobol: {Space.discrepancy(sample_sobol, method='WD')}\n")

print(f'Discrepancy MD:\n'
      f"-> KDE: {Space.discrepancy(sample_kde, method='MD')}\n"
Example #9
#
# To illustrate these sequences we generate their first 1024 points and compare them with a sample from the pseudo-random generator (Mersenne Twister), as the latter has a higher discrepancy.

# %%
from __future__ import print_function
import openturns as ot
import math as m
import openturns.viewer as viewer
from matplotlib import pylab as plt
ot.Log.Show(ot.Log.NONE)

# %%
# 1. Sobol sequence
dimension = 2
size = 1024
sequence = ot.SobolSequence(dimension)
sample = sequence.generate(size)
graph = ot.Graph("Sobol", "", "", True, "")
cloud = ot.Cloud(sample)
graph.add(cloud)
view = viewer.View(graph)

# %%
# 2. Halton sequence
dimension = 2
sequence = ot.HaltonSequence(dimension)
sample = sequence.generate(size)
graph = ot.Graph("Halton", "", "", True, "")
cloud = ot.Cloud(sample)
graph.add(cloud)
view = viewer.View(graph)
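# %%
# 3. For the announced comparison, a Mersenne Twister pseudo-random sample of
# the same size can be drawn and plotted the same way (a minimal sketch):
distribution = ot.ComposedDistribution([ot.Uniform(0.0, 1.0)] * dimension)
sample = distribution.getSample(size)
graph = ot.Graph("Mersenne Twister", "", "", True, "")
cloud = ot.Cloud(sample)
graph.add(cloud)
view = viewer.View(graph)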
Example #10
import math as m
import openturns as ot
import sys

ot.TESTPREAMBLE()
ot.PlatformInfo.SetNumericalPrecision(3)

m = 10
x = [[0.5 + i] for i in range(m)]

#ot.ResourceMap.SetAsUnsignedInteger( "OptimizationAlgorithm-DefaultMaximumEvaluationNumber", 100)
inVars = ["a", "b", "c", "x"]
formulas = ["a + b * exp(c * x)", "(a * x^2 + b) / (c + x^2)"]
model = ot.SymbolicFunction(inVars, formulas)
p_ref = [2.8, 1.2, 0.5]
params = [0, 1, 2]
modelX = ot.ParametricFunction(model, params, p_ref)
y = modelX(x)
y += ot.Normal([0.0]*2, [0.05]*2, ot.IdentityMatrix(2)).getSample(m)
candidate = [1.0]*3
bootstrapSizes = [0, 100]
for bootstrapSize in bootstrapSizes:
    algo = ot.NonLinearLeastSquaresCalibration(modelX, x, y, candidate)
    algo.setBootstrapSize(bootstrapSize)
    algo.run()
    # To avoid discrepancies between the platforms with or without CMinpack
    print("result (Auto)=", algo.getResult().getParameterMAP())
    algo.setAlgorithm(
        ot.MultiStart(
            ot.TNC(),
            ot.LowDiscrepancyExperiment(
                ot.SobolSequence(),
                ot.Normal(candidate,
                          ot.CovarianceMatrix(ot.Point(candidate).getDimension())),
                ot.ResourceMap.GetAsUnsignedInteger(
                    "NonLinearLeastSquaresCalibration-MultiStartSize")).generate()))
    algo.run()
    # To avoid discrepancies between the platforms with or without CMinpack
    print("result  (TNC)=", algo.getResult().getParameterMAP())
Example #11
import openturns as ot
from matplotlib import pyplot as plt
from openturns.viewer import View
ot.RandomGenerator.SetSeed(0)

# Generate sample with the given plane
distribution = ot.ComposedDistribution([ot.Uniform(0, 1)] * 2)
size = 200
myPlane = ot.LowDiscrepancyExperiment(ot.SobolSequence(), distribution, size)

sample = myPlane.generate()

# Create an empty graph
graph = ot.Graph("Low Discrepancy experiment", "x1", "x2", True, "")

# Create the cloud
cloud = ot.Cloud(sample, "blue", "plus", "")

# Then, draw it
graph.add(cloud)

fig = plt.figure(figsize=(4, 4))
axis = fig.add_subplot(111)
axis.set_xlim(auto=True)
View(graph, figure=fig, axes=[axis], add_legend=False)
Example #12
def mySaltelliSobolRandomizedSequence(distribution, size, model):
    sequence = ot.SobolSequence(distribution.getDimension())
    experiment = ot.LowDiscrepancyExperiment(sequence, distribution, size)
    experiment.setRandomize(True)
    sensitivity_algorithm = ot.SaltelliSensitivityAlgorithm(experiment, model)
    return sensitivity_algorithm
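# A usage sketch (the symbolic model, distribution and size are illustrative):
model = ot.SymbolicFunction(
    ['X1', 'X2', 'X3'],
    ['sin(X1) + 7 * sin(X2)^2 + 0.1 * X3^4 * sin(X1)'])
distribution = ot.ComposedDistribution([ot.Uniform(-3.1416, 3.1416)] * 3)
algo = mySaltelliSobolRandomizedSequence(distribution, 1000, model)
print(algo.getFirstOrderIndices())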
Example #13
    def run(self):
        """
        Launch the algorithm and build the POD models.

        Notes
        -----
        This method launches the iterative algorithm. Once the algorithm stops,
        it builds the POD models: Monte Carlo simulations are performed for each
        defect size with the final classifier model. Eventually, the sample is
        used to compute the mean POD and the POD at the confidence level.
        """

        # Create an initial uniform distribution if not given
        if self._distribution is None:
            inputMin = self._input.getMin()
            inputMin[0] = np.min(self._defectSizes)
            inputMax = self._input.getMax()
            inputMax[0] = np.max(self._defectSizes)
            marginals = [
                ot.Uniform(inputMin[i], inputMax[i]) for i in range(self._dim)
            ]
            self._distribution = ot.ComposedDistribution(marginals)

        # Create the design of experiments of the candidate points where the
        # criterion is computed
        if self._distribution.hasIndependentCopula():
            # with an independent copula, use a low discrepancy experiment as the first DOE
            doeCandidate = ot.LowDiscrepancyExperiment(
                ot.SobolSequence(), self._distribution,
                self._candidateSize).generate()
        else:
            # otherwise use simple Monte Carlo sampling of the distribution
            doeCandidate = self._distribution.getSample(self._candidateSize)

        doeCandidate = np.array(doeCandidate)
        # build the initial classifier model, without optimization

        if self._verbose:
            print('Building the classifier')

        n_ini = int(self._input.getSize())
        self._input = np.array(self._input)
        self._signals = np.hstack(self._signals)

        n_added_points = 0
        algo_iteration = 0

        ## SVC classification case
        if self._classifierType == "svc":
            algo_temp = list(
                map(
                    lambda C, kernel, degree, probability: svm.SVC(
                        C=C,
                        kernel=kernel,
                        degree=degree,
                        probability=probability,
                        coef0=1,
                    ), *self._ClassifierParameters))[0]

        ## Random forest classification case
        if self._classifierType == "rf":
            algo_temp = list(
                map(
                    lambda n_estimators, max_depth, min_samples_split,
                    random_state: ExtraTreesClassifier(
                        n_estimators=n_estimators,
                        max_depth=max_depth,
                        min_samples_split=min_samples_split,
                        random_state=random_state),
                    *self._ClassifierParameters))[0]

        algo_temp.fit(self._input, self._signals)

        list_classifiers = []
        f_iter = algo_temp.predict_proba
        list_classifiers.append(f_iter)
        self._classifierModel = f_iter

        plt.ion()
        # Start the improvement loop
        if self._verbose and self._nMorePoints > 0:
            print('Start the improvement loop')

        while n_added_points < self._nMorePoints:

            # evaluate the classifier probability on the candidate DOE
            probs = f_iter(doeCandidate)[:, 1]

            # get the indices where p_min <= proba(x) < p_max
            ind_p1 = np.where(probs < self._pmax)[0]
            ind_p2 = np.where(probs >= self._pmin)[0]
            ind_p = np.intersect1d(ind_p2, ind_p1)
            ind = ind_p

            # if no indices were found, widen the probability band to [0.1, 0.8)
            if len(ind) == 0:
                ind_p1 = np.where(probs < 0.8)[0]
                ind_p2 = np.where(probs >= 0.1)[0]
                ind_p = np.intersect1d(ind_p2, ind_p1)
                ind = ind_p

            ind_rank = np.argsort(probs[ind])
            quant = [
                0,
                int(len(ind) / 4.),
                int(len(ind) / 2.),
                int(3. * len(ind) / 4.),
                len(ind) - 1
            ]

            ind_bis = ind_rank[quant]
            x_new = doeCandidate[ind[ind_bis], :]
            z_new = np.hstack(self._physicalModel(x_new))

            n_new_temp = len(self._input) + len(x_new)

            # if we exceed the requested number of points, truncate and stop
            if n_new_temp > (n_ini + self._nMorePoints):
                x_new = x_new[:self._nMorePoints + n_ini - len(self._input), :]
                z_new = z_new[:self._nMorePoints + n_ini - len(self._input)]

            self._input = np.vstack((self._input, x_new))
            self._signals = np.hstack((self._signals, z_new))

            n_added_points = n_new_temp - n_ini
            algo_iteration = algo_iteration + 1

            if self._classifierType == "svc":
                algo_temp = list(
                    map(
                        lambda C, kernel, degree, probability: svm.SVC(
                            C=C,
                            kernel=kernel,
                            degree=degree,
                            probability=probability,
                            coef0=1), *self._ClassifierParameters))[0]

            if self._classifierType == "rf":
                algo_temp = list(
                    map(
                        lambda n_estimators, max_depth, min_samples_split,
                        random_state: ExtraTreesClassifier(
                            n_estimators=n_estimators,
                            max_depth=max_depth,
                            min_samples_split=min_samples_split,
                            random_state=random_state),
                        *self._ClassifierParameters))[0]

            # Fit the classifier on self._input, self._signals
            algo_temp.fit(self._input, self._signals)

            self._confMat = np.zeros((2, 2))
            for classifier in list_classifiers:
                conf_temp = 1. * confusion_matrix(
                    self._signals,
                    classifier(self._input)[:, 1] >= 0.5)
                conf_temp = 1. * conf_temp / conf_temp.sum(axis=0)
                self._confMat = conf_temp + self._confMat

            self._confMat = 1. * self._confMat / len(list_classifiers)
            classif_algo_temp = algo_temp.predict_proba

            p11 = self._confMat[1, 1]
            p10 = self._confMat[1, 0]

            def agg_classifier(x_in):
                c = p11 - p10
                p1_bayes = 1. / c * (classif_algo_temp(x_in)[:, 1] - p10)
                p1_bayes = np.vstack(
                    np.min(np.array([
                        np.max(np.array([p1_bayes,
                                         np.zeros(len(p1_bayes))]),
                               axis=0),
                        np.ones(len(p1_bayes))
                    ]),
                           axis=0))
                return (np.array([1 - p1_bayes, p1_bayes]).T)[0]

            f_iter = agg_classifier
            list_classifiers.append(f_iter)
            self._classifierModel = f_iter

            if self._verbose:
                updateProgress(n_added_points - 1, self._nMorePoints,
                               'Adding points')

            if self._graph:
                self._PODPerDefect = self._computePOD(self._defectSizes,
                                                      agg_classifier)
                # create the interpolate function of the POD model
                meanPOD = self._PODPerDefect.computeMean()
                interpModel = interp1d(self._defectSizes,
                                       np.array(meanPOD),
                                       kind='linear')
                self._PODmodel = ot.PythonFunction(1, 1, interpModel)
                # The POD at confidence level is built in getPODCLModel() directly
                fig, ax = self.drawPOD(self._probabilityLevel,
                                       self._confidenceLevel)
                plt.draw()
                plt.pause(0.001)
                plt.show()
                if self._graphDirectory is not None:
                    fig.savefig(os.path.join(self._graphDirectory,
                                             'AdaptiveHitMissPOD_') +
                                str(algo_iteration),
                                bbox_inches='tight',
                                transparent=True)

        self._input = ot.NumericalSample(self._input)
        self._signals = ot.NumericalSample(np.vstack(self._signals))
        # Compute the sample predicted for each defect sizes
        self._PODPerDefect = self._computePOD(self._defectSizes,
                                              self._classifierModel)
        # compute the POD for all defect sizes
        meanPOD = self._PODPerDefect.computeMean()
        # create the interpolate function of the POD model
        interpModel = interp1d(self._defectSizes,
                               np.array(meanPOD),
                               kind='linear')
        self._PODmodel = ot.PythonFunction(1, 1, interpModel)

        # The POD at confidence level is built in getPODCLModel() directly

        # remove the interactive plotting
        plt.ioff()
Example #14
        # Asymptotic confidence interval
        sensitivity_algorithm.setUseAsymptoticDistribution(True)
        interval_fo_asymptotic = sensitivity_algorithm.getFirstOrderIndicesInterval()
        interval_to_asymptotic = sensitivity_algorithm.getTotalOrderIndicesInterval()
        print("asymptotic intervals:")
        print("First order indices distribution = ",
              sensitivity_algorithm.getFirstOrderIndicesDistribution())
        print("Total order indices distribution = ",
              sensitivity_algorithm.getTotalOrderIndicesDistribution())
        print("First order indices interval = ", interval_fo_asymptotic)
        print("Total order indices interval = ", interval_to_asymptotic)

# with experiment
sequence = ot.SobolSequence(input_dimension)
experiment = ot.LowDiscrepancyExperiment(
    sequence,
    ot.ComposedDistribution([ot.Uniform(0.0, 1.0)] * input_dimension), size)
sensitivity_algorithm = ot.SaltelliSensitivityAlgorithm(experiment, model)
print(sensitivity_algorithm.getFirstOrderIndices())

# multi variate model
model_aggregated = ot.SymbolicFunction(
    ['X1', 'X2', 'X3'],
    ['2*X1 + X2 - 3*X3 + 0.3*X1*X2', '-5*X1 + 4*X2 - 0.8*X2*X3 + 2*X3'])
distribution_aggregated = ot.ComposedDistribution([ot.Uniform()] * 3)
inputDesign = ot.SobolIndicesExperiment(distribution_aggregated,
                                        size).generate()
outputDesign = model_aggregated(inputDesign)
# Case 1: Estimation of sensitivity using the estimator and no bootstrap
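# A plausible continuation (an assumption, mirroring the usual OpenTURNS
# pattern for sample-based estimation of aggregated Sobol' indices):
sensitivity_algorithm = ot.SaltelliSensitivityAlgorithm(
    inputDesign, outputDesign, size)
print(sensitivity_algorithm.getAggregatedFirstOrderIndices())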
Example #15
    def _estimKrigingTheta(self, algoKriging, lowerBound, upperBound, size):
        """
        Estimate the kriging theta values with an initial random search using
        a Sobol sequence of size samples.
        """
        # get input parameters of the kriging algorithm
        X = algoKriging.getInputSample()
        Y = algoKriging.getOutputSample()

        algoKriging.run()
        krigingResult = algoKriging.getResult()
        covarianceModel = krigingResult.getCovarianceModel()
        basis = krigingResult.getBasisCollection()
        if LooseVersion(ot.__version__) == '1.9':
            llf = algoKriging.getReducedLogLikelihoodFunction()
        else:
            llf = algoKriging.getLogLikelihoodFunction()

        # create uniform distribution of the parameters bounds
        dim = len(lowerBound)
        distBoundCol = []
        for i in range(dim):
            distBoundCol += [ot.Uniform(lowerBound[i], upperBound[i])]
        distBound = ot.ComposedDistribution(distBoundCol)

        if size > 0:
            # Generate starting points with a low discrepancy sequence
            thetaStart = ot.LowDiscrepancyExperiment(ot.SobolSequence(),
                                                     distBound,
                                                     size).generate()
            # Get the best theta from the maximum llf value
            llfValue = llf(thetaStart)
            indexMax = int(np.argmax(llfValue))
            bestTheta = thetaStart[indexMax]

            # update theta after random search
            if LooseVersion(ot.__version__) == '1.6':
                covarianceModel.setScale(bestTheta)
            elif LooseVersion(ot.__version__) > '1.6':
                # optimize theta and sigma in ot 1.8
                covarianceModel.setScale(bestTheta[:-1])
                covarianceModel.setAmplitude([bestTheta[-1]])

        # Now the KrigingAlgorithm is used to optimize the likelihood using a
        # good starting point
        if LooseVersion(ot.__version__) == "1.9":
            algoKriging = ot.KrigingAlgorithm(X, Y, covarianceModel, basis)
        else:
            algoKriging = ot.KrigingAlgorithm(X, Y, basis, covarianceModel,
                                              True)

        # set TNC optim
        searchInterval = ot.Interval(lowerBound, upperBound)
        if LooseVersion(ot.__version__) == '1.6':
            optimizer = ot.TNC()
            optimizer.setBoundConstraints(searchInterval)
            algoKriging.setOptimizer(optimizer)
        elif LooseVersion(ot.__version__) in ['1.7', '1.8']:
            optimizer = algoKriging.getOptimizationSolver()
            problem = optimizer.getProblem()
            problem.setBounds(searchInterval)
            optimizer.setProblem(problem)
            algoKriging.setOptimizationSolver(optimizer)
        elif LooseVersion(ot.__version__) == '1.9':
            algoKriging.setOptimizationBounds(searchInterval)

        return algoKriging
Example #16
    def __init__(self, n_samples, bounds, kind, dists=None, discrete=None):
        """Initialize the DOE generation.

        In case of :attr:`kind` is ``uniform``, :attr:`n_samples` is decimated
        in order to have the same number of points in all dimensions.

        If :attr:`kind` is ``discrete``, a joint distribution is built from a
        discrete uniform distribution and the continuous distributions.

        Another possibility is to set a list of PDF to sample from. Thus one
        can do: `dists=['Uniform(15., 60.)', 'Normal(4035., 400.)']`. If not
        set, uniform distributions are used.

        :param int n_samples: number of samples.
        :param array_like bounds: Space's corners [[min, n dim], [max, n dim]]
        :param str kind: Sampling method; if a string, it can be one of
          ['halton', 'sobol', 'faure', '[o]lhs[c]', 'sobolscramble', 'uniform',
          'discrete'], otherwise it can be a list of openturns distributions.
        :param lst(str) dists: List of valid openturns distributions as string.
        :param int discrete: Position of the discrete variable.
        """
        self.n_samples = n_samples
        self.bounds = np.asarray(bounds)
        self.kind = kind
        self.dim = self.bounds.shape[1]

        self.scaler = preprocessing.MinMaxScaler()
        self.scaler.fit(self.bounds)

        if dists is None:
            dists = [ot.Uniform(float(self.bounds[0][i]),
                                float(self.bounds[1][i]))
                     for i in range(self.dim)]
        else:
            dists = bat.space.dists_to_ot(dists)

        if discrete is not None:
            # Creating uniform discrete distribution for OT
            disc_list = [[i] for i in range(int(self.bounds[0, discrete]),
                                            int(self.bounds[1, discrete] + 1))]
            disc_dist = ot.UserDefined(disc_list)

            dists.pop(discrete)
            dists.insert(discrete, disc_dist)

        # Joint distribution
        self.distribution = ot.ComposedDistribution(dists)

        if self.kind == 'halton':
            self.sequence_type = ot.LowDiscrepancyExperiment(ot.HaltonSequence(),
                                                             self.distribution,
                                                             self.n_samples)
        elif self.kind == 'sobol':
            self.sequence_type = ot.LowDiscrepancyExperiment(ot.SobolSequence(),
                                                             self.distribution,
                                                             self.n_samples)
        elif self.kind == 'faure':
            self.sequence_type = ot.LowDiscrepancyExperiment(ot.FaureSequence(),
                                                             self.distribution,
                                                             self.n_samples)
        elif (self.kind == 'lhs') or (self.kind == 'lhsc'):
            self.sequence_type = ot.LHSExperiment(self.distribution, self.n_samples)
        elif self.kind == 'olhs':
            lhs = ot.LHSExperiment(self.distribution, self.n_samples)
            self.sequence_type = ot.SimulatedAnnealingLHS(lhs, ot.GeometricProfile(),
                                                          ot.SpaceFillingC2())
        elif self.kind == 'saltelli':
            # Only relevant for computation of Sobol' indices
            size = self.n_samples // (2 * self.dim + 2)  # N(2*dim + 2)
            self.sequence_type = ot.SobolIndicesExperiment(self.distribution,
                                                           size, True).generate()
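# A hypothetical instantiation ('Doe' is an assumed class name for the
# __init__ above; the bounds and sizes are illustrative):
doe = Doe(n_samples=32,
          bounds=[[15.0, 2000.0], [60.0, 6000.0]],
          kind='sobol')
sample = doe.sequence_type.generate()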
Example #17
#
# To illustrate these sequences we generate their first 1024 points and compare them with a sample from the pseudo-random generator (Mersenne Twister), as the latter has a higher discrepancy.

# %%
import openturns as ot
import math as m
import openturns.viewer as viewer
from matplotlib import pylab as plt

ot.Log.Show(ot.Log.NONE)

# %%
# 1. Sobol sequence
dimension = 2
size = 1024
sequence = ot.SobolSequence(dimension)
sample = sequence.generate(size)
graph = ot.Graph("Sobol", "", "", True, "")
cloud = ot.Cloud(sample)
graph.add(cloud)
view = viewer.View(graph)

# %%
# 2. Halton sequence
dimension = 2
sequence = ot.HaltonSequence(dimension)
sample = sequence.generate(size)
graph = ot.Graph("Halton", "", "", True, "")
cloud = ot.Cloud(sample)
graph.add(cloud)
view = viewer.View(graph)
Example #18
    globalErrorCovariance[i, i] = 2.0 + (1.0 + i) * (1.0 + i)
    for j in range(i):
        globalErrorCovariance[i, j] = 1.0 / (1.0 + i + j)
bootstrapSizes = [0, 100]
for bootstrapSize in bootstrapSizes:
    algo = ot.GaussianNonLinearCalibration(modelX, x, y, candidate,
                                           priorCovariance, errorCovariance)
    algo.setBootstrapSize(bootstrapSize)
    algo.run()
    # To avoid discrepancies between the platforms with or without CMinpack
    print("result   (Auto)=", algo.getResult().getParameterMAP())
    algo.setOptimizationAlgorithm(
        ot.MultiStart(
            ot.TNC(),
            ot.LowDiscrepancyExperiment(
                ot.SobolSequence(),
                ot.Normal(
                    candidate,
                    ot.CovarianceMatrix(ot.Point(candidate).getDimension())),
                ot.ResourceMap.GetAsUnsignedInteger(
                    "GaussianNonLinearCalibration-MultiStartSize")).generate())
    )
    algo.run()
    # To avoid discrepancies between the platforms with or without CMinpack
    print("result    (TNC)=", algo.getResult().getParameterMAP())
    algo = ot.GaussianNonLinearCalibration(modelX, x, y, candidate,
                                           priorCovariance,
                                           globalErrorCovariance)
    algo.setBootstrapSize(bootstrapSize)
    algo.run()
    print("result (Global)=", algo.getResult().getParameterMAP())
Example #19
import openturns as ot
from matplotlib import pyplot as plt
from openturns.viewer import View

ot.RandomGenerator.SetSeed(0)

# Generate sample with the given plane
distribution = ot.ComposedDistribution([ot.Uniform(0, 1)] * 2)
size = 200
experiment = ot.LowDiscrepancyExperiment(ot.SobolSequence(), distribution,
                                         size)

sample = experiment.generate()

# Create an empty graph
graph = ot.Graph("Low Discrepancy experiment", "x1", "x2", True, "")

# Create the cloud
cloud = ot.Cloud(sample, "blue", "plus", "")

# Then, draw it
graph.add(cloud)

fig = plt.figure(figsize=(4, 4))
axis = fig.add_subplot(111)
axis.set_xlim(auto=True)
View(graph, figure=fig, axes=[axis], add_legend=False)
Example #20
# %%
# Create the problem and set the optimization algorithm
# -----------------------------------------------------

# %%
problem = ot.OptimizationProblem(rastrigin)

# %%
# We use the :class:`~openturns.Cobyla` algorithm and run it from multiple starting points selected by a :class:`~openturns.LowDiscrepancyExperiment`.

# %%
size = 64
distribution = ot.ComposedDistribution(
    [ot.Uniform(lowerbound[0], upperbound[0])] * dim)
experiment = ot.LowDiscrepancyExperiment(
    ot.SobolSequence(), distribution, size)
solver = ot.MultiStart(ot.Cobyla(problem), experiment.generate())

# %%
# Visualize the starting points of the optimization algorithm
# -----------------------------------------------------------

# %%
startingPoints = solver.getStartingSample()
graph = rastrigin.draw(lowerbound, upperbound, [100]*dim)
graph.setTitle("Rastrigin function")
cloud = ot.Cloud(startingPoints)
cloud.setPointStyle("bullet")
cloud.setColor("black")
graph.add(cloud)
graph.setLegends([""])
fig.set_size_inches(10, 10)

Example #21
# %%
# We see that this LHS is optimized: it fills the space more evenly than a non-optimized LHS generally does.

# %%
# Sobol' low discrepancy sequence
# -------------------------------

# %%
dim = 2
distribution = ot.ComposedDistribution([ot.Uniform()]*dim)
bounds = distribution.getRange()

# %%
sequence = ot.SobolSequence(dim)

# %%
samplesize = 2**5 # Sobol' sequences are in base 2
experiment = ot.LowDiscrepancyExperiment(sequence, distribution, samplesize, False)
sample = experiment.generate()

# %%
samplesize

# %%
subdivisions = [2**2, 2**1]
fig = otv.PlotDesign(sample, bounds, subdivisions);
fig.set_size_inches(6, 6)

# %%
Example #22
    def run(self):
        """
        Launch the algorithm and build the POD models.

        Notes
        -----
        This method launches the iterative algorithm. First the censored data
        are filtered if needed. The Box Cox transformation is performed if it is
        enabled. Then the enrichment of the design of experiments is performed.
        Once the algorithm stops, it builds the POD models: conditional samples are
        simulated for each defect size, then the distributions of the probability
        estimator (for MC simulation) are built. Eventually, a sample of this
        distribution is used to compute the mean POD and the POD at the confidence
        level.
        """

        # Create an initial uniform distribution if not given
        if self._distribution is None:
            inputMin = self._input.getMin()
            inputMin[0] = np.min(self._defectSizes)
            inputMax = self._input.getMax()
            inputMax[0] = np.max(self._defectSizes)
            marginals = [ot.Uniform(inputMin[i], inputMax[i]) for i in range(self._dim)]
            self._distribution = ot.ComposedDistribution(marginals)

        # Create the design of experiments of the candidate points where the
        # criterion is computed
        if self._distribution.hasIndependentCopula():
            # with an independent copula, use a low discrepancy experiment as the first DOE
            doeCandidate = ot.LowDiscrepancyExperiment(ot.SobolSequence(), 
                            self._distribution, self._candidateSize).generate()
        else:
            # otherwise use simple Monte Carlo sampling
            doeCandidate = self._distribution.getSample(self._candidateSize)

        # build initial kriging model
        # build the kriging model without optimization
        algoKriging = self._buildKrigingAlgo(self._input, self._signals)
        if self._verbose:
            print('Building the kriging model')
            print('Optimization of the covariance model parameters...')

        if LooseVersion(ot.__version__) == '1.9':
            llDim = algoKriging.getReducedLogLikelihoodFunction().getInputDimension()
        else:
            llDim = algoKriging.getLogLikelihoodFunction().getInputDimension()
        lowerBound = [0.001] * llDim
        upperBound = [50] * llDim               
        algoKriging = self._estimKrigingTheta(algoKriging,
                                              lowerBound, upperBound,
                                              self._initialStartSize)
        algoKriging.run()

        # Get kriging results
        self._krigingResult = algoKriging.getResult()
        self._covarianceModel = self._krigingResult.getCovarianceModel()
        self._basis = self._krigingResult.getBasisCollection()
        metamodel = self._krigingResult.getMetaModel()

        self._Q2 = self._computeQ2(self._input, self._signals, self._krigingResult)
        if self._verbose:
            print('Kriging validation Q2 (>0.9): {:0.4f}\n'.format(self._Q2))

        plt.ion()
        # Start the improvement loop
        iteration = 0
        while iteration < self._nIteration:
            iteration += 1
            if self._verbose:
                print('Iteration : {}/{}'.format(iteration, self._nIteration))

            # compute the POD (ptrue = pn-1) for bias reduction in the criterion
            # Monte Carlo for all defect sizes in a vectorized way.
            # get Sample for all parameters except the defect size
            samplePred = self._distribution.getSample(self._samplingSize)[:,1:]
            fullSamplePred = ot.NumericalSample(self._samplingSize * self._defectNumber,
                                                self._dim)
            # Add the defect sizes as first value 
            for i, defect in enumerate(self._defectSizes):
                fullSamplePred[self._samplingSize*i:self._samplingSize*(i+1), :] = \
                                        self._mergeDefectInX(defect, samplePred)
            meanPredictionSample = metamodel(fullSamplePred)
            meanPredictionSample = np.reshape(meanPredictionSample, (self._samplingSize,
                                                    self._defectNumber), 'F')
            # compute the POD for all defect sizes
            currentPOD = np.mean(meanPredictionSample > self._detectionBoxCox, axis=0)

            # Compute the criterion for all candidates in the candidate DOE
            criterion = 1000000000  # large initial value
            for icand, candidate in enumerate(doeCandidate):

                # add the current candidate to the kriging doe
                inputAugmented = self._input[:]
                inputAugmented.add(candidate)
                signalsAugmented = self._signals[:]
                # predict the signal value of the candidate using the current
                # kriging model
                signalsAugmented.add(metamodel(candidate))
                # create a temporary kriging model with the new doe and without
                # updating the covariance model parameters
                if LooseVersion(ot.__version__) == '1.9':
                    algoKrigingTemp = ot.KrigingAlgorithm(inputAugmented, signalsAugmented,
                                                          self._covarianceModel,
                                                          self._basis,
                                                          True)
                else:
                    algoKrigingTemp = ot.KrigingAlgorithm(inputAugmented, signalsAugmented,
                                                          self._basis,
                                                          self._covarianceModel,
                                                          True)
                if LooseVersion(ot.__version__) > '1.6':
                    optimizer = algoKrigingTemp.getOptimizationSolver()
                    optimizer.setMaximumIterationNumber(0)
                    algoKrigingTemp.setOptimizationSolver(optimizer)

                algoKrigingTemp.run()
                krigingResultTemp = algoKrigingTemp.getResult()

                # compute the criterion for all defect size
                crit = []
                # save results, used to compute the PODModel and PODCLModel
                PODPerDefect = ot.NumericalSample(self._simulationSize *
                                         self._samplingSize, self._defectNumber)
                for idef, defect in enumerate(self._defectSizes):
                    podSample = self._computePODSamplePerDefect(defect,
                        self._detectionBoxCox, krigingResultTemp,
                        self._distribution, self._simulationSize, self._samplingSize)
                    PODPerDefect[:, idef] = podSample

                    meanPOD = podSample.computeMean()[0]
                    varPOD = podSample.computeVariance()[0]
                    crit.append(varPOD + (meanPOD - currentPOD[idef])**2)
                # compute the criterion aggregated for all defect sizes
                newCriterion = np.sqrt(np.mean(crit))

                # check if the result is better or not
                if newCriterion < criterion:
                    self._PODPerDefect = PODPerDefect
                    criterion = newCriterion
                    indexOpt = icand
                
                if self._verbose:
                    updateProgress(icand, int(doeCandidate.getSize()), 'Computing criterion')

            # get the best candidate
            candidateOpt = doeCandidate[indexOpt]
            # add new point to DOE
            self._input.add(candidateOpt)
            # add the signal computed by the physical model
            if self._boxCox:
                self._signals.add(self._boxCoxTransform(self._physicalModel(candidateOpt)))
            else:
                self._signals.add(self._physicalModel(candidateOpt))
            # remove added candidate from the doeCandidate
            doeCandidate.erase(indexOpt)
            if self._verbose:
                print('Criterion value : {:0.4f}'.format(criterion))
                print('Added point : {}'.format(candidateOpt))
                print('Update the kriging model')

            # update the kriging model without optimization
            algoKriging = self._buildKrigingAlgo(self._input, self._signals)
            if LooseVersion(ot.__version__) == '1.7':
                optimizer = algoKriging.getOptimizationSolver()
                optimizer.setMaximumIterationNumber(0)
                algoKriging.setOptimizationSolver(optimizer)
            elif LooseVersion(ot.__version__) == '1.8':
                algoKriging.setOptimizeParameters(False)

            algoKriging.run()

            self._Q2 = self._computeQ2(self._input, self._signals, algoKriging.getResult())

            # Check the quality of the kriging model and re-optimize if needed
            if self._Q2 < 0.95:
                if self._verbose:
                    print('Optimization of the covariance model parameters...')

                if LooseVersion(ot.__version__) == '1.9':
                    llDim = algoKriging.getReducedLogLikelihoodFunction().getInputDimension()
                else:
                    llDim = algoKriging.getLogLikelihoodFunction().getInputDimension()
                lowerBound = [0.001] * llDim
                upperBound = [50] * llDim               
                algoKriging = self._estimKrigingTheta(algoKriging,
                                                      lowerBound, upperBound,
                                                      self._initialStartSize)
                algoKriging.run()

            # Get kriging results
            self._krigingResult = algoKriging.getResult()
            self._covarianceModel = self._krigingResult.getCovarianceModel()
            self._basis = self._krigingResult.getBasisCollection()
            metamodel = self._krigingResult.getMetaModel()

            self._Q2 = self._computeQ2(self._input, self._signals, self._krigingResult)
            if self._verbose:
                print('Kriging validation Q2 (>0.9): {:0.4f}'.format(self._Q2))

            if self._graph:
                # create the interpolate function of the POD model
                meanPOD = self._PODPerDefect.computeMean()
                interpModel = interp1d(self._defectSizes, np.array(meanPOD), kind='linear')
                self._PODmodel = ot.PythonFunction(1, 1, interpModel)
                # The POD at confidence level is built in getPODCLModel() directly
                fig, ax = self.drawPOD(self._probabilityLevel, self._confidenceLevel)
                plt.draw()
                plt.pause(0.001)
                plt.show()
                if self._graphDirectory is not None:
                    fig.savefig(os.path.join(self._graphDirectory, 'AdaptiveSignalPOD_')+str(iteration),
                                bbox_inches='tight', transparent=True)

        # Compute the final POD with the last updated kriging model
        if self._verbose:
            print('\nStart computing the POD with the last updated kriging model')
        # compute the sample containing the POD values for all defect sizes
        self._PODPerDefect = ot.NumericalSample(self._simulationSize *
                                         self._samplingSize, self._defectNumber)
        for i, defect in enumerate(self._defectSizes):
            self._PODPerDefect[:, i] = self._computePODSamplePerDefect(defect,
                self._detectionBoxCox, self._krigingResult, self._distribution,
                self._simulationSize, self._samplingSize)
            if self._verbose:
                updateProgress(i, self._defectNumber, 'Computing POD per defect')

        # compute the mean POD 
        meanPOD = self._PODPerDefect.computeMean()
        # create the interpolate function of the POD model
        interpModel = interp1d(self._defectSizes, np.array(meanPOD), kind='linear')
        self._PODmodel = ot.PythonFunction(1, 1, interpModel)

        # The POD at confidence level is built in getPODCLModel() directly

        # remove the interactive plotting
        plt.ioff()
Example #23
import openturns as ot
from openturns.viewer import View

# Sobol
d = ot.LowDiscrepancyExperiment(ot.SobolSequence(), ot.ComposedDistribution([ot.Uniform()]*3), 32)
s = d.generate()
s.setDescription(["X1", "X2", "X3"])
g = ot.Graph()
g.setTitle("Low discrepancy experiment")
g.setGridColor("black")
p = ot.Pairs(s)
g.add(p)
View(g)