Example #1
    def _estimKrigingTheta(self, algoKriging, lowerBound, upperBound, size):
        """
        Estimate the kriging theta values with an initial random search over
        a Sobol' sequence of ``size`` samples.
        """

        if size > 0:
            # create a uniform distribution over the parameter bounds
            dim = len(lowerBound)
            distBoundCol = []
            for i in range(dim):
                distBoundCol += [ot.Uniform(lowerBound[i], upperBound[i])]
            distBound = ot.ComposedDistribution(distBoundCol)

            # set the bounds
            searchInterval = ot.Interval(lowerBound, upperBound)
            algoKriging.setOptimizationBounds(searchInterval)
            # Generate starting points with a low discrepancy sequence
            startingPoint = ot.LowDiscrepancyExperiment(
                ot.SobolSequence(), distBound, size).generate()

            algoKriging.setOptimizationAlgorithm(
                ot.MultiStart(ot.TNC(), startingPoint))
        else:
            algoKriging.setOptimizeParameters(False)

        return algoKriging
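
For context, here is a minimal self-contained sketch of the same multi-start pattern; the bounds and sample size below are illustrative assumptions, not values from the project above:

import openturns as ot

# illustrative 2-d hyperparameter bounds
lowerBound = [0.1, 0.1]
upperBound = [10.0, 10.0]
distBound = ot.ComposedDistribution(
    [ot.Uniform(lowerBound[i], upperBound[i]) for i in range(2)])
# Sobol' points inside the bounds serve as starting points for TNC
startingPoints = ot.LowDiscrepancyExperiment(
    ot.SobolSequence(), distBound, 20).generate()
solver = ot.MultiStart(ot.TNC(), startingPoints)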
Example #2
def compute_roughness_sampling(distribution, size=500000):
    """
    Sampling method for computing Roughness
    This allows comparing sampling & integrating methods
    """
    dimension = distribution.getDimension()
    sequence = ot.SobolSequence(dimension)
    experiment = ot.LowDiscrepancyExperiment(sequence, distribution, size, False)
    sample = experiment.generate()
    pdf = distribution.computePDF(sample)
    return pdf.computeMean()[0]
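
A hypothetical usage sketch: the roughness of a distribution is the integral of its squared density, i.e. E[p(X)], which is what the mean PDF above estimates. For a standard d-dimensional Normal this integral is (4*pi)^(-d/2), so the estimate can be checked:

import openturns as ot

dist = ot.Normal(2)  # standard bivariate Normal
estimate = compute_roughness_sampling(dist, size=2**14)
print(estimate)  # should be close to 1/(4*pi) ~ 0.0796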
def computeCrossingProbability_QMC(b, t, mu_S, covariance, R, delta_t, n_block,
                                   n_iter, CoV):
    X, event = getXEvent(b, t, mu_S, covariance, R, delta_t)
    algo = ot.ProbabilitySimulationAlgorithm(
        event,
        ot.LowDiscrepancyExperiment(ot.SobolSequence(X.getDimension()),
                                    n_block, False))
    algo.setBlockSize(n_block)
    algo.setMaximumOuterSampling(n_iter)
    algo.setMaximumCoefficientOfVariation(CoV)
    algo.run()
    return algo.getResult().getProbabilityEstimate() / delta_t
Example #4
    def scrambled_sobol_generate(self):
        """Scrambled Sobol.

        Scramble function as in Owen (1997).
        """
        # Generate sobol sequence
        samples = ot.LowDiscrepancyExperiment(ot.SobolSequence(),
                                              self.distribution,
                                              self.n_samples)
        r = np.array(samples.generate())

        # Scramble the sequence
        for col in range(self.dim):
            r[:, col] = self.scramble(r[:, col])

        return r
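
The scramble helper is defined elsewhere in the source project. A minimal stand-in is sketched below (assuming numpy is imported as np); it applies a random base-2 digital shift, which randomizes the net but is weaker than the Owen (1997) nested scrambling named in the docstring:

    def scramble(self, column, bits=30):
        # interpret each coordinate as a bits-digit base-2 integer
        ints = (np.asarray(column) * 2**bits).astype(np.int64)
        # one random XOR shift applied to every point (a digital shift,
        # not Owen's nested scrambling)
        shift = np.random.randint(0, 2**bits, dtype=np.int64)
        return (ints ^ shift) / float(2**bits)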
Example #5
    def _generateSample(self, **kwargs):
        """Generation of two samples A and B using diverse methods
        """
        distribution = self.composedDistribution
        method = kwargs.get('method', 'MonteCarlo')
        N2 = 2 * self.size
        if method == 'MonteCarlo':
            sample = distribution.getSample(N2)
        elif method == 'LHS':
            lhsExp = ot.LHSExperiment(distribution,
                                      N2,
                                      False,  # alwaysShuffle
                                      True)   # randomShift
            sample = lhsExp.generate()
        elif method == 'QMC':
            restart = True
            sequences = {'Faure': ot.FaureSequence,
                         'Halton': ot.HaltonSequence,
                         'ReverseHalton': ot.ReverseHaltonSequence,
                         'Haselgrove': ot.HaselgroveSequence,
                         'Sobol': ot.SobolSequence}
            if 'sequence' in kwargs:
                # fall back to Sobol' if the requested name is unknown
                seq = sequences.get(kwargs['sequence'], ot.SobolSequence)
            else:
                print('sequence undefined for low discrepancy experiment, '
                      'default: SobolSequence')
                print("'sequence' arguments: 'Faure', 'Halton', "
                      "'ReverseHalton', 'Haselgrove', 'Sobol'")
                seq = ot.SobolSequence
            LDExperiment = ot.LowDiscrepancyExperiment(seq(),
                                                       distribution,
                                                       N2,
                                                       restart)
            LDExperiment.setRandomize(False)
            sample = LDExperiment.generate()
        sample = ot.Sample(sample)
        self._sample_A = sample[:self.size, :]
        self._sample_B = sample[self.size:, :]
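
The QMC branch above can be exercised in isolation; a sketch with an illustrative distribution and size:

import openturns as ot

distribution = ot.ComposedDistribution([ot.Uniform(0.0, 1.0)] * 3)
size = 100
ldExp = ot.LowDiscrepancyExperiment(ot.HaltonSequence(), distribution,
                                    2 * size, True)  # restart=True
ldExp.setRandomize(False)
sample = ldExp.generate()
sample_A, sample_B = sample[:size, :], sample[size:, :]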
Example #6
def generateByLowDiscrepancy(distribution, size, computeSecondOrder=False):
    '''
    Generates the input DOE for the estimator of the Sobol' sensitivity 
    indices.
    Uses a Low Discrepancy sequence.
    '''
    dimension = distribution.getDimension()
    # Create a doubled distribution
    marginalList = [distribution.getMarginal(p) for p in range(dimension)]
    twiceDistribution = ot.ComposedDistribution(marginalList*2)
    # Generates a low discrepancy sequence in twice the dimension
    sequence = ot.SobolSequence(2*dimension)
    experiment = ot.LowDiscrepancyExperiment(sequence, twiceDistribution, size)
    fullDesign = experiment.generate()
    # Split the A and B designs
    A = fullDesign[:,0:dimension] # A
    B = fullDesign[:,dimension:2*dimension] # B
    # Uses the kernel to generate the sample
    design = generateSampleKernel(A,B,computeSecondOrder)
    return design
# The beginning of this excerpt was truncated; a minimal reconstruction of the
# enclosing loop (it assumes the sample size m is defined earlier and relies on
# ot.CovarianceMatrix defaulting to ones on the diagonal):
globalErrorCovariance = ot.CovarianceMatrix(2 * m)
for i in range(2 * m):
    for j in range(i):
        globalErrorCovariance[i, j] = 1.0 / (1.0 + i + j)
bootstrapSizes = [0, 100]
for bootstrapSize in bootstrapSizes:
    algo = ot.GaussianNonLinearCalibration(modelX, x, y, candidate,
                                           priorCovariance, errorCovariance)
    algo.setBootstrapSize(bootstrapSize)
    algo.run()
    # To avoid discrepancy between the platforms with or without CMinpack
    print("result   (Auto)=", algo.getResult().getParameterMAP())
    algo.setOptimizationAlgorithm(
        ot.MultiStart(
            ot.TNC(),
            ot.LowDiscrepancyExperiment(
                ot.SobolSequence(),
                ot.Normal(
                    candidate,
                    ot.CovarianceMatrix(ot.Point(candidate).getDimension())),
                ot.ResourceMap.GetAsUnsignedInteger(
                    "GaussianNonLinearCalibration-MultiStartSize")).generate())
    )
    algo.run()
    # To avoid discrepancy between the platforms with or without CMinpack
    print("result    (TNC)=", algo.getResult().getParameterMAP())
    algo = ot.GaussianNonLinearCalibration(modelX, x, y, candidate,
                                           priorCovariance,
                                           globalErrorCovariance)
    algo.setBootstrapSize(bootstrapSize)
    algo.run()
    print("result (Global)=", algo.getResult().getParameterMAP())
Example #8
    def _estimKrigingTheta(self, algoKriging, lowerBound, upperBound, size):
        """
        Estimate the kriging theta values with an initial random search over
        a Sobol' sequence of ``size`` samples.
        """
        # get input parameters of the kriging algorithm
        X = algoKriging.getInputSample()
        Y = algoKriging.getOutputSample()

        algoKriging.run()
        krigingResult = algoKriging.getResult()
        covarianceModel = krigingResult.getCovarianceModel()
        basis = krigingResult.getBasisCollection()
        if LooseVersion(ot.__version__) == '1.9':
            llf = algoKriging.getReducedLogLikelihoodFunction()
        else:
            llf = algoKriging.getLogLikelihoodFunction()

        # create a uniform distribution over the parameter bounds
        dim = len(lowerBound)
        distBoundCol = []
        for i in range(dim):
            distBoundCol += [ot.Uniform(lowerBound[i], upperBound[i])]
        distBound = ot.ComposedDistribution(distBoundCol)

        if size > 0:
            # Generate starting points with a low discrepancy sequence
            thetaStart = ot.LowDiscrepancyExperiment(ot.SobolSequence(),
                                                     distBound,
                                                     size).generate()
            # Get the best theta from the maximum llf value
            llfValue = llf(thetaStart)
            indexMax = int(np.argmax(llfValue))
            bestTheta = thetaStart[indexMax]

            # update theta after random search
            if LooseVersion(ot.__version__) == '1.6':
                covarianceModel.setScale(bestTheta)
            elif LooseVersion(ot.__version__) > '1.6':
                # optimize theta and sigma in ot 1.8
                covarianceModel.setScale(bestTheta[:-1])
                covarianceModel.setAmplitude([bestTheta[-1]])

        # Now the KrigingAlgorithm is used to optimize the likelihood using a
        # good starting point
        if LooseVersion(ot.__version__) == "1.9":
            algoKriging = ot.KrigingAlgorithm(X, Y, covarianceModel, basis)
        else:
            algoKriging = ot.KrigingAlgorithm(X, Y, basis, covarianceModel,
                                              True)

        # set TNC optim
        searchInterval = ot.Interval(lowerBound, upperBound)
        if LooseVersion(ot.__version__) == '1.6':
            optimizer = ot.TNC()
            optimizer.setBoundConstraints(searchInterval)
            algoKriging.setOptimizer(optimizer)
        elif LooseVersion(ot.__version__) in ['1.7', '1.8']:
            optimizer = algoKriging.getOptimizationSolver()
            problem = optimizer.getProblem()
            problem.setBounds(searchInterval)
            optimizer.setProblem(problem)
            algoKriging.setOptimizationSolver(optimizer)
        elif LooseVersion(ot.__version__) == '1.9':
            algoKriging.setOptimizationBounds(searchInterval)

        return algoKriging
# %%
# Sobol' low discrepancy sequence
# -------------------------------

# %%
dim = 2
distribution = ot.ComposedDistribution([ot.Uniform()]*dim)
bounds = distribution.getRange()

# %%
sequence = ot.SobolSequence(dim)

# %%
samplesize = 2**5  # Sobol' sequences are in base 2
experiment = ot.LowDiscrepancyExperiment(sequence, distribution, samplesize, False)
sample = experiment.generate()

# %%
samplesize

# %%
subdivisions = [2**2, 2**1]
fig = otv.PlotDesign(sample, bounds, subdivisions);
fig.set_size_inches(6, 6)

# %%
# The elementary intervals defined by these subdivisions each have a volume of 1/8. Since there are 32 points, the Sobol' sequence guarantees that each elementary interval contains exactly 32/8 = 4 points. Notice that each elementary interval is closed on the left (or bottom) and open on the right (or top).
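
# %%
# A quick numerical check of this claim (a sketch; recall that ``ot.Uniform()``
# is uniform on [-1, 1]):
import numpy as np
pts = np.array(sample)
counts, _, _ = np.histogram2d(pts[:, 0], pts[:, 1], bins=subdivisions,
                              range=[[-1.0, 1.0], [-1.0, 1.0]])
print(counts)  # every elementary interval should hold 32/8 = 4 points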

# %%
# Halton low discrepancy sequence
# -------------------------------
Example #10
    def run(self):
        """
        Launch the algorithm and build the POD models.

        Notes
        -----
        This method launches the iterative algorithm. Once the algorithm stops,
        it builds the POD models: a Monte Carlo simulation is performed for
        each defect size with the final classifier model. Finally, the sample
        is used to compute the mean POD and the POD at the confidence level.
        """

        # Create an initial uniform distribution if not given
        if self._distribution is None:
            inputMin = self._input.getMin()
            inputMin[0] = np.min(self._defectSizes)
            inputMax = self._input.getMax()
            inputMax[0] = np.max(self._defectSizes)
            marginals = [
                ot.Uniform(inputMin[i], inputMax[i]) for i in range(self._dim)
            ]
            self._distribution = ot.ComposedDistribution(marginals)

        # Create the design of experiments of the candidate points where the
        # criterion is computed
        if self._distribution.hasIndependentCopula():
            # independent copula: use a low discrepancy experiment as first DOE
            doeCandidate = ot.LowDiscrepancyExperiment(
                ot.SobolSequence(), self._distribution,
                self._candidateSize).generate()
        else:
            # otherwise fall back to simple Monte Carlo sampling of the distribution
            doeCandidate = self._distribution.getSample(self._candidateSize)

        doeCandidate = np.array(doeCandidate)
        # build initial classifier model
        # build the kriging model without optimization

        if self._verbose:
            print('Building the classifier')

        n_ini = int(self._input.getSize())
        self._input = np.array(self._input)
        self._signals = np.hstack(self._signals)

        n_added_points = 0
        algo_iteration = 0

        ## SVC classification case
        if self._classifierType == "svc":
            algo_temp = list(
                map(
                    lambda C, kernel, degree, probability: svm.SVC(
                        C=C,
                        kernel=kernel,
                        degree=degree,
                        probability=probability,
                        coef0=1,
                    ), *self._ClassifierParameters))[0]

        ## Random forest classification case
        if self._classifierType == "rf":
            algo_temp = list(
                map(
                    lambda n_estimators, max_depth, min_samples_split,
                    random_state: ExtraTreesClassifier(
                        n_estimators=n_estimators,
                        max_depth=max_depth,
                        min_samples_split=min_samples_split,
                        random_state=random_state),
                    *self._ClassifierParameters))[0]

        algo_temp.fit(self._input, self._signals)

        list_classifiers = []
        f_iter = algo_temp.predict_proba
        list_classifiers.append(f_iter)
        self._classifierModel = f_iter

        plt.ion()
        # Start the improvement loop
        if self._verbose and self._nMorePoints > 0:
            print('Start the improvement loop')

        while n_added_points < self._nMorePoints:

            # compute the classifier probability for each candidate point
            probs = f_iter(doeCandidate)[:, 1]

            # get the indices where p_min <= proba(x) < p_max
            ind_p1 = np.where(probs < self._pmax)[0]
            ind_p2 = np.where(probs >= self._pmin)[0]
            ind_p = np.intersect1d(ind_p2, ind_p1)
            ind = ind_p

            # if no indices were found, widen the probability band to [0.1, 0.8)
            if len(ind) == 0:
                ind_p1 = np.where(probs < 0.8)[0]
                ind_p2 = np.where(probs >= 0.1)[0]
                ind_p = np.intersect1d(ind_p2, ind_p1)
                ind = ind_p

            ind_rank = np.argsort(probs[ind])
            quant = [
                0,
                int(len(ind) / 4.),
                int(len(ind) / 2.),
                int(3. * len(ind) / 4.),
                len(ind) - 1
            ]

            ind_bis = ind_rank[quant]
            x_new = doeCandidate[ind[ind_bis], :]
            z_new = np.hstack(self._physicalModel(x_new))

            n_new_temp = len(self._input) + len(x_new)

            # if the budget of points would be exceeded, truncate the batch
            if n_new_temp > (n_ini + self._nMorePoints):
                x_new = x_new[:self._nMorePoints + n_ini - len(self._input), :]
                z_new = z_new[:self._nMorePoints + n_ini - len(self._input)]

            self._input = np.vstack((self._input, x_new))
            self._signals = np.hstack((self._signals, z_new))

            n_added_points = n_new_temp - n_ini
            algo_iteration = algo_iteration + 1

            if self._classifierType == "svc":
                algo_temp = list(
                    map(
                        lambda C, kernel, degree, probability: svm.SVC(
                            C=C,
                            kernel=kernel,
                            degree=degree,
                            probability=probability,
                            coef0=1), *self._ClassifierParameters))[0]

            if self._classifierType == "rf":
                algo_temp = list(
                    map(
                        lambda n_estimators, max_depth, min_samples_split,
                        random_state: ExtraTreesClassifier(
                            n_estimators=n_estimators,
                            max_depth=max_depth,
                            min_samples_split=min_samples_split,
                            random_state=random_state),
                        *self._ClassifierParameters))[0]

            # Fit the classifier on self._input and self._signals
            algo_temp.fit(self._input, self._signals)

            self._confMat = np.zeros((2, 2))
            for classifier in list_classifiers:
                conf_temp = 1. * confusion_matrix(
                    self._signals,
                    classifier(self._input)[:, 1] >= 0.5)
                conf_temp = 1. * conf_temp / conf_temp.sum(axis=0)
                self._confMat = conf_temp + self._confMat

            self._confMat = 1. * self._confMat / len(list_classifiers)
            classif_algo_temp = algo_temp.predict_proba

            p11 = self._confMat[1, 1]
            p10 = self._confMat[1, 0]

            def agg_classifier(x_in):
                c = p11 - p10
                p1_bayes = 1. / c * (classif_algo_temp(x_in)[:, 1] - p10)
                p1_bayes = np.vstack(
                    np.min(np.array([
                        np.max(np.array([p1_bayes,
                                         np.zeros(len(p1_bayes))]),
                               axis=0),
                        np.ones(len(p1_bayes))
                    ]),
                           axis=0))
                return (np.array([1 - p1_bayes, p1_bayes]).T)[0]

            f_iter = agg_classifier
            list_classifiers.append(f_iter)
            self._classifierModel = f_iter

            if self._verbose:
                updateProgress(n_added_points - 1, self._nMorePoints,
                               'Adding points')

            if self._graph:
                self._PODPerDefect = self._computePOD(self._defectSizes,
                                                      agg_classifier)
                # create the interpolate function of the POD model
                meanPOD = self._PODPerDefect.computeMean()
                interpModel = interp1d(self._defectSizes,
                                       np.array(meanPOD),
                                       kind='linear')
                self._PODmodel = ot.PythonFunction(1, 1, interpModel)
                # The POD at confidence level is built in getPODCLModel() directly
                fig, ax = self.drawPOD(self._probabilityLevel,
                                       self._confidenceLevel)
                plt.draw()
                plt.pause(0.001)
                plt.show()
                if self._graphDirectory is not None:
                    fig.savefig(os.path.join(self._graphDirectory,
                                             'AdaptiveHitMissPOD_') +
                                str(algo_iteration),
                                bbox_inches='tight',
                                transparent=True)

        self._input = ot.NumericalSample(self._input)
        self._signals = ot.NumericalSample(np.vstack(self._signals))
        # Compute the sample predicted for each defect sizes
        self._PODPerDefect = self._computePOD(self._defectSizes,
                                              self._classifierModel)
        # compute the POD for all defect sizes
        meanPOD = self._PODPerDefect.computeMean()
        # create the interpolate function of the POD model
        interpModel = interp1d(self._defectSizes,
                               np.array(meanPOD),
                               kind='linear')
        self._PODmodel = ot.PythonFunction(1, 1, interpModel)

        # The POD at confidence level is built in getPODCLModel() directly

        # remove the interactive plotting
        plt.ioff()
# %%
vect = ot.RandomVector(distribution)
G = ot.CompositeRandomVector(model, vect)
event = ot.ThresholdEvent(G, ot.Greater(), 0.3)

# %%
# Define the low discrepancy sequence.

# %%
sequence = ot.SobolSequence()

# %%
# Create a simulation algorithm.

# %%
experiment = ot.LowDiscrepancyExperiment(sequence, 1)
experiment.setRandomize(True)
algo = ot.ProbabilitySimulationAlgorithm(event, experiment)
algo.setMaximumCoefficientOfVariation(0.05)
algo.setMaximumOuterSampling(int(1e5))
algo.run()

# %%
# Retrieve results.

# %%
result = algo.getResult()
probability = result.getProbabilityEstimate()
print('Pf=', probability)
Example #12
R = ot.IdentityMatrix(dim)
myDistribution = ot.Normal(mean, sigma, R)

# We create a 'usual' RandomVector from the Distribution
vect = ot.RandomVector(myDistribution)

# We create a composite random vector
output = ot.CompositeRandomVector(myFunction, vect)

# We create an Event from this RandomVector
myEvent = ot.Event(output, ot.Less(), -3.0)

# Monte Carlo
experiments = [ot.MonteCarloExperiment()]
# Quasi Monte Carlo
experiments.append(ot.LowDiscrepancyExperiment())
# Randomized Quasi Monte Carlo
experiment = ot.LowDiscrepancyExperiment()
experiment.setRandomize(True)
experiments.append(experiment)
# Importance sampling
mean[0] = 4.99689645939288809018e+01
mean[1] = 1.84194175946153282375e+00
mean[2] = 1.04454036676956398821e+01
mean[3] = 4.66776215562709406726e+00
myImportance = ot.Normal(mean, sigma, R)
experiments.append(ot.ImportanceSamplingExperiment(myImportance))
# Randomized LHS
experiment = ot.LHSExperiment()
experiment.setAlwaysShuffle(True)
experiments.append(experiment)
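
A hedged sketch of how such a list of experiments is typically consumed; the block size and sampling limit below are illustrative:

for experiment in experiments:
    algo = ot.ProbabilitySimulationAlgorithm(myEvent, experiment)
    algo.setMaximumOuterSampling(250)
    algo.setBlockSize(4)
    algo.run()
    print(experiment.getClassName(),
          algo.getResult().getProbabilityEstimate())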
Example #13
import openturns as ot
import sys

ot.TESTPREAMBLE()
ot.PlatformInfo.SetNumericalPrecision(3)

m = 10  # sample size
x = [[0.5 + i] for i in range(m)]

#ot.ResourceMap.SetAsUnsignedInteger( "OptimizationAlgorithm-DefaultMaximumEvaluationNumber", 100)
inVars = ["a", "b", "c", "x"]
formulas = ["a + b * exp(c * x)", "(a * x^2 + b) / (c + x^2)"]
model = ot.SymbolicFunction(inVars, formulas)
p_ref = [2.8, 1.2, 0.5]
params = [0, 1, 2]
modelX = ot.ParametricFunction(model, params, p_ref)
y = modelX(x)
y += ot.Normal([0.0]*2, [0.05]*2, ot.IdentityMatrix(2)).getSample(m)
candidate = [1.0]*3
bootstrapSizes = [0, 100]
for bootstrapSize in bootstrapSizes:
    algo = ot.NonLinearLeastSquaresCalibration(modelX, x, y, candidate)
    algo.setBootstrapSize(bootstrapSize)
    algo.run()
    # To avoid discrepancy between the platforms with or without CMinpack
    print("result (Auto)=", algo.getResult().getParameterMAP())
    algo.setAlgorithm(
        ot.MultiStart(
            ot.TNC(),
            ot.LowDiscrepancyExperiment(
                ot.SobolSequence(),
                ot.Normal(candidate,
                          ot.CovarianceMatrix(
                              ot.Point(candidate).getDimension())),
                ot.ResourceMap.GetAsUnsignedInteger(
                    "NonLinearLeastSquaresCalibration-MultiStartSize")
            ).generate()))
    algo.run()
    # To avoid discrepancy between the platforms with or without CMinpack
    print("result  (TNC)=", algo.getResult().getParameterMAP())
def mySaltelliSobolRandomizedSequence(distribution, size, model):
    sequence = ot.SobolSequence(distribution.getDimension())
    experiment = ot.LowDiscrepancyExperiment(sequence, distribution, size)
    experiment.setRandomize(True)
    sensitivity_algorithm = ot.SaltelliSensitivityAlgorithm(experiment, model)
    return sensitivity_algorithm
def myHaltonLowDiscrepancyExperiment(distribution, size, model):
    sequence = ot.HaltonSequence(distribution.getDimension())
    experiment = ot.LowDiscrepancyExperiment(sequence, distribution, size)
    sensitivity_algorithm = ot.SaltelliSensitivityAlgorithm(experiment, model)
    return sensitivity_algorithm
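
A hypothetical usage sketch for the two factory helpers above; the Ishigami-type model and input distribution are illustrative assumptions:

import openturns as ot

model = ot.SymbolicFunction(
    ['x1', 'x2', 'x3'],
    ['sin(x1) + 7 * sin(x2)^2 + 0.1 * x3^4 * sin(x1)'])
distribution = ot.ComposedDistribution(
    [ot.Uniform(-3.14159, 3.14159)] * 3)
algo = mySaltelliSobolRandomizedSequence(distribution, 1024, model)
print(algo.getFirstOrderIndices())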
Example #16
        interval_fo_asymptotic = sensitivity_algorithm.getFirstOrderIndicesInterval()
        interval_to_asymptotic = sensitivity_algorithm.getTotalOrderIndicesInterval()
        print("asymptotic intervals:")
        print("First order indices distribution = ",
              sensitivity_algorithm.getFirstOrderIndicesDistribution())
        print("Total order indices distribution = ",
              sensitivity_algorithm.getTotalOrderIndicesDistribution())
        print("First order indices interval = ", interval_fo_asymptotic)
        print("Total order indices interval = ", interval_to_asymptotic)

# with experiment
sequence = ot.SobolSequence(input_dimension)
experiment = ot.LowDiscrepancyExperiment(
    sequence,
    ot.ComposedDistribution([ot.Uniform(0.0, 1.0)] * input_dimension), size)
sensitivity_algorithm = ot.SaltelliSensitivityAlgorithm(experiment, model)
print(sensitivity_algorithm.getFirstOrderIndices())

# multi variate model
model_aggregated = ot.SymbolicFunction(
    ['X1', 'X2', 'X3'],
    ['2*X1 + X2 - 3*X3 + 0.3*X1*X2', '-5*X1 + 4*X2 - 0.8*X2*X3 + 2*X3'])
distribution_aggregated = ot.ComposedDistribution([ot.Uniform()] * 3)
inputDesign = ot.SobolIndicesExperiment(distribution_aggregated,
                                        size).generate()
outputDesign = model_aggregated(inputDesign)
# Case 1 : Estimation of sensitivity using estimator and no bootstrap
for method in methods:
    sensitivity_algorithm = eval(
# %%
# Create the problem and set the optimization algorithm
# -----------------------------------------------------

# %%
problem = ot.OptimizationProblem(rastrigin)

# %%
# We use the :class:`~openturns.Cobyla` algorithm and run it from multiple starting points selected by a :class:`~openturns.LowDiscrepancyExperiment`.

# %%
size = 64
distribution = ot.ComposedDistribution(
    [ot.Uniform(lowerbound[0], upperbound[0])] * dim)
experiment = ot.LowDiscrepancyExperiment(
    ot.SobolSequence(), distribution, size)
solver = ot.MultiStart(ot.Cobyla(problem), experiment.generate())

# %%
# Visualize the starting points of the optimization algorithm
# -----------------------------------------------------------

# %%
startingPoints = solver.getStartingSample()
graph = rastrigin.draw(lowerbound, upperbound, [100]*dim)
graph.setTitle("Rastrigin function")
cloud = ot.Cloud(startingPoints)
cloud.setPointStyle("bullet")
cloud.setColor("black")
graph.add(cloud)
graph.setLegends([""])
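
# %%
# A sketch of actually running the multi-start solver and reading off the best
# point found (standard :class:`~openturns.OptimizationAlgorithm` API):
solver.run()
result = solver.getResult()
print('x* =', result.getOptimalPoint(), 'f(x*) =', result.getOptimalValue())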
Example #18
    def __init__(self, n_samples, bounds, kind, dists=None, discrete=None):
        """Initialize the DOE generation.

        If :attr:`kind` is ``uniform``, :attr:`n_samples` is decimated so that
        all dimensions have the same number of points.

        If :attr:`kind` is ``discrete``, a joint distribution is built from a
        discrete uniform distribution and the continuous distributions.

        Another possibility is to set a list of PDF to sample from. Thus one
        can do: `dists=['Uniform(15., 60.)', 'Normal(4035., 400.)']`. If not
        set, uniform distributions are used.

        :param int n_samples: number of samples.
        :param array_like bounds: Space's corners [[min, n dim], [max, n dim]]
        :param str kind: sampling method; if a string, one of
          ['halton', 'sobol', 'faure', '[o]lhs[c]', 'sobolscramble', 'uniform',
          'discrete'], otherwise a list of openturns distributions.
        :param lst(str) dists: List of valid openturns distributions as string.
        :param int discrete: Position of the discrete variable.
        """
        self.n_samples = n_samples
        self.bounds = np.asarray(bounds)
        self.kind = kind
        self.dim = self.bounds.shape[1]

        self.scaler = preprocessing.MinMaxScaler()
        self.scaler.fit(self.bounds)

        if dists is None:
            dists = [ot.Uniform(float(self.bounds[0][i]),
                                float(self.bounds[1][i]))
                     for i in range(self.dim)]
        else:
            dists = bat.space.dists_to_ot(dists)

        if discrete is not None:
            # Creating uniform discrete distribution for OT
            disc_list = [[i] for i in range(int(self.bounds[0, discrete]),
                                            int(self.bounds[1, discrete] + 1))]
            disc_dist = ot.UserDefined(disc_list)

            dists.pop(discrete)
            dists.insert(discrete, disc_dist)

        # Joint distribution
        self.distribution = ot.ComposedDistribution(dists)

        if self.kind == 'halton':
            self.sequence_type = ot.LowDiscrepancyExperiment(ot.HaltonSequence(),
                                                             self.distribution,
                                                             self.n_samples)
        elif self.kind == 'sobol':
            self.sequence_type = ot.LowDiscrepancyExperiment(ot.SobolSequence(),
                                                             self.distribution,
                                                             self.n_samples)
        elif self.kind == 'faure':
            self.sequence_type = ot.LowDiscrepancyExperiment(ot.FaureSequence(),
                                                             self.distribution,
                                                             self.n_samples)
        elif (self.kind == 'lhs') or (self.kind == 'lhsc'):
            self.sequence_type = ot.LHSExperiment(self.distribution, self.n_samples)
        elif self.kind == 'olhs':
            lhs = ot.LHSExperiment(self.distribution, self.n_samples)
            self.sequence_type = ot.SimulatedAnnealingLHS(lhs, ot.GeometricProfile(),
                                                          ot.SpaceFillingC2())
        elif self.kind == 'saltelli':
            # Only relevant for computation of Sobol' indices
            size = self.n_samples // (2 * self.dim + 2)  # N(2*dim + 2)
            self.sequence_type = ot.SobolIndicesExperiment(self.distribution,
                                                           size, True).generate()
Example #19
    def run(self):
        """
        Launch the algorithm and build the POD models.

        Notes
        -----
        This method launches the iterative algorithm. First the censored data
        are filtered if needed. The Box-Cox transformation is performed if it
        is enabled. Then the design of experiments is iteratively enriched.
        Once the algorithm stops, it builds the POD models: conditional samples
        are simulated for each defect size, then the distributions of the
        probability estimator (for MC simulation) are built. Finally, a sample
        of this distribution is used to compute the mean POD and the POD at
        the confidence level.
        """

        # Create an initial uniform distribution if not given
        if self._distribution is None:
            inputMin = self._input.getMin()
            inputMin[0] = np.min(self._defectSizes)
            inputMax = self._input.getMax()
            inputMax[0] = np.max(self._defectSizes)
            marginals = [ot.Uniform(inputMin[i], inputMax[i]) for i in range(self._dim)]
            self._distribution = ot.ComposedDistribution(marginals)

        # Create the design of experiments of the candidate points where the
        # criterion is computed
        if self._distribution.hasIndependentCopula():
            # independent copula: use a low discrepancy experiment as first DOE
            doeCandidate = ot.LowDiscrepancyExperiment(ot.SobolSequence(), 
                            self._distribution, self._candidateSize).generate()
        else:
            # otherwise fall back to simple Monte Carlo sampling
            doeCandidate = self._distribution.getSample(self._candidateSize)

        # build initial kriging model
        # build the kriging model without optimization
        algoKriging = self._buildKrigingAlgo(self._input, self._signals)
        if self._verbose:
            print('Building the kriging model')
            print('Optimization of the covariance model parameters...')

        if LooseVersion(ot.__version__) == '1.9':
            llDim = algoKriging.getReducedLogLikelihoodFunction().getInputDimension()
        else:
            llDim = algoKriging.getLogLikelihoodFunction().getInputDimension()
        lowerBound = [0.001] * llDim
        upperBound = [50] * llDim               
        algoKriging = self._estimKrigingTheta(algoKriging,
                                              lowerBound, upperBound,
                                              self._initialStartSize)
        algoKriging.run()

        # Get kriging results
        self._krigingResult = algoKriging.getResult()
        self._covarianceModel = self._krigingResult.getCovarianceModel()
        self._basis = self._krigingResult.getBasisCollection()
        metamodel = self._krigingResult.getMetaModel()

        self._Q2 = self._computeQ2(self._input, self._signals, self._krigingResult)
        if self._verbose:
            print('Kriging validation Q2 (>0.9): {:0.4f}\n'.format(self._Q2))

        plt.ion()
        # Start the improvement loop
        iteration = 0
        while iteration < self._nIteration:
            iteration += 1
            if self._verbose:
                print('Iteration : {}/{}'.format(iteration, self._nIteration))

            # compute the POD (ptrue = pn-1) for bias reduction in the criterion
            # Monte Carlo for all defect sizes in a vectorized way.
            # get Sample for all parameters except the defect size
            samplePred = self._distribution.getSample(self._samplingSize)[:,1:]
            fullSamplePred = ot.NumericalSample(self._samplingSize * self._defectNumber,
                                                self._dim)
            # Add the defect sizes as first value 
            for i, defect in enumerate(self._defectSizes):
                fullSamplePred[self._samplingSize*i:self._samplingSize*(i+1), :] = \
                                        self._mergeDefectInX(defect, samplePred)
            meanPredictionSample = metamodel(fullSamplePred)
            meanPredictionSample = np.reshape(meanPredictionSample, (self._samplingSize,
                                                    self._defectNumber), 'F')
            # compute the POD for all defect sizes
            currentPOD = np.mean(meanPredictionSample > self._detectionBoxCox, axis=0)

            # Compute the criterion for every candidate in the candidate DOE
            criterion = np.inf  # running best value; smaller is better
            for icand, candidate in enumerate(doeCandidate):

                # add the current candidate to the kriging doe
                inputAugmented = self._input[:]
                inputAugmented.add(candidate)
                signalsAugmented = self._signals[:]
                # predict the signal value of the candidate using the current
                # kriging model
                signalsAugmented.add(metamodel(candidate))
                # create a temporary kriging model with the new doe and without
                # updating the covariance model parameters
                if LooseVersion(ot.__version__) == '1.9':
                    algoKrigingTemp = ot.KrigingAlgorithm(inputAugmented, signalsAugmented,
                                                          self._covarianceModel,
                                                          self._basis,
                                                          True)
                else:
                    algoKrigingTemp = ot.KrigingAlgorithm(inputAugmented, signalsAugmented,
                                                          self._basis,
                                                          self._covarianceModel,
                                                          True)
                if LooseVersion(ot.__version__) > '1.6':
                    optimizer = algoKrigingTemp.getOptimizationSolver()
                    optimizer.setMaximumIterationNumber(0)
                    algoKrigingTemp.setOptimizationSolver(optimizer)

                algoKrigingTemp.run()
                krigingResultTemp = algoKrigingTemp.getResult()

                # compute the criterion for all defect size
                crit = []
                # save results, used to compute the PODModel and PODCLModel
                PODPerDefect = ot.NumericalSample(self._simulationSize *
                                         self._samplingSize, self._defectNumber)
                for idef, defect in enumerate(self._defectSizes):
                    podSample = self._computePODSamplePerDefect(defect,
                        self._detectionBoxCox, krigingResultTemp,
                        self._distribution, self._simulationSize, self._samplingSize)
                    PODPerDefect[:, idef] = podSample

                    meanPOD = podSample.computeMean()[0]
                    varPOD = podSample.computeVariance()[0]
                    crit.append(varPOD + (meanPOD - currentPOD[idef])**2)
                # compute the criterion aggregated for all defect sizes
                newCriterion = np.sqrt(np.mean(crit))

                # check if the result is better or not
                if newCriterion < criterion:
                    self._PODPerDefect = PODPerDefect
                    criterion = newCriterion
                    indexOpt = icand
                
                if self._verbose:
                    updateProgress(icand, int(doeCandidate.getSize()), 'Computing criterion')

            # get the best candidate
            candidateOpt = doeCandidate[indexOpt]
            # add new point to DOE
            self._input.add(candidateOpt)
            # add the signal computed by the physical model
            if self._boxCox:
                self._signals.add(self._boxCoxTransform(self._physicalModel(candidateOpt)))
            else:
                self._signals.add(self._physicalModel(candidateOpt))
            # remove added candidate from the doeCandidate
            doeCandidate.erase(indexOpt)
            if self._verbose:
                print('Criterion value : {:0.4f}'.format(criterion))
                print('Added point : {}'.format(candidateOpt))
                print('Update the kriging model')

            # update the kriging model without optimization
            algoKriging = self._buildKrigingAlgo(self._input, self._signals)
            if LooseVersion(ot.__version__) == '1.7':
                optimizer = algoKriging.getOptimizationSolver()
                optimizer.setMaximumIterationNumber(0)
                algoKriging.setOptimizationSolver(optimizer)
            elif LooseVersion(ot.__version__) == '1.8':
                algoKriging.setOptimizeParameters(False)

            algoKriging.run()

            self._Q2 = self._computeQ2(self._input, self._signals, algoKriging.getResult())

            # Check the quality of the kriging model if it needs optimization
            if self._Q2 < 0.95:
                if self._verbose:
                    print('Optimization of the covariance model parameters...')

                if LooseVersion(ot.__version__) == '1.9':
                    llDim = algoKriging.getReducedLogLikelihoodFunction().getInputDimension()
                else:
                    llDim = algoKriging.getLogLikelihoodFunction().getInputDimension()
                lowerBound = [0.001] * llDim
                upperBound = [50] * llDim               
                algoKriging = self._estimKrigingTheta(algoKriging,
                                                      lowerBound, upperBound,
                                                      self._initialStartSize)
                algoKriging.run()

            # Get kriging results
            self._krigingResult = algoKriging.getResult()
            self._covarianceModel = self._krigingResult.getCovarianceModel()
            self._basis = self._krigingResult.getBasisCollection()
            metamodel = self._krigingResult.getMetaModel()

            self._Q2 = self._computeQ2(self._input, self._signals, self._krigingResult)
            if self._verbose:
                print('Kriging validation Q2 (>0.9): {:0.4f}'.format(self._Q2))

            if self._graph:
                # create the interpolate function of the POD model
                meanPOD = self._PODPerDefect.computeMean()
                interpModel = interp1d(self._defectSizes, np.array(meanPOD), kind='linear')
                self._PODmodel = ot.PythonFunction(1, 1, interpModel)
                # The POD at confidence level is built in getPODCLModel() directly
                fig, ax = self.drawPOD(self._probabilityLevel, self._confidenceLevel)
                plt.draw()
                plt.pause(0.001)
                plt.show()
                if self._graphDirectory is not None:
                    fig.savefig(os.path.join(self._graphDirectory, 'AdaptiveSignalPOD_')+str(iteration),
                                bbox_inches='tight', transparent=True)

        # Compute the final POD with the last updated kriging model
        if self._verbose:
            print('\nStart computing the POD with the last updated kriging model')
        # compute the sample containing the POD values for all defects
        self._PODPerDefect = ot.NumericalSample(self._simulationSize *
                                         self._samplingSize, self._defectNumber)
        for i, defect in enumerate(self._defectSizes):
            self._PODPerDefect[:, i] = self._computePODSamplePerDefect(defect,
                self._detectionBoxCox, self._krigingResult, self._distribution,
                self._simulationSize, self._samplingSize)
            if self._verbose:
                updateProgress(i, self._defectNumber, 'Computing POD per defect')

        # compute the mean POD 
        meanPOD = self._PODPerDefect.computeMean()
        # create the interpolate function of the POD model
        interpModel = interp1d(self._defectSizes, np.array(meanPOD), kind='linear')
        self._PODmodel = ot.PythonFunction(1, 1, interpModel)

        # The POD at confidence level is built in getPODCLModel() directly

        # remove the interactive plotting
        plt.ioff()
import openturns as ot
from openturns.viewer import View

# Sobol
d = ot.LowDiscrepancyExperiment(ot.SobolSequence(), ot.ComposedDistribution([ot.Uniform()]*3), 32)
s = d.generate()
s.setDescription(["X1", "X2", "X3"])
g = ot.Graph()
g.setTitle("Low discrepancy experiment")
g.setGridColor("black")
p = ot.Pairs(s)
g.add(p)
View(g)