Example #1
    def test_getSample_getMean(self):
        """Test InverseWishart.getSample and InverseWishart.getMean"""
        d, Scale, DoF, N = self.dimension, self.Scale, self.DoF, int(1E+5)
        Identity = ot.CovarianceMatrix(d)
        Scale_wishart = ot.CovarianceMatrix(Scale.solveLinearSystem(Identity))
        inverse_wishart = ot.InverseWishart(Scale, DoF)
        sample_inverse = ot.NumericalSample(N, (d * (d + 1)) // 2)
        sample = ot.NumericalSample(N, (d * (d + 1)) // 2)
        for i in range(N):
            M_inverse = inverse_wishart.getRealizationAsMatrix()
            M = M_inverse.solveLinearSystem(Identity)
            indice = 0
            for j in range(d):
                for k in range(j + 1):
                    sample_inverse[i, indice] = M_inverse[k, j]
                    sample[i, indice] = M[k, j]
                    indice += 1
        mean_inverse = sample_inverse.computeMean()
        mean = sample.computeMean()
        theoretical_mean_inverse = inverse_wishart.getMean()
        theoretical_mean = (ot.Wishart(Scale_wishart, DoF)).getMean()
        indice, coefficient = 0, 1. / (DoF - d - 1)
        for j in range(d):
            for k in range(j + 1):
                assert_almost_equal(theoretical_mean_inverse[indice],
                                    coefficient * Scale[k, j])
                assert_almost_equal(theoretical_mean[indice],
                                    DoF * Scale_wishart[k, j])
                assert_almost_equal(mean_inverse[indice],
                                    coefficient * Scale[k, j], 0.1, 1.E-3)
                assert_almost_equal(mean[indice], DoF * Scale_wishart[k, j],
                                    0.1, 1.E-3)
                indice += 1
Example #2
    def _exec_sample(self, X):
        samplingSize = X.getSize()

        # create sample containing all input combined with all defect sizes
        fullX = ot.NumericalSample(samplingSize * self.defectNumber,
                                   self.dim + 1)
        for i, x in enumerate(X):
            x = np.array(x, ndmin=2)
            x = x.repeat(self.defectNumber, axis=0)
            xWitha = np.concatenate((np.vstack(self.defectSizes), x), axis=1)
            fullX[self.defectNumber * i:self.defectNumber *
                  (i + 1), :] = xWitha

        # add randomness from the residuals, identical for all defect sizes
        residualsSample = ot.Normal(samplingSize).getSample(
            self.simulationSize) * self.chaosPOD._stderr
        fullRes = ot.NumericalSample(self.simulationSize,
                                     samplingSize * self.defectNumber)
        for i in range(samplingSize):
            fullRes[:, self.defectNumber * i:self.defectNumber *
                    (i + 1)] = np.repeat(residualsSample[:, i],
                                         self.defectNumber,
                                         axis=1)
        fullRes = np.transpose(fullRes)

        # compute the signal
        Y = np.array(self.chaosFunction(fullX))

        # compute the POD
        prob = np.mean((Y + fullRes) > self.detection, axis=1)
        prob = prob.reshape(samplingSize, self.defectNumber)
        return prob
Example #3
    def filterCensoredData(inputSample, signals, noiseThres, saturationThres):
        """
        Sort inputSample and signals with respect to the censoring thresholds.

        Parameters
        ----------
        inputSample : 2-d sequence of float
            Vector of the input sample.
        signals : 2-d sequence of float
            Vector of the signals, of dimension 1.
        noiseThres : float
            Value for low censored data. Default is None.
        saturationThres : float
            Value for high censored data. Default is None.

        Returns
        -------
        inputSampleUnc : 2-d sequence of float
            Vector of the input sample in the uncensored area.
        inputSampleNoise : 2-d sequence of float
            Vector of the input sample in the noisy area.
        inputSampleSat : 2-d sequence of float
            Vector of the input sample in the saturation area.
        signalsUnc : 2-d sequence of float
            Vector of the signals in the uncensored area.

        Notes
        -----
        The data are sorted into three different vectors according to whether
        they belong to the noisy area, the uncensored area or the saturation area.
        """
        # check for one-sided censoring
        if noiseThres is None:
            noiseThres = -ot.sys.float_info.max
        if saturationThres is None:
            saturationThres = ot.sys.float_info.max

        # transform in numpy.array
        inputSample = np.array(inputSample)
        signals = np.array(signals)
        # inputSample in the uncensored area
        inputSampleUnc = inputSample[np.hstack(np.logical_and(
            signals > noiseThres, signals < saturationThres))]
        # inputSample in the noisy area
        inputSampleNoise = inputSample[np.hstack(signals <= noiseThres)]
        # inputSample in the saturation area
        inputSampleSat = inputSample[np.hstack(signals >= saturationThres)]
        # signals in the uncensored area
        signalsUnc = signals[np.hstack(np.logical_and(
            signals > noiseThres, signals < saturationThres))]

        # transform in numericalSample
        inputSampleUnc = ot.NumericalSample(inputSampleUnc)
        inputSampleNoise = ot.NumericalSample(inputSampleNoise)
        inputSampleSat = ot.NumericalSample(inputSampleSat)
        signalsUnc = ot.NumericalSample(signalsUnc)

        return inputSampleUnc, inputSampleNoise, inputSampleSat, signalsUnc
Example #4
    def _mergeDefectInX(self, defect, X):
        """
        defect : scalar of the defect value
        X : sample without the defect column
        """
        size = X.getSize()
        dim = X.getDimension() + 1
        samplePred = ot.NumericalSample(size, dim)
        samplePred[:, 0] = ot.NumericalSample(size, [defect])
        samplePred[:, 1:] = X
        return samplePred
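The same column-prepending operation can be sketched with plain numpy, which makes the intent explicit (an illustrative equivalent, not the class's actual code):

import numpy as np

def merge_defect_in_x(defect, X):
    # prepend a constant defect column to the input sample
    X = np.asarray(X)
    return np.hstack([np.full((X.shape[0], 1), defect), X])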
Example #5
    def __init__(self,
                 inputSample,
                 outputSample,
                 noiseThres=None,
                 saturationThres=None,
                 resDistFact=None,
                 boxCox=False):

        self._inputSample = ot.NumericalSample(np.vstack(inputSample))
        self._outputSample = ot.NumericalSample(np.vstack(outputSample))
        self._noiseThres = noiseThres
        self._saturationThres = saturationThres
        # Add a flag to tell if censored data must be taken into account or not.
        if noiseThres is not None or saturationThres is not None:
            # flag to tell censoring is enabled
            self._censored = True
            # Results instances are created for both cases.
            self._resultsCens = _Results()
            self._resultsUnc = _Results()
        else:
            self._censored = False
            # Results instance is created only for uncensored case.
            self._resultsUnc = _Results()

        if resDistFact is None:
            # default is NormalFactory
            self._resDistFact = ot.NormalFactory()
        else:
            self._resDistFact = resDistFact

        # if boxCox is a float, the transformation is enabled with the given lambda
        if type(boxCox) is float:
            self._lambdaBoxCox = boxCox
            self._boxCox = True
        else:
            self._lambdaBoxCox = None
            self._boxCox = boxCox

        self._size = self._inputSample.getSize()
        self._dim = self._inputSample.getDimension()

        # Assertions on parameters
        assert (self._size >= 3), "Not enough observations."
        assert (self._size == self._outputSample.getSize()), \
                "InputSample and outputSample must have the same size."
        assert (self._dim == 1), "Dimension of inputSample must be 1."
        assert (self._outputSample.getDimension() == 1
                ), "Dimension of outputSample must be 1."

        # run the analysis
        self._run()
        # print warnings
        self._printWarnings()
Example #6
    def _computePOD(self, defectSizes, coefs):
        """
        Compute the POD for all defect sizes in a vectorized way.
        """
        # create the input sample that must be computed by the metamodels
        samplePred = self._distribution.getSample(self._samplingSize)[:, 1:]
        fullSamplePred = ot.NumericalSample(
            self._samplingSize * self._defectNumber, self._dim)
        for i, defect in enumerate(defectSizes):
            fullSamplePred[self._samplingSize*i:self._samplingSize*(i+1), :] = \
                                    self._mergeDefectInX(defect, samplePred)

        # create the chaos function for user defined coefs
        chaosFunction = self._buildChaosFunction(self._reducedBasis,
                                                 self._transformation, coefs)

        # add the randomness from the residuals
        residualsSample = self._normalDist.getSample(self._samplingSize * \
                                             self._defectNumber) * self._stderr
        chaosRandomSample = chaosFunction(fullSamplePred) + residualsSample
        chaosRandomSample = np.reshape(
            chaosRandomSample, (self._samplingSize, self._defectNumber), 'F')

        # compute the POD for all defect sizes
        POD = np.mean(chaosRandomSample > self._detectionBoxCox, axis=0)

        return POD
Example #7
    def _PODgaussModelCl(self, defects, intercept, slope, stderr, detection):

        class buildPODModel():
            def __init__(self, intercept, slope, sigmaEpsilon, detection):

                self.intercept = intercept
                self.slope = slope
                self.sigmaEpsilon = sigmaEpsilon
                self.detection = detection

            def PODmodel(self, x):
                t = (self.detection - (self.intercept +
                                       self.slope * x)) / self.sigmaEpsilon
                return ot.DistFunc.pNormal(t, True)

        N = defects.getSize()
        X = ot.NumericalSample(N, [1, 0])
        X[:, 1] = defects
        X = ot.Matrix(X)
        covMatrix = X.computeGram(True).solveLinearSystem(ot.IdentityMatrix(2))
        sampleNormal = ot.Normal([0,0], ot.CovarianceMatrix(
                    covMatrix.getImplementation())).getSample(self._simulationSize)
        sampleSigmaEpsilon = (ot.Chi(N-2).inverse()*np.sqrt(N-2)*stderr).getSample(
                                                            self._simulationSize)

        PODcoll = []
        for i in range(self._simulationSize):
            sigmaEpsilon = sampleSigmaEpsilon[i][0]
            interceptSimu = sampleNormal[i][0] * sigmaEpsilon + intercept
            slopeSimu = sampleNormal[i][1] * sigmaEpsilon + slope
            PODcoll.append(buildPODModel(interceptSimu, slopeSimu, sigmaEpsilon,
                                         detection).PODmodel)
        return PODcoll
Example #8
    def _computePOD(self, defectSizes, algoClassifier):
        """
        Compute the POD sample for all defect sizes in a vectorized way.
        """
        # create the input sample that must be computed by the metamodels
        samplePred = self._distribution.getSample(self._samplingSize)[:, 1:]
        fullSamplePred = ot.NumericalSample(
            self._samplingSize * self._defectNumber, self._dim)
        for i, defect in enumerate(defectSizes):
            fullSamplePred[self._samplingSize*i:self._samplingSize*(i+1), :] = \
                                    self._mergeDefectInX(defect, samplePred)

        classifierSample = algoClassifier(np.array(fullSamplePred))[:, 1]
        classifierSample = np.reshape(classifierSample,
                                      (self._samplingSize, self._defectNumber),
                                      'F')
        return ot.NumericalSample(classifierSample)
Example #9
    def __init__(self,
                 inputSample=None,
                 outputSample=None,
                 detection=None,
                 noiseThres=None,
                 saturationThres=None,
                 boxCox=False):

        self._inputSample = ot.NumericalSample(np.vstack(inputSample))
        self._signals = ot.NumericalSample(np.vstack(outputSample))
        self._detection = detection
        self._noiseThres = noiseThres
        self._saturationThres = saturationThres
        self._boxCox = boxCox

        # Add a flag to tell if censored data must be taken into account or not.
        if self._noiseThres is not None or self._saturationThres is not None:
            # flag to tell censoring is enabled
            self._censored = True
        else:
            self._censored = False

        self._dim = self._inputSample.getDimension()

        self._verbose = True
        self._simulationSize = 1000
        self._samplingSize = 5000

        self._PODgauss = None
        self._PODbin = None
        self._PODks = None
        self._PODqr = None
        self._PODchaos = None
        self._PODkriging = None

        self._activeMethods = {
            'LinearGauss': True,
            'LinearBinomial': True,
            'LinearKernelSmoothing': True,
            'QuantileRegression': True,
            'PolynomialChaos': True,
            'Kriging': True
        }
Example #10
    def getP7History(self):
        """
        Accessor to the history of problem evaluations.

        Returns
        -------
        p7result : :class:`~openturns.NumericalSample`
            Returns values of variables and evaluation results.
            Each element of the top-level list is one evaluated point.
            Nested list structure is [variables, objectives, constraints, objective gradients, constraint gradients].
            Gradients are added only if analytical gradients are enabled.
        """
        return ot.NumericalSample(self.__p7_history)
Example #11
    def __init__(self, inputSample, outputSample, detection, noiseThres,
                 saturationThres, boxCox):

        self._simulationSize = 1000

        # inherited attributes
        self._inputSample = ot.NumericalSample(np.vstack(inputSample))
        self._outputSample = ot.NumericalSample(np.vstack(outputSample))
        self._detection = detection
        self._noiseThres = noiseThres
        self._saturationThres = saturationThres

        # if boxCox is a float, the transformation is enabled with the given lambda
        if type(boxCox) is float:
            self._lambdaBoxCox = boxCox
            self._boxCox = True
        else:
            self._lambdaBoxCox = None
            self._boxCox = boxCox

        self._size = self._inputSample.getSize()
        self._dim = self._inputSample.getDimension()

        #################### check attributes for censoring ####################
        # Add a flag to tell if censored data must be taken into account or not.
        if self._noiseThres is not None or self._saturationThres is not None:
            # flag to tell censoring is enabled
            self._censored = True
        else:
            self._censored = False

        # Assertions on parameters
        assert (self._size >= 3), "Not enough observations."
        assert (self._size == self._outputSample.getSize()), \
                "InputSample and outputSample must have the same size."
        assert (self._outputSample.getDimension() == 1
                ), "Dimension of outputSample must be 1."
Example #12
    def _exec(self, X):
        inputTG = X.getTimeGrid()
        inputValues = X.getValues()
        f = ot.NumericalMathFunction(ot.PiecewiseLinearEvaluationImplementation(
            [x[0] for x in inputTG.getVertices()], inputValues))
        outputValues = ot.NumericalSample(0, 1)
        for t in self.outputGrid_.getVertices():
            kernel = ot.Normal(t[0], 0.05)

            def pdf(X):
                return [kernel.computePDF(X)]
            weight = ot.NumericalMathFunction(ot.PythonFunction(1, 1, pdf))
            outputValues.add(self.algo_.integrate(
                weight * f, kernel.getRange()))
        return ot.Field(self.outputGrid_, outputValues)
Example #13
    def _PODgaussModel(self, defects, stderr, linearModel):
        X = ot.NumericalSample(defects.getSize(), [1, 0])
        X[:, 1] = defects
        X = ot.Matrix(X)

        # compute the prediction variance of the linear regression model
        def predictionVariance(x):
            Y = ot.NumericalPoint([1.0, x])
            gramX = X.computeGram()
            return stderr**2 * (1. + ot.dot(Y, gramX.solveLinearSystem(Y)))

        # function to compute the POD(defect)
        def PODmodel(x):
            t = (self._detectionBoxCox - linearModel(x[0])) / \
                np.sqrt(predictionVariance(x[0]))
            # DistFunc.pNormal(t, True) = complementary CDF of the Normal(0,1)
            return [ot.DistFunc.pNormal(t, True)]

        return PODmodel
Example #14
    def _buildChaosAlgo(self, inputSample, outputSample):
        """
        Build the functional chaos algorithm without running it.
        """
        if self._distribution is None:
            # create the default distribution: Uniform between min and max of the
            # input sample
            inputSample = ot.NumericalSample(inputSample)
            inputMin = inputSample.getMin()
            inputMin[0] = np.min(self._defectSizes)
            inputMax = inputSample.getMax()
            inputMax[0] = np.max(self._defectSizes)
            marginals = [
                ot.Uniform(inputMin[i], inputMax[i]) for i in range(self._dim)
            ]
            self._distribution = ot.ComposedDistribution(marginals)

        # put the description of the inputSample into the description of the distribution
        self._distribution.setDescription(inputSample.getDescription())

        if self._adaptiveStrategy is None:
            # Create the adaptive strategy: default is a fixed strategy with a
            # linear enumeration function (the default degree is set in __init__)
            polyCol = [0.] * self._dim
            for i in range(self._dim):
                polyCol[i] = ot.StandardDistributionPolynomialFactory(
                    self._distribution.getMarginal(i))

            enumerateFunction = ot.EnumerateFunction(self._dim)
            multivariateBasis = ot.OrthogonalProductPolynomialFactory(
                polyCol, enumerateFunction)
            # default degree is 3 (in __init__)
            indexMax = enumerateFunction.getStrataCumulatedCardinal(
                self._degree)
            self._adaptiveStrategy = ot.FixedStrategy(multivariateBasis,
                                                      indexMax)

        if self._projectionStrategy is None:
            # sparse polynomial chaos
            basis_sequence_factory = ot.LAR()
            fitting_algorithm = ot.KFold()
            approximation_algorithm = ot.LeastSquaresMetaModelSelectionFactory(
                basis_sequence_factory, fitting_algorithm)
            self._projectionStrategy = ot.LeastSquaresStrategy(
                inputSample, outputSample, approximation_algorithm)

        return ot.FunctionalChaosAlgorithm(inputSample, outputSample,
                                           self._distribution,
                                           self._adaptiveStrategy,
                                           self._projectionStrategy)
Example #15
            def function_intersection(X):
                sample = ot.NumericalSample(X.getSize(), n_event)
                for i in range(n_event):
                    sample[:, i] = event[i].getFunction()(X)

                sample = np.array(sample)
                for i in range(n_event):
                    opName = event[i].getOperator().getImplementation(
                    ).getClassName()
                    if opName in ("Less", "LessOrEqual"):
                        sample[:, i] = sample[:, i] < event[i].getThreshold()
                    if opName in ("Greater", "GreaterOrEqual"):
                        sample[:, i] = sample[:, i] >= event[i].getThreshold()
                return np.atleast_2d(sample.prod(axis=1)).T
Example #16
    def _PODbootstrapModelCl(self):

        class buildPODModel():
            def __init__(self, inputSample, outputSample, detection, noiseThres,
                            saturationThres, resDistFact, boxCox, censored):

                results = _computeLinearModel(inputSample, outputSample, detection,
                                        noiseThres, saturationThres, boxCox, censored)

                self.intercept = results['intercept']
                self.slope = results['slope']
                self.residuals = results['residuals']
                self.detectionBoxCox = results['detection']
                self.resDist = resDistFact.build(self.residuals)

            def PODmodel(self, x):
                defectThres = self.detectionBoxCox - (self.intercept +
                                                      self.slope * x)
                return self.resDist.computeComplementaryCDF(defectThres)


        data = ot.NumericalSample(self._size, 2)
        data[:, 0] = self._inputSample
        data[:, 1] = self._outputSample
        # bootstrap of the data
        bootstrapExp = ot.BootstrapExperiment(data)
        PODcoll = []
        for i in range(self._simulationSize):
            # generate a sample with replacement within data of the same size
            bootstrapData = bootstrapExp.generate()
            # compute the linear models
            model = buildPODModel(bootstrapData[:,0], bootstrapData[:,1],
                                  self._detection, self._noiseThres,
                                  self._saturationThres, self._resDistFact,
                                  self._boxCox, self._censored)

            PODcoll.append(model.PODmodel)
            if self._verbose:
                updateProgress(i, self._simulationSize, 'Computing POD (bootstrap)')

        return PODcoll
Example #17
    def getResiduals(self):
        """
        Accessor to the residuals. 

        Returns
        -------
        residuals : :py:class:`openturns.NumericalSample`
            The residuals computed from the uncensored and censored linear
            regression models. The first column corresponds to the uncensored case.
        """
        size = self._resultsUnc.residuals.getSize()
        if self._censored:
            residuals = ot.NumericalSample(size, 2)
            residuals[:, 0] = self._resultsUnc.residuals
            residuals[:, 1] = self._resultsCens.residuals
            residuals.setDescription([
                'Residuals for uncensored case', 'Residuals for censored case'
            ])
        else:
            residuals = self._resultsUnc.residuals
            residuals.setDescription(['Residuals for uncensored case'])

        return residuals
Example #18
    files_to_send=[program],
    tmpdir=tmpdir,
    user_data=data)

if test_analytical:
    dist_func.set_separate_workdir(False)

if 'win' not in sys.platform:
    # change group pid in order to avoid wrapper_launcher destroying parent process
    # when interrupting
    os.setpgid(0, 0)

model = ot.NumericalMathFunction(dist_func)

# create sample
inS = ot.NumericalSample(sample_size, 4)
if not make_error:
    F = 2
else:
    F = 666
for i in range(sample_size):
    inS[i, 0] = i + 1
    inS[i, 1] = F
    inS[i, 2] = work_time
    inS[i, 3] = nb_output

print('Compute')
if make_error:
    try:
        outS = model(inS)
    except Exception:
        # the evaluation is expected to fail when make_error is set (F = 666)
        pass
Example #19
    def run(self):
        """
        Launch the algorithm and build the POD models.

        Notes
        -----
        This method launches the iterative algorithm. Once the algorithm stops,
        it builds the POD models: Monte Carlo simulations are performed for each
        defect size with the final classifier model. Eventually, the sample is
        used to compute the mean POD and the POD at the confidence level.
        """

        # Create an initial uniform distribution if not given
        if self._distribution is None:
            inputMin = self._input.getMin()
            inputMin[0] = np.min(self._defectSizes)
            inputMax = self._input.getMax()
            inputMax[0] = np.max(self._defectSizes)
            marginals = [
                ot.Uniform(inputMin[i], inputMax[i]) for i in range(self._dim)
            ]
            self._distribution = ot.ComposedDistribution(marginals)

        # Create the design of experiments of the candidate points where the
        # criterion is computed
        if self._distribution.hasIndependentCopula():
            # with an independent copula, use a low-discrepancy experiment as the first DOE
            doeCandidate = ot.LowDiscrepancyExperiment(
                ot.SobolSequence(), self._distribution,
                self._candidateSize).generate()
        else:
            # otherwise use plain Monte Carlo sampling from the distribution
            doeCandidate = self._distribution.getSample(self._candidateSize)

        doeCandidate = np.array(doeCandidate)
        # build initial classifier model
        # build the kriging model without optimization

        if self._verbose:
            print('Building the classifier')

        n_ini = int(self._input.getSize())
        self._input = np.array(self._input)
        self._signals = np.hstack(self._signals)

        n_added_points = 0
        algo_iteration = 0

        ## SVC classification case
        if self._classifierType == "svc":
            algo_temp = list(
                map(
                    lambda C, kernel, degree, probability: svm.SVC(
                        C=C,
                        kernel=kernel,
                        degree=degree,
                        probability=probability,
                        coef0=1,
                    ), *self._ClassifierParameters))[0]

        ## Random forest classification case
        if self._classifierType == "rf":
            algo_temp = list(
                map(
                    lambda n_estimators, max_depth, min_samples_split,
                    random_state: ExtraTreesClassifier(
                        n_estimators=n_estimators,
                        max_depth=max_depth,
                        min_samples_split=min_samples_split,
                        random_state=random_state),
                    *self._ClassifierParameters))[0]

        algo_temp.fit(self._input, self._signals)

        list_classifiers = []
        f_iter = algo_temp.predict_proba
        list_classifiers.append(f_iter)
        self._classifierModel = f_iter

        plt.ion()
        # Start the improvement loop
        if self._verbose and self._nMorePoints > 0:
            print('Start the improvement loop')

        while n_added_points < self._nMorePoints:

            # evaluate the classifier probabilities on the candidate DOE
            probs = f_iter(doeCandidate)[:, 1]

            # retrieve the indices where p_min <= proba(x) < p_max
            ind_p1 = np.where(probs < self._pmax)[0]
            ind_p2 = np.where(probs >= self._pmin)[0]
            ind_p = np.intersect1d(ind_p2, ind_p1)
            ind = ind_p

            # if no index is found, widen the probability band
            if len(ind) == 0:
                ind_p1 = np.where(probs < 0.8)[0]
                ind_p2 = np.where(probs >= 0.1)[0]
                ind_p = np.intersect1d(ind_p2, ind_p1)
                ind = ind_p

            ind_rank = np.argsort(probs[ind])
            quant = [
                0,
                int(len(ind) / 4.),
                int(len(ind) / 2.),
                int(3. * len(ind) / 4.),
                len(ind) - 1
            ]

            ind_bis = ind_rank[quant]
            x_new = doeCandidate[ind[ind_bis], :]
            z_new = np.hstack(self._physicalModel(x_new))

            n_new_temp = len(self._input) + len(x_new)

            # if the point budget would be exceeded, truncate the batch of new points
            if n_new_temp > (n_ini + self._nMorePoints):
                x_new = x_new[:self._nMorePoints + n_ini - len(self._input), :]
                z_new = z_new[:self._nMorePoints + n_ini - len(self._input)]

            self._input = np.vstack((self._input, x_new))
            self._signals = np.hstack((self._signals, z_new))

            n_added_points = n_new_temp - n_ini
            algo_iteration = algo_iteration + 1

            if self._classifierType == "svc":
                algo_temp = list(
                    map(
                        lambda C, kernel, degree, probability: svm.SVC(
                            C=C,
                            kernel=kernel,
                            degree=degree,
                            probability=probability,
                            coef0=1), *self._ClassifierParameters))[0]

            if self._classifierType == "rf":
                algo_temp = list(
                    map(
                        lambda n_estimators, max_depth, min_samples_split,
                        random_state: ExtraTreesClassifier(
                            n_estimators=n_estimators,
                            max_depth=max_depth,
                            min_samples_split=min_samples_split,
                            random_state=random_state),
                        *self._ClassifierParameters))[0]

            # fit the classifier on self._input, self._signals
            algo_temp.fit(self._input, self._signals)

            self._confMat = np.zeros((2, 2))
            for classifier in list_classifiers:
                conf_temp = 1. * confusion_matrix(
                    self._signals,
                    classifier(self._input)[:, 1] >= 0.5)
                conf_temp = 1. * conf_temp / conf_temp.sum(axis=0)
                self._confMat = conf_temp + self._confMat

            self._confMat = 1. * self._confMat / len(list_classifiers)
            classif_algo_temp = algo_temp.predict_proba

            p11 = self._confMat[1, 1]
            p10 = self._confMat[1, 0]

            def agg_classifier(x_in):
                c = p11 - p10
                p1_bayes = 1. / c * (classif_algo_temp(x_in)[:, 1] - p10)
                p1_bayes = np.vstack(
                    np.min(np.array([
                        np.max(np.array([p1_bayes,
                                         np.zeros(len(p1_bayes))]),
                               axis=0),
                        np.ones(len(p1_bayes))
                    ]),
                           axis=0))
                return (np.array([1 - p1_bayes, p1_bayes]).T)[0]

            f_iter = agg_classifier
            list_classifiers.append(f_iter)
            self._classifierModel = f_iter

            if self._verbose:
                updateProgress(n_added_points - 1, self._nMorePoints,
                               'Adding points')

            if self._graph:
                self._PODPerDefect = self._computePOD(self._defectSizes,
                                                      agg_classifier)
                # create the interpolate function of the POD model
                meanPOD = self._PODPerDefect.computeMean()
                interpModel = interp1d(self._defectSizes,
                                       np.array(meanPOD),
                                       kind='linear')
                self._PODmodel = ot.PythonFunction(1, 1, interpModel)
                # The POD at confidence level is built in getPODCLModel() directly
                fig, ax = self.drawPOD(self._probabilityLevel,
                                       self._confidenceLevel)
                plt.draw()
                plt.pause(0.001)
                plt.show()
                if self._graphDirectory is not None:
                    fig.savefig(os.path.join(self._graphDirectory,
                                             'AdaptiveHitMissPOD_') +
                                str(algo_iteration),
                                bbox_inches='tight',
                                transparent=True)

        self._input = ot.NumericalSample(self._input)
        self._signals = ot.NumericalSample(np.vstack(self._signals))
        # Compute the sample predicted for each defect sizes
        self._PODPerDefect = self._computePOD(self._defectSizes,
                                              self._classifierModel)
        # compute the POD for all defect sizes
        meanPOD = self._PODPerDefect.computeMean()
        # create the interpolate function of the POD model
        interpModel = interp1d(self._defectSizes,
                               np.array(meanPOD),
                               kind='linear')
        self._PODmodel = ot.PythonFunction(1, 1, interpModel)

        # The POD at confidence level is built in getPODCLModel() directly

        # remove the interactive plotting
        plt.ioff()
Example #20
#! /usr/bin/env python

from __future__ import print_function
import openturns as ot

basisSize = 3
sampleSize = 3

X = ot.NumericalSample(sampleSize, 1)
for i in range(sampleSize):
    X[i, 0] = i + 1.0

Y = ot.NumericalSample(sampleSize, 1)

phis = []
for j in range(basisSize):
    phis.append(ot.NumericalMathFunction(['x'], ['y'], ['x^' + str(j + 1)]))
basis = ot.Basis(phis)
for i in range(basisSize):
    print(ot.NumericalMathFunctionCollection(basis)[i](X))

proxy = ot.DesignProxy(X, basis)
full = range(basisSize)

design = proxy.computeDesign(full)
print(design)

proxy.setWeight([0.5] * sampleSize)
design = proxy.computeDesign(full)
print(design)
Example #21
import openturns as ot
from matplotlib import pyplot as plt
from openturns.viewer import View
ot.RandomGenerator.SetSeed(0)
factory = ot.TriangularFactory()
ref = factory.build()
dimension = ref.getDimension()
if dimension <= 2:
    sample = ref.getSample(50)
    distribution = factory.build(sample)
    if dimension == 1:
        distribution.setDescription(['$t$'])
        pdf_graph = distribution.drawPDF(256)
        cloud = ot.Cloud(sample, ot.NumericalSample(sample.getSize(), 1))
        cloud.setColor('blue')
        cloud.setPointStyle('fcircle')
        pdf_graph.add(cloud)
        fig = plt.figure(figsize=(10, 4))
        plt.suptitle(str(distribution))
        pdf_axis = fig.add_subplot(111)
        View(pdf_graph, figure=fig, axes=[pdf_axis], add_legend=False)
    else:
        sample = ref.getSample(500)
        distribution.setDescription(['$t_0$', '$t_1$'])
        pdf_graph = distribution.drawPDF([256]*2)
        cloud = ot.Cloud(sample)
        cloud.setColor('red')
        cloud.setPointStyle('fcircle')
        pdf_graph.add(cloud)
        fig = plt.figure(figsize=(10, 4))
        plt.suptitle(str(distribution))
        pdf_axis = fig.add_subplot(111)
        View(pdf_graph, figure=fig, axes=[pdf_axis], add_legend=False)
Example #22
    ot.ResourceMap.Set("GeneralizedLinearModelAlgorithm-LinearAlgebra", "HMAT")

    # Test 1
    print("========================")
    print("Test standard using HMat")
    print("========================")
    sampleSize = 6
    spatialDimension = 1

    # Create the function to estimate
    input_description = ["x0"]
    foutput = ["f0"]
    formulas = ["x0"]
    model = ot.NumericalMathFunction(input_description, foutput, formulas)

    X = ot.NumericalSample(sampleSize, spatialDimension)
    X2 = ot.NumericalSample(sampleSize, spatialDimension)
    for i in range(sampleSize):
        X[i, 0] = 3.0 + i
        X2[i, 0] = 2.5 + i
    X[0, 0] = 1.0
    X[1, 0] = 3.0
    X2[0, 0] = 2.0
    X2[1, 0] = 4.0
    Y = model(X)
    # Data validation
    Y2 = model(X2)
    for i in range(sampleSize):
        # Add a small noise to data
        Y[i, 0] += 0.01 * ot.DistFunc.rNormal()
Example #23
def matrix_plot(X, ot_distribution=None, ot_kernel=None,
                labels=None, res=1000, grid=False):
    """
    Return a handle to a matplotlib figure containing a 'matrix plot'
    representation of the sample in X. It plots:
       - the marginal distributions on the diagonal terms,
       - the dependograms on the lower terms,
       - scatter plots on the upper terms.
    One may also add representation of the original distribution provided it
    is known, and/or a kernel smoothing (based on OpenTURNS).

    Parameters
    ----------
    X: array_like
        The sample to plot with shape (n_samples, n_features).
    ot_distribution: OpenTURNS Distribution of dimension n_features, optional.
        The underlying multivariate distribution if known.
        Default is set to None.
    ot_kernel: A list of n_features OpenTURNS KernelSmoothing's ready for
        build, optional.
        Kernel smoothing for the margins.
        Default is set to None.
    labels: A list of n_features strings, optional.
        Variates' names for labelling X & Y axes.
        Default is set to None.
    res: int, optional.
        Number of points used for plotting the marginal PDFs.
        Default is set to 1000.
    grid: bool, optional.
        Whether a grid should be added or not.
        Default is set to False (no grid).

    Returns
    -------
    ax: matplotlib.Axes instance.
        A handle to the matplotlib figure.

    Example
    -------
    >>> import pylab as pl
    >>> from phimeca.graphs import matrix_plot
    >>> import openturns as ot
    >>> probabilistic_model = ot.Normal(3)
    >>> sample = probabilistic_model.getSample(100)
    >>> ax = matrix_plot(sample,
                         ot_distribution=probabilistic_model,
                         ot_kernel=[ot.KernelSmoothing(ot.Epanechnikov())] * 3,
                         labels=[('$X_%d$' % i) for i in xrange(3)],
                         grid=True)
    >>> pl.show()
    """

    X = np.array(X)
    n_samples, n_features = X.shape
    if ot_distribution is None:
        ranks = np.array(ot.NumericalSample(X).rank())
    else:
        ranks = np.zeros_like(X)
        for i in xrange(n_features):
            ranks[:, i] = np.ravel(ot_distribution.getMarginal(i).computeCDF(
                np.atleast_2d(X[:, i]).T))
            ranks[:, i] *= n_samples

    pl.figure(figsize=(8, 8))
    n = 0
    for i in xrange(n_features):
        for j in xrange(n_features):
            n += 1
            pl.subplot(n_features, n_features, n)
            if i == j:
                n_bins = int(1 + np.log2(n_samples)) + 1
                pl.hist(X[:, j], bins=n_bins, normed=True,
                        cumulative=False, bottom=None,
                        edgecolor='grey', color='grey', alpha=.25)
                if ot_distribution is not None:
                    Xi = ot_distribution.getMarginal(i)
                    a = Xi.getRange().getLowerBound()[0]
                    b = Xi.getRange().getUpperBound()[0]
                    middle = (a + b) / 2.
                    width = b - a
                    if Xi.computePDF(a - .1 * width / 2.) == 0.:
                        a = middle - 1.1 * width / 2.
                    if Xi.computePDF(b + .1 * width / 2.) == 0.:
                        b = middle + 1.1 * width / 2.
                    support = np.linspace(a, b, res)
                    pdf = Xi.computePDF(np.atleast_2d(support).T)
                    pl.plot(support, pdf, color='b', alpha=.5, lw=1.5)
                if ot_kernel is not None:
                    Xi = ot_kernel[i].build(np.atleast_2d(X[:, i]).T)
                    if ot_distribution is None:
                        a = Xi.getRange().getLowerBound()[0]
                        b = Xi.getRange().getUpperBound()[0]
                        support = np.linspace(a, b, res)
                    pdf = Xi.computePDF(np.atleast_2d(support).T)
                    pl.plot(support, pdf, color='r', alpha=.5, lw=1.5)
                pl.xticks([pl.xlim()[0], np.mean(pl.xlim()), pl.xlim()[1]])
                pl.yticks([])
            elif i < j:
                pl.plot(X[:, j], X[:, i],
                        'o', color='grey', alpha=0.25)
                pl.xticks([pl.xlim()[0], np.mean(pl.xlim()), pl.xlim()[1]],
                          ('', ) * 3)
                pl.yticks([pl.ylim()[0], np.mean(pl.ylim()), pl.ylim()[1]],
                          ('', ) * 3)
            else:
                pl.plot(ranks[:, j].astype(float) / n_samples,
                        ranks[:, i].astype(float) / n_samples,
                        'o', color='grey', alpha=0.25)
                pl.xticks([0., 1.])
                pl.yticks([0., 1.])

            if j == 0 and labels is not None:
                pl.ylabel(labels[i])

            if i == n_features - 1 and labels is not None:
                pl.xlabel(labels[j])

            if grid:
                pl.grid()

    return pl.gcf()
Example #24
ot.RandomGenerator.SetSeed(0)

try:
    size = 100
    dim = 10
    R = ot.CorrelationMatrix(dim)
    for i in range(dim):
        for j in range(i):
            R[i, j] = (i + j + 1.0) / (2.0 * dim)

    mean = [2.0] * dim
    sigma = [3.0] * dim
    distribution = ot.Normal(mean, sigma, R)

    sample = distribution.getSample(size)
    sampleX = ot.NumericalSample(size, dim - 1)
    sampleY = ot.NumericalSample(size, 1)
    for i in range(size):
        sampleY[i] = ot.NumericalPoint(1, sample[i, 0])
        p = ot.NumericalPoint(dim - 1)
        for j in range(dim - 1):
            p[j] = sample[i, j + 1]
        sampleX[i] = p

    sampleZ = ot.NumericalSample(size, 1)
    for i in range(size):
        sampleZ[i] = ot.NumericalPoint(1, sampleY[i, 0] * sampleY[i, 0])
    print("LinearModelAdjustedRSquared=",
          ot.LinearModelTest.LinearModelAdjustedRSquared(sampleY, sampleZ))
    print("LinearModelFisher=",
          ot.LinearModelTest.LinearModelFisher(sampleY, sampleZ))
Example #25
    def run(self):
        """
        Launch the algorithm and build the POD models.

        Notes
        -----
        This method launches the iterative algorithm. First the censored data
        are filtered if needed. The Box Cox transformation is performed if it is
        enabled. Then the enrichment of the design of experiments is performed.
        Once the algorithm stops, it builds the POD models: conditional samples are
        simulated for each defect size, then the distributions of the probability
        estimator (for MC simulation) are built. Eventually, a sample of this
        distribution is used to compute the mean POD and the POD at the confidence
        level.
        """

        # Create an initial uniform distribution if not given
        if self._distribution is None:
            inputMin = self._input.getMin()
            inputMin[0] = np.min(self._defectSizes)
            inputMax = self._input.getMax()
            inputMax[0] = np.max(self._defectSizes)
            marginals = [ot.Uniform(inputMin[i], inputMax[i]) for i in range(self._dim)]
            self._distribution = ot.ComposedDistribution(marginals)

        # Create the design of experiments of the candidate points where the
        # criterion is computed
        if self._distribution.hasIndependentCopula():
            # with an independent copula, use a low-discrepancy experiment as the first DOE
            doeCandidate = ot.LowDiscrepancyExperiment(ot.SobolSequence(), 
                            self._distribution, self._candidateSize).generate()
        else:
            # otherwise use plain Monte Carlo sampling from the distribution
            doeCandidate = self._distribution.getSample(self._candidateSize)

        # build initial kriging model
        # build the kriging model without optimization
        algoKriging = self._buildKrigingAlgo(self._input, self._signals)
        if self._verbose:
            print('Building the kriging model')
            print('Optimization of the covariance model parameters...')

        if LooseVersion(ot.__version__) == '1.9':
            llDim = algoKriging.getReducedLogLikelihoodFunction().getInputDimension()
        else:
            llDim = algoKriging.getLogLikelihoodFunction().getInputDimension()
        lowerBound = [0.001] * llDim
        upperBound = [50] * llDim               
        algoKriging = self._estimKrigingTheta(algoKriging,
                                              lowerBound, upperBound,
                                              self._initialStartSize)
        algoKriging.run()

        # Get kriging results
        self._krigingResult = algoKriging.getResult()
        self._covarianceModel = self._krigingResult.getCovarianceModel()
        self._basis = self._krigingResult.getBasisCollection()
        metamodel = self._krigingResult.getMetaModel()

        self._Q2 = self._computeQ2(self._input, self._signals, self._krigingResult)
        if self._verbose:
            print('Kriging validation Q2 (>0.9): {:0.4f}\n'.format(self._Q2))

        plt.ion()
        # Start the improvement loop
        iteration = 0
        while iteration < self._nIteration:
            iteration += 1
            if self._verbose:
                print('Iteration : {}/{}'.format(iteration, self._nIteration))

            # compute the POD (ptrue = pn-1) used for bias reduction in the criterion
            # Monte Carlo for all defect sizes in a vectorized way.
            # get Sample for all parameters except the defect size
            samplePred = self._distribution.getSample(self._samplingSize)[:,1:]
            fullSamplePred = ot.NumericalSample(self._samplingSize * self._defectNumber,
                                                self._dim)
            # add the defect sizes as the first column
            for i, defect in enumerate(self._defectSizes):
                fullSamplePred[self._samplingSize*i:self._samplingSize*(i+1), :] = \
                                        self._mergeDefectInX(defect, samplePred)
            meanPredictionSample = metamodel(fullSamplePred)
            meanPredictionSample = np.reshape(meanPredictionSample, (self._samplingSize,
                                                    self._defectNumber), 'F')
            # compute the POD for all defect sizes
            currentPOD = np.mean(meanPredictionSample > self._detectionBoxCox, axis=0)

            # Compute criterion for all candidate in the candidate doe
            criterion = 1000000000
            for icand, candidate in enumerate(doeCandidate):

                # add the current candidate to the kriging doe
                inputAugmented = self._input[:]
                inputAugmented.add(candidate)
                signalsAugmented = self._signals[:]
                # predict the signal value of the candidate using the current
                # kriging model
                signalsAugmented.add(metamodel(candidate))
                # create a temporary kriging model with the new doe and without
                # updating the covariance model parameters
                if LooseVersion(ot.__version__) == '1.9':
                    algoKrigingTemp = ot.KrigingAlgorithm(inputAugmented, signalsAugmented,
                                                          self._covarianceModel,
                                                          self._basis,
                                                          True)
                else:
                    algoKrigingTemp = ot.KrigingAlgorithm(inputAugmented, signalsAugmented,
                                                          self._basis,
                                                          self._covarianceModel,
                                                          True)
                if LooseVersion(ot.__version__) > '1.6':
                    optimizer = algoKrigingTemp.getOptimizationSolver()
                    optimizer.setMaximumIterationNumber(0)
                    algoKrigingTemp.setOptimizationSolver(optimizer)

                algoKrigingTemp.run()
                krigingResultTemp = algoKrigingTemp.getResult()

                # compute the criterion for all defect size
                crit = []
                # save results, used to compute the PODModel and PODCLModel
                PODPerDefect = ot.NumericalSample(self._simulationSize *
                                         self._samplingSize, self._defectNumber)
                for idef, defect in enumerate(self._defectSizes):
                    podSample = self._computePODSamplePerDefect(defect,
                        self._detectionBoxCox, krigingResultTemp,
                        self._distribution, self._simulationSize, self._samplingSize)
                    PODPerDefect[:, idef] = podSample

                    meanPOD = podSample.computeMean()[0]
                    varPOD = podSample.computeVariance()[0]
                    crit.append(varPOD + (meanPOD - currentPOD[idef])**2)
                # compute the criterion aggregated for all defect sizes
                newCriterion = np.sqrt(np.mean(crit))

                # check if the result is better or not
                if newCriterion < criterion:
                    self._PODPerDefect = PODPerDefect
                    criterion = newCriterion
                    indexOpt = icand
                
                if self._verbose:
                    updateProgress(icand, int(doeCandidate.getSize()), 'Computing criterion')

            # get the best candidate
            candidateOpt = doeCandidate[indexOpt]
            # add new point to DOE
            self._input.add(candidateOpt)
            # add the signal computed by the physical model
            if self._boxCox:
                self._signals.add(self._boxCoxTransform(self._physicalModel(candidateOpt)))
            else:
                self._signals.add(self._physicalModel(candidateOpt))
            # remove added candidate from the doeCandidate
            doeCandidate.erase(indexOpt)
            if self._verbose:
                print('Criterion value : {:0.4f}'.format(criterion))
                print('Added point : {}'.format(candidateOpt))
                print('Update the kriging model')

            # update the kriging model without optimization
            algoKriging = self._buildKrigingAlgo(self._input, self._signals)
            if LooseVersion(ot.__version__) == '1.7':
                optimizer = algoKriging.getOptimizationSolver()
                optimizer.setMaximumIterationNumber(0)
                algoKriging.setOptimizationSolver(optimizer)
            elif LooseVersion(ot.__version__) == '1.8':
                algoKriging.setOptimizeParameters(False)

            algoKriging.run()

            self._Q2 = self._computeQ2(self._input, self._signals, algoKriging.getResult())

            # Check the quality of the kriging model if it needs optimization
            if self._Q2 < 0.95:
                if self._verbose:
                    print('Optimization of the covariance model parameters...')

                if LooseVersion(ot.__version__) == '1.9':
                    llDim = algoKriging.getReducedLogLikelihoodFunction().getInputDimension()
                else:
                    llDim = algoKriging.getLogLikelihoodFunction().getInputDimension()
                lowerBound = [0.001] * llDim
                upperBound = [50] * llDim               
                algoKriging = self._estimKrigingTheta(algoKriging,
                                                      lowerBound, upperBound,
                                                      self._initialStartSize)
                algoKriging.run()

            # Get kriging results
            self._krigingResult = algoKriging.getResult()
            self._covarianceModel = self._krigingResult.getCovarianceModel()
            self._basis = self._krigingResult.getBasisCollection()
            metamodel = self._krigingResult.getMetaModel()

            self._Q2 = self._computeQ2(self._input, self._signals, self._krigingResult)
            if self._verbose:
                print('Kriging validation Q2 (>0.9): {:0.4f}'.format(self._Q2))

            if self._graph:
                # create the interpolate function of the POD model
                meanPOD = self._PODPerDefect.computeMean()
                interpModel = interp1d(self._defectSizes, np.array(meanPOD), kind='linear')
                self._PODmodel = ot.PythonFunction(1, 1, interpModel)
                # The POD at confidence level is built in getPODCLModel() directly
                fig, ax = self.drawPOD(self._probabilityLevel, self._confidenceLevel)
                plt.draw()
                plt.pause(0.001)
                plt.show()
                if self._graphDirectory is not None:
                    fig.savefig(os.path.join(self._graphDirectory, 'AdaptiveSignalPOD_')+str(iteration),
                                bbox_inches='tight', transparent=True)

        # Compute the final POD with the last updated kriging model
        if self._verbose:
            print('\nStart computing the POD with the last updated kriging model')
        # compute the sample containing the POD values for all defect sizes
        self._PODPerDefect = ot.NumericalSample(self._simulationSize *
                                         self._samplingSize, self._defectNumber)
        for i, defect in enumerate(self._defectSizes):
            self._PODPerDefect[:, i] = self._computePODSamplePerDefect(defect,
                self._detectionBoxCox, self._krigingResult, self._distribution,
                self._simulationSize, self._samplingSize)
            if self._verbose:
                updateProgress(i, self._defectNumber, 'Computing POD per defect')

        # compute the mean POD 
        meanPOD = self._PODPerDefect.computeMean()
        # create the interpolate function of the POD model
        interpModel = interp1d(self._defectSizes, np.array(meanPOD), kind='linear')
        self._PODmodel = ot.PythonFunction(1, 1, interpModel)

        # The POD at confidence level is built in getPODCLModel() directly

        # remove the interactive plotting
        plt.ioff()
Example #26
fileName = 'myStudy.xml'

# Create a Study Object
myStudy = ot.Study()
myStudy.setStorageManager(ot.XMLStorageManager(fileName))

# Add a PersistentObject to the Study (here a NumericalPoint)
p1 = ot.NumericalPoint(3, 0.)
p1.setName("Good")
p1[0] = 10.
p1[1] = 11.
p1[2] = 12.
myStudy.add(p1)

# Add another PersistentObject to the Study (here a NumericalSample)
s1 = ot.NumericalSample(3, 2)
s1.setName("mySample")
p2 = ot.NumericalPoint(2, 0.)
p2.setName("One")
p2[0] = 100.
p2[1] = 200.
s1[0] = p2
p3 = ot.NumericalPoint(2, 0.)
p3.setName("Two")
p3[0] = 101.
p3[1] = 201.
s1[1] = p3
p4 = ot.NumericalPoint(2, 0.)
p4.setName("Three")
p4[0] = 102.
p4[1] = 202.
s1[2] = p4
myStudy.add(s1)
Example #27
#! /usr/bin/env python

from __future__ import print_function
import openturns as ot

ot.RandomGenerator.SetSeed(0)

size = 200

# input sample
inputSample = ot.Uniform(-1.0, 1.0).getSample(size)
outputSample = ot.NumericalSample(inputSample)

# Evaluation of y = ax + b (a: scale, b: translate)

# scale
scale = [3.0]
outputSample *= scale

# translate sample
translate = [3.1]
outputSample += translate

# Finally inverse transform using an arbitrary lambda
lamb = [1.8]
boxCoxFunction = ot.InverseBoxCoxEvaluationImplementation(lamb)

# transform y using BoxCox function
outputSample = boxCoxFunction(outputSample)

# Add small noise
Example #28
import openturns as ot
import otpod
import numpy as np

inputSample = ot.NumericalSample(
    [[4.59626812e+00, 7.46143339e-02, 1.02231538e+00, 8.60042277e+01],
     [4.14315790e+00, 4.20801346e-02, 1.05874908e+00, 2.65757364e+01],
     [4.76735111e+00, 3.72414824e-02, 1.05730385e+00, 5.76058433e+01],
     [4.82811977e+00, 2.49997658e-02, 1.06954641e+00, 2.54461380e+01],
     [4.48961094e+00, 3.74562922e-02, 1.04943946e+00, 6.19483646e+00],
     [5.05605334e+00, 4.87599783e-02, 1.06520409e+00, 3.39024904e+00],
     [5.69679328e+00, 7.74915877e-02, 1.04099514e+00, 6.50990466e+01],
     [5.10193991e+00, 4.35520544e-02, 1.02502536e+00, 5.51492592e+01],
     [4.04791970e+00, 2.38565932e-02, 1.01906882e+00, 2.07875350e+01],
     [4.66238956e+00, 5.49901237e-02, 1.02427200e+00, 1.45661275e+01],
     [4.86634219e+00, 6.04693570e-02, 1.08199374e+00, 1.05104730e+00],
     [4.13519347e+00, 4.45225831e-02, 1.01900124e+00, 5.10117047e+01],
     [4.92541940e+00, 7.87692335e-02, 9.91868726e-01, 8.32302238e+01],
     [4.70722074e+00, 6.51799251e-02, 1.10608515e+00, 3.30181002e+01],
     [4.29040932e+00, 1.75426222e-02, 9.75678838e-01, 2.28186756e+01],
     [4.89291400e+00, 2.34997929e-02, 1.07669835e+00, 5.38926138e+01],
     [4.44653744e+00, 7.63175936e-02, 1.06979154e+00, 5.19109415e+01],
     [3.99977452e+00, 5.80430585e-02, 1.01850716e+00, 7.61988190e+01],
     [3.95491570e+00, 1.09302814e-02, 1.03687664e+00, 6.09981789e+01],
     [5.16424368e+00, 2.69026464e-02, 1.06673711e+00, 2.88708887e+01],
     [5.30491620e+00, 4.53802273e-02, 1.06254792e+00, 3.03856837e+01],
     [4.92809155e+00, 1.20616369e-02, 1.00700410e+00, 7.02512744e+00],
     [4.68373805e+00, 6.26028935e-02, 1.05152117e+00, 4.81271603e+01],
     [5.32381954e+00, 4.33013582e-02, 9.90522007e-01, 6.56015973e+01],
     [4.35455857e+00, 1.23814619e-02, 1.01810539e+00, 1.10769534e+01]])

signals = ot.NumericalSample(
    [[37.305445], [35.466919], [43.187991], [45.305165], [40.121222],
Example #29
import os

import openturns as ot

# assumed preliminaries for this snippet: the study file written above and
# empty placeholder functions that Study.fillObject will populate
fileName = 'myStudy.xml'
f1 = ot.NumericalMathFunction()
f2 = ot.NumericalMathFunction()

st = ot.Study()
st.setStorageManager(ot.XMLStorageManager(fileName))

st.load()

st.fillObject("f1", f1)
st.fillObject("f2", f2)
print('loaded f1=', f1)
print('loaded f2=', f2)

inPt = ot.NumericalPoint(2, 2.)
outPt = f1(inPt)
print(repr(outPt))

outPt = f1((10., 11.))
print(repr(outPt))

inSample = ot.NumericalSample(10, 2)
for i in range(10):
    inSample[i] = ot.NumericalPoint((i, i))
print(repr(inSample))

outSample = f1(inSample)
print(repr(outSample))

outSample = f1(((100., 100.), (101., 101.), (102., 102.)))
print(repr(outSample))

os.remove(fileName)
Example #30
#! /usr/bin/env python

from __future__ import print_function
import openturns as ot

myFunc = ot.NumericalMathFunction(
    ['x1', 'x2'], ['f1', 'f2', 'f3'],
    ['x1*sin(x2)', 'cos(x1+x2)', '(x2+1)*exp(x1-2*x2)'])
data = ot.NumericalSample(9, myFunc.getInputDimension())
point = ot.NumericalPoint(myFunc.getInputDimension())
point[0] = 0.5
point[1] = 0.5
data[0] = point
point[0] = -1
point[1] = -1
data[1] = point
point[0] = -1
point[1] = 1
data[2] = point
point[0] = 1
point[1] = -1
data[3] = point
point[0] = 1
point[1] = 1
data[4] = point
point[0] = -0.5
point[1] = -0.5
data[5] = point
point[0] = -0.5
point[1] = 0.5
data[6] = point