def _buildKrigingAlgo(self, inputSample, outputSample):
    """
    Build the kriging algorithm, run it and return it.

    Parameters
    ----------
    inputSample : 2-d sequence of float
        Sample of the input parameters (first parameter is the defect).
    outputSample : 2-d sequence of float
        Sample of the corresponding signal values.

    Returns
    -------
    algoKriging : :class:`openturns.KrigingAlgorithm`
        The kriging algorithm, already run.
    """
    if self._basis is None:
        # create linear basis only for the defect parameter (1st parameter),
        # constant otherwise
        # NOTE: renamed from 'input' so the builtin is not shadowed
        inputVars = ['x' + str(i) for i in range(self._dim)]
        functions = []
        # constant
        functions.append(ot.NumericalMathFunction(inputVars, ['y'], ['1']))
        # linear for the first parameter only
        functions.append(ot.NumericalMathFunction(inputVars, ['y'],
                                                  [inputVars[0]]))
        self._basis = ot.Basis(functions)

    if self._covarianceModel is None:
        # anisotropic squared exponential covariance model; the
        # SquaredExponential constructor signature changed after OT 1.6
        covColl = ot.CovarianceModelCollection(self._dim)
        for i in range(self._dim):
            if LooseVersion(ot.__version__) == '1.6':
                covColl[i] = ot.SquaredExponential(1, 1.)
            elif LooseVersion(ot.__version__) > '1.6':
                covColl[i] = ot.SquaredExponential([1], [1.])
        self._covarianceModel = ot.ProductCovarianceModel(covColl)

    # the KrigingAlgorithm argument order changed in OT 1.9
    if LooseVersion(ot.__version__) == "1.9":
        algoKriging = ot.KrigingAlgorithm(inputSample, outputSample,
                                          self._covarianceModel, self._basis)
    else:
        algoKriging = ot.KrigingAlgorithm(inputSample, outputSample,
                                          self._basis, self._covarianceModel,
                                          True)
    algoKriging.run()
    return algoKriging
# np.testing.assert_almost_equal(detectionSize1[1], 4.634627604344363, decimal=5)
# def test_1_Q2_90():
#     np.testing.assert_almost_equal(POD1.getQ2(), 0.99993575194237017, decimal=4)

# Test kriging with censored data without Box Cox
np.random.seed(0)
ot.RandomGenerator.SetSeed(0)
ot.RandomGenerator.SetState(ot.RandomGeneratorState(ot.Indices([0] * 768), 0))
POD2 = otpod.KrigingPOD(inputSample, signals, detection, noiseThres,
                        saturationThres, boxCox=False)
# Compare versions with LooseVersion, not plain strings: lexicographically
# '1.10' > '1.6' is False, which would leave covarianceModel undefined and
# raise a NameError below.
if LooseVersion(ot.__version__) == '1.6':
    # pre-1.7 API: per-dimension 1-d models combined in a product model
    covColl = ot.CovarianceModelCollection(4)
    scale = [5.03148, 13.9442, 20, 20]
    for i in range(4):
        c = ot.SquaredExponential(1, scale[i])
        c.setAmplitude([15.1697])
        covColl[i] = c
    covarianceModel = ot.ProductCovarianceModel(covColl)
elif LooseVersion(ot.__version__) > '1.6':
    # newer API accepts the anisotropic scale vector directly
    covarianceModel = ot.SquaredExponential([5.03148, 13.9442, 20, 20],
                                            [15.1697])
POD2.setCovarianceModel(covarianceModel)
POD2.setInitialStartSize(0)
POD2.setSamplingSize(100)
POD2.setSimulationSize(100)
POD2.run()
detectionSize2 = POD2.computeDetectionSize(0.6, 0.95)
distX = ot.ComposedDistribution([X1, X2, X3])

# Draw a sample from the input distribution
size = 100
X = distX.getSample(size)

# The Ishigami model
modelIshigami = ot.SymbolicFunction(
    ["X1", "X2", "X3"], ["sin(X1) + 5.0 * (sin(X2))^2 + 0.1 * X3^4 * sin(X1)"])

# Evaluate the model on the sample: Y = m(X)
Y = modelIshigami(X)

# Covariance models for the HSIC indices: one squared exponential kernel per
# input marginal, each scaled with that marginal's standard deviation.
covarianceModelCollection = ot.CovarianceModelCollection()
for marginalIndex in range(3):
    marginalSample = X.getMarginal(marginalIndex)
    inputKernel = ot.SquaredExponential(1)
    inputKernel.setScale(marginalSample.computeStandardDeviation())
    covarianceModelCollection.add(inputKernel)

# Squared exponential kernel for the output sample as well
Cov2 = ot.SquaredExponential(1)
Cov2.setScale(Y.computeStandardDeviation())
covarianceModelCollection.add(Cov2)

# We choose an estimator type :
# - unbiased: HSICUStat;
distX = ot.ComposedDistribution([X1, X2, X3])

# Draw a sample from the input distribution
size = 100
X = distX.getSample(size)

# The Ishigami model
modelIshigami = ot.SymbolicFunction(
    ["X1", "X2", "X3"], ["sin(X1) + 5.0 * (sin(X2))^2 + 0.1 * X3^4 * sin(X1)"])

# Evaluate the model on the sample: Y = m(X)
Y = modelIshigami(X)

# Covariance models for the HSIC indices: one squared exponential kernel per
# input marginal, each scaled with that marginal's standard deviation.
covarianceList = ot.CovarianceModelCollection()
for marginalIndex in range(3):
    marginalSample = X.getMarginal(marginalIndex)
    inputKernel = ot.SquaredExponential(1)
    inputKernel.setScale(marginalSample.computeStandardDeviation())
    covarianceList.add(inputKernel)

# Squared exponential kernel for the output sample as well
Cov2 = ot.SquaredExponential(1)
Cov2.setScale(Y.computeStandardDeviation())
covarianceList.add(Cov2)

# We choose an estimator type :
# - unbiased: HSICUStat (not available here!!);