    def test_ProcessHDRAlgorithmPC1(self):
        # With 1 principal component
        setup_HDRenv()

        # Dataset
        fname = os.path.join(othdr.__path__[0], "..", "tests", "data",
                             "npfda-elnino.dat")
        processSample = readProcessSample(fname)

        # Customize the dimension reduction
        reduction = othdr.KarhunenLoeveDimensionReductionAlgorithm(
            processSample, 1)
        reduction.run()
        reducedComponents = reduction.getReducedComponents()

        # Distribution fit in reduced space
        ks = ot.KernelSmoothing()
        reducedDistribution = ks.build(reducedComponents)

        # Compute the high density region
        hdr = othdr.ProcessHighDensityRegionAlgorithm(processSample,
                                                      reducedComponents,
                                                      reducedDistribution,
                                                      [0.1, 0.6])
        hdr.run()

        # Plot inlier and outlier trajectories
        graph = hdr.draw(drawInliers=True, discreteMean=True)
        otv.View(graph)

        graph = hdr.draw(bounds=False)
        otv.View(graph)
        return
def draw_pressure(graph, pressure):
    print("Build and draw pressure distribution")
    t0 = time()
    # Arguments: kernel, binning flag, bin number, boundary correction
    dist_stationary_pressure = ot.KernelSmoothing(ot.Normal(), False, 1000000,
                                                  True).build(pressure)
    print("t=", time() - t0, "s")
    graph.add(dist_stationary_pressure.drawPDF())
Example #3
def define_distribution():
    """
    Define the distribution of the training example (beam).
    Return a ComposedDistribution object from openTURNS
    """
    sample_E = ot.Sample.ImportFromCSVFile("sample_E.csv")
    kernel_smoothing = ot.KernelSmoothing(ot.Normal())
    bandwidth = kernel_smoothing.computeSilvermanBandwidth(sample_E)
    E = kernel_smoothing.build(sample_E, bandwidth)
    E.setDescription(['Young modulus'])

    F = ot.LogNormal()
    F.setParameter(ot.LogNormalMuSigma()([30000, 9000, 15000]))
    F.setDescription(['Load'])

    L = ot.Uniform(250, 260)
    L.setDescription(['Length'])

    I = ot.Beta(2.5, 4, 310, 450)
    I.setDescription(['Inertia'])

    marginal_distributions = [F, E, L, I]
    SR_cor = ot.CorrelationMatrix(len(marginal_distributions))
    SR_cor[2, 3] = -0.2  # Spearman correlation between Length and Inertia
    copula = ot.NormalCopula(
        ot.NormalCopula.GetCorrelationFromSpearmanCorrelation(SR_cor))

    return ot.ComposedDistribution(marginal_distributions, copula)
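
# Usage sketch (added, not part of the original example): draw a sample from
# the beam distribution and check the rank correlation induced by the copula.
# Assumes "sample_E.csv" is available in the working directory.
beam_distribution = define_distribution()
input_sample = beam_distribution.getSample(1000)
print(input_sample.computeSpearmanCorrelation())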
Example #4
    def test_HighDensityRegionAlgorithm1D(self):
        # With 1D
        ot.RandomGenerator.SetSeed(0)
        numberOfPointsForSampling = 500
        ot.ResourceMap.SetAsBool("Distribution-MinimumVolumeLevelSetBySampling", True)
        ot.ResourceMap.Set(
            "Distribution-MinimumVolumeLevelSetSamplingSize",
            str(numberOfPointsForSampling),
        )

        # Dataset
        ot.RandomGenerator.SetSeed(1976)
        sample = ot.Normal().getSample(100)

        # Create the kernel smoothing
        ks = ot.KernelSmoothing()
        distribution = ks.build(sample)

        dp = othdr.HighDensityRegionAlgorithm(sample, distribution)
        dp.run()

        # Draw contour/inliers/outliers
        otv.View(dp.draw())

        otv.View(dp.draw(drawInliers=True))

        otv.View(dp.draw(drawOutliers=False))

        outlierIndices = dp.computeIndices()
        expected_outlierIndices = [16, 24, 33, 49, 71, 84]
        assert_equal(outlierIndices, expected_outlierIndices)
    def test_SquaredExponential(self):
        # With 2 principal components
        setup_HDRenv()
        # Test with no outlier in the band
        xmin = 0.0
        step = 0.1
        n = 100
        timeGrid = ot.RegularGrid(xmin, step, n + 1)
        amplitude = [7.0]
        scale = [1.5]
        covarianceModel = ot.SquaredExponential(scale, amplitude)
        process = ot.GaussianProcess(covarianceModel, timeGrid)
        nbTrajectories = 50
        processSample = process.getSample(nbTrajectories)
        # KL decomposition
        reduction = othdr.KarhunenLoeveDimensionReductionAlgorithm(
            processSample, 2)
        reduction.run()
        reducedComponents = reduction.getReducedComponents()

        # Distribution fit in reduced space
        ks = ot.KernelSmoothing()
        reducedDistribution = ks.build(reducedComponents)
        hdr = othdr.ProcessHighDensityRegionAlgorithm(processSample,
                                                      reducedComponents,
                                                      reducedDistribution,
                                                      [0.95, 0.5])
        hdr.run()
        graph = hdr.draw()
        otv.View(graph)
Example #6
def CBN_PC(data, result_structure_path):
    print("CBN with PC")

    skeleton_path = result_structure_path.joinpath("skeleton")
    skeleton_path.mkdir(parents=True, exist_ok=True)

    pdag_path = result_structure_path.joinpath("pdag")
    pdag_path.mkdir(parents=True, exist_ok=True)

    dag_path = result_structure_path.joinpath("dag")
    dag_path.mkdir(parents=True, exist_ok=True)

    # NOTE: ``size`` (used in the file names below) is assumed to be defined
    # in the enclosing scope, e.g. the sample size of ``data``.
    skeleton_file_name = "skeleton_" + str(size).zfill(7) + ".dot"
    skeleton_done = skeleton_path.joinpath(skeleton_file_name).exists()

    pdag_file_name = "pdag_" + str(size).zfill(7) + ".dot"
    pdag_done = pdag_path.joinpath(pdag_file_name).exists()

    dag_file_name = "dag_" + str(size).zfill(7) + ".dot"
    dag_done = dag_path.joinpath(dag_file_name).exists()

    alpha = 0.01
    conditioningSet = 4

    learner = otagr.ContinuousPC(data, conditioningSet, alpha)
    learner.setVerbosity(True)

    if not skeleton_done:
        skel = learner.learnSkeleton()
        gu.write_graph(skel, skeleton_path.joinpath(skeleton_file_name))

    if not pdag_done:
        pdag = learner.learnPDAG()
        gu.write_graph(pdag, pdag_path.joinpath(pdag_file_name))

    if not dag_done:
        dag = learner.learnDAG()
        gu.write_graph(dag, dag_path.joinpath(dag_file_name))
    else:
        dag, names = gu.read_graph(dag_path.joinpath(dag_file_name))
        dag = otagr.NamedDAG(dag, names)

    print("Learning parameters")
    factories = [
        ot.KernelSmoothing(ot.Epanechnikov()),
        ot.BernsteinCopulaFactory()
    ]
    ot.Log.SetFile("log")
    ot.Log.Show(ot.Log.INFO)
    model = otagr.ContinuousBayesianNetworkFactory(factories, dag, alpha,
                                                   conditioningSet,
                                                   False).build(data)
    ot.Log.Show(ot.Log.INFO)
    return model
def KS_learning(data):
    # Naive estimation of the coefficients distribution using
    # a multivariate kernel smoothing
    print("Build KS coefficients distribution")
    t0 = time()
    distribution = ot.KernelSmoothing().build(data)
    print("t=", time() - t0, "s")
    return distribution
def kernel_fit_distribution(sample):
    var = sample[:, 0]
    # Define the type of kernel
    kernel_distribution = ot.Epanechnikov()
    # Estimate the kernel smoothing marginal
    kernel_function = ot.KernelSmoothing(kernel_distribution)
    fitted_distribution = kernel_function.build(var)
    return fitted_distribution
def ot_kernel_copula_fit(Pared):
    kernel_distribution = ot.Epanechnikov()
    sample_Pared = ot.Sample(Pared)
    marginals = ot_kernel_Marginals(sample_Pared)
    KernelSmoothing_copula_distribution = ot.KernelSmoothing(
        kernel_distribution).build(sample_Pared).getCopula()
    bivariate_distribution = ot.ComposedDistribution(
        marginals, KernelSmoothing_copula_distribution)
    return bivariate_distribution
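
# Usage sketch (added): fit the bivariate KS model on synthetic correlated
# data. ``ot_kernel_Marginals`` is the helper referenced above; it is assumed
# to return one kernel-smoothed marginal per component.
R = ot.CorrelationMatrix(2)
R[0, 1] = 0.6
synthetic_data = ot.Normal([0.0] * 2, [1.0] * 2, R).getSample(500)
fitted_model = ot_kernel_copula_fit(synthetic_data)
print(fitted_model.getCopula())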
Example #10
    def test_DrawUnivariateSampleDistribution(self):
        sample = ot.Normal().getSample(500)

        # Distribution fit by kernel smoothing
        ks = ot.KernelSmoothing()
        distribution = ks.build(sample)

        graph = othdr.DrawUnivariateSampleDistribution(sample, distribution)
        otv.View(graph)
        return
    def _drawResiduals1Dimension(self, outputObservations, outputAtPrior,
                                 outputAtPosterior, observationsError):
        """
        Plot the distribution of the residuals and
        the distribution of the observation errors.
        Can manage only 1D samples.
        """
        ydescription = outputObservations.getDescription()
        xlabel = "%s Residuals" % (ydescription[0])
        graph = ot.Graph("Residuals analysis", xlabel,
                         "Probability density function", True, "topright")
        yDim = outputObservations.getDimension()
        yPriorDim = outputAtPrior.getDimension()
        yPosteriorDim = outputAtPosterior.getDimension()
        if (yDim == 1) and (yPriorDim == 1):
            priorResiduals = outputObservations - outputAtPrior
            kernel = ot.KernelSmoothing()
            fittedDist = kernel.build(priorResiduals)
            residualPDF = fittedDist.drawPDF()
            residualPDF.setColors([self.priorColor])
            residualPDF.setLegends(["Prior"])
            graph.add(residualPDF)
        else:
            raise TypeError('Output prior observations are not 1D.')
        if (yDim == 1) and (yPosteriorDim == 1):
            posteriorResiduals = outputObservations - outputAtPosterior
            kernel = ot.KernelSmoothing()
            fittedDist = kernel.build(posteriorResiduals)
            residualPDF = fittedDist.drawPDF()
            residualPDF.setColors([self.posteriorColor])
            residualPDF.setLegends(["Posterior"])
            graph.add(residualPDF)
        else:
            raise TypeError('Output posterior observations are not 1D.')
        # Plot the distribution of the observation errors
        if observationsError.getDimension() == 1:
            # In the other case, we just do not plot
            obserrgraph = observationsError.drawPDF()
            obserrgraph.setColors([self.observationColor])
            obserrgraph.setLegends(["Observation errors"])
            graph.add(obserrgraph)
        return graph
    def runKS(self):
        # Create kernel smoothing
        myks = ot.KernelSmoothing()
        principalComponentsSample = ot.Sample(self.principalComponents)
        sampleDistribution = myks.build(principalComponentsSample)
        # Create DensityPlot
        self.densityPlot = HighDensityRegionAlgorithm(
            principalComponentsSample, sampleDistribution)
        self.densityPlot.setContoursAlpha(self.contoursAlpha)
        self.densityPlot.setOutlierAlpha(self.outlierAlpha)
        self.densityPlot.run()
    def runKS(self):
        """Create kernel smoothing."""
        ks = ot.KernelSmoothing()
        sample_distribution = ks.build(self.principalComponents)
        # Create DensityPlot
        self.densityPlot = HighDensityRegionAlgorithm(self.principalComponents,
                                                      sample_distribution)
        self.densityPlot.setContoursAlpha(self.contoursAlpha)
        self.densityPlot.setOutlierAlpha(self.outlierAlpha)

        self.densityPlot.run()
Example #14
def test_MatrixPlot(mock_show):
    fname = os.path.join(os.path.dirname(__file__), 'data', 'gauss-mixture.csv')
    sample = ot.Sample.ImportFromCSVFile(fname)

    mp = MatrixPlot(sample)
    mp.draw()

    ks = ot.KernelSmoothing()
    distribution = ks.build(sample)
    mp = MatrixPlot(sample, distribution)
    mp.draw()

    fname = os.path.join(os.path.dirname(__file__), 'data', 'gauss-mixture-3D.csv')
    sample = ot.Sample.ImportFromCSVFile(fname)

    mp = MatrixPlot(sample)
    mp.draw()

    ks = ot.KernelSmoothing()
    distribution = ks.build(sample)
    mp = MatrixPlot(sample, distribution)
    mp.draw()
def test_HighDensityRegionAlgorithm():
    ot.RandomGenerator.SetSeed(0)
    numberOfPointsForSampling = 500
    ot.ResourceMap.Set('Distribution-MinimumVolumeLevelSetBySampling', 'true')
    ot.ResourceMap.Set('Distribution-MinimumVolumeLevelSetSamplingSize',
                       str(numberOfPointsForSampling))

    # Dataset
    fname = os.path.join(os.path.dirname(__file__), 'data',
                         'gauss-mixture.csv')
    sample = ot.Sample.ImportFromCSVFile(fname)

    # Create the kernel smoothing
    ks = ot.KernelSmoothing()
    sample_distribution = ks.build(sample)

    dp = HighDensityRegionAlgorithm(sample, sample_distribution)
    dp.run()

    # Draw contour/inliers/outliers
    graph = ot.Graph('High Density Region draw', '', '', True, 'topright')

    fig, axs, graphs = dp.drawContour()
    plt.show()

    fig, axs, graphs = dp.drawContour(drawData=True)
    plt.show()

    fig, axs, graphs = dp.drawContour(drawOutliers=False)
    plt.show()

    graph.add(dp.drawInliers())
    View(graph)
    plt.show()

    # Plot data
    graph.add(dp.drawOutliers())
    View(graph)
    plt.show()

    outlierIndices = dp.computeOutlierIndices()
    expected_outlierIndices = [
        31, 60, 84, 105, 116, 121, 150, 151, 200, 207, 215, 218, 220, 248, 282,
        284, 291, 359, 361, 378, 382, 404, 412, 418, 425, 426, 433, 449, 450,
        457, 461, 466, 474, 490, 498, 567, 587, 616, 634, 638, 652, 665, 687,
        714, 729, 730, 748, 751, 794, 876, 894, 896, 903, 925, 928, 963, 968,
        987
    ]
    assert_equal(outlierIndices, expected_outlierIndices)
    def test_ProcessHDRAlgorithmDefault(self):
        # With 2 principal components
        setup_HDRenv()

        # Dataset
        fname = os.path.join(othdr.__path__[0], "..", "tests", "data",
                             "npfda-elnino.dat")
        processSample = readProcessSample(fname)

        # KL decomposition
        reduction = othdr.KarhunenLoeveDimensionReductionAlgorithm(
            processSample, 2)
        reduction.run()
        reducedComponents = reduction.getReducedComponents()

        # Distribution fit in reduced space
        ks = ot.KernelSmoothing()
        reducedDistribution = ks.build(reducedComponents)

        # Compute HDRPlot
        hdr = othdr.ProcessHighDensityRegionAlgorithm(processSample,
                                                      reducedComponents,
                                                      reducedDistribution,
                                                      [0.8, 0.5])
        hdr.run()

        # Plot inlier and outlier trajectories
        graph = hdr.draw(drawInliers=True, discreteMean=True)
        otv.View(graph)
        #
        for discreteMean in [True, False]:
            graph = hdr.draw(discreteMean=discreteMean)
            otv.View(graph)
        # Do not plot outlier trajectories
        graph = hdr.draw(drawOutliers=False, discreteMean=True)
        otv.View(graph)
        #
        graph = hdr.draw(bounds=False)
        otv.View(graph)
        #
        outlier_indices = hdr.computeIndices()
        expected_outlier_indices = [3, 7, 22, 32, 33, 41, 47]
        assert_equal(outlier_indices, expected_outlier_indices)
        #
        inlier_indices = hdr.computeIndices(False)
        assert_equal(len(inlier_indices), 47)
        return
Example #17
    def test_HighDensityRegionAlgorithm3D(self):
        # With 3D
        ot.RandomGenerator.SetSeed(0)
        numberOfPointsForSampling = 500
        ot.ResourceMap.SetAsBool("Distribution-MinimumVolumeLevelSetBySampling", True)
        ot.ResourceMap.Set(
            "Distribution-MinimumVolumeLevelSetSamplingSize",
            str(numberOfPointsForSampling),
        )

        # Dataset
        fname = os.path.join(othdrplot.__path__[0], "data", "gauss-mixture-3D.csv")
        sample = ot.Sample.ImportFromCSVFile(fname)

        # Create the kernel smoothing
        ks = ot.KernelSmoothing()
        distribution = ks.build(sample)

        dp = othdr.HighDensityRegionAlgorithm(sample, distribution, [0.8, 0.3])
        dp.run()

        # Draw contour/inliers/outliers
        otv.View(dp.draw())
        otv.View(dp.draw(drawInliers=True))
        otv.View(dp.draw(drawOutliers=False))

        outlierIndices = dp.computeIndices()
        expected_outlierIndices = [
            75, 79, 145, 148, 189, 246, 299, 314, 340, 351, 386, 471]
        assert_equal(outlierIndices, expected_outlierIndices)
Example #18
def test_HighDensityRegionAlgorithm(mock_show):
    ot.RandomGenerator.SetSeed(0)
    numberOfPointsForSampling = 500
    ot.ResourceMap.Set('Distribution-MinimumVolumeLevelSetBySampling', 'true')
    ot.ResourceMap.Set('Distribution-MinimumVolumeLevelSetSamplingSize',
                       str(numberOfPointsForSampling))

    fname = os.path.join(os.path.dirname(__file__), 'data',
                         'gauss-mixture.csv')
    sample = ot.Sample.ImportFromCSVFile(fname)

    # Create the kernel smoothing
    myks = ot.KernelSmoothing()
    sampleDistribution = myks.build(sample)

    mydp = HighDensityRegionAlgorithm(sample, sampleDistribution)

    mydp.run()

    # Draw contour
    plotData = False
    mydp.plotContour(plotData)
    plt.show()

    # Plot data
    mydp.plotContour(True)
    plt.show()

    outlierIndices = mydp.computeOutlierIndices()
    expected_outlierIndices = [
        31, 60, 84, 105, 116, 121, 150, 151, 200, 207, 215, 218, 220, 248, 282,
        284, 291, 359, 361, 378, 382, 404, 412, 418, 425, 426, 433, 449, 450,
        457, 461, 466, 474, 490, 498, 567, 587, 616, 634, 638, 652, 665, 687,
        714, 729, 730, 748, 751, 794, 876, 894, 896, 903, 925, 928, 963, 968,
        987
    ]
    assert_equal(outlierIndices, expected_outlierIndices)
Example #19
# myXproc R^2 --> R
amplitude = [1.0]
scale = [0.2, 0.2]
myCovModel = ot.ExponentialModel(scale, amplitude)
myXproc = ot.GaussianProcess(myCovModel, myMesh)

# Transform myXproc to make its variance depend on the vertex (s,t)
# and to get a positive process
# thanks to the spatial function g
# myXtProcess R --> R
g = ot.SymbolicFunction(['x1'], ['exp(x1)'])
myDynTransform = ot.ValueFunction(g, 2)
myXtProcess = ot.CompositeProcess(myDynTransform, myXproc)

myField = myXtProcess.getRealization()
graphMarginal1 = ot.KernelSmoothing().build(myField.getValues()).drawPDF()
graphMarginal1.setTitle("")
graphMarginal1.setXTitle("X")
graphMarginal1.setLegendPosition("")

# Initiate a BoxCoxFactory
myBoxCoxFactory = ot.BoxCoxFactory()

graph = ot.Graph()
shift = [0.0]

# We estimate the lambda parameter from the field myField
# All values of the field are positive
myModelTransform = myBoxCoxFactory.build(myField, shift, graph)
graphMarginal2 = ot.KernelSmoothing().build(
    myModelTransform(myField).getValues()).drawPDF()
Example #20
import openturns as ot
from openturns.viewer import View

sample = ot.Gamma(6.0, 1.0).getSample(100)
ks = ot.KernelSmoothing()
bandwidth = [0.9]
fittedDist = ks.build(sample, bandwidth)

graph = fittedDist.drawPDF()
graph.add(ot.Gamma(6.0, 1.0).drawPDF())
graph.setColors(ot.Drawable.BuildDefaultPalette(2))
graph.setLegends(['KS dist', 'Gamma'])
View(graph)
#View(graph, figure_kw={'figsize': (8, 4)})
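
# Added sketch: instead of fixing the bandwidth by hand as above, OpenTURNS
# can compute one from the sample; both rules below are standard
# KernelSmoothing methods.
silverman_bw = ks.computeSilvermanBandwidth(sample)
plugin_bw = ks.computePluginBandwidth(sample)
print('Silverman bandwidth:', silverman_bw, 'plug-in bandwidth:', plugin_bw)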
Example #21
sampleSize = 10000
sample = sampler.getSample(sampleSize)

# %%
# Look at the acceptance rate (basic check of the sampling efficiency:
# values close to :math:`0.2` are usually recommended
# for Normal posterior distributions).

# %%
[mh.getAcceptanceRate() for mh in sampler.getMetropolisHastingsCollection()]

# %%
# Build the distribution of the posterior by kernel smoothing.

# %%
kernel = ot.KernelSmoothing()
posterior = kernel.build(sample)

# %%
# Display prior vs posterior for each parameter.

# %%

fig = pl.figure(figsize=(12, 4))

for parameter_index in range(paramDim):
    graph = posterior.getMarginal(parameter_index).drawPDF()
    priorGraph = prior.getMarginal(parameter_index).drawPDF()
    priorGraph.setColors(["blue"])
    graph.add(priorGraph)
    graph.setLegends(["Posterior", "Prior"])
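    # Added completion: the marginal graphs above are built but never
    # rendered; assuming ``from openturns.viewer import View`` is available,
    # each one can be placed on its own subplot of ``fig``.
    ax = fig.add_subplot(1, paramDim, parameter_index + 1)
    _ = View(graph, figure=fig, axes=[ax])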
Example #22
    def test_HighDensityRegionAlgorithm2D(self):
        # With 2D
        ot.RandomGenerator.SetSeed(0)
        numberOfPointsForSampling = 500
        ot.ResourceMap.SetAsBool("Distribution-MinimumVolumeLevelSetBySampling", True)
        ot.ResourceMap.Set(
            "Distribution-MinimumVolumeLevelSetSamplingSize",
            str(numberOfPointsForSampling),
        )

        # Dataset
        fname = os.path.join(othdrplot.__path__[0], "data", "gauss-mixture.csv")
        sample = ot.Sample.ImportFromCSVFile(fname)

        # Create the kernel smoothing
        ks = ot.KernelSmoothing()
        distribution = ks.build(sample)

        dp = othdr.HighDensityRegionAlgorithm(sample, distribution)
        dp.run()

        # Draw contour/inliers/outliers
        otv.View(dp.draw())

        otv.View(dp.draw(drawInliers=True))

        otv.View(dp.draw(drawOutliers=False))

        outlierIndices = dp.computeIndices()
        expected_outlierIndices = [
            31, 60, 84, 105, 116, 121, 150, 151, 200, 207, 215, 218, 220, 248,
            282, 284, 291, 359, 361, 378, 382, 404, 412, 418, 425, 426, 433,
            449, 450, 457, 461, 466, 474, 490, 498, 567, 587, 616, 634, 638,
            652, 665, 687, 714, 729, 730, 748, 751, 794, 876, 894, 896, 903,
            925, 928, 963, 968, 987,
        ]
        assert_equal(outlierIndices, expected_outlierIndices)
        # Mode
        mode_index = dp.getMode()
        assert_equal(mode_index, 424)
Example #23
]
mixture = ot.Mixture(dists)

# 3-d test
R1 = ot.CovarianceMatrix(3)
R1[2, 1] = -0.25
R2 = ot.CovarianceMatrix(3)
R2[1, 0] = 0.5
R2[2, 1] = -0.3
R2[0, 0] = 1.3
print(R2)
dists = [ot.Normal([1.0, -2.0, 3.0], R1), ot.Normal([-1.0, 2.0, -2.0], R2)]
mixture = ot.Mixture(dists, [2.0 / 3.0, 1.0 / 3.0])

sample = mixture.getSample(1000)
distribution = ot.KernelSmoothing().build(sample)
algo = ot.MinimumVolumeClassifier(distribution, 0.8)
threshold = algo.getThreshold()
print("threshold=", threshold)
assert m.fabs(threshold - 0.0012555) < 1e-3, "wrong threshold"
cls_ref = [
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 0, 1, 1, 0, 1, 1, 1, 1, 1
]
for i in range(35):
    x = sample[i]
    cls = algo.classify(x)
    pdf = mixture.computePDF(x)
    print(i, x, cls, pdf - threshold)
    assert cls == cls_ref[i], "wrong class"
Example #24
algo = ot.ExpectationSimulationAlgorithm(Y)
algo.setMaximumOuterSampling(1000)
algo.setCoefficientOfVariationCriterionType('NONE')
algo.run()
print('model evaluation calls number=', f.getEvaluationCallsNumber())
expectation_result = algo.getResult()
expectation_mean = expectation_result.getExpectationEstimate()
print('monte carlo mean=', expectation_mean, 'var=',
      expectation_result.getVarianceEstimate())

# %%
# Central dispersion analysis based on a sample
# ---------------------------------------------

# %%
# Directly compute statistical moments based on a sample of Y. Sometimes the probabilistic model is not available and the study needs to start from the data.

# %%
Y_s = Y.getSample(1000)
y_mean = Y_s.computeMean()
y_stddev = Y_s.computeStandardDeviation()
y_quantile_95p = Y_s.computeQuantilePerComponent(0.95)
print('mean=', y_mean, 'stddev=', y_stddev, 'quantile@95%', y_quantile_95p)

# %%
graph = ot.KernelSmoothing().build(Y_s).drawPDF()
graph.setTitle("Kernel smoothing approximation of the output distribution")
view = viewer.View(graph)

plt.show()
Example #25
                                     ot.TruncatedDistribution.UPPER)
graph = truncated.drawPDF()
view = viewer.View(graph)

# %%
# truncated on both bounds
truncated = ot.TruncatedDistribution(distribution, 0.2, 1.5)
graph = truncated.drawPDF()
view = viewer.View(graph)

# %%
# Define a multivariate distribution
dimension = 2
size = 70
sample = ot.Normal(dimension).getSample(size)
ks = ot.KernelSmoothing().build(sample)

# %%
# Truncate it between (-2;2)^n
bounds = ot.Interval([-2.0] * dimension, [2.0] * dimension)
truncatedKS = ot.Distribution(ot.TruncatedDistribution(ks, bounds))

# %%
# Draw its PDF
graph = truncatedKS.drawPDF([-2.5] * dimension, [2.5] * dimension,
                            [256] * dimension)
graph.add(ot.Cloud(truncatedKS.getSample(200)))
graph.setColors(["blue", "red"])
view = viewer.View(graph)
plt.show()
Example #26
import openturns as ot
from math import *

ot.TESTPREAMBLE()

# Instantiate one distribution object
dim = 2
meanPoint = [0.5, -0.5]
sigma = [2.0, 3.0]
R = ot.CorrelationMatrix(dim)
for i in range(1, dim):
    R[i, i - 1] = 0.5

distribution = ot.Normal(meanPoint, sigma, R)
discretization = 100
kernel = ot.KernelSmoothing()
sample = distribution.getSample(discretization)
kernels = ot.DistributionCollection(0)
kernels.add(ot.Normal())
kernels.add(ot.Epanechnikov())
kernels.add(ot.Uniform())
kernels.add(ot.Triangular())
kernels.add(ot.Logistic())
kernels.add(ot.Beta(2.0, 2.0, -1.0, 1.0))
kernels.add(ot.Beta(3.0, 3.0, -1.0, 1.0))
meanExact = distribution.getMean()
covarianceExact = distribution.getCovariance()
for i in range(kernels.getSize()):
    kernel = kernels[i]
    print("kernel=", kernel.getName())
    smoother = ot.KernelSmoothing(kernel)
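    # Added completion (the original loop body is truncated here): build the
    # smoothed distribution and compare its moments with the exact ones
    # computed before the loop.
    smoothed = smoother.build(sample)
    print("mean=", smoothed.getMean(), "exact=", meanExact)
    print("covariance=", smoothed.getCovariance(), "exact=", covarianceExact)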
Example #27
                               (res, res)),
                    rstride=1,
                    cstride=1,
                    cmap=pl.matplotlib.cm.jet)
    ax.set_xlabel('$x_1$')
    ax.set_ylabel('$x_2$')
    ax.set_zlabel('$\\varphi_i(\\mathbf{x})$')
    pl.savefig('2D_identification_eigensolution_%d.png' % i)
    pl.close()

# Calculation of the KL coefficients by functional projection using
# Gauss-Legendre quadrature
xi = estimated_random_field.compute_coefficients(sample_paths)

# Statistical inference of the KL coefficients' distribution
kernel_smoothing = ot.KernelSmoothing(ot.Normal())
xi_marginal_distributions = ot.DistributionCollection([
    kernel_smoothing.build(xi[:, i][:, np.newaxis])
    for i in range(truncation_order)
])
try:
    xi_copula = ot.NormalCopulaFactory().build(xi)
except RuntimeError:
    print('ERR: The normal copula correlation matrix built from the given\n' +
          'Spearman correlation matrix is not definite positive.\n' +
          'This would require expert judgement on the correlation\n' +
          'coefficients significance (using e.g. Spearman test).\n' +
          'Assuming an independent copula in the sequel...')
    xi_copula = ot.IndependentCopula(truncation_order)
xi_estimated_distribution = ot.ComposedDistribution(xi_marginal_distributions,
                                                    xi_copula)
Example #28
    def run(self):
        """
        Run all active methods.
        """

        # run the univariate linear model analysis with gaussian residuals hypothesis
        if self._verbose:
            print("\nStart univariate linear model analysis...")
        self._analysis = UnivariateLinearModelAnalysis(self._inputSample[:, 0],
                                                 self._signals, self._noiseThres,
                                                 self._saturationThres,
                                                 ot.NormalFactory(), self._boxCox)

        # run the univariate linear model with gaussian residuals
        if self._activeMethods['LinearGauss']:
            if self._verbose:
                print("\nStart univariate linear model POD with Gaussian residuals...")
            self._PODgauss = UnivariateLinearModelPOD(self._inputSample[:, 0], self._signals,
                                                self._detection, self._noiseThres,
                                                self._saturationThres,
                                                ot.NormalFactory(), self._boxCox)
            self._PODgauss.setVerbose(self._verbose)
            self._PODgauss.setSimulationSize(self._simulationSize)
            self._PODgauss.run()


        # run the univariate linear model with no hypothesis on the residuals
        if self._activeMethods['LinearBinomial']:
            if self._verbose:
                print("\nStart univariate linear model POD with no hypothesis on the residuals...")
            self._PODbin = UnivariateLinearModelPOD(self._inputSample[:, 0], self._signals,
                                                self._detection, self._noiseThres,
                                                self._saturationThres,
                                                None, self._boxCox)
            self._PODbin.setVerbose(self._verbose)
            self._PODbin.run()

        # run the univariate linear model with kernel smoothing on the residuals
        if self._activeMethods['LinearKernelSmoothing']:
            if self._verbose:
                print("\nStart univariate linear model POD with kernel smoothing on the residuals...")
            self._PODks = UnivariateLinearModelPOD(self._inputSample[:, 0], self._signals,
                                                self._detection, self._noiseThres,
                                                self._saturationThres,
                                                ot.KernelSmoothing(), self._boxCox)
            self._PODks.setVerbose(self._verbose)
            self._PODks.setSimulationSize(self._simulationSize)
            self._PODks.run()

        # run the quantile regression 
        if self._activeMethods['QuantileRegression']:
            if self._verbose:
                print("\nStart quantile regression POD...")
            self._PODqr = QuantileRegressionPOD(self._inputSample[:, 0], self._signals,
                                                self._detection, self._noiseThres,
                                                self._saturationThres, self._boxCox)
            self._PODqr.setVerbose(self._verbose)
            self._PODqr.setSimulationSize(self._simulationSize)
            self._PODqr.run()


        # run the polynomial chaos
        if self._activeMethods['PolynomialChaos']:
            if self._verbose:
                print("\nStart polynomial chaos POD...")
            self._PODchaos = PolynomialChaosPOD(self._inputSample, self._signals,
                                       self._detection, self._noiseThres,
                                       self._saturationThres, self._boxCox)
            self._PODchaos.setVerbose(self._verbose)
            self._PODchaos.setSimulationSize(self._simulationSize)
            self._PODchaos.setSamplingSize(self._samplingSize)
            self._PODchaos.run()

        # run the kriging
        if self._dim > 1 and self._activeMethods['Kriging']:
            if self._verbose:
                print("\nStart kriging POD...")
            self._PODkriging = KrigingPOD(self._inputSample, self._signals,
                               self._detection, self._noiseThres,
                               self._saturationThres, self._boxCox)
            self._PODkriging.setVerbose(self._verbose)
            self._PODkriging.setSimulationSize(self._simulationSize)
            self._PODkriging.setSamplingSize(self._samplingSize)
            self._PODkriging.run()
Example #29
def get_KS_marginals(data):
    print("Marginal KS")
    dimension = data.getDimension()
    KS = ot.KernelSmoothing(ot.Epanechnikov(), False, 0, False)
    marginals = [KS.build(data.getMarginal(i)) for i in range(dimension)]
    return marginals
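
# Usage sketch (added): combine the KS marginals with a fitted copula to
# obtain a full multivariate model; the dataset below is a hypothetical
# stand-in.
data = ot.Normal(2).getSample(300)
marginals = get_KS_marginals(data)
copula = ot.BernsteinCopulaFactory().build(data)
model = ot.ComposedDistribution(marginals, copula)
print(model.getDimension())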
Example #30
def fullKS(data):
    # First model: full KS
    print("Full KS")
    model = ot.KernelSmoothing(ot.Epanechnikov(), False, 0, False).build(data)
    return model
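
# Usage sketch (added): unlike the marginal-by-marginal fit above, the full
# multivariate KS also captures the dependence structure; the data below is
# a hypothetical stand-in.
sample = ot.Normal(3).getSample(200)
full_model = fullKS(sample)
print(full_model.getDimension())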