# (method excerpt from a POD class; assumes `import openturns as ot` and
#  `import numpy as np` at module level)
def _PODgaussModelCl(self, defects, intercept, slope, stderr, detection):
        class buildPODModel():
            def __init__(self, intercept, slope, sigmaEpsilon, detection):

                self.intercept = intercept
                self.slope = slope
                self.sigmaEpsilon = sigmaEpsilon
                self.detection = detection

            def PODmodel(self, x):
                t = (self.detection -
                     (self.intercept + self.slope * x)) / self.sigmaEpsilon
                return ot.DistFunc.pNormal(t, True)

        N = defects.getSize()
        X = ot.Sample(N, [1, 0])
        X[:, 1] = defects
        X = ot.Matrix(X)
        covMatrix = X.computeGram(True).solveLinearSystem(ot.IdentityMatrix(2))
        sampleNormal = ot.Normal([0, 0],
                                 ot.CovarianceMatrix(
                                     covMatrix.getImplementation())).getSample(
                                         self._simulationSize)
        sampleSigmaEpsilon = (ot.Chi(N - 2).inverse() * np.sqrt(N - 2) *
                              stderr).getSample(self._simulationSize)

        PODcoll = []
        for i in range(self._simulationSize):
            sigmaEpsilon = sampleSigmaEpsilon[i][0]
            interceptSimu = sampleNormal[i][0] * sigmaEpsilon + intercept
            slopeSimu = sampleNormal[i][1] * sigmaEpsilon + slope
            PODcoll.append(
                buildPODModel(interceptSimu, slopeSimu, sigmaEpsilon,
                              detection).PODmodel)
        return PODcoll
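
# Hedged usage sketch (not part of the original excerpt): the returned
# collection can be evaluated pointwise to form an empirical lower confidence
# bound on the POD. The helper name `POD_lower_bound` and the scalar-call
# convention `PODmodel(a)` are assumptions.
import openturns as ot

def POD_lower_bound(PODcoll, a, confidence=0.95):
    # evaluate every simulated POD model at defect size `a`
    values = ot.Sample([[PODmodel(a)] for PODmodel in PODcoll])
    # the empirical (1 - confidence) quantile gives the pointwise lower bound
    return values.computeQuantile(1.0 - confidence)[0]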
Example #2
# %%
from __future__ import print_function
import openturns as ot
import openturns.viewer as viewer
from matplotlib import pylab as plt
import math as m
import time
ot.Log.Show(ot.Log.NONE)

# %%
# Define an event to compute a probability
myFunction = ot.SymbolicFunction(['E', 'F', 'L', 'I'], ['-F*L^3/(3.0*E*I)'])
dim = myFunction.getInputDimension()
mean = [50.0, 1.0, 10.0, 5.0]
sigma = [1.0] * dim
R = ot.IdentityMatrix(dim)
myDistribution = ot.Normal(mean, sigma, R)
vect = ot.RandomVector(myDistribution)
output = ot.CompositeRandomVector(myFunction, vect)
myEvent = ot.ThresholdEvent(output, ot.Less(), -3.0)

# %%
# **Stop a FORM algorithm using a calls number limit**
#
# A FORM algorithm's termination can be controlled by the maximum number of iterations
# of its underlying optimization solver, but not directly by a maximum number of evaluations.

# %%
# Create the optimization algorithm
myCobyla = ot.Cobyla()
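
# %%
# The listing is truncated here; a hedged sketch of how this example likely
# continues. `setMaximumEvaluationNumber` is assumed to be available on the
# solver in the OpenTURNS version at hand.
myCobyla.setMaximumEvaluationNumber(400)  # cap the number of limit-state calls
myCobyla.setMaximumIterationNumber(100)
algo = ot.FORM(myCobyla, myEvent, mean)
algo.run()
print('Pf (FORM) =', algo.getResult().getEventProbability())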
Example #3

import openturns as ot
from matplotlib import pyplot as plt
from openturns.viewer import View

f = ot.SymbolicFunction(['x'], ['17-exp(0.1*(x-1.0))'])
graph = f.draw(0.0, 12.0)

dist = ot.Normal([5.0, 15.0], [1.0, 0.25], ot.IdentityMatrix(2))
N = 1000
sample = dist.getSample(N)
sample1 = ot.Sample(0, 2)
sample2 = ot.Sample(0, 2)
for X in sample:
    x, y = X
    if f([x])[0] > y:
        sample1.add(X)
    else:
        sample2.add(X)

cloud = ot.Cloud(sample1)
cloud.setColor('green')
cloud.setPointStyle('square')
graph.add(cloud)

cloud = ot.Cloud(sample2)
cloud.setColor('red')
cloud.setPointStyle('square')
graph.add(cloud)

graph.setTitle('Monte Carlo simulation (Pf=0.048, N=1000)')
graph.setLegends(['domain Df', 'simulations'])
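
# A short, assumed closing step: display the graph with the viewer imported above.
View(graph).show()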
Example #4

import sys

ot.TESTPREAMBLE()
ot.PlatformInfo.SetNumericalPrecision(3)

m = 10
x = [[0.5 + i] for i in range(m)]

inVars = ["a", "b", "c", "x"]
formulas = ["a + b * exp(c * x)", "(a * x^2 + b) / (c + x^2)"]
model = ot.SymbolicFunction(inVars, formulas)
p_ref = [2.8, 1.2, 0.5]
params = [0, 1, 2]
modelX = ot.ParametricFunction(model, params, p_ref)
y = modelX(x)
y += ot.Normal([0.0] * 2, [0.05] * 2, ot.IdentityMatrix(2)).getSample(m)
candidate = [1.0] * 3
priorCovariance = ot.CovarianceMatrix(3)
for i in range(3):
    priorCovariance[i, i] = 3.0 + (1.0 + i) * (1.0 + i)
    for j in range(i):
        priorCovariance[i, j] = 1.0 / (1.0 + i + j)
errorCovariance = ot.CovarianceMatrix(2)
for i in range(2):
    errorCovariance[i, i] = 2.0 + (1.0 + i) * (1.0 + i)
    for j in range(i):
        errorCovariance[i, j] = 1.0 / (1.0 + i + j)
globalErrorCovariance = ot.CovarianceMatrix(2 * m)
for i in range(2 * m):
    globalErrorCovariance[i, i] = 2.0 + (1.0 + i) * (1.0 + i)
    for j in range(i):
        globalErrorCovariance[i, j] = 1.0 / (1.0 + i + j)
Example #5
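# (excerpt: only the tail of a helper `drawIFS(f_i, skip, iterations,
#  batch_size, name, color)` is shown below; its definition is truncated in
#  this listing. The snippet assumes `import openturns as ot`,
#  `import openturns.viewer as viewer` and `import math as m`.)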
    graph.add(cloud)
    return graph, s


# %%
# **Definition of some IFS**

# %%
# Spiral
rho1 = 0.9
theta1 = 137.5 * m.pi / 180.0
f1 = [[0.0]*2, ot.SquareMatrix(2, [rho1 * m.cos(theta1), -rho1 * m.sin(theta1), \
                                   rho1 * m.sin(theta1),  rho1 * m.cos(theta1)])]

rho2 = 0.15
f2 = [[1.0, 0.0], rho2 * ot.IdentityMatrix(2)]
f_i = [f1, f2]
graph, s = drawIFS(f_i, skip=100, iterations=100000, batch_size=1, name="Spiral", color="blue")
print("Box counting dimension=%.3f" % s)
view = viewer.View(graph)

# %%
# Fern
f1 = [[0.0]*2, ot.SquareMatrix(2, [0.0, 0.0, 0.0, 0.16])]
f2 = [[0.0, 1.6], ot.SquareMatrix(2, [0.85, 0.04, -0.04, 0.85])]
f3 = [[0.0, 1.6], ot.SquareMatrix(2, [0.2, -0.26, 0.23, 0.22])]
f4 = [[0.0, 0.44], ot.SquareMatrix(2, [-0.15, 0.28, 0.26, 0.24])]
f_i = [f1, f2, f3, f4]
graph, s = drawIFS(f_i, skip=100, iterations=100000, batch_size=1, name="Fern", color="green")
print("Box counting dimension=%.3f" % s)
view = viewer.View(graph)
Example #6

from __future__ import print_function
import openturns as ot
from matplotlib import pyplot as plt
from openturns.viewer import View
import math as m

Id = ot.IdentityMatrix(2)
atoms = [
    ot.Normal([1.0, 2.0], [0.5, 0.8], Id),
    ot.Normal([1.0, -2.0], [0.9, 0.8], Id),
    ot.Normal([-1.0, 0.0], [0.5, 0.6], Id)
]
weights = [0.3, 0.3, 0.4]
mixture = ot.Mixture(atoms, weights)
data = mixture.getSample(1000)
classifier = ot.MixtureClassifier(mixture)
graph = mixture.drawPDF(data.getMin(), data.getMax())
graph.setLegendPosition("")
graph.setTitle("MixtureClassifier example")
classes = classifier.classify(data)
palette = ot.Drawable.BuildDefaultPalette(len(atoms))
symbols = ot.Drawable.GetValidPointStyles()
for i in range(classes.getSize()):
    index = classes[i]
    graph.add(
        ot.Cloud([data[i]], palette[index % len(palette)],
                 symbols[index % len(symbols)]))

fig = plt.figure(figsize=(4, 4))
axis = fig.add_subplot(111)
axis.set_xlim(auto=True)
Example #7
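# (excerpt: the statements below run inside a loop over several
#  DistributionParameters objects; the loop head `for distParam in ...:` is
#  not shown)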
    print('Distribution Parameters ', distParam)

    non_native = distParam.getValues()
    desc = distParam.getDescription()
    print('non-native=', non_native, desc)
    native = distParam.evaluate()
    print('native=', native)
    non_native = distParam.inverse(native)
    print('non-native=', non_native)
    print('built dist=', distParam.getDistribution())

    # derivative of the native parameters with regard to the parameters of
    # the distribution
    print(distParam.gradient())

    # by the finite difference technique
    eps = 1e-5
    dim = len(non_native)
    nativeParamGrad = ot.SquareMatrix(ot.IdentityMatrix(dim))

    for i in range(dim):
        for j in range(dim):
            xp = list(non_native)
            xp[i] += eps
            xm = list(non_native)
            xm[i] -= eps
            nativeParamGrad[i, j] = 0.5 * \
                (distParam(xp)[j] - distParam(xm)[j]) / eps

    print(nativeParamGrad)
Example #8

# %%
# First, import the python modules:

# %%
import openturns as ot
from openturns.viewer import View

# %%
# Create the probabilistic model :math:`Y = g(X)`
# -----------------------------------------------

# %%
# Create the input random vector :math:`X`:

# %%
X = ot.RandomVector(ot.Normal([0.25] * 2, [1] * 2, ot.IdentityMatrix(2)))

# %%
# Create the function :math:`g`:

# %%
g = ot.SymbolicFunction(['x1', 'x2'], ['20-(x1-x2)^2-8*(x1+x2-4)^3'])
print('function g: ', g)

# %%
# In order to be able to get the subset samples used in the algorithm, it is necessary to transform the *SymbolicFunction* into a *MemoizeFunction*:

# %%
g = ot.MemoizeFunction(g)

# %%
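# The excerpt stops here; a hedged sketch of the steps this setup implies:
# define a failure event, run subset sampling, and recover the evaluation
# history that motivated the MemoizeFunction wrapping. The threshold value
# 0.0 is an assumption.
Y = ot.CompositeRandomVector(g, X)
event = ot.ThresholdEvent(Y, ot.Less(), 0.0)
algo = ot.SubsetSampling(event)
algo.run()
print('Pf = ', algo.getResult().getProbabilityEstimate())
inputHistory = g.getInputHistory()  # input samples evaluated by the algorithm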
Example #9
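# (excerpt: `g`, `trueParameter`, `inputDimension` and `outputDimension` are
#  defined earlier in the original script and are not shown)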
Theta1 = ot.Dirac(trueParameter[0])
Theta2 = ot.Dirac(trueParameter[1])
Theta3 = ot.Dirac(trueParameter[2])

inputRandomVector = ot.ComposedDistribution([Theta1, Theta2, Theta3])

candidate = ot.Point([8.0, 9.0, -6.0])

calibratedIndices = [0, 1, 2]
model = ot.ParametricFunction(g, calibratedIndices, candidate)

outputObservationNoiseSigma = 0.01
meanNoise = ot.Point(outputDimension)
covarianceNoise = ot.Point(outputDimension, outputObservationNoiseSigma)
R = ot.IdentityMatrix(outputDimension)
observationOutputNoise = ot.Normal(meanNoise, covarianceNoise, R)

size = 100
inputObservations = ot.Sample(size, 0)

# Generate exact outputs
inputSample = inputRandomVector.getSample(size)
outputStress = g(inputSample)
# Add noise
sampleNoiseH = observationOutputNoise.getSample(size)
outputObservations = outputStress + sampleNoiseH

priorCovariance = ot.CovarianceMatrix(inputDimension)
for i in range(inputDimension):
    priorCovariance[i, i] = 3.0 + (1.0 + i) * (1.0 + i)
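
# Hedged continuation sketch (not in the original excerpt): the assembled
# pieces typically feed a Gaussian non-linear calibration. `errorCovariance`
# is assumed here as a default identity matrix of the output dimension.
errorCovariance = ot.CovarianceMatrix(outputDimension)
algo = ot.GaussianNonLinearCalibration(
    model, inputObservations, outputObservations,
    candidate, priorCovariance, errorCovariance)
algo.run()
print("MAP = ", algo.getResult().getParameterMAP())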
Example #10
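# (excerpt: `import otrobopt`, the parametric function `f`, the point `x` and
#  the first `measures` list are defined earlier in the original script)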
for measure in measures:
    print(measure, '(continuous)', measure(x))
    N = 10000
    experiment = ot.LHSExperiment(N)
    factory = otrobopt.MeasureFactory(experiment)
    discretizedMeasure = factory.build(measure)
    print(discretizedMeasure, '(discretized LHS)', discretizedMeasure(x))
    N = 4
    experiment = ot.GaussProductExperiment([N])
    factory = otrobopt.MeasureFactory(experiment)
    discretizedMeasure = factory.build(measure)
    print(discretizedMeasure, '(discretized Gauss)', discretizedMeasure(x))

# Second test: theta of dimension 2
thetaDist = ot.Normal([2.0] * 2, [0.1] * 2, ot.IdentityMatrix(2))
f_base = ot.SymbolicFunction(['x', 'theta0', 'theta1'], ['x*theta0+theta1'])
f = ot.ParametricFunction(f_base, [1, 2], thetaDist.getMean())

x = [1.0]

measures = [otrobopt.MeanMeasure(f, thetaDist),
            otrobopt.VarianceMeasure(f, thetaDist),
            otrobopt.WorstCaseMeasure(
                f, ot.ComposedDistribution([ot.Uniform(-1.0, 4.0)] * 2)),
            otrobopt.WorstCaseMeasure(
                f, ot.ComposedDistribution(
                    [ot.Uniform(-1.0, 4.0)] * 2), False),
            otrobopt.JointChanceMeasure(
                f, thetaDist, ot.GreaterOrEqual(), 0.95),
            otrobopt.IndividualChanceMeasure(
                f, thetaDist, ot.GreaterOrEqual(), [0.95])]  # arguments assumed; the listing is cut here
Example #11

#! /usr/bin/env python

import openturns as ot

ot.TESTPREAMBLE()

# Instantiate one distribution object
dimension = 3
meanPoint = ot.Point([0.5, -0.5, 1])
sigma = [2, 3, 1]

sample = ot.Sample(0, dimension)
# Create a collection of distribution
aCollection = ot.DistributionCollection()

aCollection.add(ot.Normal(meanPoint, sigma, ot.IdentityMatrix(dimension)))
sample.add(meanPoint)
meanPoint += [1.0] * dimension
aCollection.add(ot.Normal(meanPoint, sigma, ot.IdentityMatrix(dimension)))
sample.add(meanPoint)
meanPoint += [1.0] * dimension
aCollection.add(ot.Normal(meanPoint, sigma, ot.IdentityMatrix(dimension)))
sample.add(meanPoint)

# Instantiate one distribution object
distribution = ot.KernelMixture(ot.Normal(), sigma, sample)
print("Distribution ", repr(distribution))
print("Distribution ", distribution)
distributionRef = ot.Mixture(aCollection)

# Is this distribution elliptical ?
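# Hedged continuation sketch: answer the question above via the Distribution
# API and compare with the reference mixture.
print("Elliptical = ", distribution.isElliptical())
print("mean = ", distribution.getMean(), " ref = ", distributionRef.getMean())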
Example #12
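    # (method excerpt from a polynomial-chaos POD class, written against the
    #  pre-1.9 OpenTURNS API (NumericalMathFunction, NumericalSample); assumes
    #  `import numpy as np`, `import openturns as ot` and
    #  `from scipy.interpolate import interp1d` at module level)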
    def run(self):
        """
        Build the POD models.

        Notes
        -----
        This method build the polynomial chaos model. First the censored data
        are filtered if needed. The Box Cox transformation is performed if it is
        enabled. Then it builds the POD models, the Monte Carlo simulation is
        performed for each given defect sizes. The confidence interval is 
        computed by simulating new coefficients of the polynomial chaos, then
        Monte Carlo simulations are performed.
        """

        # run the chaos algorithm and get result if not given
        if not self._userChaos:
            if self._verbose:
                print('Start build polynomial chaos model...')
            self._algoChaos = self._buildChaosAlgo(self._input, self._signals)
            self._algoChaos.run()
            if self._verbose:
                print('Polynomial chaos model completed')
            self._chaosResult = self._algoChaos.getResult()

        # get the metamodel
        self._chaosPred = self._chaosResult.getMetaModel()
        # get the basis, coef and transformation, needed for the confidence interval
        self._chaosCoefs = self._chaosResult.getCoefficients()
        self._reducedBasis = self._chaosResult.getReducedBasis()
        self._transformation = self._chaosResult.getTransformation()
        self._basisFunction = ot.NumericalMathFunction(
            ot.NumericalMathFunction(self._reducedBasis), self._transformation)

        # compute the residuals and stderr
        inputSize = self._input.getSize()
        basisSize = self._reducedBasis.getSize()
        self._residuals = self._signals - self._chaosPred(
            self._input)  # residuals
        self._stderr = np.sqrt(
            np.sum(np.array(self._residuals)**2) / (inputSize - basisSize - 1))

        # Check the quality of the chaos model
        R2 = self.getR2()
        Q2 = self.getQ2()
        if self._verbose:
            print('Polynomial chaos validation R2 (>0.8) : {:0.4f}'.format(R2))
            print('Polynomial chaos validation Q2 (>0.8) : {:0.4f}'.format(Q2))

        # Compute the POD values for each defect size
        self.POD = self._computePOD(self._defectSizes, self._chaosCoefs)
        # create the interpolate function
        interpModel = interp1d(self._defectSizes, self.POD, kind='linear')
        self._PODmodel = ot.PythonFunction(1, 1, interpModel)

        ####################### confidence interval ############################
        dof = inputSize - basisSize - 1
        varEpsilon = (ot.ChiSquare(dof).inverse() * dof *
                      self._stderr**2).getRealization()[0]
        gramBasis = ot.Matrix(self._basisFunction(self._input)).computeGram()
        covMatrix = gramBasis.solveLinearSystem(
            ot.IdentityMatrix(basisSize)) * varEpsilon
        self._coefsDist = ot.Normal(
            np.hstack(self._chaosCoefs),
            ot.CovarianceMatrix(covMatrix.getImplementation()))
        coefsRandom = self._coefsDist.getSample(self._simulationSize)

        self._PODPerDefect = ot.NumericalSample(self._simulationSize,
                                                self._defectNumber)
        for i, coefs in enumerate(coefsRandom):
            self._PODPerDefect[i, :] = self._computePOD(
                self._defectSizes, coefs)
            if self._verbose:
                updateProgress(i, self._simulationSize,
                               'Computing POD per defect')
Example #13
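# (excerpt: `import openturns as ot`, `import otrobopt` and the functions
#  `calJ`, `calG` and `noise` are defined earlier in the original script)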
# This is capital J: J(x,xi) = calJ(x+xi), the parametric objective function
JFull = ot.ComposedFunction(calJ, noise)
J = ot.ParametricFunction(JFull, [2, 3], [0.0] * 2)

# This is g, the parametric constraints
gFull = ot.ComposedFunction(calG, noise)
g = ot.ParametricFunction(gFull, [2, 3], [0.0] * 2)

bounds = ot.Interval([-3.0] * 2, [3.0] * 2)
solver = ot.NLopt('LD_SLSQP')
solver.setMaximumIterationNumber(100)

for sigma_xi in [0.1, 0.2, 0.3, 0.4, 0.5]:

    thetaDist = ot.Normal([0.0] * 2, [sigma_xi] * 2, ot.IdentityMatrix(2))
    robustnessMeasure = otrobopt.MeanMeasure(J, thetaDist)
    reliabilityMeasure = otrobopt.JointChanceMeasure(g, thetaDist, ot.Less(),
                                                     0.9)
    problem = otrobopt.RobustOptimizationProblem(robustnessMeasure,
                                                 reliabilityMeasure)
    problem.setBounds(bounds)

    algo = otrobopt.SequentialMonteCarloRobustAlgorithm(problem, solver)
    algo.setMaximumIterationNumber(11)
    algo.setMaximumAbsoluteError(1e-6)
    algo.setInitialSamplingSize(2)  # size of the initial xi discretization
    algo.setSamplingSizeIncrement(
        ot.PythonFunction(1, 1, lambda x: [1.0 * x[0]]))
    algo.setInitialSearch(
        1000)  # number of multi-start tries, uniform law using bounds
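
    # Hedged continuation sketch (not in the original excerpt): run the robust
    # algorithm and report the optimum for this noise level.
    algo.run()
    result = algo.getResult()
    print(sigma_xi, result.getOptimalPoint(), result.getOptimalValue())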
Example #14
import numpy as np
import openturns as ot
import pandas as pd


def run_ImportanceSampling(
    event,
    pstar,
    sd=1.0,
    coefVar=0.05,
    outerSampling=1000,
    blockSize=10,
    seed=1234,
    verbose=False,
    failure_domain=None,
):
    """
    Run an importance sampling simulation.

    Parameters
    ----------
    event : openturns.Event
        The failure event.
    pstar : list of points
        Design points in the standard space on which the instrumental
        distribution is centered.
    sd : positive float
        The standard deviation of the instrumental distribution.
    coefVar : float
        The target coefficient of variation.
    outerSampling : int
        The maximum number of outer iterations; the total number of
        evaluations is outerSampling x blockSize.
    blockSize : int
        The number of samples sent for simultaneous evaluation.
    seed : int
        Seed for the OpenTURNS random generator.
    verbose : bool
        Enable or not the display of the result.
    failure_domain : string
        Type of failure domain: either 'union' or 'intersection'. Only
        needed if the event is a list.
    """

    # case with the limit state defined as an intersection
    # or a union of several events
    if type(event) is list:
        n_event = len(event)
        antecedent = event[0].getAntecedent()

        if failure_domain == "union":

            def function_union(X):
                sample = ot.Sample(X.getSize(), n_event)
                for i in range(n_event):
                    sample[:, i] = event[i].getFunction()(X)

                sample = np.array(sample)
                for i in range(n_event):
                    if (event[i].getOperator().getImplementation(
                    ).getClassName() == "Less" or event[i].getOperator(
                    ).getImplementation().getClassName() == "LessOrEqual"):
                        sample[:, i] = sample[:, i] < event[i].getThreshold()
                    if (event[i].getOperator().getImplementation(
                    ).getClassName() == "Greater" or event[i].getOperator(
                    ).getImplementation().getClassName() == "GreaterOrEqual"):
                        sample[:, i] = sample[:, i] >= event[i].getThreshold()
                return np.atleast_2d(sample.sum(axis=1)).T

            model = ot.PythonFunction(
                event[0].getFunction().getInputDimension(),
                event[0].getFunction().getOutputDimension(),
                func_sample=function_union,
            )
            output = ot.RandomVector(model, antecedent)
            new_event = ot.ThresholdEvent(output, ot.Greater(), 0.0)

        elif failure_domain == "intersection":

            def function_intersection(X):
                sample = ot.Sample(X.getSize(), n_event)
                for i in range(n_event):
                    sample[:, i] = event[i].getFunction()(X)

                sample = np.array(sample)
                for i in range(n_event):
                    if (event[i].getOperator().getImplementation(
                    ).getClassName() == "Less" or event[i].getOperator(
                    ).getImplementation().getClassName() == "LessOrEqual"):
                        sample[:, i] = sample[:, i] < event[i].getThreshold()
                    if (event[i].getOperator().getImplementation(
                    ).getClassName() == "Greater" or event[i].getOperator(
                    ).getImplementation().getClassName() == "GreaterOrEqual"):
                        sample[:, i] = sample[:, i] >= event[i].getThreshold()
                return np.atleast_2d(sample.prod(axis=1)).T

            model = ot.PythonFunction(
                event[0].getFunction().getInputDimension(),
                event[0].getFunction().getOutputDimension(),
                func_sample=function_intersection,
            )
            output = ot.RandomVector(model, antecedent)
            new_event = ot.ThresholdEvent(output, ot.Greater(), 0.0)
    else:
        model = event.getFunction()
        new_event = event

    # Initialize the random generator
    ot.RandomGenerator.SetSeed(seed)

    dim = model.getInputDimension()
    pstar = np.atleast_2d(pstar)
    nPoint = pstar.shape[0]

    stdev = [sd] * dim
    corr = ot.IdentityMatrix(dim)
    if nPoint > 1:
        distribution_list = list()
        for point in pstar:
            distribution_list.append(ot.Normal(point, stdev, corr))
        instrumental_distribution = ot.Mixture(distribution_list)
    elif nPoint == 1:
        instrumental_distribution = ot.Normal(pstar[0], stdev, corr)

    # Run importance sampling simulation
    experiment = ot.ImportanceSamplingExperiment(instrumental_distribution)
    simulation = ot.ProbabilitySimulationAlgorithm(ot.StandardEvent(new_event),
                                                   experiment)
    simulation.setMaximumOuterSampling(outerSampling)
    simulation.setBlockSize(blockSize)
    simulation.setMaximumCoefficientOfVariation(coefVar)

    # try:
    simulation.run()
    # except Exception as e:
    #     dump_cache(model, 'Cache/physicalModelMathFunction')
    #     raise e

    result = simulation.getResult()

    # assemble the results table in a single construction (DataFrame.append
    # is removed in recent pandas versions)
    dfResult = pd.DataFrame(
        [
            result.getProbabilityEstimate(),
            result.getCoefficientOfVariation(),
            result.getConfidenceLength(),
            result.getOuterSampling() * result.getBlockSize(),
        ],
        index=[
            "Probability of failure",
            "Coefficient of variation",
            "95 % Confidence length",
            "Number of calls",
        ],
    )
    dfResult = dfResult.reset_index()
    dfResult.columns = ["", "Results - Importance Sampling"]

    if verbose:
        print(dfResult, "\n")

    return simulation
Example #15
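# (excerpt: `import numpy as np`, `import openturns as ot`, the DAG `ndag`,
#  the node ordering `order` and the helper `buildT` are defined earlier in
#  the original script and are not shown)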
description = ndag.getDescription()
print("description=", description)

for node in order:
    print(" parents(", description[node], ") : ",
          [description[i] for i in ndag.getParents(node)])
    print("children(", description[node], ") : ",
          [description[i] for i in ndag.getChildren(node)])

# Conditional linear coefficients, b matrix:
# X3, X5, X6, X1, X2, X4
b = [[], [0.5], [0.0, 0.75], [0.0, 0.00, 0.0], [0.5, 0.00, 0.0, 0.75],
     [0.0, 1.00, 0.0, 0.00, 1.0]]

T = buildT(b)
I = ot.IdentityMatrix(T.getDimension())
sigma = ot.CovarianceMatrix(np.array(T.solveLinearSystem(I)))

sigma_ordered = ot.CovarianceMatrix(len(b))
for i in range(len(b)):
    for j in range(i + 1):
        sigma_ordered[order[i], order[j]] = sigma[i, j]
print("Sigma matrix: ", sigma_ordered)

# Marginal mean, mu vector:
mu = [0.0] * 6

distribution = ot.Normal(mu, sigma_ordered)
distribution.setDescription(description)
size = 100000
sample = distribution.getSample(size)
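# Hedged check (not in the original excerpt): the empirical covariance of the
# sample should be close to sigma_ordered.
print(sample.computeCovariance())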
Example #16

import openturns as ot
import openturns.testing as ott

data = ot.Sample([[53, 1], [57, 1], [58, 1], [63, 1], [66, 0], [67, 0],
                  [67, 0], [67, 0], [68, 0], [69, 0], [70, 0], [70, 0],
                  [70, 1], [70, 1], [72, 0], [73, 0], [75, 0], [75, 1],
                  [76, 0], [76, 0], [78, 0], [79, 0], [81, 0]])

data.setDescription(['Temp. (°F)', 'Failure'])
print(data)

fun = ot.SymbolicFunction(
    ["alpha", "beta", "x"],
    ["exp(alpha + beta * x) / (1 + exp(alpha + beta * x))"])
linkFunction = ot.ParametricFunction(fun, [2], [0.0])
instrumental = ot.Normal([0.0] * 2, [0.5, 0.05], ot.IdentityMatrix(2))

target = ot.ComposedDistribution([ot.Uniform(-100.0, 100.0)] * 2)
rwmh = ot.RandomWalkMetropolisHastings(target, [0.0] * 2, instrumental)
conditional = ot.Bernoulli()
observations = data[:, 1]
covariates = data[:, 0]
rwmh.setLikelihood(conditional, observations, linkFunction, covariates)

# try to generate a sample
sample = rwmh.getSample(10000)
mu = sample.computeMean()
sigma = sample.computeStandardDeviation()
print('mu=', mu, 'sigma=', sigma)
ott.assert_almost_equal(mu, [14.8747, -0.230384])
ott.assert_almost_equal(sigma, [7.3662, 0.108103])