Example #1
    def generate(self, n_samples=2):
        """Generate samples.

        Candidate points are drawn from the KDE (i.e. following its PDF), and
        the one giving the best improvement in terms of discrepancy is kept.

        The KDE is updated after each point is added to the sample.

        :param int n_samples: Number of samples to generate.
        :return: Sample.
        :rtype: array_like, shape (n_samples, n_features)
        """
        self.kde = copy.deepcopy(self.kde_)
        sample = list(copy.deepcopy(self.space))
        self.n_samples = len(sample)

        for _ in range(n_samples - 1):
            sample_ = self.sample_kde(500)

            self.sample_ = sample_
            self.kde_prev = copy.deepcopy(self.kde)

            # Normal strategy
            # disc = [ot.SpaceFillingPhiP(1000).evaluate(np.vstack([sample, s]))
            #         for s in sample_]

            # disc = [Space.discrepancy(np.vstack([sample, s]), method='WD')
            #         for s in sample_]

            disc = [
                ot.SpaceFillingC2().evaluate(np.vstack([sample, s]))
                for s in sample_
            ]

            # Subprojections
            # disc = [discrepancy_2D(np.vstack([sample, s]))
            #         for s in sample_]

            # Sobol consideration
            # disc = [ot.SpaceFillingC2().evaluate(np.concatenate([np.array(sample)[:, 0].reshape(-1, 1), np.array(s)[0].reshape(1, 1)]))
            #         for s in sample_]

            sample.append(sample_[np.argmin(disc)])

            # For constraint
            # disc = [ot.SpaceFillingMinDist().evaluate(np.vstack([sample, s]))
            #         for s in sample_]

            # Max probability point
            # disc = self.kde_.score_samples(sample_)

            # sample.append(sample_[np.argmax(disc)])

            self.n_samples = len(sample)
            self.kde.set_params(bandwidth=self.bw / self.n_samples**(1 / 2),
                                metric_params={'func': self.metric_func})
            self.kde.fit(sample)

        return np.array(sample)
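
A minimal usage sketch of the method above, assuming the enclosing class is KdeSampler as instantiated in Example #6 below:

sampler = KdeSampler(sample=[[0.5, 0.7]], dim=2, bw=0.5)
sample_kde = sampler.generate(n_samples=10)  # array of shape (10, 2)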
Example #2
def myOptimalLHSExperiment(distribution, size, model):
    # Build standard randomized LHS algorithm
    lhs = ot.LHSExperiment(distribution, size)
    # lhs.setAlwaysShuffle(False)  # not randomized: reuse the same shuffle
    # Defining space fillings
    spaceFilling = ot.SpaceFillingC2()
    # RandomBruteForce MonteCarlo with N designs (LHS with C2 optimization)
    N = 10000
    optimalLHSAlgorithm = ot.MonteCarloLHS(lhs, N, spaceFilling)
    experiment = optimalLHSAlgorithm.getLHS()
    sensitivity_algorithm = ot.SaltelliSensitivityAlgorithm(experiment, model)
    return sensitivity_algorithm
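
A hypothetical usage sketch for the helper above; the symbolic model and the distribution are illustrative stand-ins, not taken from the original code:

import openturns as ot

model = ot.SymbolicFunction(['x1', 'x2', 'x3'],
                            ['sin(x1) + 7 * sin(x2)^2 + 0.1 * x3^4 * sin(x1)'])
distribution = ot.ComposedDistribution([ot.Uniform(-3.1416, 3.1416)] * 3)
sensitivity_algorithm = myOptimalLHSExperiment(distribution, 100, model)
print(sensitivity_algorithm.getFirstOrderIndices())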
Example #3
from itertools import combinations_with_replacement

import numpy as np
import openturns as ot


def discrepancy_2D(sample):
    """Mean discrepancy of all 2D subprojections."""
    sample = np.asarray(sample)
    dim = sample.shape[1]

    # disc = ot.SpaceFillingC2().evaluate(np.stack([sample[:, 20],
    #                                                sample[:, 8]], axis=-1))

    disc = []
    for i, j in combinations_with_replacement(range(dim), 2):
        if i < j:
            disc_ = ot.SpaceFillingC2().evaluate(
                np.stack([sample[:, i], sample[:, j]], axis=-1))
            # disc_ = ot.SpaceFillingMinDist().evaluate(np.stack([sample[:, i],
            #                                                     sample[:, j]], axis=-1))
            # disc_ = Space.discrepancy(np.stack([sample[:, i],
            #                                     sample[:, j]], axis=-1), method='MD')

            disc.append(disc_)

    return np.mean(disc)
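
A quick illustrative call; the random sample below is just an assumption to exercise the function:

rng = np.random.default_rng(0)
print(discrepancy_2D(rng.random((50, 4))))  # mean C2 over the 6 2D subprojections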
Example #4
#! /usr/bin/env python

from __future__ import print_function
import openturns as ot

# Defining parameters
dimension = 5
size = 100

# Build OT LHS algorithm
lhs = ot.LHSExperiment(ot.ComposedDistribution([ot.Uniform()] * dimension),
                       size)

# Generate design without optimization
design = lhs.generate()

# Defining space fillings
spaceFillingC2 = ot.SpaceFillingC2()
spaceFillingMinDist = ot.SpaceFillingMinDist()
spaceFillingPhiP = ot.SpaceFillingPhiP()
spaceFillingPhiP50 = ot.SpaceFillingPhiP(50)

# print the criteria on this design
print("C2=%f MinDist=%f PhiP=%f, PhiP(50)=%f" % tuple([
    sf.evaluate(design) for sf in [
        spaceFillingC2, spaceFillingMinDist, spaceFillingPhiP,
        spaceFillingPhiP50
    ]
]))
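
# A possible follow-up, not part of the original script: optimize the same LHS
# by a brute-force Monte Carlo search over 1000 random designs and compare the
# C2 criterion with the value printed above.
lhs.setAlwaysShuffle(True)  # randomize the LHS for the search
optimalDesign = ot.MonteCarloLHS(lhs, 1000, spaceFillingC2).generate()
print("optimized C2=%f" % spaceFillingC2.evaluate(optimalDesign))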
Example #5
distribution = ot.ComposedDistribution([ot.Uniform(0.0, 1.0)] * dimension)
lhs = ot.LHSExperiment(distribution, size)
lhs.setRandomShift(False)  # centered
lhs.setAlwaysShuffle(True)  # randomized

# print the object
print("lhs=", lhs)
bounds = distribution.getRange()
print("Bounds of uniform distributions=", bounds)

# Generate design without optimization
design = lhs.generate()
print("design=", design)

# Defining space fillings
spaceFillingC2 = ot.SpaceFillingC2()
spaceFillingPhiP = ot.SpaceFillingPhiP()

# print the criteria on this design
print("PhiP=%f, C2=%f" % (ot.SpaceFillingPhiP().evaluate(design),
                          ot.SpaceFillingC2().evaluate(design)))

# Parameters for drawing the design (number of points of the "grid")
Nx = 50
Ny = 50

# --------------------------------------------------#
# ------------ MonteCarlo algorithm  ------------- #
# --------------------------------------------------#

# RandomBruteForce MonteCarlo with N designs
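# Possible continuation (sketch): brute-force Monte Carlo search keeping the
# best of N random LHS designs according to the C2 criterion.
N = 1000
mc_lhs = ot.MonteCarloLHS(lhs, N, spaceFillingC2)
optimizedDesign = mc_lhs.generate()
print("optimized PhiP=%f, C2=%f" % (spaceFillingPhiP.evaluate(optimizedDesign),
                                    spaceFillingC2.evaluate(optimizedDesign)))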
Example #6
            disc.append(disc_)

    return np.mean(disc)


dim = 2
n_sample = 10
sigma = 0.5
sampler = KdeSampler(sample=[[0.5, 0.7]], dim=dim, bw=sigma)
sample_kde = sampler.generate(n_sample)

dists = [ot.Uniform(0, 1) for _ in range(dim)]
dists = ot.ComposedDistribution(dists)
lhs = ot.LHSExperiment(dists, n_sample)
lhs_opt = ot.SimulatedAnnealingLHS(lhs, ot.GeometricProfile(),
                                   ot.SpaceFillingC2())

sample_lhs = np.array(lhs.generate())
sample_lhs_opt = np.array(lhs_opt.generate())
sample_sobol = np.array(ot.SobolSequence(dim).generate(n_sample))

print(f'Discrepancy CD:\n'
      f'-> KDE: {ot.SpaceFillingC2().evaluate(sample_kde)}\n'
      f'-> LHS opt: {ot.SpaceFillingC2().evaluate(sample_lhs_opt)}\n'
      f'-> LHS: {ot.SpaceFillingC2().evaluate(sample_lhs)}\n'
      f'-> Sobol: {ot.SpaceFillingC2().evaluate(sample_sobol)}\n')

print(f'Discrepancy WD:\n'
      f"-> KDE: {Space.discrepancy(sample_kde, method='WD')}\n"
      f"-> LHS opt: {Space.discrepancy(sample_lhs_opt, method='WD')}\n"
      f"-> LHS: {Space.discrepancy(sample_lhs, method='WD')}\n"
      f"-> Sobol: {Space.discrepancy(sample_sobol, method='WD')}\n")
# %%
distributions = ot.DistributionCollection()
for i in range(dim):
    distributions.add(ot.Uniform(lbounds[i], ubounds[i]))
boundedDistribution = ot.ComposedDistribution(distributions)

# %%
# We first generate a Latin Hypercube Sampling (LHS) design made of 25 points in the sample space. This LHS is optimized so as to fill the space.

# %%
K = 25 # design size
LHS = ot.LHSExperiment(boundedDistribution, K)
LHS.setAlwaysShuffle(True)
SA_profile = ot.GeometricProfile(10., 0.95, 20000)
LHS_optimization_algo = ot.SimulatedAnnealingLHS(LHS, SA_profile, ot.SpaceFillingC2())
LHS_optimization_algo.generate()
LHS_design = LHS_optimization_algo.getResult()
starting_points = LHS_design.getOptimalDesign()
starting_points.getSize()

# %%
# We can check that the minimum and maximum in the sample correspond to the bounds of the design of experiment.

# %%
lbounds, ubounds

# %%
starting_points.getMin(), starting_points.getMax()

# %%
Example #8
# Size of sample
size = 20

# Factory: lhs generates
lhsDesign = ot.LHSExperiment(
    ot.ComposedDistribution([ot.Uniform(0.0, 1.0)] * dimension), size)
lhsDesign.setRandomShift(False)  # centered
lhsDesign.setAlwaysShuffle(True)  # randomized

# For each design, evaluation of some criteria
fp = open("space_filling_criteria_perturbation.val", "w")
design = lhsDesign.generate()
filename = "design_dim_2_size_20_centered_perturbLHS.csv"
design.exportToCSVFile(filename, ";")
c2 = ot.SpaceFillingC2().evaluate(design)
phip = ot.SpaceFillingPhiP().evaluate(design)
mindist = ot.SpaceFillingMinDist().evaluate(design)
fp.write("initial design=%s\n" % filename)
fp.write("c2=%1.10e, phip=%1.10e, mindist=%1.10e\n" % (c2, phip, mindist))

# Perturbations
# Exchange of the form:
#tmp = design[row1, column]
#design[row1, column] = design[row2, column]
#design[row2, column] = tmp
for row1 in range(size):
    for row2 in range(size):
        for column in range(dimension):
            # The 3-tuple (row1, row2, column)
            # Evaluate the criteria after exchanging design[row1, column] and design[row2, column]
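            # Possible loop body (sketch, not from the original script): swap
            # the two entries, evaluate the criteria on the perturbed design,
            # log them, then swap back to restore the initial design.
            design[row1, column], design[row2, column] = (
                design[row2, column], design[row1, column])
            c2 = ot.SpaceFillingC2().evaluate(design)
            phip = ot.SpaceFillingPhiP().evaluate(design)
            mindist = ot.SpaceFillingMinDist().evaluate(design)
            fp.write("row1=%d, row2=%d, column=%d: c2=%1.10e, phip=%1.10e, mindist=%1.10e\n"
                     % (row1, row2, column, c2, phip, mindist))
            design[row1, column], design[row2, column] = (
                design[row2, column], design[row1, column])

fp.close()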
Example #9
input_variables = ['xi1', 'xi2', 'xi3', 'a', 'b']
formula = ['sin(xi1) + a * (sin(xi2)) ^ 2 + b * xi3^4 * sin(xi1)']
full = ot.SymbolicFunction(input_variables, formula)
ishigami_model = ot.ParametricFunction(full, [3, 4], [a, b])

# Generating a design of size N
N = 150
# Considering independent Uniform distributions of dimension 3
# Bounds are (-pi,pi), (-pi,pi) and (-pi,pi)
distribution = ot.ComposedDistribution([ot.Uniform(-pi, pi)] * dimension)
bounds = distribution.getRange()
# Random LHS
lhs = ot.LHSExperiment(distribution, N)
lhs.setAlwaysShuffle(True)  # randomized
# Fix the C2 criterion
space_filling = ot.SpaceFillingC2()
# Defining a temperature profile
temperatureProfile = ot.GeometricProfile()
# Preconditioning: generate an optimal design with MC
nSimu = 100
algo = ot.MonteCarloLHS(lhs, nSimu, space_filling)
initialDesign = algo.generate()
result = algo.getResult()

print('initial design pre-computed. Performing SA optimization...')
# Use of initial design
algo = ot.SimulatedAnnealingLHS(initialDesign, distribution,
                                temperatureProfile, space_filling)
# Retrieve optimal design
input_database = algo.generate()
Example #10
distribution = ot.ComposedDistribution([ot.Uniform(0.0, 1.0)] * dimension)
lhs = ot.LHSExperiment(distribution, size)
lhs.setRandomShift(False) # centered
lhs.setAlwaysShuffle(True) # randomized

# print the object
print("lhs=", lhs)
bounds = distribution.getRange()
print("Bounds of uniform distributions=", bounds)

# Generate design without optimization
design = lhs.generate()
print("design=", design)

# Defining space fillings
spaceFillingC2 = ot.SpaceFillingC2()
spaceFillingPhiP = ot.SpaceFillingPhiP()

# print the criteria on this design
print("PhiP=%f, C2=%f"%(ot.SpaceFillingPhiP().evaluate(design), ot.SpaceFillingC2().evaluate(design)))

# Parameters for drawing the design (number of points of the "grid")
Nx = 50
Ny = 50

# Show the design
# ot.Show(lhsGraph)

#--------------------------------------------------#
# ------------ MonteCarlo algorithm  ------------- #
#--------------------------------------------------#
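# Possible continuation (sketch): brute-force Monte Carlo search over N random
# LHS designs; the optimal design is retrieved from the algorithm result.
N = 1000
mc_algo = ot.MonteCarloLHS(lhs, N, spaceFillingC2)
mc_algo.generate()
mc_result = mc_algo.getResult()
print("best C2=%f" % spaceFillingC2.evaluate(mc_result.getOptimalDesign()))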
Example #11
    def __init__(self, n_samples, bounds, kind, dists=None, discrete=None):
        """Initialize the DOE generation.

        If :attr:`kind` is ``uniform``, :attr:`n_samples` is decimated so
        that all dimensions contain the same number of points.

        If :attr:`kind` is ``discrete``, a joint distribution is built by
        combining a discrete uniform distribution with the continuous
        distributions.

        Another possibility is to set a list of PDFs to sample from. Thus one
        can do: `dists=['Uniform(15., 60.)', 'Normal(4035., 400.)']`. If not
        set, uniform distributions are used.

        :param int n_samples: Number of samples.
        :param array_like bounds: Space corners [[min, n dim], [max, n dim]].
        :param str kind: Sampling method; if a string, one of
          ['halton', 'sobol', 'faure', '[o]lhs[c]', 'sobolscramble', 'uniform',
          'discrete'], otherwise a list of openturns distributions.
        :param lst(str) dists: List of valid openturns distributions as strings.
        :param int discrete: Position of the discrete variable.
        """
        self.n_samples = n_samples
        self.bounds = np.asarray(bounds)
        self.kind = kind
        self.dim = self.bounds.shape[1]

        self.scaler = preprocessing.MinMaxScaler()
        self.scaler.fit(self.bounds)

        if dists is None:
            dists = [ot.Uniform(float(self.bounds[0][i]),
                                float(self.bounds[1][i]))
                     for i in range(self.dim)]
        else:
            dists = bat.space.dists_to_ot(dists)

        if discrete is not None:
            # Creating uniform discrete distribution for OT
            disc_list = [[i] for i in range(int(self.bounds[0, discrete]),
                                            int(self.bounds[1, discrete] + 1))]
            disc_dist = ot.UserDefined(disc_list)

            dists.pop(discrete)
            dists.insert(discrete, disc_dist)

        # Join distribution
        self.distribution = ot.ComposedDistribution(dists)

        if self.kind == 'halton':
            self.sequence_type = ot.LowDiscrepancyExperiment(ot.HaltonSequence(),
                                                             self.distribution,
                                                             self.n_samples)
        elif self.kind == 'sobol':
            self.sequence_type = ot.LowDiscrepancyExperiment(ot.SobolSequence(),
                                                             self.distribution,
                                                             self.n_samples)
        elif self.kind == 'faure':
            self.sequence_type = ot.LowDiscrepancyExperiment(ot.FaureSequence(),
                                                             self.distribution,
                                                             self.n_samples)
        elif (self.kind == 'lhs') or (self.kind == 'lhsc'):
            self.sequence_type = ot.LHSExperiment(self.distribution, self.n_samples)
        elif self.kind == 'olhs':
            lhs = ot.LHSExperiment(self.distribution, self.n_samples)
            self.sequence_type = ot.SimulatedAnnealingLHS(lhs, ot.GeometricProfile(),
                                                          ot.SpaceFillingC2())
        elif self.kind == 'saltelli':
            # Only relevant for computation of Sobol' indices
            size = self.n_samples // (2 * self.dim + 2)  # N(2*dim + 2)
            self.sequence_type = ot.SobolIndicesExperiment(self.distribution,
                                                           size, True).generate()
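
A standalone sketch of the ``discrete`` branch above, with illustrative values: a continuous Uniform marginal combined with a discrete uniform marginal on {0, ..., 5}:

import openturns as ot

continuous_dist = ot.Uniform(0.0, 1.0)
discrete_dist = ot.UserDefined([[i] for i in range(6)])
joint = ot.ComposedDistribution([continuous_dist, discrete_dist])
print(joint.getSample(5))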
Example #12
distribution = ot.ComposedDistribution([ot.Uniform(0.0, 1.0)] * dimension)
distribution.setDescription(['U'+str(i) for i in range(dimension)])
lhs = ot.LHSExperiment(distribution, size)
lhs.setRandomShift(False)  # centered
lhs.setAlwaysShuffle(True)  # randomized

# print the object
print("lhs=", lhs)
print("Bounds of uniform distributions=", distribution.getRange())

# Generate design without optimization
design = lhs.generate()
print("design=", design)

# Defining space fillings
spaceFillingC2 = ot.SpaceFillingC2()
spaceFillingPhiP = ot.SpaceFillingPhiP(10)

# print the criteria on this design
print("PhiP=%f, C2=%f" %
      (ot.SpaceFillingPhiP().evaluate(design), ot.SpaceFillingC2().evaluate(design)))

# --------------------------------------------------#
# ------------- Simulated annealing  ------------- #
# --------------------------------------------------#
# Geometric profile
T0 = 10.0
iMax = 2000
c = 0.95
geomProfile = ot.GeometricProfile(T0, c, iMax)
Example #13
# **LHS and space filling**

# %%
N = 100
# Considering independent Uniform distributions of dimension 3
# Bounds are (-1,1), (0,2) and (0, 0.5)
distribution = ot.ComposedDistribution(
    [ot.Uniform(-1.0, 1.0),
     ot.Uniform(0.0, 2.0),
     ot.Uniform(0.0, 0.5)])
# Random LHS
lhs = ot.LHSExperiment(distribution, N)
lhs.setAlwaysShuffle(True)  # randomized
design = lhs.generate()
# C2
c2 = ot.SpaceFillingC2().evaluate(design)
# PhiP with default p
phip = ot.SpaceFillingPhiP().evaluate(design)
# mindist
mindist = ot.SpaceFillingMinDist().evaluate(design)
# For p->infinity
phip_inf = ot.SpaceFillingPhiP(100).evaluate(design)
print(phip, mindist, phip_inf)

# %%
# **Optimized LHS using Monte Carlo**

# %%
# As with Monte Carlo, the user chooses a fixed number of iterations, but this
# time that number is part of the temperature profile (see the sketch below).
#
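# A minimal sketch of that step, under assumed settings: T0=10, c=0.95 and
# 50000 iterations for the geometric profile, with C2 as the criterion.

# %%
geomProfile = ot.GeometricProfile(10.0, 0.95, 50000)
sa_algo = ot.SimulatedAnnealingLHS(lhs, geomProfile, ot.SpaceFillingC2())
design_sa = sa_algo.generate()
print(ot.SpaceFillingC2().evaluate(design_sa))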