Example #1
# %%
import openturns as ot
import openturns.viewer as viewer

# %%
# 1. Sobol sequence
dimension = 2
size = 1024
sequence = ot.SobolSequence(dimension)
sample = sequence.generate(size)
graph = ot.Graph("Sobol", "", "", True, "")
cloud = ot.Cloud(sample)
graph.add(cloud)
view = viewer.View(graph)

# %%
# 2. Halton sequence
dimension = 2
sequence = ot.HaltonSequence(dimension)
sample = sequence.generate(size)
graph = ot.Graph("Halton", "", "", True, "")
cloud = ot.Cloud(sample)
graph.add(cloud)
view = viewer.View(graph)

# %%
# 3. Reverse Halton sequence
sequence = ot.ReverseHaltonSequence(dimension)
sample = sequence.generate(size)
print('discrepancy=',
      ot.LowDiscrepancySequenceImplementation.ComputeStarDiscrepancy(sample))
graph = ot.Graph("Reverse Halton", "", "", True, "")
cloud = ot.Cloud(sample)
graph.add(cloud)
view = viewer.View(graph)

# %%
# We have elementary intervals in 2 dimensions, each with a volume equal to 1/8. Since there are 32 points, the Sobol' sequence is such that each elementary interval contains exactly 32/8 = 4 points. Notice that each elementary interval is closed on the left (or bottom) and open on the right (or top).
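
# %%
# As a quick check (a sketch, not part of the original example), we can generate
# 32 points of the 2D Sobol' sequence and count how many fall into each cell of
# a 4 x 2 grid, i.e. into each elementary interval of volume 1/8. Each count is
# expected to equal 32/8 = 4.
import numpy as np

check_sample = np.array(ot.SobolSequence(2).generate(32))
counts = np.zeros((4, 2), dtype=int)
for x, y in check_sample:
    # intervals are closed on the left/bottom and open on the right/top
    counts[int(x * 4), int(y * 2)] += 1
print(counts)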

# %%
# Halton low discrepancy sequence
# -------------------------------

# %%
dim = 2
distribution = ot.ComposedDistribution([ot.Uniform()]*dim)
bounds = distribution.getRange()

# %%
sequence = ot.HaltonSequence(dim)

# %%
samplesize = 2**2 * 3**2 # Halton sequence uses prime numbers 2 and 3 in two dimensions.
experiment = ot.LowDiscrepancyExperiment(sequence, distribution, samplesize, False)
sample = experiment.generate()
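
# %%
# A short sketch (not in the original example): print the first points of the
# sequence. With the default, unscrambled Halton sequence, the first coordinate
# should run through the base-2 radical inverses (1/2, 1/4, 3/4, ...) and the
# second coordinate through the base-3 ones (1/3, 2/3, 1/9, ...).
print(ot.HaltonSequence(dim).generate(5))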

# %%
samplesize

# %%
subdivisions = [2**2, 3]
fig = viewer.PlotDesign(sample, bounds, subdivisions)
fig.set_size_inches(6, 6)

Example #3
    def __init__(self, n_samples, bounds, kind, dists=None, discrete=None):
        """Initialize the DOE generation.

        In case :attr:`kind` is ``uniform``, :attr:`n_samples` is decimated
        so that the same number of points is used in every dimension.

        If :attr:`kind` is ``discrete``, a joint distribution is built from a
        discrete uniform distribution and the continuous distributions.

        Another possibility is to set a list of PDFs to sample from. Thus one
        can do: `dists=['Uniform(15., 60.)', 'Normal(4035., 400.)']`. If not
        set, uniform distributions are used.

        :param int n_samples: number of samples.
        :param array_like bounds: corners of the space, [[min, n dim], [max, n dim]].
        :param str kind: sampling method; if a string, it can be one of
          ['halton', 'sobol', 'faure', '[o]lhs[c]', 'sobolscramble', 'uniform',
          'discrete'], otherwise it can be a list of openturns distributions.
        :param lst(str) dists: list of valid openturns distributions as strings.
        :param int discrete: position of the discrete variable.
        """
        self.n_samples = n_samples
        self.bounds = np.asarray(bounds)
        self.kind = kind
        self.dim = self.bounds.shape[1]

        self.scaler = preprocessing.MinMaxScaler()
        self.scaler.fit(self.bounds)

        if dists is None:
            dists = [ot.Uniform(float(self.bounds[0][i]),
                                float(self.bounds[1][i]))
                     for i in range(self.dim)]
        else:
            dists = bat.space.dists_to_ot(dists)

        if discrete is not None:
            # Creating uniform discrete distribution for OT
            disc_list = [[i] for i in range(int(self.bounds[0, discrete]),
                                            int(self.bounds[1, discrete] + 1))]
            disc_dist = ot.UserDefined(disc_list)

            dists.pop(discrete)
            dists.insert(discrete, disc_dist)

        # Joint distribution
        self.distribution = ot.ComposedDistribution(dists)

        if self.kind == 'halton':
            self.sequence_type = ot.LowDiscrepancyExperiment(ot.HaltonSequence(),
                                                             self.distribution,
                                                             self.n_samples)
        elif self.kind == 'sobol':
            self.sequence_type = ot.LowDiscrepancyExperiment(ot.SobolSequence(),
                                                             self.distribution,
                                                             self.n_samples)
        elif self.kind == 'faure':
            self.sequence_type = ot.LowDiscrepancyExperiment(ot.FaureSequence(),
                                                             self.distribution,
                                                             self.n_samples)
        elif (self.kind == 'lhs') or (self.kind == 'lhsc'):
            self.sequence_type = ot.LHSExperiment(self.distribution, self.n_samples)
        elif self.kind == 'olhs':
            lhs = ot.LHSExperiment(self.distribution, self.n_samples)
            self.sequence_type = ot.SimulatedAnnealingLHS(lhs, ot.GeometricProfile(),
                                                          ot.SpaceFillingC2())
        elif self.kind == 'saltelli':
            # Only relevant for computation of Sobol' indices
            size = self.n_samples // (2 * self.dim + 2)  # N(2*dim + 2)
            self.sequence_type = ot.SobolIndicesExperiment(self.distribution,
                                                           size, True).generate()
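
A minimal usage sketch for the initializer above. The enclosing class is not shown in this excerpt, so its name (Doe) and import path are assumptions, and the bounds and sizes are purely illustrative:

import numpy as np
from batman.space import Doe  # hypothetical import path

bounds = np.array([[0.0, 10.0], [5.0, 100.0]])  # [[min of each dim], [max of each dim]]
doe = Doe(n_samples=32, bounds=bounds, kind='halton')
# the 'halton' branch stores a LowDiscrepancyExperiment in sequence_type
sample = np.array(doe.sequence_type.generate())
print(sample.shape)  # (32, 2)
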
# %%
# 1. Sobol sequence
dimension = 2
size = 1024
sequence = ot.SobolSequence(dimension)
sample = sequence.generate(size)
graph = ot.Graph("Sobol", "", "", True, "")
cloud = ot.Cloud(sample)
graph.add(cloud)
view = viewer.View(graph)

# %%
# 2. Halton sequence
dimension = 2
sequence = ot.HaltonSequence(dimension)
sample = sequence.generate(size)
graph = ot.Graph("Halton", "", "", True, "")
cloud = ot.Cloud(sample)
graph.add(cloud)
view = viewer.View(graph)

# %%
# 3. Halton sequence in high dimension: bad filling in upper dimensions
dimension = 20
sequence = ot.HaltonSequence(dimension)
sample = sequence.generate(size).getMarginal([dimension - 2, dimension - 1])
graph = ot.Graph(
    "Halton (" + str(dimension - 2) + "," + str(dimension - 1) + ")",
    "dim " + str(dimension - 2), "dim " + str(dimension - 1), True, "")
cloud = ot.Cloud(sample)
graph.add(cloud)
view = viewer.View(graph)
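
# %%
# A quick quantitative check (a sketch, not in the original example): compare
# the star discrepancy of this high-dimensional 2D marginal with that of a
# plain 2D Halton sample of the same size; the marginal is expected to fill the
# unit square less evenly.
print('discrepancy (dims 18-19) =',
      ot.LowDiscrepancySequenceImplementation.ComputeStarDiscrepancy(sample))
print('discrepancy (2D Halton)  =',
      ot.LowDiscrepancySequenceImplementation.ComputeStarDiscrepancy(
          ot.HaltonSequence(2).generate(size)))
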
def myHaltonLowDiscrepancyExperiment(distribution, size, model):
    sequence = ot.HaltonSequence(distribution.getDimension())
    experiment = ot.LowDiscrepancyExperiment(sequence, distribution, size)
    sensitivity_algorithm = ot.SaltelliSensitivityAlgorithm(experiment, model)
    return sensitivity_algorithm
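
A hedged usage sketch for the helper above; the symbolic model and the input distribution below are illustrative assumptions, not taken from the original code:

model = ot.SymbolicFunction(
    ['x1', 'x2', 'x3'],
    ['sin(x1) + 7 * sin(x2)^2 + 0.1 * x3^4 * sin(x1)'])
distribution = ot.ComposedDistribution([ot.Uniform(-3.1416, 3.1416)] * 3)
sensitivity_algorithm = myHaltonLowDiscrepancyExperiment(distribution, 1000, model)
print(sensitivity_algorithm.getFirstOrderIndices())
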
Example #6
    def __init__(self):

        # Load data
        df = pd.read_stata('sample.dta')

        # x1 variables enter the linear part of the estimation
        self.x1 = df[['var8', 'price', 'char1', 'char2']].to_numpy()

        # x2 variables enter the non-linear part
        self.x2 = df[['char1', 'char2']].to_numpy()

        # number of random coefficients
        self.nrc = self.x2.shape[1]

        # number of simulated "individuals" per market
        self.ns = 200

        # number of markets, mktid = 1, 2, ..., 100
        self.nmkt = df['mktid'].max() - df['mktid'].min() + 1

        # number of brands per market; if the number differs by market this requires some "accounting" vector
        self.num_prod = df[['prodid', 'mktid'
                            ]].groupby(['mktid']).agg(['count']).to_numpy()

        # this vector relates each observation to the market it is in
        self.cdid = np.kron(
            np.array([i for i in range(self.nmkt)], ndmin=2).T,
            np.ones((100, 1)))
        self.cdid = self.cdid.reshape(self.cdid.shape[0]).astype('int')

        # this vector provides the index of the last observation in each
        # market. in the data used here all brands appear in all markets. if
        # this is not the case the two vectors, cdid and cdindex, have to be
        # created in a different fashion but the rest of the program works fine.
        # cdindex = [nbrn:nbrn:nbrn*nmkt]';
        self.cdindex = np.array(
            [i for i in range((100 - 1), 100 * self.nmkt, 100)])

        # the market share of product j in market t
        self.s_jt = df[['sjt']].to_numpy()

        # the share of the outside option in market t
        self.s_0t = df[['s0t']].to_numpy()

        # Load IV: the instruments and the exogenous x's.
        self.IV = df[[
            'var8', 'char1', 'char2', 'z1', 'z2', 'z3', 'z4', 'z5', 'z6', 'z7',
            'z8', 'z9', 'z10'
        ]].to_numpy()

        # create initial weight matrix
        self.invA = np.linalg.inv(self.IV.T @ self.IV)

        # compute logit results and save the mean utility as initial values for the search below
        y = np.log(self.s_jt / self.s_0t)
        mid = self.x1.T @ self.IV @ self.invA @ self.IV.T
        self.init_theta = np.linalg.inv(mid @ self.x1) @ mid @ y
        self.old_delta = self.x1 @ self.init_theta
        self.old_delta_exp = np.exp(self.old_delta)

        self.V_names = ['Constant', 'Price', 'Char1', 'Char2']
        self.df_logit_iv = pd.DataFrame(self.init_theta, index=self.V_names)
        self.df_logit_iv.columns = ['Coef.']

        # create initial values for random coefficients, price, char1, char2
        # np.random.seed(1)
        # self.init_theta2 = np.absolute(np.random.normal(0, 1 , (self.nrc,1)))
        self.init_theta2 = np.ones((self.nrc, 1))

        # drawing from the Halton sequence with self.ns draws across self.nrc,
        # the number of random coefficients
        # x = halton_sequence(self.ns + 1, self.nrc)
        # v = np.asarray(x)
        self.v = np.array(ot.HaltonSequence(self.nrc).generate(self.ns))
        # convert the Halton draws into standard normal draws via the inverse CDF
        self.vi = norm.ppf(self.v)

        self.gmmvalold = 0
        self.gmmdiff = 1

        self.iter = 0
        # self.theta2 = self.theta2_init
        self.delta = self.meanval(self.init_theta2)
        self.gmmresid = self.delta - self.x1 @ self.init_theta
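
A self-contained sketch (not part of the original class) of the Halton-to-normal transform used above; the sizes are illustrative:

import numpy as np
import openturns as ot
from scipy.stats import norm

ns, nrc = 200, 2
# quasi-uniform Halton draws in (0, 1)^nrc, one row per simulated individual
v = np.array(ot.HaltonSequence(nrc).generate(ns))
# map through the inverse standard normal CDF to obtain normal draws
vi = norm.ppf(v)
print(vi.shape, vi.mean(axis=0))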