Example #1
def sobol_test_generate():
    """
    sobol_test_generate tests i4_sobol_generate.
    """
    import numpy as np
    from sobol import i4_sobol_generate

    print('\nSOBOL_TEST_GENERATE'
          '\n  I4_SOBOL_GENERATE computes points of a Sobol sequence.\n')

    target = np.array([
       [    0.5,     0.5,     0.5,     0.5,     0.5],
       [   0.75,    0.25,    0.75,    0.25,    0.75],
       [   0.25,    0.75,    0.25,    0.75,    0.25],
       [  0.375,   0.375,   0.625,   0.125,   0.875],
       [  0.875,   0.875,   0.125,   0.625,   0.375],
       [  0.625,   0.125,   0.375,   0.375,   0.125],
       [  0.125,   0.625,   0.875,   0.875,   0.625],
       [ 0.1875,  0.3125,  0.3125,  0.6875,  0.5625],
       [ 0.6875,  0.8125,  0.8125,  0.1875,  0.0625],
       [ 0.9375,  0.0625,  0.5625,  0.9375,  0.3125]])

    results = i4_sobol_generate(5, 10)

    assert np.all(target == results), "Array values not as expected"

    return
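
The exact '==' comparison works here only because these first Sobol points are dyadic fractions that binary floats represent exactly; once the sequence is scaled or transformed, a tolerance-based check is safer. A minimal variant of the same test (a sketch, not part of the original suite):

import numpy as np
from sobol import i4_sobol_generate

def sobol_test_generate_allclose(target):
    # Same check as above, but reports the first mismatching element and
    # tolerates rounding noise introduced by any scaling of the sequence.
    results = i4_sobol_generate(5, 10)
    np.testing.assert_allclose(results, target, atol=1e-12)
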
Example #2
def initial_sobol(bounds, count):
    '''
    Generates count parameter configurations using a Sobol sequence.
    NOTE: the upper bounds are never reached.

    bounds: list of tuples (or lists) containing lower and upper bounds [(l1,u1), ..., (ln,un)]
    count:  number of configurations to generate
    '''
    return lambda: [num_to_par([i[j] * (bounds[j][1] - bounds[j][0]) + bounds[j][0]
                                for j in range(len(bounds))])
                    for i in i4_sobol_generate(len(bounds), int(count), 2).T]
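
The pattern above affinely maps each unit-interval Sobol coordinate into its parameter range. A self-contained sketch without the external num_to_par helper, assuming i4_sobol_generate returns one row per dimension as it does in the other examples on this page:

import numpy as np
from sobol import i4_sobol_generate

def sobol_configurations(bounds, count, skip=2):
    # points: one row per parameter, one column per configuration
    points = i4_sobol_generate(len(bounds), int(count), skip)
    lower = np.array([b[0] for b in bounds])
    upper = np.array([b[1] for b in bounds])
    # affine map from [0,1) onto [lower, upper); the upper bounds are never reached
    return (lower[:, None] + points * (upper - lower)[:, None]).T

configs = sobol_configurations([(0.0, 1.0), (10.0, 20.0)], 5)  # 5 rows, 2 columns
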
Example #3
def main():
    # read() behaves like scipy.io.wavfile.read and returns (sample_rate,
    # samples); SONGPATH, SONGNAME, JSONPATH, roundHalfUp() and
    # roundCeiling() are defined elsewhere in the project (hypothetical
    # stand-ins are sketched after this example).
    fs, audio = read(SONGPATH + SONGNAME + '.wav')
    audio = audio[:, 0]  # keep one channel of the stereo file
    audio = audio.astype(np.int64)

    window_time = 0.33
    sensitivity = 1
    history = 40

    # Analysis
    ts = 1 / fs
    windowSize = int(window_time / ts)
    audio = audio[0:audio.shape[0] - (audio.shape[0] % windowSize)]
    nWindows = int(audio.shape[0] / windowSize)
    windows = np.hsplit(audio, nWindows)
    count = -1
    result = []
    previous = []

    for window in windows:
        count += 1
        energy = np.sum(np.square(window))
        previous.append(energy)
        if len(previous) > history:
            previous.pop(0)
        avg_history = mean(previous)
        if energy * sensitivity > avg_history:
            # no overlapping circles
            if len(result) > 1 and count * window_time * 1000 - result[-1][0] <= 300:
                continue
            result.append([roundHalfUp(count * window_time * 1000)])

    sob = sobol.i4_sobol_generate(1, len(result), 5)
    no_change = 0

    for i in range(len(result)):
        # minimum time for a possible switch
        if i > 1 and result[i][0] - result[i - 1][0] < 660:
            result[i].append(result[i - 1][1])
        else:
            k = roundCeiling(sob[0][i] * 3) - 1
            if k < 0:
                k = 0
            if i > 1 and result[i - 1][1] == k:
                k += 1
            if k > 2:
                k = 0
            result[i].append(k)

    with open(JSONPATH + SONGNAME + '.json', 'w') as f:
        json.dump(result, f)
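
The script relies on module-level imports, paths, and rounding helpers defined elsewhere in the project. Hypothetical stand-ins that make the snippet self-contained (names from the code above; paths and rounding behavior are assumptions):

import json
import numpy as np
from statistics import mean
from scipy.io.wavfile import read
import sobol

SONGPATH = './songs/'     # illustrative locations
JSONPATH = './beatmaps/'
SONGNAME = 'track01'

def roundHalfUp(x):
    # round to the nearest integer, halves up
    return int(np.floor(x + 0.5))

def roundCeiling(x):
    # round up to the next integer
    return int(np.ceil(x))
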
Example #4
def SobolGrid(state0, GridWidth, nSobol=100):
    """Returns a Sobol grid centered on state0 with total width in each dimension
    given by GridWidth.  Both state0 and GridWidth should be row vectors"""

    SOBOLSKIP = 2

    # ---- generate Sobol points ----
    X = sobol.i4_sobol_generate(len(state0), nSobol, SOBOLSKIP)

    # scale according to the bounds in the config file
    for i in range(len(state0)):
        X[i] = state0[i] + GridWidth[i] * (X[i] - 0.5)

    return X
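
A quick usage sketch, assuming the same sobol module as above; i4_sobol_generate returns one row per dimension, so the grid comes back with shape (len(state0), nSobol):

import numpy as np
import sobol

center = np.array([0.0, 1.0])   # hypothetical grid center
width = np.array([2.0, 0.5])    # hypothetical total width per dimension
grid = SobolGrid(center, width, nSobol=8)
# row i stays within center[i] +/- width[i] / 2
print(grid.shape)  # (2, 8)
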
Example #5
def subrandom_particle_positions(nparticles, box_vectors, ndim):
    """Generate a deterministic list of subrandom particle positions."""
    # NOTE: assumes numpy (as np) and the simtk/openmm unit module are
    # imported at module level.
    # Create positions array.
    positions = unit.Quantity(np.zeros([nparticles, 3], np.float32), unit.nanometers)

    # Generate Sobol' sequence.
    import sobol
    ivec = sobol.i4_sobol_generate(ndim, nparticles, 1)
    x = np.array(ivec, np.float32)
    for dim in range(ndim):
        l = box_vectors[dim][dim]
        positions[:, dim] = unit.Quantity((x[dim, :] - 0.5) * l / l.unit, l.unit)

    return positions
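
A usage sketch, assuming OpenMM's simtk.unit package and a cubic box given as a 3x3 Quantity (the 2 nm box size is illustrative):

import numpy as np
from simtk import unit

box_vectors = unit.Quantity(2.0 * np.eye(3), unit.nanometers)
positions = subrandom_particle_positions(10, box_vectors, 3)
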
Example #6
def nextPointToEvaluate(aei):
  # i4_sobol_generate returns an array of shape (dim, n) in the sobol module
  # used on this page; transpose so that each row is one candidate point
  candidates = sobol.i4_sobol_generate(aei.gp.dataDimnension, NR_CANDIDATES, 100).T
  # TODO: add the bounds for the optimization
  bestCandidate = None
  bestFunVal = 0
  for candidate in candidates:
    # fmin_l_bfgs_b returns a (x, f, info) tuple; keep only the optimized point
    candidate, _, _ = optimize.fmin_l_bfgs_b(aei.expectedImprovement, candidate, aei.expectedImprovementGrad)
    # Here you take into account if you do a min or a max
    # So far, I have gone for a max
    if aei.expectedImprovement(candidate) > bestFunVal:
      bestCandidate = candidate
      bestFunVal = aei.expectedImprovement(candidate)

  return bestCandidate
Example #7
    def GlobalSearch(self):

        nsobol = self.global_search_options["num_points"]

        SOBOLSKIP = 2

        # ---- generate Sobol points ----
        xstarts = sobol.i4_sobol_generate(self.nparam, nsobol, SOBOLSKIP).T
        # scale according to the bounds
        xstarts = self.lower_bounds[np.newaxis, :] + (
            self.upper_bounds - self.lower_bounds)[np.newaxis, :] * xstarts

        # --- evaluate f on many points ----
        y = np.array(self.mppool.map(self.f, xstarts))

        # sort the results
        I = np.argsort(y)
        xstarts = xstarts[I]
        y = y[I]

        # take the best num_restarts points
        xstarts = xstarts[:self.local_search_options["num_restarts"]]

        return xstarts
Example #8
del parser, args

print('dists:  ',dists)
print('method: ',method)
print("")


npara = len(dists)
skip  = 30000

if method[0] == 'sobol':
    
    # total number of sets (=model runs):
    #      nsets_sobol = (npara+2) * nsets
    
    sobol_sets = sobol.i4_sobol_generate(2*(npara+3),nsets,skip)    # add three MVA parameters z0, z1, and z2

    # ---------------------
    # scale parameter sets
    # ---------------------
    for ipara in range(npara):
        if dists[ipara][0] == 'uniform':
            sobol_sets[ipara,:]         = sobol_sets[ipara,:]         * (dists[ipara][2]-dists[ipara][1]) + dists[ipara][1]
            sobol_sets[ipara+npara+3,:] = sobol_sets[ipara+npara+3,:] * (dists[ipara][2]-dists[ipara][1]) + dists[ipara][1]
        elif dists[ipara][0] == 'gaussian':
            # N[0,1]; cut at 1% and 99% percentiles
            sobol_sets[ipara,:]         = 0.01 + 0.98 * sobol_sets[ipara,:]
            sobol_sets[ipara+npara+3,:] = 0.01 + 0.98 * sobol_sets[ipara+npara+3,:]
            sobol_sets[ipara,:]         = stats.norm.ppf(sobol_sets[ipara,:],         loc=dists[ipara][1], scale=dists[ipara][2])
            sobol_sets[ipara+npara+3,:] = stats.norm.ppf(sobol_sets[ipara+npara+3,:], loc=dists[ipara][1], scale=dists[ipara][2])
        else:
            # only 'uniform' and 'gaussian' distributions are handled here
            raise ValueError('Unknown distribution: '+dists[ipara][0])
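
The 'gaussian' branch above turns unit-interval Sobol points into normal deviates by squeezing them into [0.01, 0.99] and applying the inverse CDF. The same transform in isolation (loc/scale values are illustrative):

import scipy.stats as stats
import sobol

u = sobol.i4_sobol_generate(1, 1000, 30000)[0]  # unit-interval Sobol points
u = 0.01 + 0.98 * u                             # cut at 1% and 99% percentiles
x = stats.norm.ppf(u, loc=0.0, scale=1.0)       # quasi-random N(0,1) samples
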
Example #9
File: h2co.py Project: avcopan/sfgb
# Rij equilibrium coordinates in a.u.
COeq = numpy.linalg.norm(Ceq - Oeq)
CH1eq = numpy.linalg.norm(Ceq - H1eq)
CH2eq = numpy.linalg.norm(Ceq - H2eq)
OH1eq = numpy.linalg.norm(Oeq - H1eq)
OH2eq = numpy.linalg.norm(Oeq - H2eq)
H1H2eq = numpy.linalg.norm(H1eq - H2eq)
Req = numpy.array([COeq, CH1eq, CH2eq, OH1eq, OH2eq, H1H2eq])

try:
    print('try')
    internal = numpy.load('sobseq.npy')
except FileNotFoundError:
    print('except')
    internal = sobol.i4_sobol_generate(6, Nsample)
    numpy.save('sobseq', internal)
print(internal)

# in ang and degrees, for Vmax = 15000
minmaxinternal = numpy.array([[1.03, 1.50], [0.84, 1.69], [0.84, 1.69],
                              [83, 162], [83, 162], [105, 255]])
# convert angstroms to bohr
minmaxinternal[:3, :] *= 1.88973
# convert degrees to radians
minmaxinternal[3:, :] *= numpy.pi / 180.

for i in range(6):
    # affine map of each unit-interval column onto [min, max] of that coordinate
    internal[:, i] = (
        numpy.ones((Nsample, )) * minmaxinternal[i, 0] + numpy.ones(
            (Nsample, )) * (minmaxinternal[i, 1] - minmaxinternal[i, 0]) *
        internal[:, i])
Example #10
    def reseed(self):
        import sobol
        arr = sobol.i4_sobol_generate(self.m, self.n, self.skip)
        self.data.from_numpy(arr)
        self.skip += 8
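
reseed advances the skip offset by a fixed 8 on every call; advancing by the number of points actually drawn guarantees consecutive calls never reuse sequence elements. A sketch of that variant (same self.m, self.n, self.data as above):

    def reseed(self):
        import sobol
        arr = sobol.i4_sobol_generate(self.m, self.n, self.skip)
        self.data.from_numpy(arr)
        self.skip += self.n  # skip past everything drawn so far
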
Example #11
                    npara_b = len(options_paras_disjoint[1][ioption_b])
                    npara_c = len(options_paras_disjoint[2][ioption_c])
                    nnparas = npara_a + npara_b + npara_c

                    # --------------------
                    # Weights for current set of options to 1.0; others are 0.0
                    # --------------------
                    block_weights_nested = copy.deepcopy(zero_weight)
                    block_weights_nested[0][ioption_a] = 1.0
                    block_weights_nested[1][ioption_b] = 1.0
                    block_weights_nested[2][ioption_c] = 1.0

                    # --------------------
                    # get sobol sequences
                    # --------------------
                    sobol_sets = sobol.i4_sobol_generate(nparas*2,nsets,40000)
                    sobol_sets = np.transpose(sobol_sets)

                    # --------------------
                    # scale parameter sets A
                    # --------------------
                    block_a_paras  = copy.deepcopy(sobol_sets[:,0:nparas])
                    block_a_paras *= (para_ranges[:,1]-para_ranges[:,0])
                    block_a_paras += para_ranges[:,0]

                    # --------------------
                    # Run model for A-sets
                    # --------------------
                    f_a = np.array([ model_function_disjoint(block_a_paras[iset], block_weights_nested, constants=[para_a,para_b]) for iset in range(nsets) ])
                    model_runs += nsets
Example #12
    def train(self):
        """Train generator and discriminator."""
        if self.sobol_noise:
            sobol_noise = sobol.i4_sobol_generate(self.batch_size, self.z_dim)
            sobol_noise = np.transpose(sobol_noise)
            sobol_noise = torch.from_numpy(sobol_noise)
            sobol_noise = sobol_noise.float()
            fixed_noise = self.to_variable(sobol_noise)
            #print(sobol_noise)
            #print(sobol_noise.size())
        else:
            fixed_noise = self.to_variable(
                torch.randn(self.batch_size, self.z_dim))
        total_step = len(self.data_loader)
        for epoch in range(self.num_epochs):
            for i, images in enumerate(self.data_loader):

                #===================== Train D =====================#
                images = self.to_variable(images)
                batch_size = images.size(0)
                if self.sobol_noise:
                    sobol_noise = sobol.i4_sobol_generate(
                        batch_size, self.z_dim)
                    sobol_noise = np.transpose(sobol_noise)
                    sobol_noise = torch.from_numpy(sobol_noise)
                    sobol_noise = sobol_noise.float()
                    noise = self.to_variable(sobol_noise)
                    #print(sobol_noise)
                    #print(sobol_noise.size())
                else:
                    noise = self.to_variable(
                        torch.randn(batch_size, self.z_dim))
                    #print(noise.size())

                # Train D to recognize real images as real.
                outputs = self.discriminator(images)
                real_loss = torch.mean(
                    (outputs - 1)**2
                )  # L2 loss instead of Binary cross entropy loss (this is optional for stable training)

                # Train D to recognize fake images as fake.
                fake_images = self.generator(noise)
                outputs = self.discriminator(fake_images)
                fake_loss = torch.mean(outputs**2)

                # Backprop + optimize
                d_loss = real_loss + fake_loss
                self.reset_grad()
                d_loss.backward()
                self.d_optimizer.step()

                #===================== Train G =====================#
                if self.sobol_noise:
                    sobol_noise = sobol.i4_sobol_generate(
                        batch_size, self.z_dim)
                    sobol_noise = np.transpose(sobol_noise)
                    sobol_noise = torch.from_numpy(sobol_noise)
                    sobol_noise = sobol_noise.float()
                    noise = self.to_variable(sobol_noise)
                    #print(sobol_noise)
                    #print(sobol_noise.size())
                else:
                    noise = self.to_variable(
                        torch.randn(batch_size, self.z_dim))

                # Train G so that D recognizes G(z) as real.
                fake_images = self.generator(noise)
                outputs = self.discriminator(fake_images)
                g_loss = torch.mean((outputs - 1)**2)

                # Backprop + optimize
                self.reset_grad()
                g_loss.backward()
                self.g_optimizer.step()

                # print the log info
                if (i + 1) % self.log_step == 0:
                    print(
                        'Epoch [%d/%d], Step[%d/%d], d_real_loss: %.4f, '
                        'd_fake_loss: %.4f, g_loss: %.4f' %
                        (epoch + 1, self.num_epochs, i + 1, total_step,
                         real_loss.data[0], fake_loss.data[0], g_loss.data[0]))

                # save the sampled images
                if (i + 1) % self.sample_step == 0:
                    fake_images = self.generator(fixed_noise)
                    torchvision.utils.save_image(
                        self.denorm(fake_images.data),
                        os.path.join(
                            self.sample_path,
                            'fake_samples-%d-%d-sobol-%s.png' %
                            (epoch + 1, i + 1, self.sobol_noise)))

            # save the model parameters for each epoch
            g_path = os.path.join(
                self.model_path, 'generator-%d-sobol-%s.pkl' %
                (epoch + 1, str(self.sobol_noise)))
            d_path = os.path.join(
                self.model_path, 'discriminator-%d-sobol-%s.pkl' %
                (epoch + 1, str(self.sobol_noise)))
            torch.save(self.generator.state_dict(), g_path)
            torch.save(self.discriminator.state_dict(), d_path)
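
The same three-line Sobol-noise recipe appears three times in train(); it could live in one helper method. A sketch under the assumption that the sobol module and to_variable wrapper behave as above (sobol_noise_batch is a hypothetical name, not part of the original class):

    def sobol_noise_batch(self, batch_size):
        # draw one quasi-random noise batch and wrap it like the inline blocks
        arr = sobol.i4_sobol_generate(batch_size, self.z_dim)
        arr = np.transpose(arr)
        return self.to_variable(torch.from_numpy(arr).float())
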
Example #13
def sa_model_multiple_processes_DVM(paras_per_option,
                                    para_ranges,
                                    model_function,
                                    constants=None,
                                    nsets=None,
                                    budget=None,
                                    save=None,
                                    nsets_parasets=128):
    """
        This function estimates the Sobol' sensitivity indexes for models with multiple process options.
        The options and the parameters of those options are given in a nested list 'paras_per_option'.
        Further, the range of each parameter needs to be given, as well as a function that returns model
        outputs when a set of parameters and weights is given. The weights are used to weight all the
        process option outputs. Hence, the returned model output is a weighted model output. The sampling
        of all weights and parameters is done internally in this method. Sobol' sequences are used for
        this purpose.

        Definition
        ----------
        def sa_model_multiple_processes_DVM(paras_per_option, para_ranges, model_function, constants=None, nsets=None, budget=None, save=None, nsets_parasets=128)


        Input               Format               Description
        -----               -----                -----------
        paras_per_option    list of lists        lists the parameters of each process option
                                                 example: process A has option A1 using {x1,x2} and option A2 using {x1}
                                                          process B has option B1 using {x3,x4} and option B2 using {}
                                                          process C has option C1 using {} and option C2 using {x5,x6} and C3 using {x5}
                                                          --> paras_per_option = [[[0,1],[0]],    # process A
                                                                                  [[2,3],[]],     # process B
                                                                                  [[],[4,5],[4]]] # process C
        para_ranges         list of lists        lists lower and upper bound for each parameter
                                                 example: para_ranges = [[ 0.0,3.0], #x1
                                                                         [ 1.0,3.0], #x2
                                                                         [-1.0,0.0], #x3
                                                                         [ 0.0,3.0], #x4
                                                                         [ 0.0,3.0], #x5
                                                                         [ 5.0,9.0]] #x6
        model_function      function             - function returning a dictionary that can contain multiple weighted model outputs of all model options (scalar or 1D output), e.g.,
                                                       { 'Q':        np.array([2.5,3.5,...,4.5]),
                                                         'NSE':      0.6,
                                                         'baseflow': np.array([20.5,23.5,...,24.5])
                                                       }
                                                 - the internal sampling will make sure that all weights are between 0 and 1 
                                                   and sum up to 1 for each process
                                                 - interface must look like: 
                                                   model_function(set_of_parameters, set_of_weights, constants=constants, run_id=None) 
                                                 - weights are given as nested list of list (similar to 'paras_per_option')
                                                 example: 
                                                       def model_function(pp,ww,constant=None,run_id=None):
                                                            # process A
                                                            proc_a = ww[0][0] * (pp[0]**2+pp[1]) + ww[0][1] * (sin(pp[0]))                  
                                                            # process B
                                                            proc_b = ww[1][0] * (pp[2]**4+pp[3]**2) + ww[1][1] * (7.0)                      
                                                            # process C
                                                            proc_c = ww[2][0] * (9.81) + ww[2][1] * (pp[4]+cos(pp[5])) + ww[2][2] * pp[4]   
                                                            # model output: this is totally fake...
                                                            model['out'] = proc_a * proc_b + proc_c
                                                            return model
        constants           list                 optional: list of constants that 'model_function' might need
                                                 default: None
        nsets               integer              optional: number of reference parameter sets
                                                 default: 1000
        budget              integer              optional: total number of model runs allowed for analysis; overwrites nsets
                                                 nsets = budget / ((nprocess+2)+(nweights+np.sum(noptions)+2)+(nparas+nweights+2))
                                                 default: None
        save                string               filename to save parameter sets and respective model outputs in pickle file
                                                 default: None (nothing saved to file)
        nsets_parasets      integer              optional: number of pre-sampled parameter sets per process
                                                 default: 128
        

        Output          Format        Description
        -----           -----         -----------
        sobol_indexes   dict          if model output is scalar:
                                          sobol_indexes['paras'][0]           ... main  Sobol' index of all parameters and weights (dims: nparas+nrand)
                                          sobol_indexes['paras'][1]           ... total Sobol' index of all parameters and weights (dims: nparas+nrand)
                                          sobol_indexes['process_options'][0] ... main  Sobol' index of all process options and weights (dims: nprocopts+nrand)
                                          sobol_indexes['process_options'][1] ... total Sobol' index of all process options and weights (dims: nprocopts+nrand)
                                          sobol_indexes['processes'][0]       ... main  Sobol' index of all processes (weights included) (dims: nprocesses)
                                          sobol_indexes['processes'][1]       ... total Sobol' index of all processes (weights included) (dims: nprocesses)
                                      if model output is 1D:
                                          sobol_indexes['paras'][0]           ...               main  Sobol' index of all parameters and weights (dims: ntime, nparas+nrand)
                                          sobol_indexes['paras'][1]           ...               total Sobol' index of all parameters and weights (dims: ntime, nparas+nrand)
                                          sobol_indexes['paras'][2]           ...          mean main  Sobol' index of all parameters and weights (dims: ntime, nparas+nrand)
                                          sobol_indexes['paras'][3]           ...          mean total Sobol' index of all parameters and weights (dims: ntime, nparas+nrand)
                                          sobol_indexes['paras'][4]           ... weighted mean main  Sobol' index of all parameters and weights (dims: ntime, nparas+nrand)
                                          sobol_indexes['paras'][5]           ... weighted mean total Sobol' index of all parameters and weights (dims: ntime, nparas+nrand)
                                          sobol_indexes['process_options'][0] ...               main  Sobol' index of all process options and weights (dims: ntime, nprocopts+nrand)
                                          sobol_indexes['process_options'][1] ...               total Sobol' index of all process options and weights (dims: ntime, nprocopts+nrand)
                                          sobol_indexes['process_options'][2] ...          mean main  Sobol' index of all process options and weights (dims: ntime, nprocopts+nrand)
                                          sobol_indexes['process_options'][3] ...          mean total Sobol' index of all process options and weights (dims: ntime, nprocopts+nrand)
                                          sobol_indexes['process_options'][4] ... weighted mean main  Sobol' index of all process options and weights (dims: ntime, nprocopts+nrand)
                                          sobol_indexes['process_options'][5] ... weighted mean total Sobol' index of all process options and weights (dims: ntime, nprocopts+nrand)
                                          sobol_indexes['processes'][0]       ...               main  Sobol' index of all processes (weights included) (dims: ntime, nprocesses)
                                          sobol_indexes['processes'][1]       ...               total Sobol' index of all processes (weights included) (dims: ntime, nprocesses)
                                          sobol_indexes['processes'][2]       ...          mean main  Sobol' index of all processes (weights included) (dims: ntime, nprocesses)
                                          sobol_indexes['processes'][3]       ...          mean total Sobol' index of all processes (weights included) (dims: ntime, nprocesses)
                                          sobol_indexes['processes'][4]       ... weighted mean main  Sobol' index of all processes (weights included) (dims: ntime, nprocesses)
                                          sobol_indexes['processes'][5]       ... weighted mean total Sobol' index of all processes (weights included) (dims: ntime, nprocesses)

        Description
        -----------
        


        Restrictions
        ------------
        Parameters can only be uniformly distributed in a range [a,b]. 
        No Gaussian distribution etc possible yet.

        Examples
        --------

        >>> import numpy as np
        >>> nsets = 1024
        >>> nsets_parasets = 128

        --------------------------------------------------
        Simple setup
        --------------------------------------------------

        >>> # list of parameters that go into each option (numbering starts with 0)
        >>> # (a) simple setup
        >>> paras_per_option = [ 
        ...       [[0], []],             # parameters of process options A1 and A2
        ...       [[1], [2], [3,4]],     # parameters of process options B1, B2, and B3
        ...       [[5], [6]]             # parameters of process options C1 and C2
        ...     ]
        >>> para_ranges = [ 
        ...       [-np.pi,np.pi],      # parameter range of x1
        ...       [-np.pi,np.pi],      # parameter range of x2
        ...       [-np.pi,np.pi],      # parameter range of x3
        ...       [-np.pi,np.pi],      # parameter range of x4
        ...       [-np.pi,np.pi],      # parameter range of x5
        ...       [-np.pi,np.pi],      # parameter range of x6
        ...       [-np.pi,np.pi]       # parameter range of x7
        ...     ]   
        >>> def model_function(paras, weights, constants=None, run_id=None):
        ...     # input:
        ...     #     paras     ... list of model parameters scaled to their range;
        ...     #                   values for all N model parameters have to be provided
        ...     #                   example:
        ...     #                        [ x1, x2, x3, x4, .... ]
        ...     #     weights   ... list of lists of weights to weight options of each process;
        ...     #                   each list of the lists need to sum up to 1.0;
        ...     #                   each sublist is the N_i weights for the N_i process options of process i;
        ...     #                   example:
        ...     #                        [ [w_a1, w_a2, ...], [w_b1, w_b2, w_b3, ...], [w_c1, w_c2, ...], ... ]
        ...     #     constants ... optional list of constants that are same for all models;
        ...     #                   like parameters a and b in Ishigami-Homma function
        ...     #                   example:
        ...     #                        [2.0, 1.0]
        ...     # output:
        ...     #     model output
        ...     #     example:
        ...     #           { 'Q':        np.array([2.5,3.5,...,4.5]),
        ...     #             'NSE':      0.6,
        ...     #             'baseflow': np.array([20.5,23.5,...,24.5])
        ...     #           }
        ...
        ...     # check that provided number of weights is correct:
        ...     # --> one weight per option per process
        ...     if ( [len(ilist) for ilist in weights] != [2,3,2] ):
        ...         print("Number of weights: ",[len(ilist) for ilist in weights])
        ...         raise ValueError("sa_model_multiple_processes_DVM: model_function: provided number of weights must be [2,3,2]")
        ...     # check if sum up to 1.0:
        ...     if ( np.any(np.array([np.sum(ilist) for ilist in weights]) != 1.0) ):
        ...         print("Sum of weights per process: ",[np.sum(ilist) for ilist in weights])
        ...         raise ValueError("sa_model_multiple_processes_DVM: model_function: sum of weights must be 1.0 for all processes")
        ...     # check if weights <= 1.0:
        ...     if ( np.any(np.array([item for ilist in weights for item in ilist]) > 1.0) ):
        ...         print("Weights: ",weights)
        ...         raise ValueError("sa_model_multiple_processes_DVM: model_function: weights must be all less or equal 1.0")
        ...     # check if weights >= 0.0:
        ...     if ( np.any(np.array([item for ilist in weights for item in ilist]) < 0.0) ):
        ...         print("Weights: ",weights)
        ...         raise ValueError("sa_model_multiple_processes_DVM: model_function: weights must be all greater or equal 0.0")
        ...     # check if number of parameters is correct:
        ...     if (len(paras) != 7):
        ...         print("Number of parameters: ",len(paras))
        ...         raise ValueError("sa_model_multiple_processes_DVM: model_function: provided number of parameters must be 7")
        ...        
        ...     out = 0.0
        ...
        ...     if constants is None:
        ...         aa = 2.0
        ...         bb = 1.0
        ...     else:
        ...         aa = constants[0]
        ...         bb = constants[1]
        ...
        ...     # ---------------
        ...     # simple model
        ...     # ---------------
        ...        
        ...     # process A
        ...     out += ( weights[0][0] * np.sin(paras[0]) +              # A1
        ...              weights[0][1] * 1.0 )                           # A2
        ...     # process B
        ...     out *= ( weights[1][0] * (1.0 + bb * paras[1]**4) +      # B1
        ...              weights[1][1] * (1.0 + bb * paras[2]**2) +      # B2
        ...              weights[1][2] * (paras[3] + bb * paras[4]) )    # B3
        ...     # process C
        ...     out += ( weights[2][0] * (aa * np.sin(paras[5])**2) +    # C1
        ...              weights[2][1] * (1.0 + bb * paras[6]**4) )      # C2
        ...
        ...     model = {}
        ...     model['result'] = out
        ...
        ...     return model

        >>> # this is calling the actual tool
        >>> sobol_indexes = sa_model_multiple_processes_DVM(paras_per_option, para_ranges, model_function, constants=None, nsets=nsets, nsets_parasets=nsets_parasets)

        >>> # printing
        >>> print("process sensitivities:        S_A   = ",astr(sobol_indexes['processes']['si']['result'],prec=5))
        process sensitivities:        S_A   =  ['0.06200' '0.09874' '0.68941']
        >>> print("process sensitivities:        ST_A  = ",astr(sobol_indexes['processes']['sti']['result'],prec=5))
        process sensitivities:        ST_A  =  ['0.17770' '0.23122' '0.70231']

        --------------------------------------------------
        Realistic setup
        --------------------------------------------------

        >>> # list of parameters that go into each option (numbering starts with 0)
        >>> # (b) realistic setup
        >>> paras_per_option = [ 
        ...       [[0], [0,1]],             # parameters of process options D1 and D2
        ...       [[1], [2], [3,4]],        # parameters of process options E1, E2, and E3
        ...       [[5], [2,6]]              # parameters of process options F1 and F2
        ...     ]
        >>> para_ranges = [ 
        ...       [-np.pi,np.pi],      # parameter range of x1
        ...       [-np.pi,np.pi],      # parameter range of x2
        ...       [-np.pi,np.pi],      # parameter range of x3
        ...       [-np.pi,np.pi],      # parameter range of x4
        ...       [-np.pi,np.pi],      # parameter range of x5
        ...       [-np.pi,np.pi],      # parameter range of x6
        ...       [-np.pi,np.pi]       # parameter range of x7
        ...     ]   
        >>> def model_function(paras, weights, constants=None, run_id=None):
        ...     # input:
        ...     #     paras     ... list of model parameters scaled to their range;
        ...     #                   values for all N model parameters have to be provided
        ...     #                   example:
        ...     #                        [ x1, x2, x3, x4, .... ]
        ...     #     weights   ... list of lists of weights to weight options of each process;
        ...     #                   each list of the lists need to sum up to 1.0;
        ...     #                   each sublist is the N_i weights for the N_i process options of process i;
        ...     #                   example:
        ...     #                        [ [w_a1, w_a2, ...], [w_b1, w_b2, w_b3, ...], [w_c1, w_c2, ...], ... ]
        ...     #     constants ... optional list of constants that are same for all models;
        ...     #                   like parameters a and b in Ishigami-Homma function
        ...     #                   example:
        ...     #                        [2.0, 1.0]
        ...     # output:
        ...     #     model output
        ...     #     example:
        ...     #           { 'Q':        np.array([2.5,3.5,...,4.5]),
        ...     #             'NSE':      0.6,
        ...     #             'baseflow': np.array([20.5,23.5,...,24.5])
        ...     #           }
        ...
        ...     # check that provided number of weights is correct:
        ...     # --> one weight per option per process
        ...     if ( [len(ilist) for ilist in weights] != [2,3,2] ):
        ...         print("Number of weights: ",[len(ilist) for ilist in weights])
        ...         raise ValueError("sa_model_multiple_processes_DVM: model_function: provided number of weights must be [2,3,2]")
        ...     # check if sum up to 1.0:
        ...     if ( np.any(np.array([np.sum(ilist) for ilist in weights]) != 1.0) ):
        ...         print("Sum of weights per process: ",[np.sum(ilist) for ilist in weights])
        ...         raise ValueError("sa_model_multiple_processes_DVM: model_function: sum of weights must be 1.0 for all processes")
        ...     # check if weights <= 1.0:
        ...     if ( np.any(np.array([item for ilist in weights for item in ilist]) > 1.0) ):
        ...         print("Weights: ",weights)
        ...         raise ValueError("sa_model_multiple_processes_DVM: model_function: weights must be all less or equal 1.0")
        ...     # check if weights >= 0.0:
        ...     if ( np.any(np.array([item for ilist in weights for item in ilist]) < 0.0) ):
        ...         print("Weights: ",weights)
        ...         raise ValueError("sa_model_multiple_processes_DVM: model_function: weights must be all greater or equal 0.0")
        ...     # check if number of parameters is correct:
        ...     if (len(paras) != 7):
        ...         print("Number of parameters: ",len(paras))
        ...         raise ValueError("sa_model_multiple_processes_DVM: model_function: provided number of parameters must be 7")
        ...        
        ...     out = 0.0
        ...
        ...     if constants is None:
        ...         aa = 2.0
        ...         bb = 1.0
        ...     else:
        ...         aa = constants[0]
        ...         bb = constants[1]
        ...
        ...     # ---------------
        ...     # realistic model
        ...     # ---------------
        ...
        ...     # process D
        ...     out += ( weights[0][0] * np.sin(paras[0]) +                            # D1
        ...              weights[0][1] * (paras[0]+paras[1]**2) )                      # D2
        ...     # process E
        ...     out *= ( weights[1][0] * (1.0 + bb * paras[1]**4) +                    # E1
        ...              weights[1][1] * (1.0 + bb * paras[2]**2) +                    # E2
        ...              weights[1][2] * (paras[3] + bb * paras[4]) )                  # E3
        ...     # process F
        ...     out += ( weights[2][0] * (aa * np.sin(paras[5])**2) +                  # F1          
        ...              weights[2][1] * (1.0 + bb * paras[6]**4) + paras[2]**2 )      # F2
        ...
        ...     model = {}
        ...     model['result'] = out
        ...
        ...     return model

        >>> # this is calling the actual tool
        >>> sobol_indexes = sa_model_multiple_processes_DVM(paras_per_option, para_ranges, model_function, constants=None, nsets=nsets, nsets_parasets=nsets_parasets)

        >>> # printing
        >>> print("process sensitivities:        S_A   = ",astr(sobol_indexes['processes']['si']['result'],prec=5))
        process sensitivities:        S_A   =  ['0.07335' '0.55102' '0.04634']
        >>> print("process sensitivities:        ST_A  = ",astr(sobol_indexes['processes']['sti']['result'],prec=5))
        process sensitivities:        ST_A  =  ['0.34189' '0.87986' '0.06088']


        
        License
        -------
        This file is part of the "SA for Models with Multiple Processes" Python package.

        The "SA for Models with Multiple Processes" Python package is free software: you 
        can redistribute it and/or modify it under the terms of the GNU Lesser General 
        Public License as published by the Free Software Foundation, either version 3 of 
        the License, or (at your option) any later version.

        The "SA for Models with Multiple Processes" Python package is distributed in the 
        hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied 
        warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
        GNU Lesser General Public License for more details.

        You should have received a copy of the GNU Lesser General Public License
        along with the "SA for Models with Multiple Processes" Python package (cf. gpl.txt and lgpl.txt).
        If not, see <http://www.gnu.org/licenses/>.

        Copyright 2020 Juliane Mai - [email protected]


        History
        -------
        Written,  Juliane Mai, April 2020
    """

    # initialize return variable
    sobol_indexes = OrderedDict()

    para_ranges = np.array(para_ranges)

    # number of parameters
    nparas = np.shape(para_ranges)[0]
    # number of options per process
    noptions = np.array([len(oo) for oo in paras_per_option])
    # number of processes
    nprocess = np.shape(noptions)[0]
    # number of weights required
    nweights = np.sum(noptions)

    if (nsets is None):
        nsets = 1000

    if (nsets_parasets is None):
        nsets_parasets = 128

    if not (budget is None):
        # overwrite nsets if budget is given
        nsets = int(budget / ((nprocess + 2) + (nweights + np.sum(noptions) + 2) +
                              (nparas + nweights + 2)))
        if nsets <= 0:
            print("Minimal budget:     ",
                  ((nprocess + 2) + (nweights + np.sum(noptions) + 2) +
                   (nparas + nweights + 2)))
            print("Recommended budget: ",
                  ((nprocess + 2) + (nweights + np.sum(noptions) + 2) +
                   (nparas + nweights + 2)) * 1000)
            raise ValueError(
                "sa_model_multiple_processes_DVM: Budget is too small!")

    if not (save is None):
        save_to_pickle = {}

    # sample parametersets for each process
    nsets_per_process = nsets_parasets  # DVM used 128 for observations, crop parameters, soil parameters, weather, and 8 for model structure
    para_sets = [[] for iprocess in np.arange(nprocess)]
    paras_per_process = [
        list(np.unique([item for sublist in iprocess for item in sublist]))
        for iprocess in paras_per_option
    ]  # [[0, 1], [1, 2, 3, 4], [2, 5, 6]]
    paras_per_process_flat = [
        item for sublist in paras_per_process for item in sublist
    ]  # [0, 1, 1, 2, 3, 4, 2, 5, 6]
    nparas_per_process = [len(iprocess)
                          for iprocess in paras_per_process]  # [2,4,3]
    nweights_per_process = [len(iprocess)
                            for iprocess in paras_per_option]  # [2,3,2]
    for iprocess in np.arange(nprocess):

        # sampling
        # (last column is a dummy used later to convert random numbers into weights)
        para_sets[iprocess] = sobol.i4_sobol_generate(
            (nparas_per_process[iprocess] + nweights_per_process[iprocess]),
            nsets_per_process, 40000)
        para_sets[iprocess] = np.transpose(para_sets[iprocess])
        para_sets[iprocess][:, -1] = -9999.0

        # scaling
        for iipara, ipara in enumerate(paras_per_process[iprocess]):
            para_sets[iprocess][:, iipara] *= (para_ranges[ipara, 1] -
                                               para_ranges[ipara, 0])
            para_sets[iprocess][:, iipara] += para_ranges[ipara, 0]

        # convert weight random numbers in weights
        para_sets[iprocess][:, nparas_per_process[
            iprocess]:] = psd.PieShareDistribution(
                nsets_per_process,
                nweights_per_process[iprocess],
                remainder=True,
                randomnumbers=para_sets[iprocess]
                [:, nparas_per_process[iprocess]:nparas_per_process[iprocess] +
                 nweights_per_process[iprocess] - 1])

    # (A) For each process, sample a number in the unit interval that will be converted into a parameter-set ID, using Sobol' sequences
    sobol_sets = sobol.i4_sobol_generate((nprocess) * 2, nsets, 40000)
    sobol_sets = np.transpose(sobol_sets)

    # (B) Convert into integer
    sobol_sets_int = np.array(
        [[int(ival) for ival in sobol_sets[iset] * nsets_per_process]
         for iset in np.arange(np.shape(sobol_sets)[0])])

    # (C) Construct block A, B and Ci (for processes)
    block_a = copy.deepcopy(sobol_sets_int[:, 0 * nprocess:1 * nprocess])
    block_b = copy.deepcopy(sobol_sets_int[:, 1 * nprocess:2 * nprocess])
    block_c = [[] for iprocess in np.arange(nprocess)]
    for iprocess in np.arange(nprocess):
        block_c[iprocess] = copy.deepcopy(block_a)
        block_c[iprocess][:, iprocess] = copy.deepcopy(block_b[:, iprocess])

    # --------------------------------------------
    # Derive model output for A sets
    # --------------------------------------------

    f_a = []
    for iset in np.arange(nsets):

        paraweightset_proc = [
            para_sets[iprocess][block_a[iset, iprocess]]
            for iprocess in np.arange(nprocess)
        ]  # parameter values and weights for each process
        paraset_proc_flat = [
            item for isublist, sublist in enumerate(paraweightset_proc)
            for item in sublist[0:nparas_per_process[isublist]]
        ]
        paraset = [-9999.0 for ipara in np.arange(nparas)
                   ]  # parameter set combined in right order
        weightset = [
            sublist[nparas_per_process[isublist]:nparas_per_process[isublist] +
                    nweights_per_process[isublist]]
            for isublist, sublist in enumerate(paraweightset_proc)
        ]  # weight    set combined in right order
        for iipara, ipara in enumerate(paras_per_process_flat):
            # MAJOR PROBLEM is that parameters that appear in multiple processes get overwritten
            paraset[ipara] = paraset_proc_flat[iipara]

        f_a.append(
            model_function(paraset,
                           weightset,
                           constants=constants,
                           run_id="a_set_" + str(iset)))

    # convert list of dicts into dict of lists:
    #       [{'result_1':1.0, 'result_2':2.0}, {'result_1':3.0, 'result_2':4.0}, ...] --> {'result_1':[1.0,3.0,...], 'result_2':[2.0,4.0,...]}
    keys = f_a[0].keys()
    tmp = {}
    for ikey in keys:
        tmp_key = []
        for iset in np.arange(nsets):
            tmp_key.append(f_a[iset][ikey])

        tmp[ikey] = np.array(tmp_key)
    f_a = tmp
    # print('f_a = ',f_a)

    # --------------------------------------------
    # Derive model output for B sets
    # --------------------------------------------

    f_b = []
    for iset in np.arange(nsets):

        paraweightset_proc = [
            para_sets[iprocess][block_b[iset, iprocess]]
            for iprocess in np.arange(nprocess)
        ]  # parameter values and weights for each process
        paraset_proc_flat = [
            item for isublist, sublist in enumerate(paraweightset_proc)
            for item in sublist[0:nparas_per_process[isublist]]
        ]
        paraset = [-9999.0 for ipara in np.arange(nparas)
                   ]  # parameter set combined in right order
        weightset = [
            sublist[nparas_per_process[isublist]:nparas_per_process[isublist] +
                    nweights_per_process[isublist]]
            for isublist, sublist in enumerate(paraweightset_proc)
        ]  # weight    set combined in right order
        for iipara, ipara in enumerate(paras_per_process_flat):
            # MAJOR PROBLEM is that parameters that appear in multiple processes get overwritten
            paraset[ipara] = paraset_proc_flat[iipara]

        f_b.append(
            model_function(paraset,
                           weightset,
                           constants=constants,
                           run_id="b_set_" + str(iset)))

    # convert list of dicts into dict of lists:
    #       [{'result_1':1.0, 'result_2':2.0}, {'result_1':3.0, 'result_2':4.0}, ...] --> {'result_1':[1.0,3.0,...], 'result_2':[2.0,4.0,...]}
    keys = f_b[0].keys()
    tmp = {}
    for ikey in keys:
        tmp_key = []
        for iset in np.arange(nsets):
            tmp_key.append(f_b[iset][ikey])

        tmp[ikey] = np.array(tmp_key)
    f_b = tmp

    # --------------------------------------------
    # Derive model output for Ci sets
    # --------------------------------------------

    ntime = {}
    for ikey in keys:
        if (len(np.shape(f_b[ikey])) == 2):
            # f_b[ikey] has shape (nsets, ntime)
            ntime[ikey] = np.shape(f_b[ikey])[1]
        else:
            ntime[ikey] = 0

    # (1) process sensitivities:
    #     main effects:  [S_A,  S_B,  ...]
    #     total effects: [ST_A, ST_B, ...]
    f_c = {}
    for ikey in keys:
        if (ntime[ikey] == 0):
            f_c[ikey] = np.ones([nprocess, nsets]) * -9999.
        else:
            print('nsets = ', nsets)
            tttmp = np.ones([ntime[ikey], nprocess, nsets]) * -9999.
            f_c[ikey] = tttmp

    for iprocess in np.arange(nprocess):

        f_c_tmp = []
        for iset in np.arange(nsets):

            paraweightset_proc = [
                para_sets[iiprocess][block_c[iprocess][iset, iiprocess]]
                for iiprocess in np.arange(nprocess)
            ]  # parameter values and weights for each process
            paraset_proc_flat = [
                item for isublist, sublist in enumerate(paraweightset_proc)
                for item in sublist[0:nparas_per_process[isublist]]
            ]
            paraset = [-9999.0 for ipara in np.arange(nparas)
                       ]  # parameter set combined in right order
            weightset = [
                sublist[
                    nparas_per_process[isublist]:nparas_per_process[isublist] +
                    nweights_per_process[isublist]]
                for isublist, sublist in enumerate(paraweightset_proc)
            ]  # weight    set combined in right order
            for iipara, ipara in enumerate(paras_per_process_flat):
                # MAJOR PROBLEM is that parameters that appear in multiple processes get overwritten
                paraset[ipara] = paraset_proc_flat[iipara]

            f_c_tmp.append(
                model_function(paraset,
                               weightset,
                               constants=constants,
                               run_id="c_process_" + str(iprocess) + "_set_" +
                               str(iset)))
        f_c_tmp = np.array(f_c_tmp)

        # convert list of dicts into dict of lists:
        #       [{'result_1':1.0, 'result_2':2.0}, {'result_1':3.0, 'result_2':4.0}, ...] --> {'result_1':[1.0,3.0,...], 'result_2':[2.0,4.0,...]}
        keys = f_c_tmp[0].keys()
        tmp = {}
        for ikey in keys:
            tmp_key = []
            for iset in np.arange(nsets):
                tmp_key.append(f_c_tmp[iset][ikey])

            tmp[ikey] = np.array(tmp_key)
        f_c_tmp = tmp
        # print('f_c_tmp = ',f_c_tmp)

        for ikey in keys:
            if (ntime[ikey] == 0):
                f_c[ikey][iprocess, :] = f_c_tmp[ikey]
            else:
                # if vector of model outputs f_c_tmp has shape (nsets,ntime) --> must be (ntime, nsets)
                f_c[ikey][:, iprocess, :] = np.transpose(f_c_tmp[ikey])

    # print("f_c = ",f_c)

    # (1c) calculate Sobol' indexes
    si = {}
    sti = {}
    msi = {}
    msti = {}
    wsi = {}
    wsti = {}
    for ikey in keys:
        if (ntime[ikey] == 0):
            si[ikey], sti[ikey] = sobol_index.sobol_index(ya=f_a[ikey],
                                                          yb=f_b[ikey],
                                                          yc=f_c[ikey],
                                                          si=True,
                                                          sti=True,
                                                          method='Mai1999')
        else:
            si[ikey], sti[ikey], msi[ikey], msti[ikey], wsi[ikey], wsti[
                ikey] = sobol_index.sobol_index(ya=f_a[ikey],
                                                yb=f_b[ikey],
                                                yc=f_c[ikey],
                                                si=True,
                                                sti=True,
                                                mean=True,
                                                wmean=True,
                                                method='Mai1999')

    for ikey in keys:
        if (ntime[ikey] == 0):
            tmp = {}
            tmp['si'] = si
            tmp['sti'] = sti
            sobol_indexes['processes'] = tmp
        else:
            tmp = {}
            tmp['si'] = si
            tmp['sti'] = sti
            tmp['msi'] = msi
            tmp['msti'] = msti
            tmp['wsi'] = wsi
            tmp['wsti'] = wsti
            sobol_indexes['processes'] = tmp

            print("si['" + ikey + "']   = ", si[ikey])
            print("sti['" + ikey + "']  = ", sti[ikey])
            print("msi['" + ikey + "']  = ", msi[ikey])
            print("msti['" + ikey + "'] = ", msti[ikey])
            print("wsi['" + ikey + "']  = ", wsi[ikey])
            print("wsti['" + ikey + "'] = ", wsti[ikey])

    # store to save in pickle later
    if not (save is None):
        save_to_pickle["sobol_indexes"] = sobol_indexes

    # save to pickle
    if not (save is None):
        pickle.dump(save_to_pickle, open(save, "wb"))

    # Done.
    return sobol_indexes
Example #14
def saltelli(params, nbase, lhs=False, nskip=1):
    """
        Samples parameters in given ranges so they can be used
        for the calculation of Sobol indices as described by
        Saltelli (2002) and Saltelli et al. (2008).


        Definition
        ----------
        def saltelli(params, nbase, lhs=False, nskip=1):


        Input
        -----
        params    (nparams,2) array of parameter ranges
        nbase     base sample size


        Optional Input
        --------------
        lhs       if True: use latin hypercube sampling in uniform distributions
                  if False: use Sobol sequence
                            This uses the package sobol of Corrado Chisari
        nskip     number of Sobol sequence points to skip at the beginning


        Output
        ------
        array of shape (nparams, nbase*(nparams+2)) with sample values in the right order.


        Restrictions
        ------------
        1. Only uniform latin hypercube sampling.
        2. No parameter dependencies possible.


        References
        ----------
        Saltelli, A. (2002). Making best use of model evaluations to compute sensitivity indices.
            Computer Physics Communications, 145(2), 280-297.
        Saltelli, A. et al. (2008). Global sensitivity analysis. The primer.
            John Wiley & Sons Inc., NJ, USA, ISBN 978-0-470-05997-5 (pp. 1-292)


        Examples
        --------
        >>> import numpy as np
        >>> params = np.array([[  1.00000000e+00,   1.00000000e+06],
        ...                    [  1.00000000e+00,   2.00000000e+01]])
        >>> nbase = 10
        >>> # seed for reproducible results in doctest
        >>> np.random.seed(1)
        >>> out = saltelli(params, nbase)
        >>> from autostring import astr
        >>> print(astr(out[0:2,0:4],3,pp=True))
        [['     1.000' '500000.500' '750000.250' '250000.750']
         ['     1.000' '    10.500' '     5.750' '    15.250']]

        >>> out = saltelli(params, nbase, nskip=2)
        >>> print(astr(out[0:2,0:4],3,pp=True))
        [['500000.500' '750000.250' '250000.750' '375000.625']
         ['    10.500' '     5.750' '    15.250' '     8.125']]

        >>> out = saltelli(params, nbase, nskip=2, lhs=True)
        >>> print(astr(out[0:2,0:4],3,pp=True))
        [['341702.859' '872032.577' '500011.937' '930233.327']
         ['     1.796' '     6.102' '    16.588' '     4.568']]


        License
        -------
        This file is part of the JAMS Python package, distributed under the MIT
        License. The JAMS Python package originates from the former UFZ Python library,
        Department of Computational Hydrosystems, Helmholtz Centre for Environmental
        Research - UFZ, Leipzig, Germany.

        Copyright (c) 2012-2014 Matthias Cuntz - mc (at) macu (dot) de

        Permission is hereby granted, free of charge, to any person obtaining a copy
        of this software and associated documentation files (the "Software"), to deal
        in the Software without restriction, including without limitation the rights
        to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
        copies of the Software, and to permit persons to whom the Software is
        furnished to do so, subject to the following conditions:

        The above copyright notice and this permission notice shall be included in all
        copies or substantial portions of the Software.

        THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
        IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
        FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
        AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
        LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
        OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
        SOFTWARE.


        History
        -------
        Written,  MC, May 2012
        Modified, MC, Feb 2013 - ported to Python 3
                  MC, Apr 2014 - assert
    """
    #
    # Check input
    assert np.size(params[0, :]) == 2, 'parameter ranges must be given in form (nparams,2).'
    nparams = np.size(params[:, 0])
    #
    nso = nbase * (nparams + 2)
    # A=0:nbase, B=nbase:nbase*2, C=nbase*2:nso
    zoff = params[:, 0]
    zmul = params[:, 1] - params[:, 0]
    # Two random samples A and B
    pA = np.empty((nparams, nbase))
    pB = np.empty((nparams, nbase))
    if lhs:
        import scipy.stats as stats
        from jams.lhs import lhs
        dist = [stats.uniform for i in range(nparams)]
        pars = [(zoff[i], zmul[i]) for i in range(nparams)]
        dist = dist + dist  # 2*nparams
        pars = pars + pars
        lat = lhs(dist, pars, nbase)
        for i in range(nparams):
            pA[i, :] = lat[i, :]
            pB[i, :] = lat[i + nparams, :]
    else:
        import sobol
        sob = sobol.i4_sobol_generate(2 * nparams, nbase, nskip)
        for i in range(nparams):
            pA[i, :] = zoff[i] + zmul[i] * sob[i, :]
            pB[i, :] = zoff[i] + zmul[i] * sob[i + nparams, :]
    # The C sample consists of nparams copies of the B sample,
    pC = np.array([pB for i in range(nparams)])
    # where on each repeat one column is replaced by the column of A
    for i in range(nparams):
        pC[i, i, :] = pA[i, :]

    # Reshape so that one can do runs over 2nd dim and then use jams.sobol_index
    pout = np.empty((nparams, nso))
    for i in range(nparams):
        pout[i, :] = np.concatenate((pA[i, :], pB[i, :], np.ravel(pC[:, i, :])))

    return pout