Example no. 1
    def sample(self, n_samples: int, space: Space, **kwargs):

        kwargs = kwargs['kwargs']

        if (self.cached_n_samples is not None
                and self.cached_n_samples == n_samples
                and self.cached_space is not None
                and space == self.cached_space and self.cached_algo is not None
                and self.cached_algo == kwargs['sample_algo']):

            #lhs = lhsmdu.resample() # YC: resampling can produce overly clustered samples if there are many constraints
            lhs = lhsmdu.sample(len(space), n_samples)

        else:

            self.cached_n_samples = n_samples
            self.cached_space = space
            self.cached_algo = kwargs['sample_algo']

            if (kwargs['sample_algo'] == 'LHS-MDU'):
                lhs = lhsmdu.sample(len(space), n_samples)
            elif (kwargs['sample_algo'] == 'MCS'):
                lhs = lhsmdu.createRandomStandardUniformMatrix(
                    len(space), n_samples)
            else:
                raise Exception(f"Unknown algorithm {kwargs['sample_algo']}")

        lhs = np.array(
            list(zip(*[np.array(lhs[k])[0] for k in range(len(lhs))])))
        # print(lhs,'normalized',n_samples)

        return lhs
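A hedged usage sketch for the method above: the sampler and space objects are stand-ins, not from the original, and note that the options dict is nested one level deep under the 'kwargs' key, matching the kwargs = kwargs['kwargs'] unpacking.

# hypothetical caller; `sampler` is an instance of the class above and
# `space` a Space object whose len() is the number of parameters
samples = sampler.sample(n_samples=20, space=space,
                         kwargs={'sample_algo': 'LHS-MDU'})
# -> numpy array of shape (20, len(space)) with values in [0, 1]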
Example no. 2
def LHS_for_guesses(sample_count):

  # we use latin hypercube sampling to obtain initial guesses for curve fitting
  lhsmdu.setRandomSeed(None)
  alpha_tau_sample = lhsmdu.sample(2,sample_count)
  epsilon_mu_sample = lhsmdu.sample(2,sample_count)
  alpha_tau_sample = alpha_tau_sample.tolist()
  epsilon_mu_sample = epsilon_mu_sample.tolist()

  # we then adjust the variables to the correct ranges
  adjusted_sample = []
  # for AT, we adjust to between 1 and 1/30
  for var_dist in alpha_tau_sample:
    var_dist = [(1 + var*(1/30-1)) for var in var_dist]
    adjusted_sample.append(var_dist)
  # for EM, we actually use theta where EM = 10^theta; the code maps theta to between 1 and -5
  # (i.e. EM between 10 and 1e-5), which prevents overweighting towards the top end of the spectrum
  for var_dist in epsilon_mu_sample:
    var_dist = [(1 + var*(-5-1)) for var in var_dist]
    adjusted_sample.append(var_dist)

  # then, for each set of 4 variables, we run it through the fit and determine cost
  # for every guess, we check to see if that's the lowest cost generated thus far
  # if it is, we store it, and at the end, that's our result
  lowest_cost = [10000000000000, [0,0,0,0]]

  for i in range(sample_count):
    test_guesses = []
    for var_dist in adjusted_sample:
      test_guesses.append(var_dist[i])
    try:
      # we have to rearrange test_guesses so that it goes alpha, epsilon, tau, mu
      tau = test_guesses[1]
      test_guesses[1] = test_guesses[2]
      test_guesses[2] = tau
      # we then adjust epsilon and mu to be 10^[their value], because it's currently theta
      test_guesses[1] = 10**(test_guesses[1])
      test_guesses[3] = 10**(test_guesses[3])

      cost = SD_curve_fit(test_guesses).cost
      print(f"{test_guesses} cost: {cost}")

      if cost < lowest_cost[0]:
        lowest_cost = [cost, test_guesses]
    except OverflowError as e:
      print("Overflow while running LHS for initial guesses: ", e)
    except ValueError as e:
      print("Residual error while running LHS for initial guesses: ", e)
  
  print(f"LHS suggests that {lowest_cost[1]} is the best set of guesses")

  return lowest_cost[1]
Example no. 3
 def get_rate_samples(self):
     """
     Note: return type is matrix, access with rates[i, j] NOT rates[i][j]
     """
     if self._algorithm == 'lhs':
         rates = lhsmdu.sample(len(self._option_range_list),
                               self._sample_count)
         return rates
     if self._algorithm == 'mcs':
         rates = lhsmdu.createRandomStandardUniformMatrix(
             len(self._option_range_list), self._sample_count)
         return rates
     rates = lhsmdu.sample(len(self._option_range_list), self._sample_count)
     return rates
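The docstring's warning exists because lhsmdu.sample returns a numpy.matrix, which stays two-dimensional under single indexing; a quick standalone illustration:

import lhsmdu

rates = lhsmdu.sample(3, 5)  # numpy.matrix of shape (3, 5)
print(rates[1, 2])           # scalar entry, as intended
print(rates[1].shape)        # (1, 5): rates[1] is still a 2-D row
# rates[1][2] raises IndexError instead of returning the (1, 2) entry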
Example no. 4
 def _initialise_population_latin_hypercube(self):
     """
     Use a latin hypercube to seed the population of solutions.
     :return: a list of Candidate objects (with no assigned fitnesses)
     """
     k = lhsmdu.sample(len(self.solution_dimensions),
                       self.population_size)  # returns numpy matrix
     # scale the design for each dimension onto the correct range
     for i, dim in enumerate(self.solution_dimensions):
         dimension_range = dim.max_value - dim.min_value
         k[i] = (k[i] * dimension_range
                 ) + dim.min_value  # operates on entire slice at once
     # bind each dimension's putative value to valid intervals (granularity)
     for i, dim in enumerate(self.solution_dimensions):
         for c in range(self.population_size):
             k[i, c] = dim.bind(k[i, c])
      population = []  # a list of Candidate objects
      # convert each column of the design into a candidate's solution list
     for c in range(self.population_size):
         candidate_solution = [
             k[d, c] for d in range(len(self.solution_dimensions))
         ]
         candidate = Candidate(solution=candidate_solution)
         population.append(candidate)
     return population
Example no. 5
def initialization_run(domain, num_DIM, num_init):
    start_points_norm = lhsmdu.sample(num_DIM, num_init)
    start_points_norm = start_points_norm.transpose()
    start_points = np.zeros([num_init, num_DIM])

    # each entry of `domain` stores its (lower, upper) bounds under the 'domain' key;
    # build the bounds array for all num_DIM dimensions (originally written out longhand for 13)
    bounds = np.array([[domain[j].get('domain')[0],
                        domain[j].get('domain')[1]] for j in range(num_DIM)])
    for i in range(num_init):
        for j in range(num_DIM):
            start_points[i, j] = bounds[
                j, 0] + start_points_norm[i, j] * (bounds[j, 1] - bounds[j, 0])

    return start_points
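A hedged usage sketch, assuming the GPyOpt-style domain format the function indexes into (a list of dicts whose 'domain' key holds a (lower, upper) pair); the names and bounds are illustrative:

# hypothetical 13-dimensional domain matching the function's expectations
domain = [{'name': 'x%d' % i, 'type': 'continuous', 'domain': (0.0, 1.0)}
          for i in range(13)]
X0 = initialization_run(domain, num_DIM=13, num_init=5)
print(X0.shape)  # (5, 13), each column scaled to its bounds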
Example no. 6
 def lhs_mu_samples(self, seed=None):
     seed = np.random.randint(99999) if seed is None else seed  # set seed
     samples = lhsmdu.sample(
         self.d_enc, self.n_comp,
         randomSeed=seed).A.T  # get (n, d) array of samples
     samples = samples * (self.mu_init_range[1] - self.mu_init_range[0]
                          ) + self.mu_init_range[0]  # scale up from [0,1]
     return samples
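The .A.T chain converts the numpy.matrix that lhsmdu returns into a plain ndarray and transposes it from (d, n) to (n, d); a minimal standalone check:

import lhsmdu

m = lhsmdu.sample(3, 7, randomSeed=0)  # numpy.matrix, shape (3, 7)
a = m.A.T                              # plain ndarray, shape (7, 3)
print(type(a), a.shape)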
Example no. 7
def problem_2in1out( xlower = 0, xupper = 10, nsamples = 10, ylower = 0, yupper = 20 ):
    '''problem_2in1out is a function to create a random, but well defined, problem
    landscape, created by generating random points in a 3d landscape and
    fitting a GP to the points. By constraining the hyperparameters we are able
    to produce a landscape with a controllable level of complexity.

    example :   my_problem_function = problem_2in1out( )

    INPUTS: (keywords, not required)
        xlower      -   Integer/Float - Defines the lower bound of all X's
        xupper      -   Integer/Float - Defines the upper bound of all X's
        nsamples    -   Integer       - number of points along each dimension
        ylower      -   Integer/Float - Defines the lower bound of Y
        yupper      -   Integer/Float - Defines the upper bound of Y  

    OUTPUT: 
        fun         -   Function      - A 3 dimensional function
            example :   yi = fun( [ xi1 , xi2 ] )
            example :   [yi,yj] = fun( [ [ xi1 , xi2 ] , [ xj1 , xj2 ] ] )
    '''
    X1   = np.linspace( xlower , xupper , nsamples )
    X2   = np.linspace( xlower , xupper , nsamples )
    Y    = np.ones( nsamples**2 )*np.random.uniform( ylower , yupper , nsamples**2 )
    kernel = GPy.kern.RBF( input_dim = 2 , variance = 0.001 , lengthscale = (xupper-xlower)/5 ) 
    X    = np.array (list( it.product( X1 , X2 ) ) ) 
    
    X    = np.array(lhsmdu.sample(nsamples**2,2))*10

    X = X.reshape( -1 , 2 )
    Y = Y.reshape( -1 , 1 )
    
    model = GPy.models.GPRegression( X, Y, noise_var = 1e-4 , kernel = kernel )

    def fun(x):
        assert isinstance(x, (list, np.ndarray)), 'input must be a list or array'
        assert len(x) >= 2, 'Input is in incorrect format'
        try:
            value = [ float( model.predict( np.array( [ [ i ] ] ).reshape( -1 , 2 ) )[ 0 ] ) for i in x ]
            value = np.clip( value , ylower , yupper )  # clamp predictions into [ylower, yupper]
            return( np.array(value) )
        except Exception:
            pass
        try:
            value = float( model.predict( np.array( [ [ x ] ] ).reshape( -1 , 2 ) )[ 0 ] )
            value = min( max( value , ylower ) , yupper )
            return( np.array(value) )
        except Exception:
            print( '2in_1out problem ERROR - The x values do not match the required input size' )
        return( None )

    return( fun )
Example no. 8
def random_lhs(min_bound, max_bound, dimensions, number_samples=1):
    import lhsmdu
    r_range = abs(max_bound - min_bound)
    lolo = r_range * np.array(lhsmdu.sample(dimensions,
                                            number_samples)) + min_bound
    required_list = []
    for ii in range(number_samples):
        required_list.append(lolo[:, ii].reshape(-1))
    return required_list
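An illustrative call (the bounds and sizes are arbitrary), showing that each returned element is one flattened sample:

import numpy as np  # random_lhs also needs numpy in scope

points = random_lhs(-2.0, 2.0, dimensions=3, number_samples=5)
print(len(points), points[0].shape)  # 5 samples, each of shape (3,)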
Example no. 9
def genrandom(n_random):
    ### Use Latin Hypercube sampling to choose initial points for Nelder-Mead search
    random_points = np.asarray(
        lhsmdu.sample(200 * n_random, len(
            mp.domain.valid_ranges)))  # lhsmdu produces normalised numbers
    # The next few lines make the points fit the search domain
    mults = [vrange[1] - vrange[0] for vrange in mp.domain.valid_ranges]
    rangemin = [vrange[0] for vrange in mp.domain.valid_ranges]
    random_points = random_points * mults + rangemin
    return (np.array(random_points).reshape(-1, len(mp.example)))
Example no. 10
    def __init__(self, nn, eps=0, train_eps=False, **kwargs):
        self.aggr = 'laf'
        seed = 92
        atype = 'frac'
        shared = True
        embed_dim = '32'
        if 'aggr' in kwargs.keys():
            aggr = kwargs['aggr']
            del kwargs['aggr']  # remove it: super().__init__ is passed aggr='add' explicitly
        if 'seed' in kwargs.keys():
            seed = kwargs['seed']
            del kwargs['seed']
        if 'style' in kwargs.keys():
            atype = kwargs['style']
            del kwargs['style']
        if 'shared' in kwargs.keys():
            shared = kwargs['shared']
            del kwargs['shared']
        if 'embed_dim' in kwargs.keys():
            embed_dim = int(kwargs['embed_dim'])
            del kwargs['embed_dim']

        super(GINLafConv, self).__init__(aggr='add', **kwargs)
        self.nn = nn
        self.initial_eps = eps
        if train_eps:
            self.eps = torch.nn.Parameter(torch.Tensor([eps]))
        else:
            self.register_buffer('eps', torch.Tensor([eps]))
        self.reset_parameters()

        if shared:
            params = torch.Tensor(lhsmdu.sample(13, 1, randomSeed=seed))
        else:
            params = torch.Tensor(lhsmdu.sample(13, 1, randomSeed=seed))
        #params = torch.Tensor(lhsmdu.sample(13, 1, randomSeed=seed))
        #par = torch.Tensor([[1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0]] * out_channels)
        #par = torch.Tensor([[1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0]])
        #params = par.t()
        if atype == 'minus':
            self.aggregation = ElementAggregationLayer(parameters=params)
        elif atype == 'frac':
            #self.aggregation = FractionalElementAggregationLayer(parameters=params)
            self.aggregation = ScatterAggregationLayer(parameters=params)
Example no. 11
def pertubate(point, beta, n=100):
    ### Use Latin Hypercube sampling to choose initial points for Nelder-Mead search
    print(beta)
    print(n)
    random_points = np.asarray(lhsmdu.sample(n, len(
        mp.domain.valid_ranges)))  # lhsmdu produces normalised numbers
    # The next few lines make the points fit the search domain
    mins = conformpoint(np.array([p - beta for p in point]))
    maxs = conformpoint(np.array([p + beta for p in point]))
    mults = [maxs[i] - mins[i] for i in range(len(maxs))]
    random_points = [
        random_point * mults + mins for random_point in random_points
    ]
    random_points = [list(conformpoint(point)) for point in random_points]
    return (random_points)
Example no. 12
    def initial_evaluations(self, n_samples, x_dims, limits):
        """
        Makes the initial evaluations of the parameter space and
        evaluates the objective function at these locations
        :param n_samples: number of initial samples to make
        """
        np.random.seed(self.seed)
        x = np.array(lhsmdu.sample(x_dims, n_samples, randomSeed=self.seed)).T\
            * (limits[1]-limits[0])+limits[0]
        # try:
        y = np.array([self.objective_function(xi, *self.of_args).flatten() for xi in x])
        # except :
        #     y = self.objective_function(x)

        # update evaluation number
        self.n_evaluations += n_samples

        return x, y
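A hedged sketch of how this method might be driven, assuming an optimiser instance opt that carries seed, objective_function, of_args and n_evaluations attributes (all stand-ins here):

import numpy as np

# toy setup; `opt` is assumed to be an instance of the surrounding class with
# opt.seed set, opt.of_args = (), and
# opt.objective_function = lambda xi: np.atleast_1d(np.sum(xi**2))
x, y = opt.initial_evaluations(n_samples=10, x_dims=2,
                               limits=np.array([-1.0, 1.0]))
print(x.shape, y.shape)  # (10, 2) samples and their objective values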
Example no. 13
def generate_local_data_lhs(y_sampler,
                            n_samples_per_dim,
                            step,
                            current_psi,
                            x_dim=1,
                            n_samples=2):
    xs = y_sampler.x_dist.sample(
        torch.Size([n_samples_per_dim * n_samples, x_dim]))  # .to(device)

    mus = torch.tensor(
        lhsmdu.sample(
            len(current_psi), n_samples,
            randomSeed=np.random.randint(1e5)).T).float()  # .to(device)

    mus = step * (mus * 2 - 1) + current_psi  # .to(device)
    mus = mus.repeat(1, n_samples_per_dim).reshape(-1, len(current_psi))
    y_sampler.make_condition_sample({'mu': mus, 'X': xs})
    data = y_sampler.condition_sample().detach()  # .to(device)
    return data.reshape(-1, 1), torch.cat([mus, xs], dim=1)
Example no. 14
 def init_weights(self, initialisation, nb_init_points):
     print_sep("New Initialization")
     nb_var = self.neuralnet.count_params()
     if (initialisation == 0):
         loc = 0
         scale = 0
         initial_weights = np.random.normal(loc=loc,
                                            scale=scale,
                                            size=(nb_var, nb_init_points))
     if (initialisation == 1):
         loc = 0
         scale = 1
         initial_weights = np.random.normal(loc=loc,
                                            scale=scale,
                                            size=(nb_var, nb_init_points))
     if (initialisation == 2):
         loc = (np.random.rand() - 0.5) * 10
         scale = (np.random.rand()) * 10
         initial_weights = np.random.normal(loc=loc,
                                            scale=scale,
                                            size=(nb_var, nb_init_points))
     if (initialisation == 3):
         loc = 0
         scale = 10
         initial_weights = np.array(
             (lhsmdu.sample(nb_var, nb_init_points) - (0.5 + loc)) * scale)
     if (initialisation in [0, 1, 2]):
          print(
              "Weights have been initialized from a normal distribution with mean=%.3f and scale=%.3f."
              % (loc, scale))
     else:
          print(
              "Weights have been initialized inside a LH of size=%.3f and center=%.3f."
              % (scale, loc))
     to_return = []
     for i in initial_weights.T:
         i = np.reshape(i, (-1, 1))
         i = to_weights_shape(i, self.neuralnet.get_weights())
         to_return += [i]
     return to_return
Example no. 15
def generate_LHS_draw(seed):
    # Dictionary of stochastic settings
    ST = {}

    # hard code the number of stochastic settings here
    # Number depends on stochastic cumulus scheme selected
    ns = 20
    nss = iter(range(ns))
    LHS = sample(ns, nens, randomSeed=seed)

    #### MICROPHYSICS #### (5)
    # Some of these distributions could be beta, like Hacker et al 2011
    ST['nssl_ehw0'] = denormalise(LHS[next(nss), :], 0.4, 1.0)
    ST['nssl_alphar'] = denormalise(LHS[next(nss), :], 0.0, 2.5)
    ST['nssl_alphah'] = denormalise(LHS[next(nss), :], 0.0, 3.0)
    ST['nssl_cccn'] = denormalise(LHS[next(nss), :], 0.3e9, 1.3e9)

    # Hail collect eff. should be higher than graupel
    # ehlw0 must be computed last because of constraint
    ehlw0 = N.zeros([nens])
    ehln = next(nss)
    for xn, x in enumerate(ST['nssl_ehw0'][0, :].A1):
        ehlw0[xn] = denormalise(LHS[ehln, xn], ST['nssl_ehw0'][0, xn], 1.0)
    ST['nssl_ehlw0'] = N.matrix(ehlw0)

    #### SHORTWAVE #### (0)
    # No solar scattering any more
    # swscat = denormalise(LHS[4,:],0.2,2.0)

    ##### CUMULUS ##### ( )

    ##### LAND SURFACE ##### (8)
    # Radii - must be square root for uniform distribution round circle
    for key in ('morphr_crs', 'morphr_btr', 'morphr_rad', 'morphr_tbot'):
        ST[key] = N.sqrt(denormalise(LHS[next(nss), :], 0.0, 1.0))
    # Angles - TODO: Must be square root!
    for key in ('morphth_crs', 'morphth_btr', 'morphth_rad', 'morphth_tbot'):
        ST[key] = denormalise(LHS[next(nss), :], 0.0, 2 * N.pi)

    return ST
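denormalise is not defined in this excerpt; from the way it is called, it presumably maps unit-interval LHS draws linearly onto [lower, upper]. A minimal sketch under that assumption:

def denormalise(u, lower, upper):
    # assumed helper: rescale values in [0, 1] to the range [lower, upper]
    return lower + u * (upper - lower)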
Example no. 16
A dirty script to create our LHS input for a mass ML training dataset using CLFO data 

'''

########## READ DATA
ignorecontinue = True
ignore = ['CO2','PXYL+MXYL','NO2','NOY','R','RO2','M','HO2','CH3O2','OH','TRICLETH','HONO','BUT2CHO', 'C3ME3CHO', 'C5H11CHO', 'CH2CL2', 'LIMONENE', 'MACR', 'MVK']
df = np.log10(pd.read_csv('../src/examples/clfo_describe.csv',index_col=0))
df = df[filter(lambda x: x not in ignore, df.columns)]

nruns = 1000  # df.shape[1]**2
print(nruns)

# may be slow. 
lhs = lhsmdu.sample(df.shape[1],nruns)
#lhs = lhsmdu.createRandomStandardUniformMatrix(df.shape[1],nruns)#monte carlo
np.save('lhs.npy',lhs)
print('lhs ready')

#### END LHS

#### set ics BASE
'''
If using a file, use a new ics; here we are just creating a new one
'''
##ics = newics.newics(h5file ='clfo.h5',timestep=int(25*60*60))

ics = pd.DataFrame(
    [
           ['ii', 'TIME', '0',str(24*60*60*5)],
Example no. 17
def latin_hypercube(dic_factors, runs):
    """
    Parameters:
        dic_factors: The dictionary of factors to be included in the Latin Hypercube design.

        runs: The number of runs to be used in the design.

    Returns:
        df: The dataframe containing the Latin Hypercube design.

    Example:
        >>> import design
        >>> Factors = {'Height':[1.6,2],'Width':[0.2,0.4],'Depth':[0.2,0.3],'Temp':[10,20],'Pressure':[100,200]}
        >>> design.latin_hypercube(Factors,50)
              Height     Width     Depth       Temp    Pressure
        0   1.814372  0.316126  0.203734  12.633408  150.994350
        1   1.683852  0.327745  0.221157  10.833524  149.235694
        2   1.952938  0.220208  0.212877  14.207334  177.737810
        3   1.921001  0.306165  0.249451  13.747280  195.141219
        4   1.709485  0.286836  0.214973  12.132761  144.060774
        5   1.795442  0.339484  0.263747  16.494926  105.861897
        6   1.849604  0.390856  0.229801  17.768834  157.379054
        7   1.635933  0.295207  0.244843  15.561134  119.353027
        8   1.800514  0.257358  0.232554  19.117071  114.431350
        9   1.748656  0.311259  0.209185  19.573654  147.317771
        10  1.610152  0.200320  0.269825  14.041168  192.787729
        11  1.670380  0.283579  0.270421  11.422384  161.302466
        12  1.914483  0.374190  0.273246  15.253950  110.213186
        13  1.731642  0.363269  0.211263  15.011417  175.315691
        14  1.864093  0.245809  0.235466  10.506234  123.998827
        15  1.856580  0.314574  0.260263  11.787321  152.096424
        16  1.651140  0.262106  0.289432  14.407869  121.954348
        17  1.827840  0.278926  0.223818  12.824422  168.813816
        18  1.780800  0.380327  0.252359  12.290440  171.741507
        19  1.762333  0.224241  0.216475  18.386775  165.564771
        20  1.949560  0.300988  0.285943  10.063231  155.134033
        21  1.646881  0.248638  0.250362  16.701447  163.476898
        22  1.974239  0.379487  0.279709  17.208315  181.757031
        23  1.904317  0.216877  0.292985  18.829669  136.808281
        24  1.899844  0.343903  0.230494  13.197326  198.654066
        25  1.696839  0.329348  0.283741  18.193024  135.335187
        26  1.689936  0.272728  0.218891  19.800988  131.615692
        27  1.823893  0.299159  0.247030  10.790362  191.524570
        28  1.841140  0.210635  0.286718  10.327824  167.595627
        29  1.883991  0.385993  0.277186  18.773584  178.871167
        30  1.932945  0.358221  0.294327  16.890948  125.635668
        31  1.837620  0.370877  0.242782  17.103119  142.240418
        32  1.740477  0.352914  0.265939  14.697769  129.088978
        33  1.624078  0.347985  0.298516  13.933373  132.011517
        34  1.786612  0.351899  0.225313  15.827930  188.649172
        35  1.892142  0.206601  0.254650  14.805995  138.732923
        36  1.656703  0.252798  0.205547  18.461586  184.792345
        37  1.770805  0.270721  0.226262  11.940936  113.390934
        38  1.672266  0.288289  0.275940  15.640371  186.777116
        39  1.600629  0.240123  0.280908  17.934686  126.897387
        40  1.995175  0.237031  0.240472  16.393982  116.475088
        41  1.713062  0.265850  0.256147  17.418780  172.746504
        42  1.964540  0.235473  0.266340  11.334520  196.454539
        43  1.757516  0.366909  0.207040  13.488750  102.146392
        44  1.942405  0.214971  0.290674  13.373628  109.206897
        45  1.985601  0.229702  0.297658  12.435430  101.336426
        46  1.617340  0.321384  0.200862  19.338525  159.238981
        47  1.976837  0.393484  0.258497  16.167623  140.926988
        48  1.877091  0.399951  0.239234  19.788923  182.759572
        49  1.725652  0.332160  0.237414  11.136650  107.726667
    """
    df = pd.DataFrame()
    factor_names = []
    count = 0
    # Creates an array filled with a Latin hypercube from 0 to 1
    array = lhsmdu.sample(len(dic_factors), runs)
    # This loop rescales the Latin hypercube to the levels entered in the dictionary of factors
    for name in dic_factors:
        factor_names.append(name)
        low = min(dic_factors[name])
        high = max(dic_factors[name])
        # non_coded stores the array mapped to fit the levels of the factors entered
        non_coded = np.array(
            list(map(lambda x: low + ((high - low) * x), array[count])))
        # Converts non_coded (which is currently one column of the final dataframe) to a series
        s_add = pd.Series(non_coded[0][0])
        count += 1
        # Adds the series to the dataframe
        df = pd.concat([df, s_add], ignore_index=True, axis=1)
    df = df.rename(columns=lambda y: factor_names[y])
    return df
Example no. 18
import lhsmdu

lhsmdu.setRandomSeed(None)
samples = lhsmdu.sample(1, 100)
# initially, it is generated between 0 and 1
# we need to change it to a distribution between -100 and 100

adj = []
samples = samples.tolist()
print(samples[0])
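The rescaling the comments announce is not shown in the excerpt; a sketch of the linear map from [0, 1] onto [-100, 100]:

adj = [-100 + 200 * x for x in samples[0]]  # each x in [0, 1] maps to [-100, 100]
print(adj[:5])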
Example no. 19
def runtime(numDimensions, numSamples):
    ''' Checks runtime using standard variables '''
    start_time = time()
    m = lhsmdu.sample(numDimensions, numSamples)
    end_time = time()
    print(end_time - start_time)
Example no. 20
domain_flag = 'parsec' #Change this to change your domain

#######
if domain_flag == 'ffd':
    sys.path.insert(0, cur_dir+'/domain/FFD_config')
if domain_flag == 'rastrigin':
    sys.path.insert(0, cur_dir+'/domain/Rastrigin_config')
if domain_flag == 'simple2d':
    sys.path.insert(0, cur_dir+'/domain/simple2d_config')
if domain_flag == 'parsec':
    sys.path.insert(0, cur_dir+'/domain/Parsec_config')
if domain_flag == 'rosenbrock6d':
    sys.path.insert(0, cur_dir+'/domain/Rosenbrock6d_config')

import domain_config as mp

import lhsmdu
import numpy as np
n_samples = int(1e6)
lhsx = lhsmdu.sample(10, n_samples)
x = np.array(lhsx.T)
total = 0
for xx in x:
    total = np.nansum([total, fitness_fun(xx)])  # accumulate, ignoring NaN evaluations
mean = total / n_samples
print('mean is ', mean)
Example no. 21
# to convert lists to a dictionary
parameters_dictionary = dict(zip(list_of_parameters, list_of_values))
# print(parameters_dictionary)


k = 1
while k == 1:
    try:
        no_of_sample = int(input("enter number of samples needed"))
        k = 2
    except ValueError:
        print("Enter the value again")
        k = 1

df = pd.DataFrame(index=list(range(no_of_sample)), columns=list_of_parameters)

z = len(df.columns)

for val in range(z):
    k = lhsmdu.sample(1, no_of_sample)  # one dimension, no_of_sample points (was hard-coded to 150)
    k = (k*(list_of_maximum_values[val]-list_of_minimum_values[val]))+list_of_minimum_values[val]
    k = k.transpose()
    df_temp= pd.DataFrame(k)
    df.iloc[:,val] = df_temp
  
df.to_excel("latin_hypercube_sampling.xlsx")
Example no. 22
@author: WDN
Prior Data Generator
Adds an LHS sampling method to sample the data space, then runs simulations to obtain prior data
"""
import WDNfeedback  # feedback module
import pandas as pd
import numpy as np
import lhsmdu
import math
pd.set_option('display.max_rows',None)
    
    

"利用LHS在空间中采样,这里用二维采样============================================="
k = lhsmdu.sample(6, 120) # Latin Hypercube Sampling with multi-dimensional uniformit
-"VBR其他流---------------------------------------------------------------------"
x1 = np.array(k[0])*100
x1=np.floor(x1)
vbrinterval=x1[0].tolist()
vbrinterval = [ math.ceil(x) for x in vbrinterval ]
vbrinterval
vbrinterval = [ x+1 for x in vbrinterval ]
vbrinterval
x1 = np.array(k[1])*64000
x1=np.floor(x1)
vbrsize=x1[0].tolist()
vbrsize = [ math.ceil(x) for x in vbrsize ]
vbrsize = [ x+1 for x in vbrsize ]
"Superapp视频流----------------------------------------------------------------"
x1 = np.array(k[2])*100
Example no. 23
def main(N_parts, f_simulate, f_summaries, f_discrepancy, target_data,
         params_mins, params_maxs, scale_param, desired_D, mesh, max_time):

    #import numpy as np
    import lhsmdu
    from multiprocessing import Pool, cpu_count, get_context
    import time
    from functools import partial
    import psutil
    import time
    from tqdm import tqdm
    '''
    %%%%%%%%%%%%%%%% VARIABLE INPUT %%%%%%%%%%%%%%%%%%%%%%%%%
    '''

    #Default options (can be changed here or adjusted on the fly by supplying options argument)
    #resample_weighting = 'weighted';

    # 'equal' - All kept particles are weighted equally for resampling steps
    # 'weighted' - Particles are weighted according to their discrepancy values when resampling (taking
    #              worst kept particle as three sigma in a normal distribution, and then normalising
    #              weights so they sum to one)

    #metric_weighting = 'variance';
    # 'mahalanobis' - Mahalanobis distance is used for calculations of discrepancy
    # 'variance' - Metrics are scaled according to their variance, but covariances are ignored
    # 'none' - Euclidean distance is used
    # (All options still include rescaling of angle metrics with respect to eccentricity)

    keep_fraction = 0.5  #0.75;
    # The proportion of particles to keep during each resample step (recommended ~0.5 as a starting value)
    if N_parts < 100:
        max_MCMC_steps = 50
    else:
        max_MCMC_steps = 300
    # The maximum number of 'jiggle' steps applied to all particles in the attempt to find unique locations
    verbose = 1
    #Output information about particle uniqueness and discrepancy targets
    '''
    %%%%%%%%%%%%%%%% INSTANTIATE%%%%%%%%%%%%%%%%%%%%%%%%%
    '''

    process_count = cpu_count()
    print('cpus: %d / %d' % (process_count, cpu_count()))
    max_runtime = time.time() + max_time
    print('beginning pool...')
    start = time.time()
    ESStarget = 0.8

    burnidx = 50  #disabled
    #5 #number of iterations to burn-in (use full R)

    #generate and warm up pool
    pool = get_context("spawn").Pool(processes=process_count,
                                     maxtasksperchild=1)
    time.sleep(1)
    end = time.time()
    print('time elapsed: ' + str(end - start))
    print('warming up...')
    start = time.time()
    pool.map(warmup, range(process_count), chunksize=1)
    time.sleep(1)
    end = time.time()
    print('time elapsed: ' + str(end - start))
    print('parent percent memory used: ' + str(psutil.virtual_memory()[2]))

    # Calculate the summaries for the target data
    print('calculating target summaries...')
    start = time.time()
    target_summaries = np.asarray(f_summaries(target_data), dtype='f8')
    end = time.time()
    #print('calculated, time elapsed: '+str(end-start))
    # Calculate the ordinal location of the last particle kept for convenience
    worst_keep = int(np.rint(N_parts * keep_fraction) + 1)
    # Read out the number of variables
    N_theta = len(params_mins)

    # Convert scaling parameters to log equivalents. The set of transformed
    # parameters is termed 'theta' in this code
    #print('scaling...')
    scale_param = scale_param == 1
    theta_mins = np.copy(params_mins).astype('f8')
    theta_mins[scale_param] = np.log(theta_mins[scale_param])
    theta_maxs = np.copy(params_maxs).astype('f8')
    theta_maxs[scale_param] = np.log(theta_maxs[scale_param])

    # Initialise particles using Latin Hypercube Sampling
    #print('generating lhsmdu...')
    part_lhs = lhsmdu.sample(N_parts, N_theta)
    part_thetas = theta_mins + np.multiply(part_lhs, (theta_maxs - theta_mins))

    # For simulation, convert scaling parameters back to true values
    part_params = np.copy(part_thetas).astype('f8')
    part_params[:, scale_param] = np.exp(part_params[:, scale_param])

    # Load seed data (generated by a separate file)
    #print('loading seed data...')
    seedinfo = np.load("seedinfo.npy", allow_pickle=True)
    Pt = np.asarray(seedinfo.item().get('permute_tables'), dtype='f8')
    Ot = np.asarray(seedinfo.item().get('offset_tables'), dtype='f8')
    #Count number of seeds created
    N_seeds = np.shape(Pt)[0]
    '''
    %%%%%%%%%%%%%%%% PARPOOL 1: FIRST MCMC STEP %%%%%%%%%%%%%%%%%%%%%%%%%
    '''

    # Generate the seed information for each particle, then simulate the model
    # using this seed information, storing both its output and the associated
    # summary statistics

    #Create an individual simulator object for this particle, using the
    # seed information
    print('creating savespace...')
    seed_nums = np.random.choice(a=range(N_seeds), size=N_parts)
    part_outputs = np.zeros((N_parts, mesh['Nx'], mesh['Ny'])).astype('f8')
    part_summaries = np.zeros((N_parts, 27)).astype('f8')

    # Run the simulator and store summaries
    time.sleep(5)
    print('creating partial...')
    #runSimLoop  = partial(runSim, part_params=part_params, Pt=Pt, Ot=Ot, f_simulate=f_simulate, f_summaries=f_summaries, seed_nums=seed_nums)

    iterable = []
    for k in range(N_parts):
        iterable.append((k, part_params[k, :], Pt[seed_nums[k], :, :],
                         Ot[seed_nums[k], :, :], f_simulate, f_summaries))
    time.sleep(5)
    #runSim_lambda = lambda k: runSim(k,part_params[k,:],Pt[seed_nums[k],:,:],Ot[seed_nums[k],:,:],f_simulate,f_summaries)
    print('parent percent memory used: ' + str(psutil.virtual_memory()[2]))

    print('beginning map...')
    start_loop = time.time()
    #results = pool.map(runSimLoop,range(N_parts), chunksize=1)
    #results = []
    #for i,k,part_out, part_sum in tqdm(enumerate(pool.imap_unordered(runSimLoop,range(N_parts),chunksize=int(N_parts/process_count)))):
    #results.append(res)
    chunksize = int(N_parts / process_count)
    if chunksize < 1:
        chunksize = 1
    results = pool.starmap_async(runSim, iterable, chunksize=chunksize)
    results_get = results.get()
    for r in results_get:
        k = r[0]
        part_outputs[k, :, :] = r[1]
        part_summaries[k, :] = r[2]
        #pbar.update()
    del iterable
    print('time elapsed: ' + str(time.time() - start_loop))
    print('parent percent memory used: ' + str(psutil.virtual_memory()[2]) +
          '\n')
    '''
    %%%%%%%%%%%%%%%% WEIGHTING/RESAMPLING %%%%%%%%%%%%%%%%%%%%%%%%%
    '''

    # Calculate sample covariance matrix from the initial particles
    C_S = np.cov(np.transpose(part_summaries))
    # Use diagonals of this to extract only variances (no covariances)
    C_S = np.diag(np.diag(C_S))
    # Store the inverse, so that it only need be calculated once
    invC_S = np.linalg.inv(C_S)
    # Use discrepancy function with this inverse matrix included
    f_discrep = partial(f_discrepancy,
                        target_metrics_old=target_summaries,
                        invC=invC_S)
    # Calculate discrepancies for each particle based on this distance measure
    part_Ds = np.asarray(f_discrepancy(part_summaries, target_summaries,
                                       invC_S),
                         dtype='f8')

    # Loop until a stopping criterion is hit
    looping = 1
    testitcount = 1
    while looping:
        print('Iteration count: %g' % (testitcount, ))
        if testitcount > burnidx:
            burnin = 1
        else:
            burnin = 0

        # First, sort all the particles according to their discrepancy
        ordering = np.argsort(part_Ds)
        part_Ds = part_Ds[ordering]
        part_outputs = part_outputs[ordering, :, :]
        part_thetas = part_thetas[ordering, :]
        part_summaries = part_summaries[ordering, :]

        # Now select the target discrepancy as the worst particle of the kept
        # fraction
        target_D = np.copy(part_Ds[worst_keep])

        # Select which particles, from those that are kept, to resample the
        # discarded particles onto.
        # Calculate weights for each particle. These are found by
        # assuming that the worst kept particle is three standard
        # deviations away from D = 0. Due to normalisation of weights,
        # which here takes place inside randsample, even if all
        # particles are far from D = 0 it will not be an issue.

        part_logweights = -9 * (part_Ds[:worst_keep]**
                                2) / part_Ds[worst_keep]**2
        part_logweights = part_logweights + part_logweights.max()

        #Normalise so largest weight is 1 (0 on log scale)
        #part_weights = np.exp(part_logweights)
        #part_weights = part_weights / np.sum(part_weights)

        # Tempering and Normalizing
        #via https://arxiv.org/pdf/1805.03317.pdf section 2.2
        BetaSet = np.linspace(0, 1, num=100)
        part_weights = np.exp(part_logweights)
        weights = np.tile(part_weights, (len(BetaSet), 1))
        weights = np.divide(
            np.power(weights, BetaSet[:, None]),
            np.sum(np.power(weights, BetaSet[:, None]), axis=1)[:, None])
        ESS = (1 / np.sum(weights**2, axis=1)) / (N_parts * keep_fraction)
        ESSind = np.argmin(np.abs(ESStarget - ESS))
        alpha = 20  #cooling rate for beta*(iteration/alpha)
        Beta = BetaSet[ESSind] * max(1, ((testitcount - 1) / alpha))
        part_weights = part_weights**Beta / np.sum(part_weights**Beta)
        print('Weight tempering exponent: %.3f' % (Beta))

        # Now select particles from the kept particles, according to
        # their weights, to decide which particles to copy onto
        selection = np.random.choice(a=range(worst_keep),
                                     size=N_parts - worst_keep,
                                     replace=False,
                                     p=part_weights)

        # Now perform the particle copy
        part_thetas[worst_keep:, :] = part_thetas[selection, :]
        part_outputs[worst_keep:, :, :] = part_outputs[selection, :, :]
        part_summaries[worst_keep:, :] = part_summaries[selection, :]
        part_Ds[worst_keep:] = part_Ds[selection]
        accepted = np.zeros(np.shape(part_Ds))

        # Now the particles are 'jiggled' so that we return to having a set of
        # unique particles. This step can also be thought of as performing
        # local exploration, and is sometimes called 'mutation'. MCMC steps are
        # used to perform mutation, and an advantage of SMC approaches is that
        # the positions of all particles can be used to decide on a sensible
        # jumping distribution.

        # Create a weighted covariance matrix to use in a multivariate
        # normal jumping distribution. 2.38^2/N is the 'optimal' factor
        # under some assumptions
        Ctheta = (2.38**2 / N_theta) * np.cov(np.transpose(part_thetas))
        '''
        %%%%%%%%%%%%%%%% PARPOOL 2: LOOP MCMC STEP %%%%%%%%%%%%%%%%%%%%%%%%%
        '''

        #Create an individual simulator object for this particle, using the

        # Perform one MCMC step to evaluate how many are expected to be required

        #MVN_loop_set = partial(MVN_move, N_moves=1, part_thetas=part_thetas, part_outputs=part_outputs, part_summaries=part_summaries, part_Ds=part_Ds, Ctheta=Ctheta, theta_mins=theta_mins, theta_maxs=theta_maxs, scale_param=scale_param, target_D=target_D, Pt=Pt, Ot=Ot, f_simulate=f_simulate, f_summaries=f_summaries, f_discrep=f_discrep, seed_nums=seed_nums)
        iterable = []
        for k in range(worst_keep, N_parts):
            iterable.append(
                (k, 1, np.array(part_thetas[k, :]).ravel(),
                 np.squeeze(part_outputs[k, :, :]),
                 np.squeeze(part_summaries[k, :]), np.squeeze(part_Ds[k]),
                 Ctheta, theta_mins, theta_maxs, scale_param, target_D,
                 np.squeeze(Pt[seed_nums[k], :, :]),
                 np.squeeze(Ot[seed_nums[k], :, :]), f_simulate, f_summaries,
                 f_discrep, burnin))

        print('mvn1...')
        start = time.time()
        #for i,k, part_theta, part_output, part_summary, part_D, accept  in tqdm(enumerate(pool.imap_unordered(MVN_loop_set,range(worst_keep,N_parts)))):
        chunksize = int((N_parts - worst_keep) / process_count)
        if chunksize < 1:
            chunksize = 1
        results = pool.starmap_async(MVN_move, iterable, chunksize=chunksize)
        for r in results.get():
            k = r[0]
            part_thetas[k, :] = r[1]
            part_outputs[k, :, :] = r[2]
            part_summaries[k, :] = r[3]
            part_Ds[k] = r[4]
            accepted[k] = r[5]
        del iterable
        print('mvn1 duration: ' + str(time.time() - start))

        # Calculate the acceptance rate, and ensure that the case where
        # all or no particles are accepted does not result in a number
        # of MCMC steps that cannot be calculated
        est_accept_rate = np.mean(accepted)
        est_accept_rate = np.where(est_accept_rate != 0, est_accept_rate,
                                   1 * 10**(-6))
        est_accept_rate = np.where(est_accept_rate != 1, est_accept_rate,
                                   1 - 1 * 10**(-6))

        # Calculate the expected number of MCMC steps required from the
        # estimated acceptance rate. The number of steps cannot go
        # above a user-specified value
        R = np.ceil(np.log(0.05) / np.log(1 - est_accept_rate))
        R = np.min([R, max_MCMC_steps])
        print('MCMC steps: %d' % (R))
        # Perform the remaining MCMC steps

        #MVN_loop_set = partial(MVN_move, N_moves=int(R-1), part_thetas=part_thetas, part_outputs=part_outputs, part_summaries=part_summaries, part_Ds=part_Ds, Ctheta=Ctheta, theta_mins=theta_mins, theta_maxs=theta_maxs, scale_param=scale_param, target_D=target_D, Pt=Pt, Ot=Ot, f_simulate=f_simulate, f_summaries=f_summaries, f_discrep=f_discrep, seed_nums=seed_nums)

        #print('Accept rate: %g'%(est_accept_rate))
        #print('Running remaining MVN loops %d to %d '%(worst_keep,N_parts))
        #print('Creating %d particles'%(int(R-1)))

        iterable = []
        for k in range(worst_keep, N_parts):
            iterable.append(
                (k, int(R - 1), np.array(part_thetas[k, :]).ravel(),
                 np.squeeze(part_outputs[k, :, :]),
                 np.squeeze(part_summaries[k, :]), np.squeeze(part_Ds[k]),
                 Ctheta, theta_mins, theta_maxs, scale_param, target_D,
                 np.squeeze(Pt[seed_nums[k], :, :]),
                 np.squeeze(Ot[seed_nums[k], :, :]), f_simulate, f_summaries,
                 f_discrep, burnin))

        print('mvn2...')
        start = time.time()
        #for i,k, part_theta, part_output, part_summary, part_D, accept  in tqdm(enumerate(pool.imap_unordered(MVN_loop_set,range(worst_keep,N_parts)))):
        chunksize = int((N_parts - worst_keep) / process_count)
        if chunksize < 1:
            chunksize = 1
        brokeout = 0
        results = pool.starmap_async(MVN_move, iterable, chunksize=chunksize)
        for r in results.get():
            k = r[0]
            part_thetas[k, :] = r[1]
            part_outputs[k, :, :] = r[2]
            part_summaries[k, :] = r[3]
            part_Ds[k] = r[4]
            accepted[k] = r[5]
            brokeout += r[6]
        del iterable
        print('mvn2 duration: ' + str(time.time() - start))
        brokepercent = 100 * brokeout / (N_parts - worst_keep)
        print('Breakout = %.2f %%' % (brokepercent))
        # Count the number of unique particles
        #unique_thetas = np.unique(part_thetas, axis=0)
        N_unique = np.shape(np.unique(part_thetas, axis=0))[0]

        # Output information on discrepancy and uniqueness if flag is set
        if verbose:
            print(
                "Current discrepancy target is %g, number of unique particles is %g"
                % (target_D, N_unique))

        # Check to see if the desired discrepancy target has been reached, and
        # terminate the loop if so. The structure of the loop means that a
        # mutation step will happen after the final resample, so we expect a
        # full sample of N_parts particles all satisfying the desired
        # discrepancy constraint
        print("Discrepency differnce: %.2f\n" % (desired_D - target_D, ))
        if target_D < desired_D:
            looping = 0

        #Also check to see if degeneracy is occurring - this happens when the
        # MCMC steps fail to find new locations and thus remain copies of
        # previous particles. This also terminates the loop when it becomes too
        # severe, but with additional output of a warning
        #print("Degeneracy differnce: %.2f\n" % (N_unique-N_parts))

        if N_unique <= (N_parts / 2):
            print(
                "WARNING: SMC loop terminated due to particle degeneracy. Target not reached! \n"
            )
            looping = 0
        '''
        # Visualise Particles if flag is set
        if visualise
            
            % Call provided plotting function
            f_visualise(part_thetas, part_outputs, part_summaries, part_Ds);
            drawnow;
            
        end
        '''
        '''
        #Memory and runtime safety features 
        mem_used = psutil.virtual_memory()[2]
        print('memory % used:', mem_used)
        if mem_used > 90:
            print('WARNING: High memory usage.  Aborting!\n')
            looping = 0
        if time.time()>max_runtime:
            #print('WARNING: Runtime exceeded.  Aborting!')
            looping = 0
        testitcount+=1
        '''
        break
    # Close the parallel pool now that its use is finished
    pool.close()
    pool.join()
    del pool

    #Sort final unique output
    ordering = np.argsort(part_Ds)
    part_thetas = np.copy(np.unique(part_thetas[ordering, :], axis=0))
    _, idx = np.unique(part_thetas, axis=0, return_index=True)

    part_thetas = np.copy(part_thetas[idx, :])
    part_Ds = np.copy(part_Ds[idx])
    part_outputs = np.copy(np.asarray(part_outputs[idx, :, :], dtype='f8'))
    part_summaries = np.copy(part_summaries[idx, :])

    print("Best discrepancy: %.2f, Worst discrepancy: %.2f" %
          (part_Ds[0], part_Ds[-1]))

    print('SMCABC COMPLETE\n')
    #print only unique output
    return part_thetas, part_outputs, part_summaries, part_Ds
Example no. 24
H_lims = np.asarray([0.025, 0.1])  # Disc aspect ratio
iH = 7
Md_lims = np.asarray([0.01, 0.1])  # Disc mass
iMd = 8

headers = 'Rp,Mpl,a,Ms1,Ms2,alpha,pindex,H,Mdisc'  #column headers for csv later

lims = [
    Rp_lims, Mpl_lims, a_lims, Ms1_lims, Ms2_lims, alpha_lims, pindex_lims,
    H_lims, Md_lims
]
nparams = len(lims)

nsamp = 10

lhs = np.asarray(lhsmdu.sample(nparams,
                               nsamp))  # perform latin hypercube sampling

# scale random variables to be within desired limits
for i, lim in enumerate(lims):
    lhs[i, :] = lhs[i, :] * (lim[1] - lim[0]) + lim[0]

# calculate required grid size and timestep vals
grid_rad = lhs[iRp, :] + 10  # 10 au greater than the planet location
torb = 2 * np.pi * grid_rad**(3. / 2.)  # orbital period at grid outer radius
dt = torb / 100.
tvisc = 4. / 9. * (grid_rad**2 / lhs[ialpha, :]**2 / lhs[iH, :]**2) * np.sqrt(
    grid_rad**3 / lhs[iMs1, :])

# calc sigma0 from disc mass and pindex
# NOTE: this assumes this disc mass extent is 100au NOT just the size of the grid
Example no. 25
# with multi-dimensional uniformity. The following Python code calls two Latin Hypercube functions
# where n data points are plotted on a square of side length L.
# The grid and data set is color coded. Note: 
# The authors of the following code would use the Latin Hypercube function in sequence with
# The Seer, to plot an unbiased amount of data points to train The Seer's neural network.
# The origin of The Seer's test environment would be at 2.75 meters
# due to the radiation patterns and hardware implemented within The Seer.
# The following code would account for this shift of origin from (0,0) to
# (2.75,0), as well as a 0.5 meter padding space needed between The Seer's receiving antenna array 
# and the closest test data points.


# Set number of sample points:
n = 100

k = lhsmdu.sample(2,n) # Latin Hypercube Function  with multi-dimensional uniformity
# The project only utilized 2D but more dimensions could be included, check out 
# the lhsmdu project at pypi.org/project/lhsmdu/
k = np.array(k*5) # Set grid size/dimensions of the testing environment for the first Latin Hypercube function

# Uncomment to include a second set of LHS points
#randSeed = 11 # Create random Latin Hypercube seed to have two unbiased data sets and samples
# The random seed does not need to be LHS, other methods are available.
#l = lhsmdu.sample(2,50, randomSeed=randSeed) #Second Latin Hypercube Function with (2D) uniformity
#l = np.array(l*5.5) #Set grid size/dimensions of the testing environment for the second Latin Hypercube function

fig = plt.figure()
ax = fig.gca()
ax.set_xticks(np.arange(0,7,.5)) # Set the number of ticks on the axis and the spacing between them
ax.set_yticks(np.arange(0,7,.5)) # Set the number of ticks on the axis and the spacing between them
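The origin shift and antenna padding described in the comments are not applied in the snippet itself; a hedged sketch of one way to apply them (treating the first coordinate as the shifted axis is an assumption):

k[0] = k[0] + 2.75 + 0.5  # shift origin to x = 2.75 m, then keep a 0.5 m padding gap
# points now start at least 3.25 m from the receiving antenna array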
Example no. 26
import numpy as np
import pandas as pd

from pythongp.test_functions import test_functions
import lhsmdu

lower = [100, 1000, 1000, 10, 10, 10, 10, 10]
higher = [10000, 10000, 10000, 1000, 1000, 1000, 1000, 1000]
lower = np.array(lower)
higher = np.array(higher)

d = 8

doe = np.array(np.transpose(lhsmdu.sample(d, d * 3)))

doe = doe * np.tile(higher - lower, (24, 1)) + np.tile(lower, (24, 1))

results = test_functions.g_10(doe)

doe = pd.DataFrame(doe, columns=['x{}'.format(i) for i in range(8)])

pd.concat((doe, results), axis=1).to_csv('/Users/sebastien/data/data_g10',
                                         sep=';')
Example no. 27
import lhsmdu
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import numpy

l = lhsmdu.sample(
    3, 10)  # Latin Hypercube Sampling of three variables, 10 samples each.
k = lhsmdu.createRandomStandardUniformMatrix(3, 10)  # Monte Carlo Sampling
pdir = numpy.matrix(
    '55 55 115 55 115 55 115 80 70 60 100 84 63 108 73 94 85 63 98 112')
H = numpy.matrix(
    '1.2 1.2 1.2 2.3 2.3 2.3 2.3 .7 1.2 1.7 1.4 1.1 2.1 .9 2.0 1.5 1.9 1.6 2.1 1.8'
)
F = numpy.matrix(
    '.091 .125 .125 .167 .167 .091 .091 .091 .091 .140 .11 .131 .152 .116 .097 .149 .162 .144 .138 .169'
)
k[0] = 60 * k[0] + 55
l[0] = 60 * l[0] + 55
k[1] = 2.5 * k[1]
l[1] = 2.5 * l[1]
k[2] = .1 * k[2] + .085
l[2] = .1 * l[2] + .085

fig = plt.figure()
ax = fig.add_subplot(131, projection='3d')
ax.scatter([k[0]], [k[1]], [k[2]], color="b", label="MC")
ax.scatter([l[0]], [l[1]], [l[2]], color="r", label="LHS")
ax.scatter([pdir], [H], numpy.transpose(F), color="g", label="OURS")
ax.set_xlabel('Peak Dir')
ax.set_ylabel('Wave Height')
ax.set_zlabel('Frequency')
Example no. 28
def GeneticAlgorithm(objFunc,
                     consFunc,
                     bndsArr,
                     halfpop=10,
                     maxit=100,
                     tol=1e-6,
                     mutationChance=0.005,
                     mutationScale=0.05,
                     verbose=0):
    #objFunc is the objective function
    #consFunc is the constraint function
    #The constraint function must be of the form g(x) < 0, where x may be
    #a vector
    #bndsArr is the bounds array
    #bndsArr has the form (# vars, 2)
    #For example    [[1, 2]
    #                [3, 4]]
    #Means 2 variables. x1 is bounded by 1, 2, and x2 is bounded by 3, 4
    #halfpop is half the population size. Half the population is used to
    #ensure parity
    #maxit is maximum iterations
    #tol is tolerance per iteration. From one iteration to the next, if the change
    #of the objective function is less than the tolerance, the algorithm
    #will automatically terminate.
    #mutationChance is the chance of a mutation occurring. It defaults to 0.5%
    #mutationScale is the scale of the mutation, scale * (-1, 1)

    #Establishes the total population
    totPop = halfpop * 2

    #Establishes the number of variables
    numVars = len(bndsArr)

    #Ensures the objective function is callable
    if (not callable(objFunc)):
        print("Error, objective function not specified correctly.")
        return 0

    #Ensures the constraint function is callable
    elif (not callable(consFunc)):
        print("Error, constraint function is not specified correctly.")
        return 0
    #Ensures the total population is an integer
    elif (not isinstance(totPop, int)):
        print("Error, population size specified as non-integer.")
        return 0

    #Ensures the maximum iterations is an integer
    elif (not isinstance(maxit, int)):
        print("Error, maximum iterations specified as non-integer.")
        return 0

    #Ensures the tolerance is an integer or decimal
    elif ((not isinstance(tol, float)) and (not isinstance(tol, int))):
        print("Error, tolerance not specified as a number.")
        return 0

    #Ensures the bounds array is the proper shape of (#vars, 2)
    elif (np.shape(bndsArr) != (numVars, 2)):
        print(
            "Error, bounds array is not correct size. Should be of size (x , 2)"
        )
        return 0

    #If all the above goes through, then it will execute the Genetic Algorithm
    else:
        print("Initialization success.")
        print("Creating initial population with LHS...")

        #Initializes an LHS array of size (#vars, total population)
        LHSarr = lhsmdu.sample(numVars, totPop)

        #Initializes an initial population of zeros of size
        #(total population, #vars)
        InitPop = np.zeros((totPop, numVars))

        #Reassigns each population member with the LHS sample to between the
        #lower bounds and upper bounds
        for i in range(0, totPop):  #i is population member index
            for j in range(0, numVars):  #j is variable number
                InitPop[i, j] = LHSarr[j, i] * (bndsArr[j, 1] -
                                                bndsArr[j, 0]) + bndsArr[j, 0]
        #The results have the form (Population Member Index, Variable)

        if (verbose == 1):
            print("Initial population established.\n")
            print("Testing initial population...")

        currentEval = np.zeros(totPop)
        for i in range(0, totPop):
            currentEval[i] = objFunc(InitPop[i, :])

        minIndex = np.argmin(currentEval)
        minVal = min(currentEval)
        print("--------------------------------------------------------------")
        print("Initial Population Evaluation")
        print("The best objective function evaluation is ", minVal)
        print("It ocurrs at population member ", minIndex)
        print("--------------------------------------------------------------")

        prevGenObj = minVal

        for k in range(0, maxit):

            if (verbose == 1):
                print("Creating contest population.")

            #Creates a copy of the initial population for the contest population
            ContestPopulation = np.copy(InitPop)

            #Creates an array that will be populated with indices for the contest
            ContestIndexArray = np.zeros((totPop, 2), dtype=int)

            #Creates an array of available contest indices
            AvailableContestIndices = np.linspace(0,
                                                  totPop - 1,
                                                  totPop,
                                                  dtype=int)

            #Assigns available contest indices randomly to the contest index array.
            #Each population member is entered into the contest twice,
            #so during the breeding phase the population size is maintained.
            for i in range(0, halfpop):
                for j in range(0, 2):
                    select = random.randrange(0, len(AvailableContestIndices))
                    ContestIndexArray[i, j] = AvailableContestIndices[select]
                    AvailableContestIndices = np.delete(
                        AvailableContestIndices, select)

            #Reestablishes the available contest indices for the second pairing
            AvailableContestIndices = np.linspace(0,
                                                  totPop - 1,
                                                  totPop,
                                                  dtype=int)

            for i in range(0, halfpop):
                for j in range(0, 2):
                    select = random.randrange(0, len(AvailableContestIndices))
                    ContestIndexArray[i + halfpop,
                                      j] = AvailableContestIndices[select]
                    AvailableContestIndices = np.delete(
                        AvailableContestIndices, select)

            EvaluatedConstraints = np.zeros(totPop)
            EvaluatedObjectives = np.zeros(totPop)

            for i in range(0, totPop):
                EvaluatedConstraints[i] = consFunc(ContestPopulation[i, :])
                EvaluatedObjectives[i] = objFunc(ContestPopulation[i, :])

            ContestWinnerArr = np.zeros(totPop, dtype=int)

            if (verbose == 1):
                print(ContestIndexArray)

            for i in range(0, totPop):
                constraint1 = EvaluatedConstraints[ContestIndexArray[i, 0]]
                constraint2 = EvaluatedConstraints[ContestIndexArray[i, 1]]
                if ((constraint1 > 0) and (constraint2 > 0)):
                    if (constraint1 < constraint2):
                        ContestWinnerArr[i] = ContestIndexArray[i, 0]
                    else:
                        ContestWinnerArr[i] = ContestIndexArray[i, 1]
                elif ((constraint1 < 0) and (constraint2 > 0)):
                    ContestWinnerArr[i] = ContestIndexArray[i, 0]
                elif ((constraint1 > 0) and (constraint2 < 0)):
                    ContestWinnerArr[i] = ContestIndexArray[i, 1]
                else:
                    evaluate1 = EvaluatedObjectives[ContestIndexArray[i, 0]]
                    evaluate2 = EvaluatedObjectives[ContestIndexArray[i, 1]]
                    if (evaluate1 < evaluate2):
                        ContestWinnerArr[i] = ContestIndexArray[i, 0]
                    else:
                        ContestWinnerArr[i] = ContestIndexArray[i, 1]
            if (verbose == 1):
                print("Contest finished.")
                print("Winner indices: ", ContestWinnerArr)

                print("Creating breeding pairs.")

            AvailableBreeders = np.copy(ContestWinnerArr)
            BreedingIndices = np.zeros((halfpop, 2), dtype=int)

            for i in range(0, halfpop):
                for j in range(0, 2):
                    select = random.randrange(0, len(AvailableBreeders))
                    BreedingIndices[i, j] = AvailableBreeders[select]
                    AvailableBreeders = np.delete(AvailableBreeders, select)

            #Breed
            for i in range(0, halfpop):
                InitPop[i] = 0.5 * ContestPopulation[BreedingIndices[i,0]] + \
                    0.5 * ContestPopulation[BreedingIndices[i,1]]
                InitPop[i + halfpop] = 2 * ContestPopulation[BreedingIndices[i,1]] - \
                    ContestPopulation[BreedingIndices[i,0]]

            for i in range(0, totPop):
                mutate = random.random()
                if (mutate < mutationChance):
                    if (verbose == 1):
                        print("Mutation.")
                    mutationAmount = mutationScale * (2. * random.random() -
                                                      1.)
                    InitPop[i] += mutationAmount

            #Evaluate change in best value

            currentEval = np.zeros(totPop)
            for i in range(0, totPop):
                currentEval[i] = objFunc(InitPop[i, :])

            minIndex = np.argmin(currentEval)
            minVal = min(currentEval)
            print(
                "--------------------------------------------------------------"
            )
            print("Iteration ", k + 1)
            print("The best objective function evaluation is ", minVal)
            print("It ocurrs at population member ", minIndex)
            print(
                "--------------------------------------------------------------"
            )

            currGenObj = minVal

            if (abs(prevGenObj - currGenObj) <= tol):
                print("Tolerance reached. Aborting algorithm.")
                print("Best results found at ", InitPop[minIndex])
                print("The value was: ", minVal)
                return 0
            else:
                prevGenObj = currGenObj

        print("Maximum iterations reached.")
    return 0
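An illustrative invocation under stated assumptions: a toy quadratic objective, a constraint in the required g(x) < 0 form, and the module-level imports the function relies on (numpy, random, lhsmdu) already in place:

import numpy as np

def obj(x):
    # toy objective: minimum at (1, -2)
    return (x[0] - 1.0)**2 + (x[1] + 2.0)**2

def cons(x):
    # feasible whenever x0 + x1 < 10
    return x[0] + x[1] - 10.0

bnds = np.array([[-5.0, 5.0], [-5.0, 5.0]])
GeneticAlgorithm(obj, cons, bnds, halfpop=10, maxit=50, verbose=0)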
Example no. 29
def runtime(numDimensions, numSamples):
    ''' Checks runtime using standard variables '''
    start_time = time()
    m = lhsmdu.sample(numDimensions,numSamples)
    end_time = time()
    print(end_time - start_time)
Example no. 30
    for key in solvents:
        s1, s2 = solvents[key][0], solvents[key][1]
        distance = (x1 - s1)**2 + (x2 - s2)**2
        dis_dict[distance] = key
        dis_list[count] = distance
        count += 1
    min_dist = min(dis_list)
    return dis_dict[min_dist]


# read in solvents and their polarity and mbo
sol = parse_data.solvent_parser('data_solvent.csv')
# number of initial samples
n_sample = 10
# sample uniformly from [0,1]^4
samples = lhsmdu.sample(4, n_sample)
# first two dimensions are categorical indices in {0, 1, 2}
samples[0] = np.floor(samples[0] * 3)
samples[1] = np.floor(samples[1] * 3)
# third dim is (0.6,1.4)
samples[2] = [0.6 for i in range(n_sample)] + \
    samples[2] * (1.4-0.6)
# fourth dim is (5, 60)
samples[3] = [5. for i in range(n_sample)] + \
    samples[3] * (60 - 5.)
# map samples to solution combo
solutions = [[0 for j in range(3)] for i in range(n_sample)]
for i in range(n_sample):
    # halides
    if samples[0, i] == 0:
        solutions[i][0] = 'I'
Example no. 31
    if any(M_model < np.min(range_M)):
        squared_sum += 100

    # Step 5: Calculate cost-function
    squared_sum += np.sum((yG_model - yG_vec)**2) + np.sum(
        (yI_model - yI_vec)**2)

    return squared_sum


## Hypercube set up
randSeed = 2  # random number of choice
lhsmdu.setRandomSeed(
    randSeed)  # Latin Hypercube Sampling with multi-dimensional uniformity
start = np.array(
    lhsmdu.sample(18, 4)
)  # Latin Hypercube Sampling with multi-dimensional uniformity (parameters, samples)

para, samples = start.shape

## intervals for the parameters
para_int = [0, 500]

minimum = (np.inf, None)

# Bounds for the model
bound_low = np.array(
    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.1, 0.1])
bound_upp = np.repeat(np.inf, para)
bounds = Bounds(bound_low, bound_upp)
    if any(F_model < np.min(range_F)):
        squared_sum += penalty

    # Calculate cost-function
    squared_sum += np.sum((yG_model - yG_vec)**2) + np.sum(
        (yI_model - yI_vec)**2)

    return squared_sum


## Hypercube set up
randSeed = 2  # random number of choice
lhsmdu.setRandomSeed(
    randSeed)  # Latin Hypercube Sampling with multi-dimensional uniformity
start = np.array(
    lhsmdu.sample(8, 10)
)  # Latin Hypercube Sampling with multi-dimensional uniformity (parameters, samples)

para, samples = start.shape

## intervals for the parameters
para_int = [0, 0.001, 0.01, 0.1, 1, 10, 100, 500]
#   0,   1,    2,    3,  4,  5,  6,   7
minimum = (np.inf, None)

# Bounds for the model
bound_low = np.repeat(0.0, para)
bound_upp = np.repeat(np.inf, para)
bounds = Bounds(bound_low, bound_upp)

fig = plt.figure()