Code Example #1
def test_lhs_random_state(criterion):
    n_dim = 2
    n_samples = 20
    lhs = Lhs()

    h = lhs._lhs_normalized(n_dim, n_samples, 0)
    h2 = lhs._lhs_normalized(n_dim, n_samples, 0)
    assert_array_equal(h, h2)
    lhs = Lhs(criterion=criterion, iterations=100)
    h = lhs.generate([
        (0., 1.),
    ] * n_dim, n_samples, random_state=0)
    h2 = lhs.generate([
        (0., 1.),
    ] * n_dim, n_samples, random_state=0)
    assert_array_equal(h, h2)
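This looks like a pytest test that relies on a parametrized criterion argument; the imports are not shown. A minimal sketch of the assumed surroundings (the exact criterion values are an assumption based on the options skopt's Lhs accepts):

import pytest
from numpy.testing import assert_array_equal
from skopt.sampler import Lhs

@pytest.mark.parametrize("criterion", ["correlation", "maximin", "ratio"])
def test_lhs_random_state(criterion):
    ...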
Code Example #2
def main():
    args = getArgumentParser().parse_args()

    space = [
        skopt.space.Real(args.mzp_min,
                         args.mzp_max,
                         name='mzp',
                         prior='uniform'),
        skopt.space.Real(args.mdh_min,
                         args.mdh_max,
                         name='mdh',
                         prior='uniform'),
        skopt.space.Real(args.mdm_min,
                         args.mdm_max,
                         name='mdm',
                         prior='uniform'),
        skopt.space.Real(args.gx_min,
                         args.gx_max,
                         name='g',
                         prior='log-uniform')
    ]

    # sample data
    lhs = Lhs(lhs_type="centered", criterion=None)
    x = np.array(lhs.generate(space, args.n_samples, random_state=42))

    # set up dataframe for writing to file
    df = pd.DataFrame(data=x, columns=["mzp", "mdh", "mdm", "g"])
    df.index = np.arange(args.dsid_start, args.dsid_start + len(df))
    df.index.rename('dsid', inplace=True)

    # write data to file
    df.to_csv('./grid_hypercube.csv', header=False)
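The getArgumentParser helper is defined elsewhere in the project. Judging only from the attributes the script reads, it could look roughly like the hypothetical sketch below; flag names, types, and the absence of defaults are assumptions:

import argparse

def getArgumentParser():
    # hypothetical sketch: defines only the attributes used above
    parser = argparse.ArgumentParser()
    for name in ("mzp", "mdh", "mdm", "gx"):
        parser.add_argument(f"--{name}-min", dest=f"{name}_min", type=float, required=True)
        parser.add_argument(f"--{name}-max", dest=f"{name}_max", type=float, required=True)
    parser.add_argument("--n-samples", dest="n_samples", type=int, required=True)
    parser.add_argument("--dsid-start", dest="dsid_start", type=int, required=True)
    return parser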
Code Example #3
    def solve(self):
        """
        Wrapper function for scipy.optimize.differential_evolution
        """
        progress = []
        def cb(xk, convergence):
            progress.append(self.fun(xk))

        # initialize number of points = popsize
        space = Space([(0.,1.)]*len(self.bounds))
        lhs = Lhs()
        pop = np.asarray(lhs.generate(space.dimensions, self.popsize))
        
        min_b, max_b = np.asarray(self.bounds).T
        diff = max_b - min_b
        pop = min_b + pop * diff
        
        progress.append(np.min(np.apply_along_axis(self.fun, 1, pop)))
        
        result = scipy_de(self.fun, self.bounds, popsize=1,
                          maxiter=self.maxiter, tol=0.0001,
                          disp=self.disp, callback=cb, init=pop)
        self.x = result.x
        self.fx = result.fun
        f_calls = (np.arange(1,len(progress)+1)) * self.popsize
        self.converge_data = np.vstack((f_calls, np.asarray(progress)))
        self.solved = True
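The names scipy_de, Space, and Lhs are imported elsewhere in the file; given the docstring, they presumably correspond to the imports below. Note that when init is given as an array, scipy uses that array as the initial population, so the popsize=1 keyword is effectively overridden.

import numpy as np
from scipy.optimize import differential_evolution as scipy_de
from skopt.sampler import Lhs
from skopt.space import Space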
Code Example #4
File: lhs.py, Project: DaanMoll/Stochasticsim
def lhs_sample(n_samples):
    """
    Takes random n_samples with the lhs method.
    Returns array x and y.
    """
    x = np.array([])
    y = np.array([])

    # Makes the space of points which can be chosen from
    space = Space([(-2., 1.), (-1.5, 1.5)])

    # Chooses which kind of lhs will be used
    lhs = Lhs(lhs_type="classic", criterion=None)

    # Generates n_samples within the chosen space
    coordinates = lhs.generate(space.dimensions, n_samples)

    # appends all x and y values to array
    for coordinate in coordinates:
        a = coordinate[0]
        x = np.append(x, a)
        b = coordinate[1]
        y = np.append(y, b)

    return x, y
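A possible usage sketch, assuming numpy, skopt.space.Space, and skopt.sampler.Lhs are imported as in the original module; the sample count and the plotting are illustrative only:

import matplotlib.pyplot as plt

x, y = lhs_sample(100)      # 100 LHS points over [-2, 1] x [-1.5, 1.5]
plt.scatter(x, y)
plt.show()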
Code Example #5
def test_lhs_criterion(lhs_type, criterion):
    lhs = Lhs(lhs_type=lhs_type, criterion=criterion, iterations=100)
    samples = lhs.generate([
        (0., 1.),
    ] * 2, 200)
    assert len(samples) == 200
    assert len(samples[0]) == 2
    samples = lhs.generate([("a", "b", "c")], 3)
    assert samples[0][0] in ["a", "b", "c"]

    samples = lhs.generate([("a", "b", "c"), (0, 1)], 1)
    assert samples[0][0] in ["a", "b", "c"]
    assert samples[0][1] in [0, 1]

    samples = lhs.generate([("a", "b", "c"), (0, 1)], 3)
    assert samples[0][0] in ["a", "b", "c"]
    assert samples[0][1] in [0, 1]
Code Example #6
def test_lhs_pdist():
    n_dim = 2
    n_samples = 20
    lhs = Lhs()

    h = lhs._lhs_normalized(n_dim, n_samples, 0)
    d_classic = spatial.distance.pdist(np.array(h), 'euclidean')
    lhs = Lhs(criterion="maximin", iterations=100)
    h = lhs.generate([
        (0., 1.),
    ] * n_dim, n_samples, random_state=0)
    d = spatial.distance.pdist(np.array(h), 'euclidean')
    assert np.min(d) > np.min(d_classic)
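The test asserts that the "maximin" criterion spreads points further apart than the plain normalized design: over the optimization iterations it keeps the candidate whose minimum pairwise distance is largest. A small standalone sketch for inspecting this by hand:

from scipy import spatial
import numpy as np
from skopt.sampler import Lhs

for crit in (None, "maximin"):
    lhs = Lhs(criterion=crit, iterations=100)
    h = lhs.generate([(0., 1.)] * 2, 20, random_state=0)
    print(crit, np.min(spatial.distance.pdist(np.array(h), "euclidean")))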
Code Example #7
 def __init__(self, bounds, method='latin'):
     self.dimensions = len(bounds)
     self.position = np.empty(self.dimensions)
     self.velocity = np.zeros(self.dimensions)
     self.pbest_position = np.empty(self.dimensions)
     self.pbest_value = None
     self.lowerbounds, self.upperbounds = np.asarray(bounds).T
     
     # initialize positions
     if method == 'random':
         position = np.random.rand(self.dimensions)
     elif method == 'latin':
         space = Space([(0.,1.)]*self.dimensions)
         lhs = Lhs()
         position = np.asarray(lhs.generate(space.dimensions,1))[0]
         
     min_b, max_b = np.asarray(bounds).T
     diff = max_b - min_b
     self.position = min_b + position * diff
Code Example #8
 def draw_latin_hypercube_samples(self, num_samples: int) -> list:
     """ Draws an LHS-distributed sample from the search space """
     if self.searchspace_size < num_samples:
         raise ValueError("Can't sample more than the size of the search space")
     if self.sampling_crit is None:
         lhs = Lhs(lhs_type="centered", criterion=None)
     else:
         lhs = Lhs(lhs_type="classic", criterion=self.sampling_crit, iterations=self.sampling_iter)
     param_configs = lhs.generate(self.dimensions(), num_samples)
     indices = list()
     normalized_param_configs = list()
     for i in range(len(param_configs)):
         try:
             param_config = self.normalize_param_config(param_configs[i])
             index = self.find_param_config_index(param_config)
             indices.append(index)
             normalized_param_configs.append(param_config)
         except ValueError:
             """ Due to search space restrictions, the search space may not be an exact cartesian product of the tunable parameter values.
             It is thus possible for LHS to generate a parameter combination that is not in the actual searchspace, which must be skipped. """
             continue
     return list(zip(normalized_param_configs, indices))
Code Example #9
#############################################################################
# Sobol'
# ------

sobol = Sobol()
x = sobol.generate(space.dimensions, n_samples)
plot_searchspace(x, "Sobol'")
pdist_data.append(pdist(x).flatten())
x_label.append("sobol'")

#############################################################################
# Classic Latin hypercube sampling
# --------------------------------

lhs = Lhs(lhs_type="classic", criterion=None)
x = lhs.generate(space.dimensions, n_samples)
plot_searchspace(x, 'classic LHS')
pdist_data.append(pdist(x).flatten())
x_label.append("lhs")

#############################################################################
# Centered Latin hypercube sampling
# ---------------------------------

lhs = Lhs(lhs_type="centered", criterion=None)
x = lhs.generate(space.dimensions, n_samples)
plot_searchspace(x, 'centered LHS')
pdist_data.append(pdist(x).flatten())
x_label.append("center")

#############################################################################
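The fragment above is from a sphinx-gallery script; the objects it references (space, n_samples, plot_searchspace, pdist_data, x_label) are defined earlier in that script. A rough sketch of the missing preamble, under the assumption of a 2-D real search space with illustrative bounds:

import numpy as np
import matplotlib.pyplot as plt
from scipy.spatial.distance import pdist
from skopt.space import Space
from skopt.sampler import Sobol, Lhs

space = Space([(-5., 10.), (0., 15.)])   # illustrative 2-D bounds
n_samples = 10
pdist_data = []
x_label = []

def plot_searchspace(x, title):
    # minimal stand-in for the gallery's plotting helper
    x = np.asarray(x)
    plt.figure()
    plt.scatter(x[:, 0], x[:, 1])
    plt.title(title)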
Code Example #10

omega_m_limits = [0.2,0.4]
h_limits = [0.6,0.8]
sigma_8_limits = [0.7,0.8]

f_esc_limits = [0.01,1.]
C_ion_limits = [0.,1.]
D_ion_limits = [0.,2.]

limits = np.array([omega_m_limits,h_limits,sigma_8_limits,f_esc_limits,C_ion_limits,D_ion_limits])

lhs = Lhs(lhs_type="classic", criterion=None)

np.random.seed(123456789)
omega_m, h, sigma_8, f_esc, C_ion, D_ion = np.array(lhs.generate(limits, n_samples=1000)).T

with open("simfast21.ini") as file:
    ini_file = file.readlines()


for i in range(len(omega_m)):

    dirname = 'runs/run'+str(i)
    os.system('mkdir '+dirname)

    fname = dirname+'/simfast21.ini'

    make_ini_file(ini_file, fname, omega_m = omega_m[i], h = h[i], sigma_8=sigma_8[i],
                  C_ion=C_ion[i], D_ion=D_ion[i], f_esc=f_esc[i])
Code Example #11
class Sampler_SkoptSampler(Sampler):
    """ Sampler using skopt.sampler class

    Attributes
    ----------
    method : str
        name of the sampling method (currently only "lhs" is supported)
    options : dict
        options for the chosen method
    lhs : skopt.sampler.Lhs
        the underlying skopt sampler, created by create_sampler()

    """
    def __init__(self, samplerDict):
        """Sampler_SkoptSampler class constructor

        Parameters
        ----------
        samplerDict : dict
            dictionary that specifies the sampler configuration

        """

        Sampler.__init__(self, samplerDict)

        # type
        self.type = "SkoptSampler"

        # method: LHS, Sobol, Halton, and Hammersly
        self.method = None

        # dictionary for the options of the particular method
        self.options = {}

        # load sampler configuration from dictionary
        self.load_from_sampler_dict()

        # create the sampler
        self.lhs = None

        self.create_sampler()

    def load_from_sampler_dict(self):
        """ Load configuration of the sampler from the dictionary

        Returns
        -------

        """

        #get the method: e.g., lhs
        self.method = self.samplerDict['method']

        #load options
        self.lhs_type = "classic"
        if "lhs_type" in self.samplerDict:
            self.lhs_type = self.samplerDict["lhs_type"]

        self.criterion = "maximin"
        if "criterion" in self.samplerDict:
            self.criterion = self.samplerDict["criterion"]

        self.iterations = 1000
        if "iterations" in self.samplerDict:
            self.iterations = self.samplerDict["iterations"]

    def create_sampler(self):
        """
        Create the sampler

        Returns
        -------

        """

        if self.method == "lhs":
            self.lhs = Lhs(lhs_type=self.lhs_type,
                           criterion=self.criterion,
                           iterations=self.iterations)
        else:
            raise Exception("The specified sampling method "
                            + str(self.method) + " is not supported.")

    def generate(self, space, n_samples):
        """
        Generate and return samples

        Parameters
        ----------
        space: Space object
        n_samples: int
            number of samples to draw

        Returns
        -------

        array[n_samples, n_parameters]

        """

        return self.lhs.generate(space.dimensions, n_samples)
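A usage sketch, assuming the Sampler base class simply stores the configuration dictionary; the keys follow load_from_sampler_dict, while the values are illustrative:

from skopt.space import Space

samplerDict = {
    "method": "lhs",
    "lhs_type": "classic",
    "criterion": "maximin",
    "iterations": 1000,
}
sampler = Sampler_SkoptSampler(samplerDict)
space = Space([(0., 1.), (0., 1.)])
samples = sampler.generate(space, n_samples=20)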
Code Example #12
 def _generate(self):
     lhs = Lhs(criterion=self.criterion, iterations=self.iterations)
     X = lhs.generate(self.search_dims, self.size, random_state=self.rng)
     return X
Code Example #13
File: BO_open_config_T.py, Project: bkhoutom/CHEM449
repeat = 1

for G, inv_G in enumerate(inv_G_ls):
    y = lambda site_w: OpenQT(s,
                              d,
                              np.array(site_w[:(s - 2) * d]),
                              np.array(site_w[(s - 2) * d:]),
                              Gamma=1 / inv_G,
                              n_p=3).T_r(epabs=0.001)[0]  # object function
    note = "inv_Gamma = {0}T".format(inv_G / T)
    print(note)
    filename = '{0}s_{1}d_job_{2}_{3}invG_kappa_0.01.csv'.format(
        s, d, job, inv_G / T)
    for itr in range(repeat):
        lhs = Lhs(lhs_type="classic", criterion=None)
        X_init = lhs.generate(bound, num_init)
        print(X_init)
        Y_init = np.array([y(X_i) for X_i in X_init])
        print(Y_init)

        # Run BO
        r = gp_minimize(
            y,  # negative for maximize; positive for minimize
            bound,
            base_estimator=gpr,
            acq_func='LCB',  # lower confidence bound
            kappa=0.01,
            # xi=0.01,          # exploitation-exploration trade-off
            # acq_optimizer="sampling", # for the periodic kernel
            n_calls=num_itr,  # number of iterations (s-2)*d*100
            n_random_starts=-num_init,  # initial samples are provided
Code Example #14
def test_lhs_centered():
    lhs = Lhs(lhs_type="centered")
    samples = lhs.generate([
        (0., 1.),
    ] * 3, 3)
    assert_almost_equal(np.sum(samples), 4.5)
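The expected value follows from the definition of a centered design: with 3 samples in [0, 1], each dimension uses the bin centers 1/6, 1/2, and 5/6, which sum to 1.5; over 3 dimensions the total is 3 * 1.5 = 4.5, regardless of how the centers are permuted. A two-line check of the arithmetic:

centers = [1/6, 1/2, 5/6]   # bin centers of a 3-sample centered LHS in [0, 1]
assert abs(sum(centers) * 3 - 4.5) < 1e-12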
Code Example #15
    def solve(self):
        """
        Solves the optimization problem through simulated annealing algorithm
        """
        
        # provide an initial state and temperature
        time = 0
        current_temp = self.initial_temp

        space = Space([(0.,1.)]*len(self.bounds))
        lhs = Lhs()
        current_state = np.asarray(lhs.generate(space.dimensions, self.popsize))
        min_b, max_b = np.asarray(self.bounds).T
        diff = max_b - min_b
        current_state = min_b + current_state * diff
    
        # evaluate current state
        energy = np.apply_along_axis(self.fun, 1, current_state)
        best_energy = np.min(energy)
        best_state = current_state[np.argmin(energy)]
        evals = self.popsize
        
        # variables for storing progress data
        progress = []
        
        for i in range(self.maxiter):
            for j in range(len(current_state)):
                # generate a new state, randomly chosen neighbour of state
                if self.get_neighbor == 'cauchy':
                    neighbor = SimulatedAnnealing.get_neighbor_cauchy(current_state[j], diff, min_b, max_b, current_temp, self.qv)
                else:
                    neighbor = SimulatedAnnealing.get_neighbor_normal(current_state[j], diff, self.initial_temp, current_temp)
                
                # evaluate new neighbor
                energy_neighbor = self.fun(neighbor)
                delta = energy_neighbor - energy[j]
                evals += 1
           
                if delta < 0:
                    current_state[j] = neighbor
                    energy[j] = energy_neighbor
                    if energy[j] < best_energy:
                        best_energy = energy[j]
                        best_state = current_state[j]
                else:
                    if np.random.rand() < np.exp(-delta/current_temp):
                        current_state[j] = neighbor
                        energy[j] = energy_neighbor
            
            progress.append(best_energy)
            
            time += 1
            current_temp = self.temp_func(self.initial_temp, current_temp, time, self.qv)
            
            if self.disp:
                print(f"simulated annealing step {i}: f(x)= {best_energy}")
                
            if evals > self.max_eval:
                break
        
        f_calls = np.arange(1, i+2) * self.popsize
        self.converge_data = np.vstack((f_calls, np.asarray(progress)))
        self.solved = True
        self.x = best_state
        self.fx = best_energy