Example No. 1
	def _optimize(self, acquisition: Acquisition, context_manager: ContextManager) -> Tuple[np.ndarray, np.ndarray]:
		"""
		See AcquisitionOptimizerBase._optimize for parameter descriptions.

		Optimize an acquisition function using a genetic algorithm (GA).
		"""
		# initialize population of strings
		random_design = RandomDesign(self.space)
		population = random_design.get_samples(self.population_size)
		# calculate fitness for the current population
		fitness_pop = acquisition.evaluate(population)
		standardized_fitness_pop = fitness_pop / sum(fitness_pop)
		# initialize best location and score so far
		X_max = population[np.argmax(fitness_pop)].reshape(-1, 1)
		acq_max = np.max(fitness_pop).reshape(-1, 1)
		iteration_bests = []
		_log.info("Starting local optimization of acquisition function {}".format(type(acquisition)))
		for step in range(self.num_evolutions):
			_log.info("Performing evolution step {}".format(step))
			# evolve populations
			population = self._evolve(population, standardized_fitness_pop)
			# recalc fitness
			fitness_pop = acquisition.evaluate(population)
			standardized_fitness_pop = fitness_pop / sum(fitness_pop)
			# update best location and score (if found better solution)
			acq_pop_max = np.max(fitness_pop)
			iteration_bests.append(acq_pop_max)
			_log.info("best acqusition score in the new population".format(acq_pop_max))
			if acq_pop_max > acq_max[0][0]:
				acq_max[0][0] = acq_pop_max
				X_max[0] = population[np.argmax(fitness_pop)]
		# if dynamic, keep running (stop when there has been no improvement over the most recent 10 iterations)
		stop = not self.dynamic  # assumes a boolean `dynamic` flag set in __init__, as the comment above implies
		i = self.num_evolutions
		while not stop:
			_log.info("Performing evolution step {}".format(step))
			# evolve populations
			population = self._evolve(population, standardized_fitness_pop)
			# recalc fitness
			fitness_pop = acquisition.evaluate(population)
			standardized_fitness_pop = fitness_pop / sum(fitness_pop)
			# update best location and score (if found better solution)
			acq_pop_max = np.max(fitness_pop)
			iteration_bests.append(acq_pop_max)
			_log.info("best acqusition score in the new population".format(acq_pop_max))
			if acq_pop_max > acq_max[0][0]:
				acq_max[0][0] = acq_pop_max
				X_max[0] = population[np.argmax(fitness_pop)]
			if len(iteration_bests) > 10 and acq_max[0][0] == max(iteration_bests[:-10]):
				stop = True
			# also stop once 100 evolutions have run in total
			if i == 100:
				stop = True
			i += 1
		# return best solution from the whole optimization
		return X_max, acq_max
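The `_evolve` helper called in each step is not shown above. Below is a minimal sketch of what such a step might look like, assuming the standardized fitness values are non-negative and sum to one, with roulette-wheel parent selection, one-point crossover, and a small point-mutation rate; the function name and hyperparameters are illustrative, not the author's implementation:

import numpy as np

def evolve_sketch(population: np.ndarray, standardized_fitness: np.ndarray,
                  mutation_rate: float = 0.05) -> np.ndarray:
    """Create the next generation; `standardized_fitness` must sum to one,
    as `standardized_fitness_pop` does in the optimizer above."""
    n, d = population.shape
    probs = standardized_fitness.flatten()
    children = np.empty_like(population)
    for i in range(n):
        # roulette-wheel selection: fitter individuals are picked more often
        pa, pb = np.random.choice(n, size=2, p=probs)
        # one-point crossover between the two parents
        cut = np.random.randint(1, d)
        children[i, :cut] = population[pa, :cut]
        children[i, cut:] = population[pb, cut:]
        # point mutation: occasionally copy a gene value seen elsewhere in the population
        if np.random.rand() < mutation_rate:
            j = np.random.randint(d)
            children[i, j] = population[np.random.randint(n), j]
    return children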
Example No. 2
def test_multi_fidelity_function_shapes(fcn):
    n_points = 10
    fcn, space = fcn()
    random = RandomDesign(space)
    samples = random.get_samples(n_points)

    # There are only 2 or 3 fidelity levels in the set of functions we are testing
    n_fidelities = len(space.parameters[-1].domain)
    if n_fidelities == 2:
        samples[:5, -1] = 0
        samples[5:, -1] = 1
    elif n_fidelities == 3:
        samples[:5, -1] = 0
        samples[5:8, -1] = 1
        samples[8:, -1] = 2
    else:
        raise ValueError('Please add a case for functions with {:.0f} fidelity levels'.format(n_fidelities))

    # Check shapes when calling through function wrapper
    results = fcn.evaluate(samples)
    assert len(results) == n_points
    for result in results:
        assert result.Y.shape == (1,)

    # Also check shape when calling each fidelity function individually
    for f in fcn.f:
        assert f(samples[:, :-1]).shape == (n_points, 1)
Example No. 3
def test_multi_source_batch_experimental_design():
    objective, space = multi_fidelity_forrester_function()

    # Create initial data
    random_design = RandomDesign(space)
    x_init = random_design.get_samples(10)
    initial_results = objective.evaluate(x_init)
    y_init = np.array([res.Y for res in initial_results])

    # Create multi source acquisition optimizer
    acquisition_optimizer = GradientAcquisitionOptimizer(space)
    multi_source_acquisition_optimizer = MultiSourceAcquisitionOptimizer(
        acquisition_optimizer, space)

    # Create GP model
    gpy_model = GPy.models.GPRegression(x_init, y_init)
    model = GPyModelWrapper(gpy_model)

    # Create acquisition
    acquisition = ModelVariance(model)

    # Create batch candidate point calculator
    batch_candidate_point_calculator = GreedyBatchPointCalculator(
        model, acquisition, multi_source_acquisition_optimizer, batch_size=5)

    initial_loop_state = LoopState(initial_results)
    loop = OuterLoop(batch_candidate_point_calculator,
                     FixedIntervalUpdater(model, 1), initial_loop_state)

    loop.run_loop(objective, 10)
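    # 10 initial points + 10 loop iterations x batch size 5 = 60 evaluated points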
    assert loop.loop_state.X.shape[0] == 60
Example No. 4
def create_model_free_designs(space: ParameterSpace):
    return [RandomDesign(space), LatinDesign(space), SobolDesign(space)]
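A quick usage sketch; the two-dimensional ParameterSpace below is made up for illustration:

from emukit.core import ContinuousParameter, ParameterSpace

space = ParameterSpace([ContinuousParameter('x1', 0.0, 1.0),
                        ContinuousParameter('x2', -1.0, 1.0)])
for design in create_model_free_designs(space):
    # every design samples the same space without needing a fitted model
    print(type(design).__name__, design.get_samples(5).shape)  # -> (5, 2)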
Example No. 5
config_details = {
    0: {
        "name": "p_under18",
        "config": "case_config"
    },
    1: {
        "name": "compliance",
        "config": "policy_config"
    }
}

# Run the simulation a number of times to get some datapoints for the emulator
from emukit.core.initial_designs import RandomDesign

design = RandomDesign(space)
x = design.get_samples(100)
# NB this takes a while to run
y = np.array([simulation(k, config_details)['Effective R']
              for k in x])[:, np.newaxis]

# Use GP regression as the emulator
from GPy.models import GPRegression
from emukit.model_wrappers import GPyModelWrapper
from emukit.sensitivity.monte_carlo import MonteCarloSensitivity

model_gpy = GPRegression(x, y)
model_emukit = GPyModelWrapper(model_gpy)
model_emukit.optimize()

# Run Monte Carlo estimation of Sobol indices on the emulator
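The listing ends here; the next step would look roughly like the following, based on emukit's sensitivity-analysis tutorial (the variable names and the number of Monte Carlo points are illustrative assumptions):

# estimate first-order (main) and total Sobol indices from the emulator
sensitivity = MonteCarloSensitivity(model=model_emukit, input_domain=space)
main_effects, total_effects, _ = sensitivity.compute_effects(num_monte_carlo_points=10000)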