def main():
    """Run the FACE optimizer on a noisy benchmark function (example)."""
    name = 'L2L-FUN-FACE'
    experiment = Experiment("../results/")
    traj, jube_params = experiment.prepare_experiment(
        name=name, trajectory_name=name, log_stdout=True)

    # Benchmark function number 4, with noise added.
    (benchmark_name, benchmark_function), benchmark_parameters = \
        BenchmarkedFunctions().get_function_by_index(4, noise=True)

    # Inner-loop simulator.
    optimizee = FunctionGeneratorOptimizee(traj, benchmark_function, seed=100)

    # Outer-loop optimizer.
    parameters = FACEParameters(
        min_pop_size=20,
        max_pop_size=50,
        n_elite=10,
        smoothing=0.2,
        temp_decay=0,
        n_iteration=1,
        distribution=Gaussian(),
        n_expand=5,
        stop_criterion=np.inf,
        seed=109)
    optimizer = FACEOptimizer(
        traj,
        optimizee_create_individual=optimizee.create_individual,
        optimizee_fitness_weights=(-0.1,),
        parameters=parameters,
        optimizee_bounding_func=optimizee.bounding_func)

    experiment.run_experiment(optimizer=optimizer, optimizee=optimizee,
                              optimizer_parameters=parameters,
                              optimizee_parameters=None)
    experiment.end_experiment(optimizer)
def main():
    """Run a grid-search optimizer over a noisy benchmark function."""
    name = 'L2L-FUN-GS'
    experiment = Experiment(root_dir_path='../results')
    traj, _ = experiment.prepare_experiment(name=name, log_stdout=True)

    ## Benchmark function
    function_id = 4
    bench_functs = BenchmarkedFunctions()
    (benchmark_name, benchmark_function), benchmark_parameters = \
        bench_functs.get_function_by_index(function_id, noise=True)

    optimizee_seed = 100
    random_state = np.random.RandomState(seed=optimizee_seed)
    function_tools.plot(benchmark_function, random_state)

    ## Innerloop simulator
    optimizee = FunctionGeneratorOptimizee(traj, benchmark_function,
                                           seed=optimizee_seed)

    ## Outerloop optimizer initialization
    n_grid_divs_per_axis = 30
    parameters = GridSearchParameters(param_grid={
        'coords': (optimizee.bound[0], optimizee.bound[1],
                   n_grid_divs_per_axis)
    })
    optimizer = GridSearchOptimizer(
        traj,
        optimizee_create_individual=optimizee.create_individual,
        optimizee_fitness_weights=(-0.1,),
        parameters=parameters)

    # Experiment run
    # BUG FIX: `parameters` configures the optimizer, so pass it as
    # `optimizer_parameters` (it was previously passed as
    # `optimizee_parameters`, mislabelling it in logs/records).
    experiment.run_experiment(optimizee=optimizee, optimizer=optimizer,
                              optimizer_parameters=parameters)

    # End experiment
    experiment.end_experiment(optimizer)
def setUp(self):
    """Build the test fixture: experiment, trajectory and a function-generator optimizee."""
    # Benchmark function used by the tests (index 14, with noise added).
    benchmarks = BenchmarkedFunctions()
    (bench_name, bench_function), bench_params = \
        benchmarks.get_function_by_index(14, noise=True)

    self.experiment = Experiment(root_dir_path='../../results')
    self.trajectory, all_jube_params = self.experiment.prepare_experiment(
        name='L2L', log_stdout=True, jube_parameter={})
    # Empty parameter record type; the optimizee needs no extra parameters.
    self.optimizee_parameters = namedtuple('OptimizeeParameters', [])
    self.optimizee = FunctionGeneratorOptimizee(self.trajectory,
                                                bench_function, seed=1)
def run_experiment():
    """Run the evolution-strategies example on a noisy benchmark function."""
    experiment = Experiment("../results/")
    name = 'L2L-FUN-ES'
    trajectory_name = 'mirroring-and-fitness-shaping'
    traj, all_jube_params = experiment.prepare_experiment(
        name=name, trajectory_name=trajectory_name, log_stdout=True)

    ## Benchmark function
    function_id = 14
    bench_functs = BenchmarkedFunctions()
    (benchmark_name, benchmark_function), benchmark_parameters = \
        bench_functs.get_function_by_index(function_id, noise=True)

    optimizee_seed = 200

    ## Innerloop simulator
    optimizee = FunctionGeneratorOptimizee(traj, benchmark_function,
                                           seed=optimizee_seed)

    ## Outerloop optimizer initialization
    optimizer_seed = 1234
    # FIX: np.inf instead of np.Inf — the capitalized alias was removed in
    # NumPy 2.0.
    parameters = EvolutionStrategiesParameters(
        learning_rate=0.1,
        noise_std=1.0,
        mirrored_sampling_enabled=True,
        fitness_shaping_enabled=True,
        pop_size=20,
        n_iteration=1000,
        stop_criterion=np.inf,
        seed=optimizer_seed)

    optimizer = EvolutionStrategiesOptimizer(
        traj,
        optimizee_create_individual=optimizee.create_individual,
        optimizee_fitness_weights=(-1.,),
        parameters=parameters,
        optimizee_bounding_func=optimizee.bounding_func)

    # Run experiment
    experiment.run_experiment(optimizer=optimizer, optimizee=optimizee,
                              optimizer_parameters=parameters)
    # End experiment
    experiment.end_experiment(optimizer)
def main():
    """Run simulated annealing on a noisy benchmark function."""
    name = 'L2L-FunctionGenerator-SA'
    experiment = Experiment("../results/")
    traj, all_jube_params = experiment.prepare_experiment(name=name,
                                                          log_stdout=True)

    ## Benchmark function
    function_id = 14
    bench_functs = BenchmarkedFunctions()
    (benchmark_name, benchmark_function), benchmark_parameters = \
        bench_functs.get_function_by_index(function_id, noise=True)

    optimizee_seed = 100
    random_state = np.random.RandomState(seed=optimizee_seed)
    function_tools.plot(benchmark_function, random_state)

    ## Innerloop simulator
    optimizee = FunctionGeneratorOptimizee(traj, benchmark_function,
                                           seed=optimizee_seed)

    ## Outerloop optimizer initialization
    # FIX: np.inf instead of np.Inf — the capitalized alias was removed in
    # NumPy 2.0.
    parameters = SimulatedAnnealingParameters(
        n_parallel_runs=50, noisy_step=.03, temp_decay=.99, n_iteration=100,
        stop_criterion=np.inf, seed=np.random.randint(1e5),
        cooling_schedule=AvailableCoolingSchedules.QUADRATIC_ADDAPTIVE)

    optimizer = SimulatedAnnealingOptimizer(
        traj,
        optimizee_create_individual=optimizee.create_individual,
        optimizee_fitness_weights=(-1,),
        parameters=parameters,
        optimizee_bounding_func=optimizee.bounding_func)

    # Run experiment
    experiment.run_experiment(optimizer=optimizer, optimizee=optimizee,
                              optimizer_parameters=parameters)
    # End experiment
    experiment.end_experiment(optimizer)
def main():
    """Run gradient descent (RMSProp flavour) on a noisy benchmark function."""
    name = 'L2L-FUN-GD'
    experiment = Experiment("../results")
    traj, all_jube_params = experiment.prepare_experiment(name=name,
                                                          trajectory_name=name)

    ## Benchmark function
    function_id = 4
    bench_functs = BenchmarkedFunctions()
    (benchmark_name, benchmark_function), benchmark_parameters = \
        bench_functs.get_function_by_index(function_id, noise=True)

    optimizee_seed = 100
    random_state = np.random.RandomState(seed=optimizee_seed)

    ## Innerloop simulator
    optimizee = FunctionGeneratorOptimizee(traj, benchmark_function,
                                           seed=optimizee_seed)

    ## Outerloop optimizer initialization
    # Alternative gradient-descent parameterizations, kept for reference:
    # parameters = ClassicGDParameters(learning_rate=0.01,
    #                                  exploration_step_size=0.01,
    #                                  n_random_steps=5, n_iteration=100,
    #                                  stop_criterion=np.inf)
    # parameters = AdamParameters(learning_rate=0.01,
    #                             exploration_step_size=0.01, n_random_steps=5,
    #                             first_order_decay=0.8, second_order_decay=0.8,
    #                             n_iteration=100, stop_criterion=np.inf)
    # parameters = StochasticGDParameters(learning_rate=0.01,
    #                                     stochastic_deviation=1,
    #                                     stochastic_decay=0.99,
    #                                     exploration_step_size=0.01,
    #                                     n_random_steps=5, n_iteration=100,
    #                                     stop_criterion=np.inf)
    # FIX: np.inf instead of np.Inf — the capitalized alias was removed in
    # NumPy 2.0.
    parameters = RMSPropParameters(learning_rate=0.01,
                                   exploration_step_size=0.01,
                                   n_random_steps=5, momentum_decay=0.5,
                                   n_iteration=100, stop_criterion=np.inf,
                                   seed=99)

    # NOTE(review): the fitness weight here is positive (0.1) while the other
    # examples in this file use negative weights — confirm the sign is
    # intended for the gradient-descent case.
    optimizer = GradientDescentOptimizer(
        traj,
        optimizee_create_individual=optimizee.create_individual,
        optimizee_fitness_weights=(0.1,),
        parameters=parameters,
        optimizee_bounding_func=optimizee.bounding_func)

    experiment.run_experiment(optimizer=optimizer, optimizee=optimizee,
                              optimizer_parameters=parameters)
    experiment.end_experiment(optimizer)
def test_juberunner_setup(self):
    """Check that preparing an optimizee for JUBE writes a readable optimizee.bin."""
    self.experiment = Experiment(root_dir_path='../../results')
    self.trajectory, _ = self.experiment.prepare_experiment(
        name='test_trajectory',
        trajectory='test_trajectory',
        filename=".",
        file_title='{} data'.format('test_trajectory'),
        comment='{} data'.format('test_trajectory'),
        add_time=True,
        automatic_storing=True,
        log_stdout=False,
        jube_parameter={})
    self.trajectory.f_add_parameter_group("JUBE_params",
                                          "Contains JUBE parameters")
    self.trajectory.f_add_parameter_to_group(
        "JUBE_params", "exec", "python " + os.path.join(
            self.paths.simulation_path, "run_files/run_optimizee.py"))
    self.trajectory.f_add_parameter_to_group("JUBE_params", "paths",
                                             self.paths)

    ## Benchmark function
    function_id = 14
    bench_functs = BenchmarkedFunctions()
    (benchmark_name, benchmark_function), benchmark_parameters = \
        bench_functs.get_function_by_index(function_id, noise=True)

    optimizee_seed = 1
    optimizee = FunctionGeneratorOptimizee(self.trajectory,
                                           benchmark_function,
                                           seed=optimizee_seed)
    jube.prepare_optimizee(optimizee, self.paths.root_dir_path)

    fname = os.path.join(self.paths.root_dir_path, "optimizee.bin")
    # FIX: use a context manager so the handle is always closed, and catch
    # only OSError so unrelated bugs are not silently turned into a bare
    # test failure.
    try:
        with open(fname, "r"):
            pass
    except OSError:
        self.fail("optimizee.bin was not written by prepare_optimizee")
def main():
    """Run a genetic-algorithm optimizer on a noisy benchmark function."""
    experiment = Experiment(root_dir_path='../results')
    name = 'L2L-FUN-GA'
    traj, _ = experiment.prepare_experiment(name=name, log_stdout=True)

    ## Benchmark function
    function_id = 4
    bench_functs = BenchmarkedFunctions()
    (benchmark_name, benchmark_function), benchmark_parameters = \
        bench_functs.get_function_by_index(function_id, noise=True)

    optimizee_seed = 100
    random_state = np.random.RandomState(seed=optimizee_seed)
    function_tools.plot(benchmark_function, random_state)

    ## Innerloop simulator
    optimizee = FunctionGeneratorOptimizee(traj, benchmark_function,
                                           seed=optimizee_seed)

    ## Outerloop optimizer initialization
    parameters = GeneticAlgorithmParameters(seed=0, popsize=50, CXPB=0.5,
                                            MUTPB=0.3, NGEN=100, indpb=0.02,
                                            tournsize=15, matepar=0.5,
                                            mutpar=1)
    optimizer = GeneticAlgorithmOptimizer(
        traj,
        optimizee_create_individual=optimizee.create_individual,
        optimizee_fitness_weights=(-0.1,),
        parameters=parameters)

    # BUG FIX: `parameters` configures the optimizer, so pass it as
    # `optimizer_parameters` (it was previously passed as
    # `optimizee_parameters`, mislabelling it in logs/records).
    experiment.run_experiment(optimizer=optimizer, optimizee=optimizee,
                              optimizer_parameters=parameters)
    experiment.end_experiment(optimizer)
def main():
    """Run the parallel-tempering optimizer on a noisy benchmark function."""
    name = 'L2L-FunctionGenerator-PT'
    try:
        with open('bin/path.conf') as f:
            root_dir_path = f.read().strip()
    except FileNotFoundError:
        raise FileNotFoundError(
            "You have not set the root path to store your results."
            " Write the path to a path.conf text file in the bin directory"
            " before running the simulation")
    paths = Paths(name, dict(run_no='test'), root_dir_path=root_dir_path)

    print("All output logs can be found in directory ", paths.logs_path)

    traj_file = os.path.join(paths.output_dir_path, 'data.h5')

    # Create an environment that handles running our simulation
    # This initializes an environment
    env = Environment(
        trajectory=name,
        filename=traj_file,
        file_title='{} data'.format(name),
        comment='{} data'.format(name),
        add_time=True,
        # freeze_input=True,
        # multiproc=True,
        # use_scoop=True,
        # wrap_mode=pypetconstants.WRAP_MODE_LOCAL,
        automatic_storing=True,
        log_stdout=False,  # Sends stdout to logs
    )
    create_shared_logger_data(logger_names=['bin', 'optimizers'],
                              log_levels=['INFO', 'INFO'],
                              log_to_consoles=[True, True],
                              sim_name=name,
                              log_directory=paths.logs_path)
    configure_loggers()

    # Get the trajectory from the environment
    traj = env.trajectory

    ## Benchmark function
    function_id = 14
    bench_functs = BenchmarkedFunctions()
    (benchmark_name, benchmark_function), benchmark_parameters = \
        bench_functs.get_function_by_index(function_id, noise=True)

    optimizee_seed = 100
    random_state = np.random.RandomState(seed=optimizee_seed)
    function_tools.plot(benchmark_function, random_state)

    ## Innerloop simulator
    optimizee = FunctionGeneratorOptimizee(traj, benchmark_function,
                                           seed=optimizee_seed)

    #--------------------------------------------------------------------------
    # configure settings for parallel tempering:
    # for each of the parallel runs chose
    # a cooling schedule
    # an upper and lower temperature bound
    # a decay parameter
    #--------------------------------------------------------------------------

    # specify the number of parallel running schedules. Each following
    # container has to have an entry for each parallel run
    n_parallel_runs = 5

    # for detailed information on the cooling schedules see either the wiki or
    # the documentaition in l2l.optimizers.paralleltempering.optimizer
    cooling_schedules = [AvailableCoolingSchedules.EXPONENTIAL_ADDAPTIVE,
                         AvailableCoolingSchedules.EXPONENTIAL_ADDAPTIVE,
                         AvailableCoolingSchedules.EXPONENTIAL_ADDAPTIVE,
                         AvailableCoolingSchedules.LINEAR_ADDAPTIVE,
                         AvailableCoolingSchedules.LINEAR_ADDAPTIVE]

    # has to be from 1 to 0, first entry hast to be larger than second
    # represents the starting temperature and the ending temperature
    # BUG FIX: this must be an ndarray — the range checks below use
    # element-wise comparison and column slicing, which fail on a plain list.
    temperature_bounds = np.array([
        [0.8, 0],
        [0.7, 0],
        [0.6, 0],
        [1, 0.1],
        [0.9, 0.2]])

    # decay parameter for each schedule. If needed can be different for each
    # schedule
    decay_parameters = np.full(n_parallel_runs, 0.99)
    #--------------------------------------------------------------------------
    # end of configuration
    #--------------------------------------------------------------------------

    # Check, if the temperature bounds and decay parameters are reasonable.
    # BUG FIX: the original called `.all()` on a Python list (AttributeError),
    # compared the boolean result of `.all()` against numbers, and used
    # `print(...)` as the assert message (it ran unconditionally and the
    # message was always None). Use element-wise checks and plain strings.
    assert np.all((temperature_bounds <= 1) & (temperature_bounds >= 0)) and \
        np.all(temperature_bounds[:, 0] > temperature_bounds[:, 1]), \
        "Temperature bounds are not within specifications."
    assert np.all((decay_parameters <= 1) & (decay_parameters >= 0)), \
        "Decay parameter not within specifications."

    ## Outerloop optimizer initialization
    # FIX: np.inf instead of np.Inf — the capitalized alias was removed in
    # NumPy 2.0.
    parameters = ParallelTemperingParameters(
        n_parallel_runs=n_parallel_runs,
        noisy_step=.03,
        n_iteration=1000,
        stop_criterion=np.inf,
        seed=np.random.randint(1e5),
        cooling_schedules=cooling_schedules,
        temperature_bounds=temperature_bounds,
        decay_parameters=decay_parameters)
    optimizer = ParallelTemperingOptimizer(
        traj,
        optimizee_create_individual=optimizee.create_individual,
        optimizee_fitness_weights=(-1,),
        parameters=parameters,
        optimizee_bounding_func=optimizee.bounding_func)

    # Add post processing
    env.add_postprocessing(optimizer.post_process)

    # Run the simulation with all parameter combinations
    env.run(optimizee.simulate)

    ## Outerloop optimizer end
    optimizer.end(traj)

    # Finally disable logging and close all log-files
    env.disable_logging()
def main():
    """Run the genetic algorithm through JUBE with a scheduler-style setup."""
    name = 'L2L-FUN-GA'
    try:
        with open('bin/path.conf') as f:
            root_dir_path = f.read().strip()
    except FileNotFoundError:
        raise FileNotFoundError(
            "You have not set the root path to store your results."
            " Write the path to a path.conf text file in the bin directory"
            " before running the simulation"
        )
    paths = Paths(name, dict(run_no='test'), root_dir_path=root_dir_path)

    # FIX: yaml.load without an explicit Loader is deprecated and unsafe on
    # untrusted input; safe_load is the right call for a plain config file.
    with open("bin/logging.yaml") as f:
        l_dict = yaml.safe_load(f)
    log_output_file = os.path.join(paths.results_path,
                                   l_dict['handlers']['file']['filename'])
    l_dict['handlers']['file']['filename'] = log_output_file
    logging.config.dictConfig(l_dict)

    print("All output can be found in file ", log_output_file)
    print("Change the values in logging.yaml to control log level and destination")
    print("e.g. change the handler to console for the loggers you're interesting in to get output to stdout")

    traj_file = os.path.join(paths.output_dir_path, 'data.h5')

    # Create an environment that handles running our simulation
    # This initializes an environment
    env = Environment(trajectory=name, filename=traj_file,
                      file_title='{} data'.format(name),
                      comment='{} data'.format(name),
                      add_time=True,
                      automatic_storing=True,
                      log_stdout=False,  # Sends stdout to logs
                      log_folder=os.path.join(paths.output_dir_path, 'logs')
                      )

    # Get the trajectory from the environment
    traj = env.trajectory

    # Set JUBE params
    traj.f_add_parameter_group("JUBE_params", "Contains JUBE parameters")
    # Scheduler parameters
    # Name of the scheduler
    # traj.f_add_parameter_to_group("JUBE_params", "scheduler", "Slurm")
    # Command to submit jobs to the schedulers
    traj.f_add_parameter_to_group("JUBE_params", "submit_cmd", "sbatch")
    # Template file for the particular scheduler
    traj.f_add_parameter_to_group("JUBE_params", "job_file", "job.run")
    # Number of nodes to request for each run
    traj.f_add_parameter_to_group("JUBE_params", "nodes", "1")
    # Requested time for the compute resources
    traj.f_add_parameter_to_group("JUBE_params", "walltime", "00:01:00")
    # MPI Processes per node
    traj.f_add_parameter_to_group("JUBE_params", "ppn", "1")
    # CPU cores per MPI process
    traj.f_add_parameter_to_group("JUBE_params", "cpu_pp", "1")
    # Threads per process
    traj.f_add_parameter_to_group("JUBE_params", "threads_pp", "1")
    # Type of emails to be sent from the scheduler
    traj.f_add_parameter_to_group("JUBE_params", "mail_mode", "ALL")
    # Email to notify events from the scheduler
    traj.f_add_parameter_to_group("JUBE_params", "mail_address", "*****@*****.**")
    # Error file for the job
    traj.f_add_parameter_to_group("JUBE_params", "err_file", "stderr")
    # Output file for the job
    traj.f_add_parameter_to_group("JUBE_params", "out_file", "stdout")
    # JUBE parameters for multiprocessing. Relevant even without scheduler.
    # MPI Processes per job
    traj.f_add_parameter_to_group("JUBE_params", "tasks_per_job", "1")
    # The execution command
    traj.f_add_parameter_to_group("JUBE_params", "exec",
                                  "mpirun python3 " + root_dir_path +
                                  "/run_files/run_optimizee.py")
    # Ready file for a generation
    traj.f_add_parameter_to_group("JUBE_params", "ready_file",
                                  root_dir_path + "/readyfiles/ready_w_")
    # Path where the job will be executed
    traj.f_add_parameter_to_group("JUBE_params", "work_path", root_dir_path)

    ## Benchmark function
    function_id = 4
    bench_functs = BenchmarkedFunctions()
    (benchmark_name, benchmark_function), benchmark_parameters = \
        bench_functs.get_function_by_index(function_id, noise=True)

    optimizee_seed = 100
    random_state = np.random.RandomState(seed=optimizee_seed)
    function_tools.plot(benchmark_function, random_state)

    ## Innerloop simulator
    optimizee = FunctionGeneratorOptimizee(traj, benchmark_function,
                                           seed=optimizee_seed)

    # Prepare optimizee for jube runs
    jube.prepare_optimizee(optimizee, root_dir_path)

    ## Outerloop optimizer initialization
    parameters = GeneticAlgorithmParameters(seed=0, popsize=50, CXPB=0.5,
                                            MUTPB=0.3, NGEN=100, indpb=0.02,
                                            tournsize=15, matepar=0.5,
                                            mutpar=1
                                            )

    optimizer = GeneticAlgorithmOptimizer(
        traj,
        optimizee_create_individual=optimizee.create_individual,
        optimizee_fitness_weights=(-0.1,),
        parameters=parameters)

    # Add post processing
    env.add_postprocessing(optimizer.post_process)

    # Run the simulation with all parameter combinations
    env.run(optimizee.simulate)

    ## Outerloop optimizer end
    optimizer.end()

    # Finally disable logging and close all log-files
    env.disable_logging()
def main():
    """Run grid search through JUBE on a noisy benchmark function."""
    name = 'L2L-FUN-GS'
    try:
        with open('bin/path.conf') as f:
            root_dir_path = f.read().strip()
    except FileNotFoundError:
        raise FileNotFoundError(
            "You have not set the root path to store your results."
            " Write the path to a path.conf text file in the bin directory"
            " before running the simulation"
        )
    paths = Paths(name, dict(run_no='test'), root_dir_path=root_dir_path)

    print("All output logs can be found in directory ", paths.logs_path)

    traj_file = os.path.join(paths.output_dir_path, 'data.h5')

    # Create an environment that handles running our simulation
    # This initializes an environment
    env = Environment(trajectory=name, filename=traj_file,
                      file_title='{} data'.format(name),
                      comment='{} data'.format(name),
                      add_time=True,
                      automatic_storing=True,
                      log_stdout=False,  # Sends stdout to logs
                      )
    create_shared_logger_data(logger_names=['bin', 'optimizers'],
                              log_levels=['INFO', 'INFO'],
                              log_to_consoles=[True, True],
                              sim_name=name,
                              log_directory=paths.logs_path)
    configure_loggers()

    # Get the trajectory from the environment
    # FIX: the original fetched env.trajectory twice in a row; once is enough.
    traj = env.trajectory

    # Set JUBE params
    traj.f_add_parameter_group("JUBE_params", "Contains JUBE parameters")
    # Execution command
    traj.f_add_parameter_to_group("JUBE_params", "exec",
                                  "python " +
                                  os.path.join(paths.simulation_path,
                                               "run_files/run_optimizee.py"))
    # Paths
    traj.f_add_parameter_to_group("JUBE_params", "paths", paths)

    ## Benchmark function
    function_id = 4
    bench_functs = BenchmarkedFunctions()
    (benchmark_name, benchmark_function), benchmark_parameters = \
        bench_functs.get_function_by_index(function_id, noise=True)

    optimizee_seed = 100
    random_state = np.random.RandomState(seed=optimizee_seed)
    function_tools.plot(benchmark_function, random_state)

    ## Innerloop simulator
    optimizee = FunctionGeneratorOptimizee(traj, benchmark_function,
                                           seed=optimizee_seed)

    # Prepare optimizee for jube runs
    jube.prepare_optimizee(optimizee, paths.simulation_path)

    ## Outerloop optimizer initialization
    n_grid_divs_per_axis = 30
    parameters = GridSearchParameters(param_grid={
        'coords': (optimizee.bound[0], optimizee.bound[1],
                   n_grid_divs_per_axis)
    })
    optimizer = GridSearchOptimizer(
        traj,
        optimizee_create_individual=optimizee.create_individual,
        optimizee_fitness_weights=(-0.1,),
        parameters=parameters)

    # Add post processing
    env.add_postprocessing(optimizer.post_process)

    # Run the simulation with all parameter combinations
    env.run(optimizee.simulate)

    ## Outerloop optimizer end
    optimizer.end(traj)

    # Finally disable logging and close all log-files
    env.disable_logging()
def run_experiment():
    """Run evolution strategies through JUBE.

    Returns:
        tuple: (trajectory storage filename, trajectory name, Paths object).
    """
    name = 'L2L-FUN-ES'
    try:
        with open('bin/path.conf') as f:
            root_dir_path = f.read().strip()
    except FileNotFoundError:
        raise FileNotFoundError(
            "You have not set the root path to store your results."
            " Write the path to a path.conf text file in the bin directory"
            " before running the simulation")
    trajectory_name = 'mirroring-and-fitness-shaping'
    paths = Paths(name, dict(run_num='test'), root_dir_path=root_dir_path,
                  suffix="-" + trajectory_name)

    print("All output logs can be found in directory ", paths.logs_path)

    # Create an environment that handles running our simulation
    # This initializes an environment
    env = Environment(
        trajectory=trajectory_name,
        filename=paths.output_dir_path,
        file_title='{} data'.format(name),
        comment='{} data'.format(name),
        add_time=True,
        automatic_storing=True,
        log_stdout=False,  # Sends stdout to logs
    )
    create_shared_logger_data(logger_names=['bin', 'optimizers'],
                              log_levels=['INFO', 'INFO'],
                              log_to_consoles=[True, True],
                              sim_name=name,
                              log_directory=paths.logs_path)
    configure_loggers()

    # Get the trajectory from the environment
    traj = env.trajectory

    # Set JUBE params
    traj.f_add_parameter_group("JUBE_params", "Contains JUBE parameters")
    # Scheduler parameters
    # Name of the scheduler
    # traj.f_add_parameter_to_group("JUBE_params", "scheduler", "Slurm")
    # Command to submit jobs to the schedulers
    traj.f_add_parameter_to_group("JUBE_params", "submit_cmd", "sbatch")
    # Template file for the particular scheduler
    traj.f_add_parameter_to_group("JUBE_params", "job_file", "job.run")
    # Number of nodes to request for each run
    traj.f_add_parameter_to_group("JUBE_params", "nodes", "1")
    # Requested time for the compute resources
    traj.f_add_parameter_to_group("JUBE_params", "walltime", "00:01:00")
    # MPI Processes per node
    traj.f_add_parameter_to_group("JUBE_params", "ppn", "1")
    # CPU cores per MPI process
    traj.f_add_parameter_to_group("JUBE_params", "cpu_pp", "1")
    # Threads per process
    traj.f_add_parameter_to_group("JUBE_params", "threads_pp", "1")
    # Type of emails to be sent from the scheduler
    traj.f_add_parameter_to_group("JUBE_params", "mail_mode", "ALL")
    # Email to notify events from the scheduler
    traj.f_add_parameter_to_group("JUBE_params", "mail_address", "*****@*****.**")
    # Error file for the job
    traj.f_add_parameter_to_group("JUBE_params", "err_file", "stderr")
    # Output file for the job
    traj.f_add_parameter_to_group("JUBE_params", "out_file", "stdout")
    # JUBE parameters for multiprocessing. Relevant even without scheduler.
    # MPI Processes per job
    traj.f_add_parameter_to_group("JUBE_params", "tasks_per_job", "1")
    # The execution command
    traj.f_add_parameter_to_group(
        "JUBE_params", "exec",
        "python " + os.path.join(paths.root_dir_path,
                                 "run_files/run_optimizee.py"))
    # Ready file for a generation
    traj.f_add_parameter_to_group(
        "JUBE_params", "ready_file",
        os.path.join(paths.root_dir_path, "ready_files/ready_w_"))
    # Path where the job will be executed
    traj.f_add_parameter_to_group("JUBE_params", "work_path",
                                  paths.root_dir_path)
    ### Maybe we should pass the Paths object to avoid defining paths here and there
    traj.f_add_parameter_to_group("JUBE_params", "paths_obj", paths)

    ## Benchmark function
    function_id = 14
    bench_functs = BenchmarkedFunctions()
    (benchmark_name, benchmark_function), benchmark_parameters = \
        bench_functs.get_function_by_index(function_id, noise=True)

    optimizee_seed = 200
    random_state = np.random.RandomState(seed=optimizee_seed)
    function_tools.plot(benchmark_function, random_state)

    ## Innerloop simulator
    optimizee = FunctionGeneratorOptimizee(traj, benchmark_function,
                                           seed=optimizee_seed)

    # Prepare optimizee for jube runs
    jube.prepare_optimizee(optimizee, paths.root_dir_path)

    ## Outerloop optimizer initialization
    optimizer_seed = 1234
    # FIX: np.inf instead of np.Inf — the capitalized alias was removed in
    # NumPy 2.0.
    parameters = EvolutionStrategiesParameters(
        learning_rate=0.1,
        noise_std=1.0,
        mirrored_sampling_enabled=True,
        fitness_shaping_enabled=True,
        pop_size=20,
        n_iteration=1000,
        stop_criterion=np.inf,
        seed=optimizer_seed)

    optimizer = EvolutionStrategiesOptimizer(
        traj,
        optimizee_create_individual=optimizee.create_individual,
        optimizee_fitness_weights=(-1.,),
        parameters=parameters,
        optimizee_bounding_func=optimizee.bounding_func)

    # Add post processing
    env.add_postprocessing(optimizer.post_process)

    # Run the simulation with all parameter combinations
    env.run(optimizee.simulate)

    ## Outerloop optimizer end
    optimizer.end(traj)

    # Finally disable logging and close all log-files
    env.disable_logging()

    return traj.v_storage_service.filename, traj.v_name, paths
def main():
    """Run the genetic algorithm through JUBE (simulation-path variant)."""
    name = 'L2L-FUN-GA'
    try:
        with open('bin/path.conf') as f:
            root_dir_path = f.read().strip()
    except FileNotFoundError:
        raise FileNotFoundError(
            "You have not set the root path to store your results."
            " Write the path to a path.conf text file in the bin directory"
            " before running the simulation")
    paths = Paths(name, dict(run_no='test'), root_dir_path=root_dir_path)

    # FIX: yaml.load without an explicit Loader is deprecated and unsafe on
    # untrusted input; safe_load is the right call for a plain config file.
    with open("logging.yaml") as f:
        l_dict = yaml.safe_load(f)
    log_output_file = os.path.join(paths.results_path,
                                   l_dict['handlers']['file']['filename'])
    l_dict['handlers']['file']['filename'] = log_output_file
    logging.config.dictConfig(l_dict)

    print("All output can be found in file ", log_output_file)
    print(
        "Change the values in logging.yaml to control log level and destination"
    )
    print(
        "e.g. change the handler to console for the loggers you're interesting in to get output to stdout"
    )

    traj_file = os.path.join(paths.output_dir_path, 'data.h5')

    # Create an environment that handles running our simulation
    # This initializes an environment
    env = Environment(
        trajectory=name,
        filename=traj_file,
        file_title='{} data'.format(name),
        comment='{} data'.format(name),
        add_time=True,
        automatic_storing=True,
        log_stdout=False,  # Sends stdout to logs
        log_folder=os.path.join(paths.output_dir_path, 'logs'))

    # Get the trajectory from the environment
    traj = env.trajectory

    # Set JUBE params
    traj.f_add_parameter_group("JUBE_params", "Contains JUBE parameters")
    # The execution command
    traj.f_add_parameter_to_group(
        "JUBE_params", "exec",
        "python " + os.path.join(paths.simulation_path,
                                 "run_files/run_optimizee.py"))
    # Paths
    traj.f_add_parameter_to_group("JUBE_params", "paths", paths)

    ## Benchmark function
    function_id = 4
    bench_functs = BenchmarkedFunctions()
    (benchmark_name, benchmark_function), benchmark_parameters = \
        bench_functs.get_function_by_index(function_id, noise=True)

    optimizee_seed = 100
    random_state = np.random.RandomState(seed=optimizee_seed)
    function_tools.plot(benchmark_function, random_state)

    ## Innerloop simulator
    optimizee = FunctionGeneratorOptimizee(traj, benchmark_function,
                                           seed=optimizee_seed)

    # Prepare optimizee for jube runs
    jube.prepare_optimizee(optimizee, paths.simulation_path)

    ## Outerloop optimizer initialization
    parameters = GeneticAlgorithmParameters(seed=0, popsize=50, CXPB=0.5,
                                            MUTPB=0.3, NGEN=100, indpb=0.02,
                                            tournsize=15, matepar=0.5,
                                            mutpar=1)
    optimizer = GeneticAlgorithmOptimizer(
        traj,
        optimizee_create_individual=optimizee.create_individual,
        optimizee_fitness_weights=(-0.1,),
        parameters=parameters)

    # Add post processing
    env.add_postprocessing(optimizer.post_process)

    # Run the simulation with all parameter combinations
    env.run(optimizee.simulate)

    ## Outerloop optimizer end
    optimizer.end()

    # Finally disable logging and close all log-files
    env.disable_logging()
def main():
    """Cross-entropy optimizer example on a noisy benchmark function."""
    name = 'L2L-FUN-CE'
    try:
        with open('bin/path.conf') as conf_file:
            root_dir_path = conf_file.read().strip()
    except FileNotFoundError:
        raise FileNotFoundError(
            "You have not set the root path to store your results."
            " Write the path to a path.conf text file in the bin directory"
            " before running the simulation")

    paths = Paths(name, dict(run_no='test'), root_dir_path=root_dir_path)
    print("All output logs can be found in directory ", paths.logs_path)

    # Environment that drives the simulation runs (multiprocessing enabled).
    env = Environment(
        trajectory=name,
        filename=os.path.join(paths.output_dir_path, 'data.h5'),
        file_title='{} data'.format(name),
        comment='{} data'.format(name),
        add_time=True,
        freeze_input=True,
        multiproc=True,
        automatic_storing=True,
        log_stdout=False,  # Sends stdout to logs
    )
    create_shared_logger_data(logger_names=['bin', 'optimizers'],
                              log_levels=['INFO', 'INFO'],
                              log_to_consoles=[True, True],
                              sim_name=name,
                              log_directory=paths.logs_path)
    configure_loggers()

    traj = env.trajectory

    # Noisy benchmark function number 14.
    (benchmark_name, benchmark_function), benchmark_parameters = \
        BenchmarkedFunctions().get_function_by_index(14, noise=True)

    seed = 100
    function_tools.plot(benchmark_function, np.random.RandomState(seed=seed))

    # Inner-loop simulator.
    optimizee = FunctionGeneratorOptimizee(traj, benchmark_function,
                                           seed=seed)

    # Outer-loop optimizer.
    parameters = CrossEntropyParameters(
        pop_size=50,
        rho=0.9,
        smoothing=0.0,
        temp_decay=0,
        n_iteration=160,
        distribution=NoisyBayesianGaussianMixture(
            n_components=3,
            noise_magnitude=1.,
            noise_decay=0.9,
            weight_concentration_prior=1.5),
        stop_criterion=np.inf,
        seed=103)
    optimizer = CrossEntropyOptimizer(
        traj,
        optimizee_create_individual=optimizee.create_individual,
        optimizee_fitness_weights=(-0.1,),
        parameters=parameters,
        optimizee_bounding_func=optimizee.bounding_func)

    # Post-process each generation, run all combinations, then shut down.
    env.add_postprocessing(optimizer.post_process)
    env.run(optimizee.simulate)
    optimizer.end(traj)
    env.disable_logging()
def run_experiment():
    """Run evolution strategies through JUBE (simulation-path variant).

    Returns:
        tuple: (trajectory storage filename, trajectory name, Paths object).
    """
    name = 'L2L-FUN-ES'
    try:
        with open('bin/path.conf') as f:
            root_dir_path = f.read().strip()
    except FileNotFoundError:
        raise FileNotFoundError(
            "You have not set the root path to store your results."
            " Write the path to a path.conf text file in the bin directory"
            " before running the simulation")
    trajectory_name = 'mirroring-and-fitness-shaping'
    paths = Paths(name, dict(run_num='test'), root_dir_path=root_dir_path,
                  suffix="-" + trajectory_name)

    print("All output logs can be found in directory ", paths.logs_path)

    # Create an environment that handles running our simulation
    # This initializes an environment
    env = Environment(
        trajectory=trajectory_name,
        filename=paths.output_dir_path,
        file_title='{} data'.format(name),
        comment='{} data'.format(name),
        add_time=True,
        automatic_storing=True,
        log_stdout=False,  # Sends stdout to logs
    )
    create_shared_logger_data(logger_names=['bin', 'optimizers'],
                              log_levels=['INFO', 'INFO'],
                              log_to_consoles=[True, True],
                              sim_name=name,
                              log_directory=paths.logs_path)
    configure_loggers()

    # Get the trajectory from the environment
    traj = env.trajectory

    # Set JUBE params
    traj.f_add_parameter_group("JUBE_params", "Contains JUBE parameters")
    traj.f_add_parameter_to_group(
        "JUBE_params", "exec",
        "python " + os.path.join(paths.simulation_path,
                                 "run_files/run_optimizee.py"))
    # Paths
    traj.f_add_parameter_to_group("JUBE_params", "paths", paths)

    ## Benchmark function
    function_id = 14
    bench_functs = BenchmarkedFunctions()
    (benchmark_name, benchmark_function), benchmark_parameters = \
        bench_functs.get_function_by_index(function_id, noise=True)

    optimizee_seed = 200
    random_state = np.random.RandomState(seed=optimizee_seed)
    function_tools.plot(benchmark_function, random_state)

    ## Innerloop simulator
    optimizee = FunctionGeneratorOptimizee(traj, benchmark_function,
                                           seed=optimizee_seed)

    # Prepare optimizee for jube runs
    jube.prepare_optimizee(optimizee, paths.simulation_path)

    ## Outerloop optimizer initialization
    optimizer_seed = 1234
    # FIX: np.inf instead of np.Inf — the capitalized alias was removed in
    # NumPy 2.0.
    parameters = EvolutionStrategiesParameters(
        learning_rate=0.1,
        noise_std=1.0,
        mirrored_sampling_enabled=True,
        fitness_shaping_enabled=True,
        pop_size=20,
        n_iteration=1000,
        stop_criterion=np.inf,
        seed=optimizer_seed)

    optimizer = EvolutionStrategiesOptimizer(
        traj,
        optimizee_create_individual=optimizee.create_individual,
        optimizee_fitness_weights=(-1.,),
        parameters=parameters,
        optimizee_bounding_func=optimizee.bounding_func)

    # Add post processing
    env.add_postprocessing(optimizer.post_process)

    # Run the simulation with all parameter combinations
    env.run(optimizee.simulate)

    ## Outerloop optimizer end
    optimizer.end(traj)

    # Finally disable logging and close all log-files
    env.disable_logging()

    return traj.v_storage_service.filename, traj.v_name, paths
def main():
    """Run the simulated-annealing benchmark (function id 14) via L2L/JUBE.

    Reads the results root path from ``bin/path.conf``, sets up the pypet
    ``Environment`` and JUBE execution parameters, then anneals a noisy
    benchmark function with a quadratic-adaptive cooling schedule.

    Raises:
        FileNotFoundError: if ``bin/path.conf`` is missing.
    """
    name = 'L2L-FunctionGenerator-SA'
    try:
        with open('bin/path.conf') as f:
            root_dir_path = f.read().strip()
    except FileNotFoundError:
        raise FileNotFoundError(
            "You have not set the root path to store your results."
            " Write the path to a path.conf text file in the bin directory"
            " before running the simulation")
    paths = Paths(name, dict(run_no='test'), root_dir_path=root_dir_path)

    print("All output logs can be found in directory ", paths.logs_path)

    traj_file = os.path.join(paths.output_dir_path, 'data.h5')

    # Create an environment that handles running our simulation
    # This initializes an environment
    env = Environment(
        trajectory=name,
        filename=traj_file,
        file_title='{} data'.format(name),
        comment='{} data'.format(name),
        add_time=True,
        # freeze_input=True,
        # multiproc=True,
        # use_scoop=True,
        # wrap_mode=pypetconstants.WRAP_MODE_LOCAL,
        automatic_storing=True,
        log_stdout=False,  # Sends stdout to logs
    )
    create_shared_logger_data(logger_names=['bin', 'optimizers'],
                              log_levels=['INFO', 'INFO'],
                              log_to_consoles=[True, True],
                              sim_name=name,
                              log_directory=paths.logs_path)
    configure_loggers()

    # Get the trajectory from the environment
    traj = env.trajectory

    # Set JUBE params
    traj.f_add_parameter_group("JUBE_params", "Contains JUBE parameters")
    # Execution command
    traj.f_add_parameter_to_group(
        "JUBE_params", "exec",
        "python " + os.path.join(paths.simulation_path,
                                 "run_files/run_optimizee.py"))
    # Paths
    traj.f_add_parameter_to_group("JUBE_params", "paths", paths)

    ## Benchmark function
    function_id = 14
    bench_functs = BenchmarkedFunctions()
    (benchmark_name, benchmark_function), benchmark_parameters = \
        bench_functs.get_function_by_index(function_id, noise=True)

    optimizee_seed = 100
    random_state = np.random.RandomState(seed=optimizee_seed)
    function_tools.plot(benchmark_function, random_state)

    ## Innerloop simulator
    optimizee = FunctionGeneratorOptimizee(traj, benchmark_function,
                                           seed=optimizee_seed)

    # Prepare optimizee for jube runs
    jube.prepare_optimizee(optimizee, paths.simulation_path)

    ## Outerloop optimizer initialization
    # np.inf (not the removed NumPy-2.0 alias np.Inf) disables early stopping.
    parameters = SimulatedAnnealingParameters(
        n_parallel_runs=50, noisy_step=.03, temp_decay=.99, n_iteration=100,
        stop_criterion=np.inf, seed=np.random.randint(1e5),
        cooling_schedule=AvailableCoolingSchedules.QUADRATIC_ADDAPTIVE)

    optimizer = SimulatedAnnealingOptimizer(
        traj,
        optimizee_create_individual=optimizee.create_individual,
        optimizee_fitness_weights=(-1, ),
        parameters=parameters,
        optimizee_bounding_func=optimizee.bounding_func)

    # Add post processing
    env.add_postprocessing(optimizer.post_process)

    # Run the simulation with all parameter combinations
    env.run(optimizee.simulate)

    ## Outerloop optimizer end
    optimizer.end(traj)

    # Finally disable logging and close all log-files
    env.disable_logging()
def main():
    """Run the gradient-descent (RMSProp) benchmark with full JUBE/Slurm setup.

    Reads the results root path from ``bin/path.conf``, configures a
    multiprocessing pypet ``Environment`` plus the JUBE scheduler
    parameters, then optimizes noisy benchmark function id 4.

    Raises:
        FileNotFoundError: if ``bin/path.conf`` is missing.
    """
    name = 'L2L-FUN-GD'
    try:
        with open('bin/path.conf') as f:
            root_dir_path = f.read().strip()
    except FileNotFoundError:
        raise FileNotFoundError(
            "You have not set the root path to store your results."
            " Write the path to a path.conf text file in the bin directory"
            " before running the simulation")
    paths = Paths(name, dict(run_no='test'), root_dir_path=root_dir_path)

    print("All output logs can be found in directory ", paths.logs_path)

    traj_file = os.path.join(paths.output_dir_path, 'data.h5')

    # Create an environment that handles running our simulation
    # This initializes an environment
    env = Environment(
        trajectory=name,
        filename=traj_file,
        file_title='{} data'.format(name),
        comment='{} data'.format(name),
        add_time=True,
        freeze_input=True,
        multiproc=True,
        automatic_storing=True,
        log_stdout=False,  # Sends stdout to logs
    )
    create_shared_logger_data(logger_names=['bin', 'optimizers'],
                              log_levels=['INFO', 'INFO'],
                              log_to_consoles=[True, True],
                              sim_name=name,
                              log_directory=paths.logs_path)
    configure_loggers()

    # Get the trajectory from the environment
    traj = env.trajectory

    # Set JUBE params
    traj.f_add_parameter_group("JUBE_params", "Contains JUBE parameters")
    # Scheduler parameters
    # Name of the scheduler
    # traj.f_add_parameter_to_group("JUBE_params", "scheduler", "Slurm")
    # Command to submit jobs to the schedulers
    traj.f_add_parameter_to_group("JUBE_params", "submit_cmd", "sbatch")
    # Template file for the particular scheduler
    traj.f_add_parameter_to_group("JUBE_params", "job_file", "job.run")
    # Number of nodes to request for each run
    traj.f_add_parameter_to_group("JUBE_params", "nodes", "1")
    # Requested time for the compute resources
    traj.f_add_parameter_to_group("JUBE_params", "walltime", "00:01:00")
    # MPI Processes per node
    traj.f_add_parameter_to_group("JUBE_params", "ppn", "1")
    # CPU cores per MPI process
    traj.f_add_parameter_to_group("JUBE_params", "cpu_pp", "1")
    # Threads per process
    traj.f_add_parameter_to_group("JUBE_params", "threads_pp", "1")
    # Type of emails to be sent from the scheduler
    traj.f_add_parameter_to_group("JUBE_params", "mail_mode", "ALL")
    # Email to notify events from the scheduler
    traj.f_add_parameter_to_group("JUBE_params", "mail_address",
                                  "*****@*****.**")
    # Error file for the job
    traj.f_add_parameter_to_group("JUBE_params", "err_file", "stderr")
    # Output file for the job
    traj.f_add_parameter_to_group("JUBE_params", "out_file", "stdout")
    # JUBE parameters for multiprocessing. Relevant even without scheduler.
    # MPI Processes per job
    traj.f_add_parameter_to_group("JUBE_params", "tasks_per_job", "1")
    # The execution command
    traj.f_add_parameter_to_group(
        "JUBE_params", "exec",
        "mpirun python3 " + root_dir_path + "/run_files/run_optimizee.py")
    # Ready file for a generation
    traj.f_add_parameter_to_group("JUBE_params", "ready_file",
                                  root_dir_path + "/readyfiles/ready_w_")
    # Path where the job will be executed
    traj.f_add_parameter_to_group("JUBE_params", "work_path", root_dir_path)

    ## Benchmark function
    function_id = 4
    bench_functs = BenchmarkedFunctions()
    (benchmark_name, benchmark_function), benchmark_parameters = \
        bench_functs.get_function_by_index(function_id, noise=True)

    optimizee_seed = 100
    random_state = np.random.RandomState(seed=optimizee_seed)
    function_tools.plot(benchmark_function, random_state)

    ## Innerloop simulator
    optimizee = FunctionGeneratorOptimizee(traj, benchmark_function,
                                           seed=optimizee_seed)

    # Prepare optimizee for jube runs
    jube.prepare_optimizee(optimizee, root_dir_path)

    ## Outerloop optimizer initialization
    # Alternative gradient-descent flavours (np.inf disables early stopping;
    # the NumPy-2.0-removed alias np.Inf must not be used):
    # parameters = ClassicGDParameters(learning_rate=0.01,
    #                                  exploration_step_size=0.01,
    #                                  n_random_steps=5, n_iteration=100,
    #                                  stop_criterion=np.inf)
    # parameters = AdamParameters(learning_rate=0.01,
    #                             exploration_step_size=0.01,
    #                             n_random_steps=5, first_order_decay=0.8,
    #                             second_order_decay=0.8, n_iteration=100,
    #                             stop_criterion=np.inf)
    # parameters = StochasticGDParameters(learning_rate=0.01,
    #                                     stochastic_deviation=1,
    #                                     stochastic_decay=0.99,
    #                                     exploration_step_size=0.01,
    #                                     n_random_steps=5, n_iteration=100,
    #                                     stop_criterion=np.inf)
    parameters = RMSPropParameters(learning_rate=0.01,
                                   exploration_step_size=0.01,
                                   n_random_steps=5, momentum_decay=0.5,
                                   n_iteration=100, stop_criterion=np.inf,
                                   seed=99)

    optimizer = GradientDescentOptimizer(
        traj,
        optimizee_create_individual=optimizee.create_individual,
        optimizee_fitness_weights=(0.1, ),
        parameters=parameters,
        optimizee_bounding_func=optimizee.bounding_func)

    # Add post processing
    env.add_postprocessing(optimizer.post_process)

    # Run the simulation with all parameter combinations
    env.run(optimizee.simulate)

    ## Outerloop optimizer end
    optimizer.end(traj)

    # Finally disable logging and close all log-files
    env.disable_logging()
def main():
    """Benchmark every (optimizer, function) pair over all known functions.

    Builds a list of optimizer classes with parameter factories, then runs
    each combination of optimizer and benchmark function in a fresh pypet
    ``Environment``.

    Raises:
        FileNotFoundError: if ``bin/path.conf`` is missing.
    """
    name = 'L2L-FUNALL'
    try:
        with open('bin/path.conf') as f:
            root_dir_path = f.read().strip()
    except FileNotFoundError:
        raise FileNotFoundError(
            "You have not set the root path to store your results."
            " Write the path to a path.conf text file in the bin directory"
            " before running the simulation"
        )
    paths = Paths(name, dict(run_no='test'), root_dir_path=root_dir_path)

    print("All output logs can be found in directory ", paths.logs_path)

    traj_file = os.path.join(paths.output_dir_path, 'data.h5')

    n_iterations = 100

    # NOTE: Need to use lambdas here since we want the distributions within
    # CE, FACE etc. optimizers to be reinitialized afresh each time since it
    # seems like they are stateful.
    # np.inf (not the NumPy-2.0-removed alias np.Inf) disables early stopping.
    optimizers = [
        (CrossEntropyOptimizer,
         lambda: CrossEntropyParameters(
             pop_size=50, rho=0.2, smoothing=0.0, temp_decay=0,
             n_iteration=n_iterations,
             distribution=NoisyGaussian(noise_decay=0.95, noise_bias=0.05))),
        (FACEOptimizer,
         lambda: FACEParameters(
             min_pop_size=20, max_pop_size=50, n_elite=10, smoothing=0.2,
             temp_decay=0, n_iteration=n_iterations,
             distribution=Gaussian(), n_expand=5)),
        (GradientDescentOptimizer,
         lambda: RMSPropParameters(
             learning_rate=0.01, exploration_rate=0.01, n_random_steps=5,
             momentum_decay=0.5, n_iteration=n_iterations,
             stop_criterion=np.inf)),
        (GradientDescentOptimizer,
         lambda: ClassicGDParameters(
             learning_rate=0.01, exploration_rate=0.01, n_random_steps=5,
             n_iteration=n_iterations, stop_criterion=np.inf)),
        (GradientDescentOptimizer,
         lambda: AdamParameters(
             learning_rate=0.01, exploration_rate=0.01, n_random_steps=5,
             first_order_decay=0.8, second_order_decay=0.8,
             n_iteration=n_iterations, stop_criterion=np.inf)),
        (GradientDescentOptimizer,
         lambda: StochasticGDParameters(
             learning_rate=0.01, stochastic_deviation=1,
             stochastic_decay=0.99, exploration_rate=0.01, n_random_steps=5,
             n_iteration=n_iterations, stop_criterion=np.inf))
    ]

    # NOTE: Benchmark functions
    bench_functs = BenchmarkedFunctions()
    function_ids = range(len(bench_functs.function_name_map))

    for function_id, (optimizer_class, optimizer_parameters_fn) in \
            itertools.product(function_ids, optimizers):
        logger.info("Running benchmark for %s optimizer and function id %d",
                    optimizer_class, function_id)
        optimizer_parameters = optimizer_parameters_fn()

        # Create an environment that handles running our simulation
        # This initializes an environment
        env = Environment(trajectory=name,
                          filename=traj_file,
                          file_title='{} data'.format(name),
                          comment='{} data'.format(name),
                          # freeze_input=True,
                          # multiproc=True,
                          # use_scoop=True,
                          # wrap_mode=pypetconstants.WRAP_MODE_LOCAL,
                          add_time=True,
                          automatic_storing=True,
                          log_stdout=False,  # Sends stdout to logs
                          )
        create_shared_logger_data(logger_names=['bin', 'optimizers'],
                                  log_levels=['INFO', 'INFO'],
                                  log_to_consoles=[True, True],
                                  sim_name=name,
                                  log_directory=paths.logs_path)
        configure_loggers()

        # Get the trajectory from the environment
        traj = env.trajectory

        (benchmark_name, benchmark_function), benchmark_parameters = \
            bench_functs.get_function_by_index(function_id, noise=True)

        optimizee = FunctionGeneratorOptimizee(traj, benchmark_function)

        optimizee_fitness_weights = -1.
        # Gradient descent does descent!
        if optimizer_class == GradientDescentOptimizer:
            optimizee_fitness_weights = +1.
        # Grid search optimizer input depends on optimizee!
        elif optimizer_class == GridSearchOptimizer:
            optimizer_parameters = GridSearchParameters(param_grid={
                'coords': (optimizee.bound[0], optimizee.bound[1], 30)
            })

        optimizer = optimizer_class(
            traj,
            optimizee_create_individual=optimizee.create_individual,
            optimizee_fitness_weights=(optimizee_fitness_weights,),
            parameters=optimizer_parameters,
            optimizee_bounding_func=optimizee.bounding_func)

        # Add post processing
        env.add_postprocessing(optimizer.post_process)

        # Run the simulation with all parameter combinations
        env.run(optimizee.simulate)

        # NOTE: Outerloop optimizer end
        optimizer.end(traj)

        # Finally disable logging and close all log-files
        env.disable_logging()
def main():
    """Run the gradient-descent (RMSProp) benchmark via L2L/JUBE.

    Reads the results root path from ``bin/path.conf``, sets up a
    multiprocessing pypet ``Environment`` with JUBE execution parameters,
    then optimizes noisy benchmark function id 4.

    Raises:
        FileNotFoundError: if ``bin/path.conf`` is missing.
    """
    name = 'L2L-FUN-GD'
    try:
        with open('bin/path.conf') as f:
            root_dir_path = f.read().strip()
    except FileNotFoundError:
        raise FileNotFoundError(
            "You have not set the root path to store your results."
            " Write the path to a path.conf text file in the bin directory"
            " before running the simulation")
    paths = Paths(name, dict(run_no='test'), root_dir_path=root_dir_path)

    print("All output logs can be found in directory ", paths.logs_path)

    traj_file = os.path.join(paths.output_dir_path, 'data.h5')

    # Create an environment that handles running our simulation
    # This initializes an environment
    env = Environment(
        trajectory=name,
        filename=traj_file,
        file_title='{} data'.format(name),
        comment='{} data'.format(name),
        add_time=True,
        freeze_input=True,
        multiproc=True,
        automatic_storing=True,
        log_stdout=False,  # Sends stdout to logs
    )
    create_shared_logger_data(logger_names=['bin', 'optimizers'],
                              log_levels=['INFO', 'INFO'],
                              log_to_consoles=[True, True],
                              sim_name=name,
                              log_directory=paths.logs_path)
    configure_loggers()

    # Get the trajectory from the environment
    traj = env.trajectory

    # Set JUBE params
    traj.f_add_parameter_group("JUBE_params", "Contains JUBE parameters")
    # Execution command
    traj.f_add_parameter_to_group(
        "JUBE_params", "exec",
        "python " + os.path.join(paths.simulation_path,
                                 "run_files/run_optimizee.py"))
    # Paths
    traj.f_add_parameter_to_group("JUBE_params", "paths", paths)

    ## Benchmark function
    function_id = 4
    bench_functs = BenchmarkedFunctions()
    (benchmark_name, benchmark_function), benchmark_parameters = \
        bench_functs.get_function_by_index(function_id, noise=True)

    optimizee_seed = 100
    random_state = np.random.RandomState(seed=optimizee_seed)
    function_tools.plot(benchmark_function, random_state)

    ## Innerloop simulator
    optimizee = FunctionGeneratorOptimizee(traj, benchmark_function,
                                           seed=optimizee_seed)

    # Prepare optimizee for jube runs
    jube.prepare_optimizee(optimizee, paths.simulation_path)

    ## Outerloop optimizer initialization
    # Alternative gradient-descent flavours (np.inf disables early stopping;
    # the NumPy-2.0-removed alias np.Inf must not be used):
    # parameters = ClassicGDParameters(learning_rate=0.01,
    #                                  exploration_step_size=0.01,
    #                                  n_random_steps=5, n_iteration=100,
    #                                  stop_criterion=np.inf)
    # parameters = AdamParameters(learning_rate=0.01,
    #                             exploration_step_size=0.01,
    #                             n_random_steps=5, first_order_decay=0.8,
    #                             second_order_decay=0.8, n_iteration=100,
    #                             stop_criterion=np.inf)
    # parameters = StochasticGDParameters(learning_rate=0.01,
    #                                     stochastic_deviation=1,
    #                                     stochastic_decay=0.99,
    #                                     exploration_step_size=0.01,
    #                                     n_random_steps=5, n_iteration=100,
    #                                     stop_criterion=np.inf)
    parameters = RMSPropParameters(learning_rate=0.01,
                                   exploration_step_size=0.01,
                                   n_random_steps=5, momentum_decay=0.5,
                                   n_iteration=100, stop_criterion=np.inf,
                                   seed=99)

    optimizer = GradientDescentOptimizer(
        traj,
        optimizee_create_individual=optimizee.create_individual,
        optimizee_fitness_weights=(0.1, ),
        parameters=parameters,
        optimizee_bounding_func=optimizee.bounding_func)

    # Add post processing
    env.add_postprocessing(optimizer.post_process)

    # Run the simulation with all parameter combinations
    env.run(optimizee.simulate)

    ## Outerloop optimizer end
    optimizer.end(traj)

    # Finally disable logging and close all log-files
    env.disable_logging()
def main():
    """Grid-search the noisy Ackley2d benchmark over its coordinate bounds.

    Uses the ``Experiment`` convenience API: prepare the trajectory, build
    the function-generator optimizee and a ``GridSearchOptimizer``, run the
    experiment, and print the best coordinates found.
    """
    experiment = Experiment(root_dir_path='Data_Produced/L2L')
    name = 'L2L-FUN-GS'
    trajectory, _ = experiment.prepare_experiment(name=name,
                                                  log_stdout=True,
                                                  multiprocessing=False)

    # Benchmark: the Ackley function has a large hole at the centre
    # surrounded by small hill-like regions, so algorithms can get trapped
    # in one of its many local minima. Uses the recommended constants
    # a = 20, b = 0.2, c = 2*pi. Reference: https://www.sfu.ca/~ssurjano/ackley.html
    ackley_2d_id = 4  # Select Ackley2d
    benchmarks = BenchmarkedFunctions()
    (benchmark_name, benchmark_function), benchmark_parameters = \
        benchmarks.get_function_by_index(ackley_2d_id, noise=True)

    optimizee_seed = 100
    random_state = np.random.RandomState(seed=optimizee_seed)
    # function_tools.plot(benchmark_function, random_state)

    # Inner loop: the optimizee evaluates the benchmark function.
    optimizee = FunctionGeneratorOptimizee(trajectory, benchmark_function,
                                           seed=optimizee_seed)

    # Outer loop: exhaustive grid search, 30 divisions per coordinate axis.
    divisions_per_axis = 30
    coord_grid = {
        'coords': (optimizee.bound[0], optimizee.bound[1],
                   divisions_per_axis)
    }
    parameters = GridSearchParameters(param_grid=coord_grid)
    optimizer = GridSearchOptimizer(
        trajectory,
        optimizee_create_individual=optimizee.create_individual,
        optimizee_fitness_weights=(-0.1, ),  # minimize!
        parameters=parameters)

    # Run and finalize the experiment, then report the winner.
    experiment.run_experiment(optimizer=optimizer,
                              optimizee=optimizee,
                              optimizee_parameters=parameters)
    experiment.end_experiment(optimizer)
    print(f"best: {experiment.optimizer.best_individual['coords']}")