Example 1
def main():
    name = 'L2L-FunctionGenerator-PT'
    try:
        with open('bin/path.conf') as f:
            root_dir_path = f.read().strip()
    except FileNotFoundError:
        raise FileNotFoundError(
            "You have not set the root path to store your results."
            " Write the path to a path.conf text file in the bin directory"
            " before running the simulation")
    paths = Paths(name, dict(run_no='test'), root_dir_path=root_dir_path)

    print("All output logs can be found in directory ", paths.logs_path)

    traj_file = os.path.join(paths.output_dir_path, 'data.h5')

    # Create an environment that handles running our simulation
    # This initializes an environment
    env = Environment(
        trajectory=name,
        filename=traj_file,
        file_title='{} data'.format(name),
        comment='{} data'.format(name),
        add_time=True,
        # freeze_input=True,
        # multiproc=True,
        # use_scoop=True,
        # wrap_mode=pypetconstants.WRAP_MODE_LOCAL,
        automatic_storing=True,
        log_stdout=False,  # Sends stdout to logs
    )
    create_shared_logger_data(logger_names=['bin', 'optimizers'],
                              log_levels=['INFO', 'INFO'],
                              log_to_consoles=[True, True],
                              sim_name=name,
                              log_directory=paths.logs_path)
    configure_loggers()

    # Get the trajectory from the environment
    traj = env.trajectory

    ## Benchmark function
    function_id = 14
    bench_functs = BenchmarkedFunctions()
    (benchmark_name, benchmark_function), benchmark_parameters = \
        bench_functs.get_function_by_index(function_id, noise=True)

    optimizee_seed = 100
    random_state = np.random.RandomState(seed=optimizee_seed)
    function_tools.plot(benchmark_function, random_state)

    ## Innerloop simulator
    optimizee = FunctionGeneratorOptimizee(traj,
                                           benchmark_function,
                                           seed=optimizee_seed)

    #--------------------------------------------------------------------------
    # configure settings for parallel tempering:
    # for each of the parallel runs choose
    # a cooling schedule
    # an upper and lower temperature bound
    # a decay parameter
    #--------------------------------------------------------------------------

    # specify the number of parallel runs. Each of the following containers
    # must have one entry per parallel run
    n_parallel_runs = 5

    # for detailed information on the cooling schedules see either the wiki or
    # the documentation in l2l.optimizers.paralleltempering.optimizer
    cooling_schedules = [
        AvailableCoolingSchedules.EXPONENTIAL_ADDAPTIVE,
        AvailableCoolingSchedules.EXPONENTIAL_ADDAPTIVE,
        AvailableCoolingSchedules.EXPONENTIAL_ADDAPTIVE,
        AvailableCoolingSchedules.LINEAR_ADDAPTIVE,
        AvailableCoolingSchedules.LINEAR_ADDAPTIVE
    ]

    # each bound has to lie within [0, 1]; the first entry (the starting
    # temperature) has to be larger than the second (the ending temperature)
    temperature_bounds = np.array(
        [[0.8, 0], [0.7, 0], [0.6, 0], [1, 0.1], [0.9, 0.2]])

    # decay parameter for each schedule. If needed can be different for each
    # schedule
    decay_parameters = np.full(n_parallel_runs, 0.99)
    #--------------------------------------------------------------------------
    # end of configuration
    #--------------------------------------------------------------------------

    # Check that the temperature bounds and decay parameters are reasonable.
    assert np.all((temperature_bounds >= 0) & (temperature_bounds <= 1)), \
        "Temperature bounds are not within specifications."
    assert np.all(temperature_bounds[:, 0] > temperature_bounds[:, 1]), \
        "Starting temperatures have to be larger than ending temperatures."
    assert np.all((decay_parameters >= 0) & (decay_parameters <= 1)), \
        "Decay parameters are not within specifications."

    ## Outerloop optimizer initialization
    parameters = ParallelTemperingParameters(
        n_parallel_runs=n_parallel_runs,
        noisy_step=.03,
        n_iteration=1000,
        stop_criterion=np.inf,
        seed=np.random.randint(1e5),
        cooling_schedules=cooling_schedules,
        temperature_bounds=temperature_bounds,
        decay_parameters=decay_parameters)
    optimizer = ParallelTemperingOptimizer(
        traj,
        optimizee_create_individual=optimizee.create_individual,
        optimizee_fitness_weights=(-1, ),
        parameters=parameters,
        optimizee_bounding_func=optimizee.bounding_func)

    # Add post processing
    env.add_postprocessing(optimizer.post_process)

    # Run the simulation with all parameter combinations
    env.run(optimizee.simulate)

    ## Outerloop optimizer end
    optimizer.end(traj)

    # Finally disable logging and close all log-files
    env.disable_logging()
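
Example 1 omits its import header and entry point. The sketch below shows what they could look like; the l2l module paths are assumptions based on the IGITUGraz/L2L package layout and may differ between versions.

# Hypothetical import header and entry point for Example 1; the l2l module
# paths below are assumptions and may differ between package versions.
import os

import numpy as np

from l2l.utils.environment import Environment
from l2l.paths import Paths
from l2l.logging_tools import create_shared_logger_data, configure_loggers
from l2l.optimizees.functions import tools as function_tools
from l2l.optimizees.functions.benchmarked_functions import BenchmarkedFunctions
from l2l.optimizees.functions.optimizee import FunctionGeneratorOptimizee
from l2l.optimizers.paralleltempering.optimizer import (
    AvailableCoolingSchedules, ParallelTemperingOptimizer,
    ParallelTemperingParameters)

if __name__ == '__main__':
    main()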
Example 2
class Experiment(object):
    def __init__(self, root_dir_path):
        """
        Prepares and starts the l2l simulation.

        For an example see `L2L/bin/l2l-template.py`

        :param root_dir_path: str, path to the results folder. Accepts relative
        paths. Checks if the folder exists and creates it if not.
        """
        self.root_dir_path = os.path.abspath(root_dir_path)
        self.logger = logging.getLogger('bin.l2l')
        self.paths = None
        self.env = None
        self.traj = None
        self.optimizee = None
        self.optimizer = None

    def prepare_experiment(self, **kwargs):
        """
        Prepare the experiment by creating the environment and the trajectory.
        :param kwargs: optional dictionary, contains
            - name: str, name of the run, Default: L2L-run
            - trajectory_name: str, name of the trajectory, Default: trajectory
            - log_stdout: bool, if stdout should be sent to logs, Default:False
            - jube_parameter: dict, User specified parameter for jube.
                See notes section for default jube parameter
            - multiprocessing: bool, enable multiprocessing, Default: False
        :return traj, trajectory object
        :return all_jube_params, dict, a dictionary with all parameters for jube
            given by the user and default ones

        :notes
           Default JUBE parameters are:
            - scheduler: None,
            - submit_cmd: sbatch,
            - job_file: job.run,
            - nodes: 1,
            - walltime: 01:00:00,
            - ppn: 1,
            - cpu_pp: 1,
            - threads_pp: 4,
            - mail_mode: ALL,
            - err_file: stderr,
            - out_file: stdout,
            - tasks_per_job: 1,
            - exec: python3 + self.paths.simulation_path +
                "run_files/run_optimizee.py"
            - ready_file: self.paths.root_dir_path + "ready_files/ready_w_"
            - work_path: self.paths.root_dir_path,
            - paths_obj: self.paths
        """
        name = kwargs.get('name', 'L2L-run')
        if not os.path.isdir(self.root_dir_path):
            os.makedirs(self.root_dir_path)
            print('Created a folder at {}'.format(self.root_dir_path))

        trajectory_name = kwargs.get('trajectory_name', 'trajectory')

        self.paths = Paths(name, {},
                           root_dir_path=self.root_dir_path,
                           suffix="-" + trajectory_name)

        print("All output logs can be found in directory ",
              self.paths.logs_path)

        # Create an environment that handles running our simulation
        # This initializes an environment
        self.env = Environment(
            trajectory=trajectory_name,
            filename=self.paths.output_dir_path,
            file_title='{} data'.format(name),
            comment='{} data'.format(name),
            add_time=True,
            automatic_storing=True,
            log_stdout=kwargs.get('log_stdout', False),  # Sends stdout to logs
            multiprocessing=kwargs.get('multiprocessing', False))

        create_shared_logger_data(logger_names=['bin', 'optimizers'],
                                  log_levels=['INFO', 'INFO'],
                                  log_to_consoles=[True, True],
                                  sim_name=name,
                                  log_directory=self.paths.logs_path)
        configure_loggers()

        # Get the trajectory from the environment
        self.traj = self.env.trajectory

        # Set JUBE params
        default_jube_params = {
            # "scheduler": "None",
            "submit_cmd": "sbatch",
            "job_file": "job.run",
            "nodes": "1",
            "walltime": "01:00:00",
            "ppn": "1",
            "cpu_pp": "1",
            "threads_pp": "4",
            "mail_mode": "ALL",
            "err_file": "stderr",
            "out_file": "stdout",
            "tasks_per_job": "1",
            "exec": "python3 " + os.path.join(self.paths.simulation_path,
                                              "run_files/run_optimizee.py"),
            "ready_file": os.path.join(self.paths.root_dir_path,
                                       "ready_files/ready_w_"),
            "work_path": self.paths.root_dir_path,
            "paths_obj": self.paths,
        }
        # Will contain all jube parameters
        all_jube_params = {}
        self.traj.f_add_parameter_group("JUBE_params",
                                        "Contains JUBE parameters")
        # Go through the parameter dictionary and add to the trajectory
        if kwargs.get('jube_parameter'):
            for k, v in kwargs['jube_parameter'].items():
                if k == "exec":
                    val = v + " " + os.path.join(self.paths.simulation_path,
                                                 "run_files/run_optimizee.py")
                    self.traj.f_add_parameter_to_group("JUBE_params", k, val)
                    all_jube_params[k] = val
                else:
                    self.traj.f_add_parameter_to_group("JUBE_params", k, v)
                    all_jube_params[k] = v
        # Default parameters are added only if not already set by the user,
        # so user-supplied values are never overwritten
        user_jube_params = kwargs.get('jube_parameter') or {}
        for k, v in default_jube_params.items():
            if k not in user_jube_params:
                self.traj.f_add_parameter_to_group("JUBE_params", k, v)
                all_jube_params[k] = v
        print('JUBE parameters used: {}'.format(all_jube_params))
        return self.traj, all_jube_params

    def run_experiment(self,
                       optimizer,
                       optimizee,
                       optimizer_parameters=None,
                       optimizee_parameters=None):
        """
        Runs the simulation with all parameter combinations

        Optimizee and optimizer objects are required, as well as their
        parameters as namedtuples.

        :param optimizee: optimizee object
        :param optimizee_parameters: Namedtuple, optional, parameters of the optimizee
        :param optimizer: optimizer object
        :param optimizer_parameters: Namedtuple, optional, parameters of the optimizer
        """
        self.optimizee = optimizee
        self.optimizer = optimizer
        self.logger.info("Optimizee parameters: %s", optimizee_parameters)
        self.logger.info("Optimizer parameters: %s", optimizer_parameters)
        jube.prepare_optimizee(optimizee, self.paths.simulation_path)
        # Add post processing
        self.env.add_postprocessing(optimizer.post_process)
        # Run the simulation
        self.env.run(optimizee.simulate)

    def end_experiment(self, optimizer):
        """
        Ends the experiment and disables the logging

        :param optimizer: optimizer object
        :return traj, trajectory object
        :return paths, Paths object
        """
        # Outer-loop optimizer end
        optimizer.end(self.traj)
        # Finally disable logging and close all log-files
        self.env.disable_logging()
        return self.traj, self.paths
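
The Experiment class above wraps the whole prepare/run/end cycle. A minimal usage sketch follows, with SomeOptimizee, SomeOptimizer and SomeOptimizerParameters as hypothetical stand-ins for concrete L2L classes.

# Hypothetical driver for the Experiment class; SomeOptimizee, SomeOptimizer
# and SomeOptimizerParameters stand in for concrete L2L classes.
experiment = Experiment(root_dir_path='results')
traj, all_jube_params = experiment.prepare_experiment(
    name='L2L-demo',
    trajectory_name='demo-trajectory',
    jube_parameter={'nodes': '2', 'walltime': '02:00:00'})

optimizee = SomeOptimizee(traj)
optimizer_parameters = SomeOptimizerParameters()
optimizer = SomeOptimizer(
    traj,
    optimizee_create_individual=optimizee.create_individual,
    optimizee_fitness_weights=(1.0,),
    parameters=optimizer_parameters)

experiment.run_experiment(optimizer=optimizer,
                          optimizee=optimizee,
                          optimizer_parameters=optimizer_parameters)
traj, paths = experiment.end_experiment(optimizer)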
Example 3
def main():

    name = "FIT-TEMPS"
    root_dir_path = os.path.dirname(os.path.abspath(sys.argv[0]))

    paths = Paths(name, dict(run_num="test"), root_dir_path=root_dir_path)

    print("All output logs can be found in directory ", paths.logs_path)

    traj_file = os.path.join(paths.output_dir_path, "data.h5")
    os.makedirs(paths.output_dir_path, exist_ok=True)
    print("Trajectory file is: {}".format(traj_file))

    trajectories = load_last_trajs(
        os.path.join(paths.output_dir_path, 'per_gen_trajectories'))
    if len(trajectories):
        k = trajectories['generation']
        traj = trajectories[k]
    else:
        traj = name

    # Create an environment that handles running our simulation
    # This initializes an environment
    env = Environment(
        trajectory=traj,
        filename=traj_file,
        file_title="{} data".format(name),
        comment="{} data".format(name),
        add_time=True,
        automatic_storing=True,
        log_stdout=False,  # Sends stdout to logs
        multiprocessing=MULTIPROCESSING,
    )
    create_shared_logger_data(logger_names=["bin", "optimizers"],
                              log_levels=["INFO", "INFO"],
                              log_to_consoles=[True, True],
                              sim_name=name,
                              log_directory=paths.logs_path)
    configure_loggers()

    # trajectories = load_last_trajs(os.path.join(paths.root_dir_path,'trajectories'))

    # env.trajectory.individuals[0] = trajectories

    # Get the trajectory from the environment
    traj = env.trajectory

    if len(trajectories) == 0:

        # Set JUBE params
        traj.f_add_parameter_group("JUBE_params", "Contains JUBE parameters")

        # Scheduler parameters
        # Name of the scheduler
        # traj.f_add_parameter_to_group("JUBE_params", "scheduler", "Slurm")

        # Command to submit jobs to the schedulers
        # traj.f_add_parameter_to_group("JUBE_params", "submit_cmd", "sbatch")

        # Template file for the particular scheduler
        traj.f_add_parameter_to_group("JUBE_params", "job_file", "job.run")
        # Number of nodes to request for each run
        traj.f_add_parameter_to_group("JUBE_params", "nodes", "1")
        # Requested time for the compute resources
        traj.f_add_parameter_to_group("JUBE_params", "walltime", "00:10:00")
        # MPI Processes per node
        traj.f_add_parameter_to_group("JUBE_params", "ppn", "1")
        # CPU cores per MPI process
        traj.f_add_parameter_to_group("JUBE_params", "cpu_pp", "1")
        # Threads per process
        traj.f_add_parameter_to_group("JUBE_params", "threads_pp", "1")
        # Type of emails to be sent from the scheduler
        traj.f_add_parameter_to_group("JUBE_params", "mail_mode", "ALL")
        # Email to notify events from the scheduler
        traj.f_add_parameter_to_group("JUBE_params", "mail_address",
                                      "*****@*****.**")
        # Error file for the job
        traj.f_add_parameter_to_group("JUBE_params", "err_file", "stderr")
        # Output file for the job
        traj.f_add_parameter_to_group("JUBE_params", "out_file", "stdout")
        # JUBE parameters for multiprocessing. Relevant even without scheduler.
        # MPI Processes per job
        traj.f_add_parameter_to_group("JUBE_params", "tasks_per_job", "1")

        # The execution command
        run_filename = os.path.join(paths.root_dir_path,
                                    "run_files/run_optimizee.py")
        command = "python3 {}".format(run_filename)
        if ON_JEWELS and not USE_MPI:
            # -N num nodes
            # -t exec time (mins)
            # -n num sub-procs
            command = "srun -t 15 -N 1 -n 4 -c 1 --gres=gpu:1 {}".format(
                command)
        elif USE_MPI:
            command = "MPIEXEC_TIMEOUT={} mpiexec -bind-to socket -np 1 {}".format(
                60, command)

        traj.f_add_parameter_to_group("JUBE_params", "exec", command)

        # Ready file for a generation
        traj.f_add_parameter_to_group(
            "JUBE_params", "ready_file",
            os.path.join(paths.root_dir_path, "readyfiles/ready_w_"))
        # Path where the job will be executed
        traj.f_add_parameter_to_group("JUBE_params", "work_path",
                                      paths.root_dir_path)

        # Maybe we should pass the Paths object to avoid defining paths here and there
        traj.f_add_parameter_to_group("JUBE_params", "paths_obj", paths)

        temps = []
        years = []
        with open('./temperature/temperature-anomaly.csv', 'r') as csv_file:
            for line in csv_file:
                sp = line.split(',')
                if sp[0] == 'Global':
                    temps.append(float(sp[3]))
                    years.append(float(sp[2]))
                if sp[0].startswith('Northern'):
                    break

        traj.f_add_parameter_group("simulation", "Contains JUBE parameters")
        traj.f_add_parameter_to_group("simulation", 'target', temps)  # ms
        traj.f_add_parameter_to_group("simulation", 'years', years)

    ## Innerloop simulator
    optimizee = FitOptimizee(traj, 1234)

    # Prepare optimizee for jube runs
    JUBE_runner.prepare_optimizee(optimizee, paths.root_dir_path)

    _, dict_spec = dict_to_list(optimizee.create_individual(),
                                get_dict_spec=True)
    # step_size = np.asarray([config.ATTR_STEPS[k] for (k, spec, length) in dict_spec])

    fit_weights = [1.0]
    num_generations = 5000
    population_size = 200

    # if len(trajectories):
    #     traj.individuals = trajectories_to_individuals(
    #                             trajectories, population_size, optimizee)

    parameters = GeneticAlgorithmParameters(
        seed=None,
        popsize=population_size,
        CXPB=0.5,  # probability of mating two individuals
        MUTPB=0.8,  # probability of an individual to mutate
        NGEN=num_generations,
        indpb=0.1,  # probability of each "gene" to mutate
        tournsize=population_size,  # tournament size for selection
        matepar=0.5,  # how much to mix two genes when mating
        mutpar=1.0  # standard deviation for the normal mutation distribution
    )

    optimizer = GeneticAlgorithmOptimizer(
        traj,
        optimizee_create_individual=optimizee.create_individual,
        optimizee_fitness_weights=fit_weights,
        parameters=parameters,
        optimizee_bounding_func=optimizee.bounding_func,
        percent_hall_of_fame=0.05,
        percent_elite=0.5,
    )

    # Add post processing
    # This is presumably where results from multiple runs would be split
    env.add_postprocessing(optimizer.post_process)

    # Run the simulation with all parameter combinations
    env.run(optimizee.simulate)

    ## Outerloop optimizer end
    optimizer.end(traj)

    # Finally disable logging and close all log-files
    env.disable_logging()
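
Example 3 splits each CSV line on commas by hand, which breaks on quoted fields. Below is a sketch of the same extraction using Python's csv module; the column layout (entity, code, year, anomaly) is assumed from the hand-rolled parser above.

# Sketch: the 'Global' temperature extraction using the csv module.
# Column layout (entity, code, year, anomaly) is assumed from Example 3.
import csv

def load_global_anomalies(path='./temperature/temperature-anomaly.csv'):
    temps, years = [], []
    with open(path, newline='') as f:
        for row in csv.reader(f):
            if not row:
                continue
            if row[0] == 'Global':
                years.append(float(row[2]))
                temps.append(float(row[3]))
            elif row[0].startswith('Northern'):
                break  # entities are grouped; stop at the next block
    return years, temps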
Example 4
def run_experiment():
    name = 'L2L-MNIST-ES'
    try:
        with open('bin/path.conf') as f:
            root_dir_path = f.read().strip()
    except FileNotFoundError:
        raise FileNotFoundError(
            "You have not set the root path to store your results."
            " Write the path to a path.conf text file in the bin directory"
            " before running the simulation")

    trajectory_name = 'small-mnist-full-monty-100-hidden'

    paths = Paths(name,
                  dict(run_num='test'),
                  root_dir_path=root_dir_path,
                  suffix="-" + trajectory_name)

    print("All output logs can be found in directory ", paths.logs_path)

    # Create an environment that handles running our simulation
    # This initializes an environment
    env = Environment(
        trajectory=trajectory_name,
        filename=paths.output_dir_path,
        file_title='{} data'.format(name),
        comment='{} data'.format(name),
        add_time=True,
        automatic_storing=True,
        log_stdout=False,  # Sends stdout to logs
    )
    create_shared_logger_data(logger_names=['bin', 'optimizers'],
                              log_levels=['INFO', 'INFO'],
                              log_to_consoles=[True, True],
                              sim_name=name,
                              log_directory=paths.logs_path)
    configure_loggers()

    # Get the trajectory from the environment
    traj = env.trajectory

    optimizee_seed = 200

    optimizee_parameters = MNISTOptimizeeParameters(n_hidden=10,
                                                    seed=optimizee_seed,
                                                    use_small_mnist=True)
    ## Innerloop simulator
    optimizee = MNISTOptimizee(traj, optimizee_parameters)

    logger.info("Optimizee parameters: %s", optimizee_parameters)

    ## Outerloop optimizer initialization
    optimizer_seed = 1234
    optimizer_parameters = EvolutionStrategiesParameters(
        learning_rate=0.1,
        noise_std=0.1,
        mirrored_sampling_enabled=True,
        fitness_shaping_enabled=True,
        pop_size=20,
        n_iteration=2000,
        stop_criterion=np.inf,
        seed=optimizer_seed)

    logger.info("Optimizer parameters: %s", optimizer_parameters)

    optimizer = EvolutionStrategiesOptimizer(
        traj,
        optimizee_create_individual=optimizee.create_individual,
        optimizee_fitness_weights=(1., ),
        parameters=optimizer_parameters,
        optimizee_bounding_func=optimizee.bounding_func)

    # Add post processing
    env.add_postprocessing(optimizer.post_process)

    # Run the simulation with all parameter combinations
    env.run(optimizee.simulate)

    # Finally disable logging and close all log-files
    env.disable_logging()

    return traj.v_storage_service.filename, traj.v_name, paths
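
run_experiment() returns the trajectory file name, the trajectory name and the Paths object so that a caller can reload the stored results. A sketch of such a caller follows, assuming the pypet backend that this Environment writes its HDF5 file with; if a different storage service is in use, the loading calls will differ.

# Hypothetical post-run analysis entry point; assumes pypet as the storage
# backend behind the Environment (true for the pypet-based L2L versions).
from pypet import Trajectory

def main():
    filename, trajname, paths = run_experiment()
    traj = Trajectory(trajname, add_time=False)
    traj.f_load(filename=filename, load_parameters=2,
                load_results=2, force=True)
    traj.v_auto_load = True
    print('Loaded trajectory {} from {}'.format(trajname, filename))

if __name__ == '__main__':
    main()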
Example 5
def main():
    name = 'L2L-FUN-GA'
    try:
        with open('bin/path.conf') as f:
            root_dir_path = f.read().strip()
    except FileNotFoundError:
        raise FileNotFoundError(
            "You have not set the root path to store your results."
            " Write the path to a path.conf text file in the bin directory"
            " before running the simulation"
        )
    paths = Paths(name, dict(run_no='test'), root_dir_path=root_dir_path)

    with open("bin/logging.yaml") as f:
        l_dict = yaml.load(f)
        log_output_file = os.path.join(paths.results_path, l_dict['handlers']['file']['filename'])
        l_dict['handlers']['file']['filename'] = log_output_file
        logging.config.dictConfig(l_dict)

    print("All output can be found in file ", log_output_file)
    print("Change the values in logging.yaml to control log level and destination")
    print("e.g. change the handler to console for the loggers you're interesting in to get output to stdout")

    traj_file = os.path.join(paths.output_dir_path, 'data.h5')

    # Create an environment that handles running our simulation
    # This initializes an environment
    env = Environment(trajectory=name, filename=traj_file, file_title='{} data'.format(name),
                      comment='{} data'.format(name),
                      add_time=True,
                      automatic_storing=True,
                      log_stdout=False,  # Sends stdout to logs
                      log_folder=os.path.join(paths.output_dir_path, 'logs')
                      )

    # Get the trajectory from the environment
    traj = env.trajectory

    # Set JUBE params
    traj.f_add_parameter_group("JUBE_params", "Contains JUBE parameters")

    # Scheduler parameters
    # Name of the scheduler
    # traj.f_add_parameter_to_group("JUBE_params", "scheduler", "Slurm")
    # Command to submit jobs to the schedulers
    traj.f_add_parameter_to_group("JUBE_params", "submit_cmd", "sbatch")
    # Template file for the particular scheduler
    traj.f_add_parameter_to_group("JUBE_params", "job_file", "job.run")
    # Number of nodes to request for each run
    traj.f_add_parameter_to_group("JUBE_params", "nodes", "1")
    # Requested time for the compute resources
    traj.f_add_parameter_to_group("JUBE_params", "walltime", "00:01:00")
    # MPI Processes per node
    traj.f_add_parameter_to_group("JUBE_params", "ppn", "1")
    # CPU cores per MPI process
    traj.f_add_parameter_to_group("JUBE_params", "cpu_pp", "1")
    # Threads per process
    traj.f_add_parameter_to_group("JUBE_params", "threads_pp", "1")
    # Type of emails to be sent from the scheduler
    traj.f_add_parameter_to_group("JUBE_params", "mail_mode", "ALL")
    # Email to notify events from the scheduler
    traj.f_add_parameter_to_group("JUBE_params", "mail_address", "*****@*****.**")
    # Error file for the job
    traj.f_add_parameter_to_group("JUBE_params", "err_file", "stderr")
    # Output file for the job
    traj.f_add_parameter_to_group("JUBE_params", "out_file", "stdout")
    # JUBE parameters for multiprocessing. Relevant even without scheduler.
    # MPI Processes per job
    traj.f_add_parameter_to_group("JUBE_params", "tasks_per_job", "1")
    # The execution command
    traj.f_add_parameter_to_group("JUBE_params", "exec", "mpirun python3 " + root_dir_path +
                                  "/run_files/run_optimizee.py")
    # Ready file for a generation
    traj.f_add_parameter_to_group("JUBE_params", "ready_file", root_dir_path + "/readyfiles/ready_w_")
    # Path where the job will be executed
    traj.f_add_parameter_to_group("JUBE_params", "work_path", root_dir_path)

    ## Benchmark function
    function_id = 4
    bench_functs = BenchmarkedFunctions()
    (benchmark_name, benchmark_function), benchmark_parameters = \
        bench_functs.get_function_by_index(function_id, noise=True)

    optimizee_seed = 100
    random_state = np.random.RandomState(seed=optimizee_seed)
    function_tools.plot(benchmark_function, random_state)

    ## Innerloop simulator
    optimizee = FunctionGeneratorOptimizee(traj, benchmark_function, seed=optimizee_seed)

    # Prepare optimizee for jube runs
    jube.prepare_optimizee(optimizee, root_dir_path)

    ## Outerloop optimizer initialization
    parameters = GeneticAlgorithmParameters(seed=0, popsize=50, CXPB=0.5,
                                            MUTPB=0.3, NGEN=100, indpb=0.02,
                                            tournsize=15, matepar=0.5,
                                            mutpar=1
                                            )

    optimizer = GeneticAlgorithmOptimizer(traj, optimizee_create_individual=optimizee.create_individual,
                                          optimizee_fitness_weights=(-0.1,),
                                          parameters=parameters)

    # Add post processing
    env.add_postprocessing(optimizer.post_process)

    # Run the simulation with all parameter combinations
    env.run(optimizee.simulate)

    ## Outerloop optimizer end
    optimizer.end()

    # Finally disable logging and close all log-files
    env.disable_logging()
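
The long run of f_add_parameter_to_group calls in Example 5 can be collapsed into a loop over a plain dict, which is essentially what Example 2's prepare_experiment does. A sketch using the same traj API:

# Sketch: register the scheduler-related JUBE parameters from a dict
# instead of one f_add_parameter_to_group call per key.
jube_params = {
    "submit_cmd": "sbatch",
    "job_file": "job.run",
    "nodes": "1",
    "walltime": "00:01:00",
    "ppn": "1",
    "cpu_pp": "1",
    "threads_pp": "1",
    "mail_mode": "ALL",
    "err_file": "stderr",
    "out_file": "stdout",
    "tasks_per_job": "1",
}
traj.f_add_parameter_group("JUBE_params", "Contains JUBE parameters")
for key, value in jube_params.items():
    traj.f_add_parameter_to_group("JUBE_params", key, value)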
Example 6
def main():

    name = "L2L-OMNIGLOT"
    root_dir_path = os.path.dirname(os.path.abspath(sys.argv[0]))

    paths = Paths(name, dict(run_num="test"), root_dir_path=root_dir_path)

    print("All output logs can be found in directory ", paths.logs_path)

    traj_file = os.path.join(paths.output_dir_path, "data.h5")
    os.makedirs(paths.output_dir_path, exist_ok=True)
    print("Trajectory file is: {}".format(traj_file))

    trajectories = load_last_trajs(
        os.path.join(paths.output_dir_path, 'per_gen_trajectories'))
    if len(trajectories):
        k = trajectories['generation']
        traj = trajectories[k]
    else:
        traj = name

    # Create an environment that handles running our simulation
    # This initializes an environment
    env = Environment(
        trajectory=traj,
        filename=traj_file,
        file_title="{} data".format(name),
        comment="{} data".format(name),
        add_time=True,
        automatic_storing=True,
        log_stdout=False,  # Sends stdout to logs
        multiprocessing=MULTIPROCESSING,
    )
    create_shared_logger_data(logger_names=["bin", "optimizers"],
                              log_levels=["INFO", "INFO"],
                              log_to_consoles=[True, True],
                              sim_name=name,
                              log_directory=paths.logs_path)
    configure_loggers()

    # Get the trajectory from the environment
    traj = env.trajectory

    # Set JUBE params
    traj.f_add_parameter_group("JUBE_params", "Contains JUBE parameters")

    # Scheduler parameters
    # Name of the scheduler
    # traj.f_add_parameter_to_group("JUBE_params", "scheduler", "Slurm")
    # Command to submit jobs to the schedulers
    # traj.f_add_parameter_to_group("JUBE_params", "submit_cmd", "sbatch")
    # Template file for the particular scheduler
    traj.f_add_parameter_to_group("JUBE_params", "job_file", "job.run")
    # Number of nodes to request for each run
    traj.f_add_parameter_to_group("JUBE_params", "nodes", "1")
    # Requested time for the compute resources
    traj.f_add_parameter_to_group("JUBE_params", "walltime", "00:01:00")
    # MPI Processes per node
    traj.f_add_parameter_to_group("JUBE_params", "ppn", "1")
    # CPU cores per MPI process
    traj.f_add_parameter_to_group("JUBE_params", "cpu_pp", "1")
    # Threads per process
    traj.f_add_parameter_to_group("JUBE_params", "threads_pp", "1")
    # Type of emails to be sent from the scheduler
    traj.f_add_parameter_to_group("JUBE_params", "mail_mode", "ALL")
    # Email to notify events from the scheduler
    traj.f_add_parameter_to_group("JUBE_params", "mail_address",
                                  "*****@*****.**")
    # Error file for the job
    traj.f_add_parameter_to_group("JUBE_params", "err_file", "stderr")
    # Output file for the job
    traj.f_add_parameter_to_group("JUBE_params", "out_file", "stdout")
    # JUBE parameters for multiprocessing. Relevant even without scheduler.
    # MPI Processes per job
    traj.f_add_parameter_to_group("JUBE_params", "tasks_per_job", "1")

    # The execution command
    run_filename = os.path.join(paths.root_dir_path, "run_files",
                                "run_optimizee.py")
    command = "python3 {}".format(run_filename)
    if ON_JEWELS and not USE_MPI:
        # -N num nodes
        # -t exec time (mins)
        # -n num sub-procs
        command = "srun -n 1 -c {} --gres=gpu:1 {}".format(NUM_SIMS, command)
    elif USE_MPI:
        # -timeout <seconds>
        # command = "MPIEXEC_TIMEOUT={} "
        #           "mpiexec -bind-to socket -np 1 {}".format(60*240, command)
        command = "mpiexec -bind-to socket -np 1 {}".format(command)

    traj.f_add_parameter_to_group("JUBE_params", "exec", command)

    # Ready file for a generation
    traj.f_add_parameter_to_group(
        "JUBE_params", "ready_file",
        os.path.join(paths.root_dir_path, "readyfiles/ready_w_"))
    # Path where the job will be executed
    traj.f_add_parameter_to_group("JUBE_params", "work_path",
                                  paths.root_dir_path)

    # Maybe we should pass the Paths object to avoid
    # defining paths here and there
    traj.f_add_parameter_to_group("JUBE_params", "paths_obj", paths)

    traj.f_add_parameter_group("simulation", "Contains JUBE parameters")
    traj.f_add_parameter_to_group("simulation", 'num_sims', NUM_SIMS)  # ms
    traj.f_add_parameter_to_group("simulation", 'on_juwels', ON_JEWELS)
    traj.f_add_parameter_to_group("simulation", 'steps', config.STEPS)  # ms
    traj.f_add_parameter_to_group("simulation", 'duration',
                                  config.DURATION)  # ms
    traj.f_add_parameter_to_group("simulation", 'sample_dt',
                                  config.SAMPLE_DT)  # ms
    traj.f_add_parameter_to_group(
        "simulation",  # rows, cols
        'input_shape',
        config.INPUT_SHAPE)
    traj.f_add_parameter_to_group(
        "simulation",  # rows, cols
        'input_divs',
        config.INPUT_DIVS)
    traj.f_add_parameter_to_group("simulation", 'input_layers',
                                  config.N_INPUT_LAYERS)
    traj.f_add_parameter_to_group("simulation", 'num_classes',
                                  config.N_CLASSES)
    traj.f_add_parameter_to_group("simulation", 'samples_per_class',
                                  config.N_SAMPLES)
    traj.f_add_parameter_to_group("simulation", 'test_per_class',
                                  config.N_TEST)
    traj.f_add_parameter_to_group("simulation", 'num_epochs', config.N_EPOCHS)
    traj.f_add_parameter_to_group("simulation", 'total_per_class',
                                  config.TOTAL_SAMPLES)
    traj.f_add_parameter_to_group("simulation", 'kernel_width',
                                  config.KERNEL_W)
    traj.f_add_parameter_to_group("simulation", 'kernel_pad', config.PAD)
    traj.f_add_parameter_to_group("simulation", 'output_size',
                                  config.OUTPUT_SIZE)
    traj.f_add_parameter_to_group("simulation", 'use_gabor',
                                  config.USE_GABOR_LAYER)
    # traj.f_add_parameter_to_group("simulation",
    #                               'expand', config.EXPANSION_RANGE[0])
    # traj.f_add_parameter_to_group("simulation",
    #                               'conn_dist', config.CONN_DIST)
    traj.f_add_parameter_to_group("simulation", 'prob_noise',
                                  config.PROB_NOISE_SAMPLE)
    traj.f_add_parameter_to_group("simulation", 'noisy_spikes_path',
                                  paths.root_dir_path)

    db_path = os.path.abspath('../omniglot_output_%d' % config.INPUT_SHAPE[0])
    traj.f_add_parameter_to_group("simulation", 'spikes_path', db_path)

    # dbs = [ name for name in os.listdir(db_path)
    #       if os.path.isdir(os.path.join(db_path, name)) ]
    # print(dbs)
    dbs = [
        'Mkhedruli_-Georgian-', 'Tagalog',
        'Ojibwe_-Canadian_Aboriginal_Syllabics-', 'Asomtavruli_-Georgian-',
        'Balinese', 'Japanese_-katakana-', 'Malay_-Jawi_-_Arabic-', 'Armenian',
        'Burmese_-Myanmar-', 'Arcadian', 'Futurama', 'Cyrillic',
        'Alphabet_of_the_Magi', 'Sanskrit', 'Braille', 'Bengali',
        'Inuktitut_-Canadian_Aboriginal_Syllabics-', 'Syriac_-Estrangelo-',
        'Gujarati', 'Korean', 'Early_Aramaic', 'Japanese_-hiragana-',
        'Anglo-Saxon_Futhorc', 'N_Ko', 'Grantha', 'Tifinagh',
        'Blackfoot_-Canadian_Aboriginal_Syllabics-', 'Greek', 'Hebrew', 'Latin'
    ]

    # Restrict the run to a subset of the alphabets listed above
    dbs = ['Futurama']
    if config.DEBUG:
        dbs = ['Braille']

    traj.f_add_parameter_to_group("simulation", 'database', dbs)

    # Innerloop simulator
    grad_desc = OPTIMIZER == GRADDESC
    optimizee = OmniglotOptimizee(traj, 1234, grad_desc)

    # Prepare optimizee for jube runs
    JUBE_runner.prepare_optimizee(optimizee, paths.root_dir_path)

    _, dict_spec = dict_to_list(optimizee.create_individual(),
                                get_dict_spec=True)
    # step_size = np.asarray(
    #     [config.ATTR_STEPS[k] for (k, spec, length) in dict_spec])
    step_size = tuple(config.ATTR_STEPS[k] for (k, spec, length) in dict_spec)

    fit_weights = [1.0]

    optimizer_seed = config.SEED
    if OPTIMIZER == GRADDESC:
        n_random_steps = 100
        n_iteration = 1000

        parameters = RMSPropParameters(learning_rate=0.0001,
                                       exploration_step_size=step_size,
                                       n_random_steps=n_random_steps,
                                       momentum_decay=0.5,
                                       n_iteration=n_iteration,
                                       stop_criterion=np.inf,
                                       seed=optimizer_seed)

        optimizer = GradientDescentOptimizer(
            traj,
            optimizee_create_individual=optimizee.create_individual,
            optimizee_fitness_weights=fit_weights,
            parameters=parameters,
            optimizee_bounding_func=optimizee.bounding_func)

    elif OPTIMIZER == EVOSTRAT:
        parameters = EvolutionStrategiesParameters(
            learning_rate=0.0001,
            noise_std=step_size,
            mirrored_sampling_enabled=True,
            fitness_shaping_enabled=True,
            pop_size=50,  # number of mirrored-sample pairs
            n_iteration=1000,
            stop_criterion=np.inf,
            seed=optimizer_seed)

        optimizer = EvolutionStrategiesOptimizer(
            traj,
            optimizee_create_individual=optimizee.create_individual,
            optimizee_fitness_weights=fit_weights,
            parameters=parameters,
            optimizee_bounding_func=optimizee.bounding_func)
    else:
        num_generations = 1000
        if ON_JEWELS:
            nodes = 12
            gpus_per_node = 4
            population_size = gpus_per_node * nodes
        else:
            population_size = 20
        p_hof = 0.2 if population_size < 50 else 0.1
        p_bob = 0.2
        # last_trajs = load_last_trajs(os.path.join(
        #    paths.output_dir_path, 'per_gen_trajectories'))
        # last_trajs = load_last_trajs(os.path.join(
        #    paths.root_dir_path, 'trajectories'))
        # if len(last_trajs):
        #     traj.individuals = trajectories_to_individuals(
        #         last_trajs, population_size, optimizee)
        attr_steps = [config.ATTR_STEPS[k[0]] for k in dict_spec]
        parameters = GeneticAlgorithmParameters(
            seed=optimizer_seed,
            popsize=population_size,
            CXPB=0.5,  # probability of mating two individuals
            # note: reduced from 0.8 to see if it removes fitness bouncing
            MUTPB=0.7,  # probability of an individual to mutate
            NGEN=num_generations,
            indpb=0.1,  # probability of each "gene" to mutate
            tournsize=population_size,  # tournament size for selection
            matepar=0.5,  # how much to mix two genes when mating
            # standard deviations for the normal mutation distribution
            mutpar=attr_steps,
        )

        optimizer = GeneticAlgorithmOptimizer(
            traj,
            optimizee_create_individual=optimizee.create_individual,
            optimizee_fitness_weights=fit_weights,
            parameters=parameters,
            optimizee_bounding_func=optimizee.bounding_func,
            percent_hall_of_fame=p_hof,
            percent_elite=p_bob,
        )

    # Add post processing
    env.add_postprocessing(optimizer.post_process)

    # Run the simulation with all parameter combinations
    env.run(optimizee.simulate)

    # Outerloop optimizer end
    optimizer.end(traj)

    # Finally disable logging and close all log-files
    env.disable_logging()
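
Example 6 depends on several module-level names (OPTIMIZER, GRADDESC, EVOSTRAT, ON_JEWELS, USE_MPI, NUM_SIMS, MULTIPROCESSING) that are defined outside the snippet. The sketch below is one plausible set of definitions, consistent with how the names are used above but otherwise an assumption.

# Hypothetical module-level configuration assumed by Example 6; only the
# names and their usage are taken from the snippet, the values are guesses.
GRADDESC = 'gradient_descent'
EVOSTRAT = 'evolution_strategies'
GENALG = 'genetic_algorithm'

OPTIMIZER = GENALG       # selects the branch in the if/elif above
ON_JEWELS = False        # True when running on the JUWELS cluster
USE_MPI = False          # True to launch the optimizee through mpiexec
NUM_SIMS = 4             # concurrent simulations per node (srun -c)
MULTIPROCESSING = True   # forwarded to Environment(multiprocessing=...)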
Example 7
def main():
    name = 'L2L-FUN-GS'
    try:
        with open('bin/path.conf') as f:
            root_dir_path = f.read().strip()
    except FileNotFoundError:
        raise FileNotFoundError(
            "You have not set the root path to store your results."
            " Write the path to a path.conf text file in the bin directory"
            " before running the simulation"
        )
    paths = Paths(name, dict(run_no='test'), root_dir_path=root_dir_path)

    print("All output logs can be found in directory ", paths.logs_path)

    traj_file = os.path.join(paths.output_dir_path, 'data.h5')

    # Create an environment that handles running our simulation
    # This initializes an environment
    env = Environment(trajectory=name, filename=traj_file, file_title='{} data'.format(name),
                      comment='{} data'.format(name),
                      add_time=True,
                      automatic_storing=True,
                      log_stdout=False,  # Sends stdout to logs
                      )
    create_shared_logger_data(logger_names=['bin', 'optimizers'],
                              log_levels=['INFO', 'INFO'],
                              log_to_consoles=[True, True],
                              sim_name=name,
                              log_directory=paths.logs_path)
    configure_loggers()

    # Get the trajectory from the environment
    traj = env.trajectory


    # Set JUBE params
    traj.f_add_parameter_group("JUBE_params", "Contains JUBE parameters")
    # Execution command
    traj.f_add_parameter_to_group("JUBE_params", "exec", "python " +
                                  os.path.join(paths.simulation_path, "run_files/run_optimizee.py"))
    # Paths
    traj.f_add_parameter_to_group("JUBE_params", "paths", paths)


    ## Benchmark function
    function_id = 4
    bench_functs = BenchmarkedFunctions()
    (benchmark_name, benchmark_function), benchmark_parameters = \
        bench_functs.get_function_by_index(function_id, noise=True)

    optimizee_seed = 100
    random_state = np.random.RandomState(seed=optimizee_seed)
    function_tools.plot(benchmark_function, random_state)

    ## Innerloop simulator
    optimizee = FunctionGeneratorOptimizee(traj, benchmark_function, seed=optimizee_seed)

    # Prepare optimizee for jube runs
    jube.prepare_optimizee(optimizee, paths.simulation_path)

    ## Outerloop optimizer initialization
    n_grid_divs_per_axis = 30
    parameters = GridSearchParameters(param_grid={
        'coords': (optimizee.bound[0], optimizee.bound[1], n_grid_divs_per_axis)
    })
    optimizer = GridSearchOptimizer(traj, optimizee_create_individual=optimizee.create_individual,
                                    optimizee_fitness_weights=(-0.1,),
                                    parameters=parameters)

    # Add post processing
    env.add_postprocessing(optimizer.post_process)

    # Run the simulation with all parameter combinations
    env.run(optimizee.simulate)

    ## Outerloop optimizer end
    optimizer.end(traj)

    # Finally disable logging and close all log-files
    env.disable_logging()
Example 8
def main():
    name = 'L2L-FUN-CE'
    try:
        with open('bin/path.conf') as f:
            root_dir_path = f.read().strip()
    except FileNotFoundError:
        raise FileNotFoundError(
            "You have not set the root path to store your results."
            " Write the path to a path.conf text file in the bin directory"
            " before running the simulation")
    paths = Paths(name, dict(run_no='test'), root_dir_path=root_dir_path)

    print("All output logs can be found in directory ", paths.logs_path)

    traj_file = os.path.join(paths.output_dir_path, 'data.h5')

    # Create an environment that handles running our simulation
    # This initializes an environment
    env = Environment(
        trajectory=name,
        filename=traj_file,
        file_title='{} data'.format(name),
        comment='{} data'.format(name),
        add_time=True,
        freeze_input=True,
        multiproc=True,
        automatic_storing=True,
        log_stdout=False,  # Sends stdout to logs
    )
    create_shared_logger_data(logger_names=['bin', 'optimizers'],
                              log_levels=['INFO', 'INFO'],
                              log_to_consoles=[True, True],
                              sim_name=name,
                              log_directory=paths.logs_path)
    configure_loggers()

    # Get the trajectory from the environment
    traj = env.trajectory

    function_id = 14
    bench_functs = BenchmarkedFunctions()
    (benchmark_name, benchmark_function), benchmark_parameters = \
        bench_functs.get_function_by_index(function_id, noise=True)

    optimizee_seed = 100
    random_state = np.random.RandomState(seed=optimizee_seed)
    function_tools.plot(benchmark_function, random_state)

    ## Innerloop simulator
    optimizee = FunctionGeneratorOptimizee(traj,
                                           benchmark_function,
                                           seed=optimizee_seed)

    ## Outerloop optimizer initialization
    parameters = CrossEntropyParameters(
        pop_size=50,
        rho=0.9,
        smoothing=0.0,
        temp_decay=0,
        n_iteration=160,
        distribution=NoisyBayesianGaussianMixture(
            n_components=3,
            noise_magnitude=1.,
            noise_decay=0.9,
            weight_concentration_prior=1.5),
        stop_criterion=np.inf,
        seed=103)
    optimizer = CrossEntropyOptimizer(
        traj,
        optimizee_create_individual=optimizee.create_individual,
        optimizee_fitness_weights=(-0.1, ),
        parameters=parameters,
        optimizee_bounding_func=optimizee.bounding_func)

    # Add post processing
    env.add_postprocessing(optimizer.post_process)

    # Run the simulation with all parameter combinations
    env.run(optimizee.simulate)

    ## Outerloop optimizer end
    optimizer.end(traj)

    # Finally disable logging and close all log-files
    env.disable_logging()
Example 9
def main():
    name = 'L2L-FUN-GA'
    try:
        with open('bin/path.conf') as f:
            root_dir_path = f.read().strip()
    except FileNotFoundError:
        raise FileNotFoundError(
            "You have not set the root path to store your results."
            " Write the path to a path.conf text file in the bin directory"
            " before running the simulation")
    paths = Paths(name, dict(run_no='test'), root_dir_path=root_dir_path)

    with open("logging.yaml") as f:
        l_dict = yaml.safe_load(f)
        log_output_file = os.path.join(paths.results_path,
                                       l_dict['handlers']['file']['filename'])
        l_dict['handlers']['file']['filename'] = log_output_file
        logging.config.dictConfig(l_dict)

    print("All output can be found in file ", log_output_file)
    print(
        "Change the values in logging.yaml to control log level and destination"
    )
    print(
        "e.g. change the handler to console for the loggers you're interesting in to get output to stdout"
    )

    traj_file = os.path.join(paths.output_dir_path, 'data.h5')

    # Create an environment that handles running our simulation
    # This initializes an environment
    env = Environment(
        trajectory=name,
        filename=traj_file,
        file_title='{} data'.format(name),
        comment='{} data'.format(name),
        add_time=True,
        automatic_storing=True,
        log_stdout=False,  # Sends stdout to logs
        log_folder=os.path.join(paths.output_dir_path, 'logs'))

    # Get the trajectory from the environment
    traj = env.trajectory

    # Set JUBE params
    traj.f_add_parameter_group("JUBE_params", "Contains JUBE parameters")
    # The execution command
    traj.f_add_parameter_to_group(
        "JUBE_params", "exec", "python " +
        os.path.join(paths.simulation_path, "run_files/run_optimizee.py"))
    # Paths
    traj.f_add_parameter_to_group("JUBE_params", "paths", paths)

    ## Benchmark function
    function_id = 4
    bench_functs = BenchmarkedFunctions()
    (benchmark_name, benchmark_function), benchmark_parameters = \
        bench_functs.get_function_by_index(function_id, noise=True)

    optimizee_seed = 100
    random_state = np.random.RandomState(seed=optimizee_seed)
    function_tools.plot(benchmark_function, random_state)

    ## Innerloop simulator
    optimizee = FunctionGeneratorOptimizee(traj,
                                           benchmark_function,
                                           seed=optimizee_seed)

    # Prepare optimizee for jube runs
    jube.prepare_optimizee(optimizee, paths.simulation_path)

    ## Outerloop optimizer initialization
    parameters = GeneticAlgorithmParameters(seed=0,
                                            popsize=50,
                                            CXPB=0.5,
                                            MUTPB=0.3,
                                            NGEN=100,
                                            indpb=0.02,
                                            tournsize=15,
                                            matepar=0.5,
                                            mutpar=1)

    optimizer = GeneticAlgorithmOptimizer(
        traj,
        optimizee_create_individual=optimizee.create_individual,
        optimizee_fitness_weights=(-0.1, ),
        parameters=parameters)

    # Add post processing
    env.add_postprocessing(optimizer.post_process)

    # Run the simulation with all parameter combinations
    env.run(optimizee.simulate)

    ## Outerloop optimizer end
    optimizer.end()

    # Finally disable logging and close all log-files
    env.disable_logging()
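
Examples 5 and 9 load a logging.yaml whose 'file' handler filename is rewritten before calling dictConfig. The YAML file itself is not shown in the source, so the following is an assumption about its structure, written inline as a Python dict with the shape those examples expect.

# Hypothetical logging configuration with the structure Examples 5 and 9
# expect: a 'file' handler whose 'filename' is rewritten before dictConfig.
import logging.config

l_dict = {
    'version': 1,
    'formatters': {
        'plain': {'format': '%(asctime)s %(name)s %(levelname)s: %(message)s'},
    },
    'handlers': {
        'file': {
            'class': 'logging.FileHandler',
            'formatter': 'plain',
            'filename': 'l2l.log',  # replaced with a path under results_path
        },
    },
    'root': {'level': 'INFO', 'handlers': ['file']},
}
logging.config.dictConfig(l_dict)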
Example 10
def run_experiment():
    name = 'L2L-FUN-ES'
    try:
        with open('bin/path.conf') as f:
            root_dir_path = f.read().strip()
    except FileNotFoundError:
        raise FileNotFoundError(
            "You have not set the root path to store your results."
            " Write the path to a path.conf text file in the bin directory"
            " before running the simulation")

    trajectory_name = 'mirroring-and-fitness-shaping'

    paths = Paths(name,
                  dict(run_num='test'),
                  root_dir_path=root_dir_path,
                  suffix="-" + trajectory_name)

    print("All output logs can be found in directory ", paths.logs_path)

    # Create an environment that handles running our simulation
    # This initializes an environment
    env = Environment(
        trajectory=trajectory_name,
        filename=paths.output_dir_path,
        file_title='{} data'.format(name),
        comment='{} data'.format(name),
        add_time=True,
        automatic_storing=True,
        log_stdout=False,  # Sends stdout to logs
    )
    create_shared_logger_data(logger_names=['bin', 'optimizers'],
                              log_levels=['INFO', 'INFO'],
                              log_to_consoles=[True, True],
                              sim_name=name,
                              log_directory=paths.logs_path)
    configure_loggers()

    # Get the trajectory from the environment
    traj = env.trajectory
    # Set JUBE params
    traj.f_add_parameter_group("JUBE_params", "Contains JUBE parameters")
    traj.f_add_parameter_to_group(
        "JUBE_params", "exec", "python " +
        os.path.join(paths.simulation_path, "run_files/run_optimizee.py"))
    # Paths
    traj.f_add_parameter_to_group("JUBE_params", "paths", paths)

    ## Benchmark function
    function_id = 14
    bench_functs = BenchmarkedFunctions()
    (benchmark_name, benchmark_function), benchmark_parameters = \
        bench_functs.get_function_by_index(function_id, noise=True)

    optimizee_seed = 200
    random_state = np.random.RandomState(seed=optimizee_seed)
    function_tools.plot(benchmark_function, random_state)

    ## Innerloop simulator
    optimizee = FunctionGeneratorOptimizee(traj,
                                           benchmark_function,
                                           seed=optimizee_seed)

    # Prepare optimizee for jube runs
    jube.prepare_optimizee(optimizee, paths.simulation_path)

    ## Outerloop optimizer initialization
    optimizer_seed = 1234
    parameters = EvolutionStrategiesParameters(learning_rate=0.1,
                                               noise_std=1.0,
                                               mirrored_sampling_enabled=True,
                                               fitness_shaping_enabled=True,
                                               pop_size=20,
                                               n_iteration=1000,
                                               stop_criterion=np.inf,
                                               seed=optimizer_seed)

    optimizer = EvolutionStrategiesOptimizer(
        traj,
        optimizee_create_individual=optimizee.create_individual,
        optimizee_fitness_weights=(-1., ),
        parameters=parameters,
        optimizee_bounding_func=optimizee.bounding_func)

    # Add post processing
    env.add_postprocessing(optimizer.post_process)

    # Run the simulation with all parameter combinations
    env.run(optimizee.simulate)

    ## Outerloop optimizer end
    optimizer.end(traj)

    # Finally disable logging and close all log-files
    env.disable_logging()

    return traj.v_storage_service.filename, traj.v_name, paths
Example 11
def main():
    # TODO when using the template: Give some *meaningful* name here
    name = 'L2L'

    # TODO when using the template: make a path.conf file and write the root path there
    try:
        with open('bin/path.conf') as f:
            root_dir_path = f.read().strip()
    except FileNotFoundError:
        raise FileNotFoundError(
            "You have not set the root path to store your results."
            " Write the path to a path.conf text file in the bin directory"
            " before running the simulation"
        )
    paths = Paths(name, dict(run_no='test'), root_dir_path=root_dir_path)

    # Load the logging config which tells us where and what to log (loglevel, destination)

    print("All output logs can be found in directory ", paths.logs_path)

    # Create an environment that handles running our simulation
    # This initializes an environment. This environment is based on the Pypet implementation.
    # Uncomment 'freeze_input', 'multiproc', 'use_scoop' and 'wrap_mode' lines to disable running the experiment
    # across cores and nodes.
    env = Environment(trajectory=name, filename=paths.output_dir_path, file_title='{} data'.format(name),
                      comment='{} data'.format(name),
                      add_time=True,
                      freeze_input=False,
                      multiproc=True,
                      automatic_storing=True,
                      log_stdout=False,  # Sends stdout to logs
                      )
    create_shared_logger_data(logger_names=['bin', 'optimizers'],
                              log_levels=['INFO', 'INFO'],
                              log_to_consoles=[True, True],
                              sim_name=name,
                              log_directory=paths.logs_path)
    configure_loggers()

    # Get the trajectory from the environment.
    traj = env.trajectory

    # Set JUBE params
    traj.f_add_parameter_group("JUBE_params", "Contains JUBE parameters")
    # Execution command
    traj.f_add_parameter_to_group("JUBE_params", "exec", "python " +
                                  os.path.join(paths.simulation_path, "run_files/run_optimizee.py"))
    # Paths
    traj.f_add_parameter_to_group("JUBE_params", "paths", paths)

    # Scheduler parameters
    # Name of the scheduler
    traj.f_add_parameter_to_group("JUBE_params", "scheduler", "Slurm")
    # Command to submit jobs to the schedulers
    traj.f_add_parameter_to_group("JUBE_params", "submit_cmd", "sbatch")
    # Template file for the particular scheduler
    traj.f_add_parameter_to_group("JUBE_params", "job_file", "job.run")
    # Number of nodes to request for each run
    traj.f_add_parameter_to_group("JUBE_params", "nodes", "1")
    # Requested time for the compute resources
    traj.f_add_parameter_to_group("JUBE_params", "walltime", "00:01:00")
    # Threads per process
    traj.f_add_parameter_to_group("JUBE_params", "threads_pp", "1")
    # Type of emails to be sent from the scheduler
    traj.f_add_parameter_to_group("JUBE_params", "mail_mode", "ALL")
    # Email to notify events from the scheduler
    traj.f_add_parameter_to_group("JUBE_params", "mail_address", "*****@*****.**")
    # Error file for the job
    traj.f_add_parameter_to_group("JUBE_params", "err_file", "stderr")
    # Output file for the job
    traj.f_add_parameter_to_group("JUBE_params", "out_file", "stdout")
    # JUBE parameters for multiprocessing.
    # MPI Processes per job
    traj.f_add_parameter_to_group("JUBE_params", "tasks_per_job", "1")
    # MPI Processes per node
    traj.f_add_parameter_to_group("JUBE_params", "ppn", "1")
    # CPU cores per MPI process
    traj.f_add_parameter_to_group("JUBE_params", "cpu_pp", "1")

    ## Innerloop simulator
    # TODO when using the template: Change the optimizee to the appropriate Optimizee class
    optimizee = Optimizee(traj)

    # Prepare optimizee for jube runs
    jube.prepare_optimizee(optimizee, paths.simulation_path)

    ## Outerloop optimizer initialization
    # TODO when using the template: Change the optimizer to the appropriate Optimizer class
    # and use the right value for optimizee_fitness_weights. Its length is the number of fitness
    # dimensions; a negative weight implies minimization, a positive one maximization.
    optimizer_parameters = OptimizerParameters()
    optimizer = Optimizer(traj, optimizee.create_individual, (1.0,), optimizer_parameters)

    # Add post processing
    env.add_postprocessing(optimizer.post_process)

    # Run the simulation with all parameter combinations
    env.run(optimizee.simulate)

    ## Outerloop optimizer end
    optimizer.end(traj)

    # Finally disable logging and close all log-files
    env.disable_logging()
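
Every one of these templates reads the results root from bin/path.conf and raises the FileNotFoundError above when it is missing. A one-time setup sketch (the target directory is an arbitrary example, not mandated by the framework):

import os

os.makedirs('bin', exist_ok=True)
with open('bin/path.conf', 'w') as f:
    f.write('/tmp/l2l-results')  # any writable directory works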
Example n. 12
def run_experiment():
    name = 'L2L-MNIST-CE'
    try:
        with open('bin/path.conf') as f:
            root_dir_path = f.read().strip()
    except FileNotFoundError:
        raise FileNotFoundError("You have not set the root path to store your results."
                                " Write the path to a path.conf text file in the bin directory"
                                " before running the simulation")

    trajectory_name = 'small-mnist-full-monty'

    paths = Paths(name, dict(run_num='test'), root_dir_path=root_dir_path, suffix="-" + trajectory_name)

    print("All output logs can be found in directory ", paths.logs_path)

    # Create an environment that handles running our simulation
    # This initializes an environment
    env = Environment(
        trajectory=trajectory_name,
        filename=paths.output_dir_path,
        file_title='{} data'.format(name),
        comment='{} data'.format(name),
        add_time=True,
        automatic_storing=True,
        log_stdout=False,  # Sends stdout to logs
    )
    create_shared_logger_data(
        logger_names=['bin', 'optimizers'],
        log_levels=['INFO', 'INFO'],
        log_to_consoles=[True, True],
        sim_name=name,
        log_directory=paths.logs_path)
    configure_loggers()

    # Get the trajectory from the environment
    traj = env.trajectory

    # Set JUBE params
    traj.f_add_parameter_group("JUBE_params", "Contains JUBE parameters")

    # Scheduler parameters
    # Name of the scheduler
    # traj.f_add_parameter_to_group("JUBE_params", "scheduler", "Slurm")
    # Command to submit jobs to the schedulers
    traj.f_add_parameter_to_group("JUBE_params", "submit_cmd", "sbatch")
    # Template file for the particular scheduler
    traj.f_add_parameter_to_group("JUBE_params", "job_file", "job.run")
    # Number of nodes to request for each run
    traj.f_add_parameter_to_group("JUBE_params", "nodes", "1")
    # Requested time for the compute resources
    traj.f_add_parameter_to_group("JUBE_params", "walltime", "00:01:00")
    # MPI Processes per node
    traj.f_add_parameter_to_group("JUBE_params", "ppn", "1")
    # CPU cores per MPI process
    traj.f_add_parameter_to_group("JUBE_params", "cpu_pp", "1")
    # Threads per process
    traj.f_add_parameter_to_group("JUBE_params", "threads_pp", "1")
    # Type of emails to be sent from the scheduler
    traj.f_add_parameter_to_group("JUBE_params", "mail_mode", "ALL")
    # Email to notify events from the scheduler
    traj.f_add_parameter_to_group("JUBE_params", "mail_address", "*****@*****.**")
    # Error file for the job
    traj.f_add_parameter_to_group("JUBE_params", "err_file", "stderr")
    # Output file for the job
    traj.f_add_parameter_to_group("JUBE_params", "out_file", "stdout")
    # JUBE parameters for multiprocessing. Relevant even without scheduler.
    # MPI Processes per job
    traj.f_add_parameter_to_group("JUBE_params", "tasks_per_job", "1")
    # The execution command
    traj.f_add_parameter_to_group("JUBE_params", "exec", "mpirun python3 " + root_dir_path +
                                  "/run_files/run_optimizee.py")
    # Ready file for a generation
    traj.f_add_parameter_to_group("JUBE_params", "ready_file", root_dir_path + "/readyfiles/ready_w_")
    # Path where the job will be executed
    traj.f_add_parameter_to_group("JUBE_params", "work_path", root_dir_path)

    optimizee_seed = 200

    optimizee_parameters = MNISTOptimizeeParameters(n_hidden=10, seed=optimizee_seed, use_small_mnist=True)
    ## Innerloop simulator
    optimizee = MNISTOptimizee(traj, optimizee_parameters)

    # Prepare optimizee for jube runs
    jube.prepare_optimizee(optimizee, root_dir_path)

    logger.info("Optimizee parameters: %s", optimizee_parameters)

    ## Outerloop optimizer initialization
    optimizer_seed = 1234
    optimizer_parameters = CrossEntropyParameters(pop_size=40, rho=0.9, smoothing=0.0, temp_decay=0, n_iteration=5000,
                                                  distribution=NoisyGaussian(noise_magnitude=1., noise_decay=0.99),
                                                  stop_criterion=np.inf, seed=optimizer_seed)

    logger.info("Optimizer parameters: %s", optimizer_parameters)

    optimizer = CrossEntropyOptimizer(traj, optimizee_create_individual=optimizee.create_individual,
                                      optimizee_fitness_weights=(1.,),
                                      parameters=optimizer_parameters,
                                      optimizee_bounding_func=optimizee.bounding_func)

    # Add post processing
    env.add_postprocessing(optimizer.post_process)

    # Run the simulation with all parameter combinations
    env.run(optimizee.simulate)

    ## Outerloop optimizer end
    optimizer.end(traj)

    # Finally disable logging and close all log-files
    env.disable_logging()

    return traj.v_storage_service.filename, traj.v_name, paths
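
Since run_experiment() hands back the storage filename, the trajectory name and the Paths object, a hypothetical driver script (not part of the original listing) could be as small as:

if __name__ == '__main__':
    filename, traj_name, paths = run_experiment()
    print('Trajectory', traj_name, 'stored in', filename)
    print('Logs in', paths.logs_path)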
Example n. 13
def main():
    name = 'L2L-FunctionGenerator-SA'
    try:
        with open('bin/path.conf') as f:
            root_dir_path = f.read().strip()
    except FileNotFoundError:
        raise FileNotFoundError(
            "You have not set the root path to store your results."
            " Write the path to a path.conf text file in the bin directory"
            " before running the simulation")
    paths = Paths(name, dict(run_no='test'), root_dir_path=root_dir_path)

    print("All output logs can be found in directory ", paths.logs_path)

    traj_file = os.path.join(paths.output_dir_path, 'data.h5')

    # Create an environment that handles running our simulation
    # This initializes an environment
    env = Environment(
        trajectory=name,
        filename=traj_file,
        file_title='{} data'.format(name),
        comment='{} data'.format(name),
        add_time=True,
        # freeze_input=True,
        # multiproc=True,
        # use_scoop=True,
        # wrap_mode=pypetconstants.WRAP_MODE_LOCAL,
        automatic_storing=True,
        log_stdout=False,  # Sends stdout to logs
    )
    create_shared_logger_data(logger_names=['bin', 'optimizers'],
                              log_levels=['INFO', 'INFO'],
                              log_to_consoles=[True, True],
                              sim_name=name,
                              log_directory=paths.logs_path)
    configure_loggers()

    # Get the trajectory from the environment
    traj = env.trajectory

    # Set JUBE params
    traj.f_add_parameter_group("JUBE_params", "Contains JUBE parameters")
    # Execution command
    traj.f_add_parameter_to_group(
        "JUBE_params", "exec", "python " +
        os.path.join(paths.simulation_path, "run_files/run_optimizee.py"))
    # Paths
    traj.f_add_parameter_to_group("JUBE_params", "paths", paths)

    ## Benchmark function
    function_id = 14
    bench_functs = BenchmarkedFunctions()
    (benchmark_name, benchmark_function), benchmark_parameters = \
        bench_functs.get_function_by_index(function_id, noise=True)

    optimizee_seed = 100
    random_state = np.random.RandomState(seed=optimizee_seed)
    function_tools.plot(benchmark_function, random_state)

    ## Innerloop simulator
    optimizee = FunctionGeneratorOptimizee(traj,
                                           benchmark_function,
                                           seed=optimizee_seed)

    # Prepare optimizee for jube runs
    jube.prepare_optimizee(optimizee, paths.simulation_path)

    ## Outerloop optimizer initialization
    parameters = SimulatedAnnealingParameters(
        n_parallel_runs=50,
        noisy_step=.03,
        temp_decay=.99,
        n_iteration=100,
        stop_criterion=np.inf,
        seed=np.random.randint(1e5),
        cooling_schedule=AvailableCoolingSchedules.QUADRATIC_ADDAPTIVE)

    optimizer = SimulatedAnnealingOptimizer(
        traj,
        optimizee_create_individual=optimizee.create_individual,
        optimizee_fitness_weights=(-1, ),
        parameters=parameters,
        optimizee_bounding_func=optimizee.bounding_func)

    # Add post processing
    env.add_postprocessing(optimizer.post_process)

    # Run the simulation with all parameter combinations
    env.run(optimizee.simulate)

    ## Outerloop optimizer end
    optimizer.end(traj)

    # Finally disable logging and close all log-files
    env.disable_logging()
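
For intuition about temp_decay=.99: under a plain exponential schedule the temperature after k iterations is T_k = T0 * decay**k. A standalone sketch of that decay (illustrative only; the QUADRATIC_ADDAPTIVE schedule selected above adapts the temperature per run and is not reproduced here):

T0, decay, n_iteration = 1.0, 0.99, 100
temperatures = [T0 * decay ** k for k in range(n_iteration)]
print(temperatures[0], temperatures[-1])  # 1.0 -> roughly 0.37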
Example n. 14
def run_experiment():
    name = 'L2L-FUN-ES'
    try:
        with open('bin/path.conf') as f:
            root_dir_path = f.read().strip()
    except FileNotFoundError:
        raise FileNotFoundError(
            "You have not set the root path to store your results."
            " Write the path to a path.conf text file in the bin directory"
            " before running the simulation")

    trajectory_name = 'mirroring-and-fitness-shaping'

    paths = Paths(name,
                  dict(run_num='test'),
                  root_dir_path=root_dir_path,
                  suffix="-" + trajectory_name)

    print("All output logs can be found in directory ", paths.logs_path)

    # Create an environment that handles running our simulation
    # This initializes an environment
    env = Environment(
        trajectory=trajectory_name,
        filename=paths.output_dir_path,
        file_title='{} data'.format(name),
        comment='{} data'.format(name),
        add_time=True,
        automatic_storing=True,
        log_stdout=False,  # Sends stdout to logs
    )
    create_shared_logger_data(logger_names=['bin', 'optimizers'],
                              log_levels=['INFO', 'INFO'],
                              log_to_consoles=[True, True],
                              sim_name=name,
                              log_directory=paths.logs_path)
    configure_loggers()

    # Get the trajectory from the environment
    traj = env.trajectory
    # Set JUBE params
    traj.f_add_parameter_group("JUBE_params", "Contains JUBE parameters")

    # Scheduler parameters
    # Name of the scheduler
    # traj.f_add_parameter_to_group("JUBE_params", "scheduler", "Slurm")
    # Command to submit jobs to the schedulers
    traj.f_add_parameter_to_group("JUBE_params", "submit_cmd", "sbatch")
    # Template file for the particular scheduler
    traj.f_add_parameter_to_group("JUBE_params", "job_file", "job.run")
    # Number of nodes to request for each run
    traj.f_add_parameter_to_group("JUBE_params", "nodes", "1")
    # Requested time for the compute resources
    traj.f_add_parameter_to_group("JUBE_params", "walltime", "00:01:00")
    # MPI Processes per node
    traj.f_add_parameter_to_group("JUBE_params", "ppn", "1")
    # CPU cores per MPI process
    traj.f_add_parameter_to_group("JUBE_params", "cpu_pp", "1")
    # Threads per process
    traj.f_add_parameter_to_group("JUBE_params", "threads_pp", "1")
    # Type of emails to be sent from the scheduler
    traj.f_add_parameter_to_group("JUBE_params", "mail_mode", "ALL")
    # Email to notify events from the scheduler
    traj.f_add_parameter_to_group("JUBE_params", "mail_address",
                                  "*****@*****.**")
    # Error file for the job
    traj.f_add_parameter_to_group("JUBE_params", "err_file", "stderr")
    # Output file for the job
    traj.f_add_parameter_to_group("JUBE_params", "out_file", "stdout")
    # JUBE parameters for multiprocessing. Relevant even without scheduler.
    # MPI Processes per job
    traj.f_add_parameter_to_group("JUBE_params", "tasks_per_job", "1")
    # The execution command
    traj.f_add_parameter_to_group(
        "JUBE_params", "exec", "python " +
        os.path.join(paths.root_dir_path, "run_files/run_optimizee.py"))
    # Ready file for a generation
    traj.f_add_parameter_to_group(
        "JUBE_params", "ready_file",
        os.path.join(paths.root_dir_path, "ready_files/ready_w_"))
    # Path where the job will be executed
    traj.f_add_parameter_to_group("JUBE_params", "work_path",
                                  paths.root_dir_path)

    # TODO: consider passing the Paths object around instead of defining paths here and there
    traj.f_add_parameter_to_group("JUBE_params", "paths_obj", paths)

    ## Benchmark function
    function_id = 14
    bench_functs = BenchmarkedFunctions()
    (benchmark_name, benchmark_function), benchmark_parameters = \
        bench_functs.get_function_by_index(function_id, noise=True)

    optimizee_seed = 200
    random_state = np.random.RandomState(seed=optimizee_seed)
    function_tools.plot(benchmark_function, random_state)

    ## Innerloop simulator
    optimizee = FunctionGeneratorOptimizee(traj,
                                           benchmark_function,
                                           seed=optimizee_seed)

    # Prepare optimizee for jube runs
    jube.prepare_optimizee(optimizee, paths.root_dir_path)

    ## Outerloop optimizer initialization
    optimizer_seed = 1234
    parameters = EvolutionStrategiesParameters(learning_rate=0.1,
                                               noise_std=1.0,
                                               mirrored_sampling_enabled=True,
                                               fitness_shaping_enabled=True,
                                               pop_size=20,
                                               n_iteration=1000,
                                               stop_criterion=np.inf,
                                               seed=optimizer_seed)

    optimizer = EvolutionStrategiesOptimizer(
        traj,
        optimizee_create_individual=optimizee.create_individual,
        optimizee_fitness_weights=(-1., ),
        parameters=parameters,
        optimizee_bounding_func=optimizee.bounding_func)

    # Add post processing
    env.add_postprocessing(optimizer.post_process)

    # Run the simulation with all parameter combinations
    env.run(optimizee.simulate)

    ## Outerloop optimizer end
    optimizer.end(traj)

    # Finally disable logging and close all log-files
    env.disable_logging()

    return traj.v_storage_service.filename, traj.v_name, paths
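
The two flags enabled above have simple cores. A self-contained NumPy sketch of one generation with mirrored sampling (each noise vector evaluated with both signs) and rank-based fitness shaping; this illustrates the technique and is not claimed to be the library's exact update rule:

import numpy as np

def es_generation(theta, fitness_fn, pop_size=20, noise_std=1.0,
                  learning_rate=0.1, rng=None):
    rng = rng or np.random.default_rng(1234)
    # Mirrored sampling: draw pop_size/2 noise vectors, use each with both signs.
    eps = rng.standard_normal((pop_size // 2, theta.size))
    eps = np.concatenate([eps, -eps])
    fitness = np.array([fitness_fn(theta + noise_std * e) for e in eps])
    # Fitness shaping: replace raw fitness values by centred ranks in [-0.5, 0.5].
    shaped = fitness.argsort().argsort() / (pop_size - 1) - 0.5
    # Monte-Carlo gradient estimate, then an ascent step on the shaped fitness.
    grad = (shaped[:, None] * eps).mean(axis=0) / noise_std
    return theta + learning_rate * grad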
Example n. 15
def main():
    name = 'L2L-FUN-GD'
    try:
        with open('bin/path.conf') as f:
            root_dir_path = f.read().strip()
    except FileNotFoundError:
        raise FileNotFoundError(
            "You have not set the root path to store your results."
            " Write the path to a path.conf text file in the bin directory"
            " before running the simulation")
    paths = Paths(name, dict(run_no='test'), root_dir_path=root_dir_path)

    print("All output logs can be found in directory ", paths.logs_path)

    traj_file = os.path.join(paths.output_dir_path, 'data.h5')

    # Create an environment that handles running our simulation
    # This initializes an environment
    env = Environment(
        trajectory=name,
        filename=traj_file,
        file_title='{} data'.format(name),
        comment='{} data'.format(name),
        add_time=True,
        freeze_input=True,
        multiproc=True,
        automatic_storing=True,
        log_stdout=False,  # Sends stdout to logs
    )

    create_shared_logger_data(logger_names=['bin', 'optimizers'],
                              log_levels=['INFO', 'INFO'],
                              log_to_consoles=[True, True],
                              sim_name=name,
                              log_directory=paths.logs_path)
    configure_loggers()

    # Get the trajectory from the environment
    traj = env.trajectory

    # Set JUBE params
    traj.f_add_parameter_group("JUBE_params", "Contains JUBE parameters")

    # Scheduler parameters
    # Name of the scheduler
    # traj.f_add_parameter_to_group("JUBE_params", "scheduler", "Slurm")
    # Command to submit jobs to the schedulers
    traj.f_add_parameter_to_group("JUBE_params", "submit_cmd", "sbatch")
    # Template file for the particular scheduler
    traj.f_add_parameter_to_group("JUBE_params", "job_file", "job.run")
    # Number of nodes to request for each run
    traj.f_add_parameter_to_group("JUBE_params", "nodes", "1")
    # Requested time for the compute resources
    traj.f_add_parameter_to_group("JUBE_params", "walltime", "00:01:00")
    # MPI Processes per node
    traj.f_add_parameter_to_group("JUBE_params", "ppn", "1")
    # CPU cores per MPI process
    traj.f_add_parameter_to_group("JUBE_params", "cpu_pp", "1")
    # Threads per process
    traj.f_add_parameter_to_group("JUBE_params", "threads_pp", "1")
    # Type of emails to be sent from the scheduler
    traj.f_add_parameter_to_group("JUBE_params", "mail_mode", "ALL")
    # Email to notify events from the scheduler
    traj.f_add_parameter_to_group("JUBE_params", "mail_address",
                                  "*****@*****.**")
    # Error file for the job
    traj.f_add_parameter_to_group("JUBE_params", "err_file", "stderr")
    # Output file for the job
    traj.f_add_parameter_to_group("JUBE_params", "out_file", "stdout")
    # JUBE parameters for multiprocessing. Relevant even without scheduler.
    # MPI Processes per job
    traj.f_add_parameter_to_group("JUBE_params", "tasks_per_job", "1")
    # The execution command
    traj.f_add_parameter_to_group(
        "JUBE_params", "exec",
        "mpirun python3 " + root_dir_path + "/run_files/run_optimizee.py")
    # Ready file for a generation
    traj.f_add_parameter_to_group("JUBE_params", "ready_file",
                                  root_dir_path + "/readyfiles/ready_w_")
    # Path where the job will be executed
    traj.f_add_parameter_to_group("JUBE_params", "work_path", root_dir_path)

    ## Benchmark function
    function_id = 4
    bench_functs = BenchmarkedFunctions()
    (benchmark_name, benchmark_function), benchmark_parameters = \
        bench_functs.get_function_by_index(function_id, noise=True)

    optimizee_seed = 100
    random_state = np.random.RandomState(seed=optimizee_seed)
    function_tools.plot(benchmark_function, random_state)

    ## Innerloop simulator
    optimizee = FunctionGeneratorOptimizee(traj,
                                           benchmark_function,
                                           seed=optimizee_seed)

    # Prepare optimizee for jube runs
    jube.prepare_optimizee(optimizee, root_dir_path)

    ## Outerloop optimizer initialization
    # Alternative gradient-descent parameter sets; swap one in for RMSProp below:
    # parameters = ClassicGDParameters(learning_rate=0.01, exploration_step_size=0.01,
    #                                  n_random_steps=5, n_iteration=100,
    #                                  stop_criterion=np.inf)
    # parameters = AdamParameters(learning_rate=0.01, exploration_step_size=0.01, n_random_steps=5, first_order_decay=0.8,
    #                             second_order_decay=0.8, n_iteration=100, stop_criterion=np.inf)
    # parameters = StochasticGDParameters(learning_rate=0.01, stochastic_deviation=1, stochastic_decay=0.99,
    #                                     exploration_step_size=0.01, n_random_steps=5, n_iteration=100,
    #                                     stop_criterion=np.inf)
    parameters = RMSPropParameters(learning_rate=0.01,
                                   exploration_step_size=0.01,
                                   n_random_steps=5,
                                   momentum_decay=0.5,
                                   n_iteration=100,
                                   stop_criterion=np.inf,
                                   seed=99)

    optimizer = GradientDescentOptimizer(
        traj,
        optimizee_create_individual=optimizee.create_individual,
        optimizee_fitness_weights=(0.1, ),
        parameters=parameters,
        optimizee_bounding_func=optimizee.bounding_func)

    # Add post processing
    env.add_postprocessing(optimizer.post_process)

    # Run the simulation with all parameter combinations
    env.run(optimizee.simulate)

    ## Outerloop optimizer end
    optimizer.end(traj)

    # Finally disable logging and close all log-files
    env.disable_logging()
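
For reference, the textbook RMSProp update that the parameters above plug into, reading momentum_decay as the decay of the squared-gradient average; whether the optimizer follows exactly this form is an assumption:

import numpy as np

def rmsprop_step(theta, grad, mean_sq, learning_rate=0.01, decay=0.5, eps=1e-8):
    # Exponential moving average of the squared gradient.
    mean_sq = decay * mean_sq + (1 - decay) * grad ** 2
    # Step scaled by the root of that average (an ascent step here, matching
    # the positive fitness weight used above).
    theta = theta + learning_rate * grad / (np.sqrt(mean_sq) + eps)
    return theta, mean_sq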
Example n. 16
def main():
    name = 'L2L-FUNALL'
    try:
        with open('bin/path.conf') as f:
            root_dir_path = f.read().strip()
    except FileNotFoundError:
        raise FileNotFoundError(
            "You have not set the root path to store your results."
            " Write the path to a path.conf text file in the bin directory"
            " before running the simulation"
        )
    paths = Paths(name, dict(run_no='test'), root_dir_path=root_dir_path)

    print("All output logs can be found in directory ", paths.logs_path)

    traj_file = os.path.join(paths.output_dir_path, 'data.h5')

    n_iterations = 100

    # NOTE: Use lambdas here so that the distributions within the CE, FACE, etc. optimizers
    #  are reinitialized afresh each time; they appear to be stateful.
    optimizers = [
        (CrossEntropyOptimizer,
         lambda: CrossEntropyParameters(pop_size=50, rho=0.2, smoothing=0.0, temp_decay=0,
                                        n_iteration=n_iterations,
                                        distribution=NoisyGaussian(noise_decay=0.95, noise_bias=0.05))),
        (FACEOptimizer,
         lambda: FACEParameters(min_pop_size=20, max_pop_size=50, n_elite=10, smoothing=0.2, temp_decay=0,
                                n_iteration=n_iterations, distribution=Gaussian(), n_expand=5)),
        (GradientDescentOptimizer,
         lambda: RMSPropParameters(learning_rate=0.01, exploration_rate=0.01, n_random_steps=5, momentum_decay=0.5,
                                   n_iteration=n_iterations, stop_criterion=np.inf)),
        (GradientDescentOptimizer,
         lambda: ClassicGDParameters(learning_rate=0.01, exploration_rate=0.01, n_random_steps=5,
                                     n_iteration=n_iterations, stop_criterion=np.inf)),
        (GradientDescentOptimizer,
         lambda: AdamParameters(learning_rate=0.01, exploration_rate=0.01, n_random_steps=5, first_order_decay=0.8,
                                second_order_decay=0.8, n_iteration=n_iterations, stop_criterion=np.inf)),
        (GradientDescentOptimizer,
         lambda: StochasticGDParameters(learning_rate=0.01, stochastic_deviation=1, stochastic_decay=0.99,
                                        exploration_rate=0.01, n_random_steps=5, n_iteration=n_iterations,
                                        stop_criterion=np.inf))
    ]

    # NOTE: Benchmark functions
    bench_functs = BenchmarkedFunctions()
    function_ids = range(len(bench_functs.function_name_map))

    for function_id, (optimizer_class, optimizer_parameters_fn) in itertools.product(function_ids, optimizers):
        logger.info("Running benchmark for %s optimizer and function id %d", optimizer_class, function_id)
        optimizer_parameters = optimizer_parameters_fn()

        # Create an environment that handles running our simulation
        # This initializes an environment
        env = Environment(trajectory=name, filename=traj_file, file_title='{} data'.format(name),
                          comment='{} data'.format(name),
                          # freeze_input=True,
                          # multiproc=True,
                          # use_scoop=True,
                          # wrap_mode=pypetconstants.WRAP_MODE_LOCAL,
                          add_time=True,
                          automatic_storing=True,
                          log_stdout=False,  # Sends stdout to logs
                          )
        create_shared_logger_data(logger_names=['bin', 'optimizers'],
                                  log_levels=['INFO', 'INFO'],
                                  log_to_consoles=[True, True],
                                  sim_name=name,
                                  log_directory=paths.logs_path)
        configure_loggers()

        # Get the trajectory from the environment
        traj = env.trajectory

        (benchmark_name, benchmark_function), benchmark_parameters = \
            bench_functs.get_function_by_index(function_id, noise=True)

        optimizee = FunctionGeneratorOptimizee(traj, benchmark_function)

        optimizee_fitness_weights = -1.
        # Gradient descent performs descent, so it needs a positive fitness weight!
        if optimizer_class == GradientDescentOptimizer:
            optimizee_fitness_weights = +1.
        # Grid search optimizer input depends on optimizee!
        elif optimizer_class == GridSearchOptimizer:
            optimizer_parameters = GridSearchParameters(param_grid={
                'coords': (optimizee.bound[0], optimizee.bound[1], 30)
            })
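            # Note: GridSearchOptimizer is not part of the `optimizers` list above,
            #  so this branch only applies if it is added there.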

        optimizer = optimizer_class(traj, optimizee_create_individual=optimizee.create_individual,
                                    optimizee_fitness_weights=(optimizee_fitness_weights,),
                                    parameters=optimizer_parameters,
                                    optimizee_bounding_func=optimizee.bounding_func)

        # Add post processing
        env.add_postprocessing(optimizer.post_process)

        # Run the simulation with all parameter combinations
        env.run(optimizee.simulate)

        # NOTE: Outerloop optimizer end
        optimizer.end(traj)

        # Finally disable logging and close all log-files
        env.disable_logging()
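
itertools.product drives the sweep above: every (function_id, optimizer) pair gets its own environment and run. A toy illustration of the resulting iteration order:

import itertools

for fid, opt in itertools.product(range(2), ['CE', 'FACE']):
    print(fid, opt)  # 0 CE, 0 FACE, 1 CE, 1 FACE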
Example n. 17
def main():
    name = 'L2L-FUN-GD'
    try:
        with open('bin/path.conf') as f:
            root_dir_path = f.read().strip()
    except FileNotFoundError:
        raise FileNotFoundError(
            "You have not set the root path to store your results."
            " Write the path to a path.conf text file in the bin directory"
            " before running the simulation")
    paths = Paths(name, dict(run_no='test'), root_dir_path=root_dir_path)

    print("All output logs can be found in directory ", paths.logs_path)

    traj_file = os.path.join(paths.output_dir_path, 'data.h5')

    # Create an environment that handles running our simulation
    # This initializes an environment
    env = Environment(
        trajectory=name,
        filename=traj_file,
        file_title='{} data'.format(name),
        comment='{} data'.format(name),
        add_time=True,
        freeze_input=True,
        multiproc=True,
        automatic_storing=True,
        log_stdout=False,  # Sends stdout to logs
    )

    create_shared_logger_data(logger_names=['bin', 'optimizers'],
                              log_levels=['INFO', 'INFO'],
                              log_to_consoles=[True, True],
                              sim_name=name,
                              log_directory=paths.logs_path)
    configure_loggers()

    # Get the trajectory from the environment
    traj = env.trajectory

    # Set JUBE params
    traj.f_add_parameter_group("JUBE_params", "Contains JUBE parameters")
    # Execution command
    traj.f_add_parameter_to_group(
        "JUBE_params", "exec", "python " +
        os.path.join(paths.simulation_path, "run_files/run_optimizee.py"))
    # Paths
    traj.f_add_parameter_to_group("JUBE_params", "paths", paths)

    ## Benchmark function
    function_id = 4
    bench_functs = BenchmarkedFunctions()
    (benchmark_name, benchmark_function), benchmark_parameters = \
        bench_functs.get_function_by_index(function_id, noise=True)

    optimizee_seed = 100
    random_state = np.random.RandomState(seed=optimizee_seed)
    function_tools.plot(benchmark_function, random_state)

    ## Innerloop simulator
    optimizee = FunctionGeneratorOptimizee(traj,
                                           benchmark_function,
                                           seed=optimizee_seed)

    # Prepare optimizee for jube runs
    jube.prepare_optimizee(optimizee, paths.simulation_path)

    ## Outerloop optimizer initialization
    # Alternative gradient-descent parameter sets; swap one in for RMSProp below:
    # parameters = ClassicGDParameters(learning_rate=0.01, exploration_step_size=0.01,
    #                                  n_random_steps=5, n_iteration=100,
    #                                  stop_criterion=np.inf)
    # parameters = AdamParameters(learning_rate=0.01, exploration_step_size=0.01, n_random_steps=5, first_order_decay=0.8,
    #                             second_order_decay=0.8, n_iteration=100, stop_criterion=np.inf)
    # parameters = StochasticGDParameters(learning_rate=0.01, stochastic_deviation=1, stochastic_decay=0.99,
    #                                     exploration_step_size=0.01, n_random_steps=5, n_iteration=100,
    #                                     stop_criterion=np.inf)
    parameters = RMSPropParameters(learning_rate=0.01,
                                   exploration_step_size=0.01,
                                   n_random_steps=5,
                                   momentum_decay=0.5,
                                   n_iteration=100,
                                   stop_criterion=np.inf,
                                   seed=99)

    optimizer = GradientDescentOptimizer(
        traj,
        optimizee_create_individual=optimizee.create_individual,
        optimizee_fitness_weights=(0.1, ),
        parameters=parameters,
        optimizee_bounding_func=optimizee.bounding_func)

    # Add post processing
    env.add_postprocessing(optimizer.post_process)

    # Run the simulation with all parameter combinations
    env.run(optimizee.simulate)

    ## Outerloop optimizer end
    optimizer.end(traj)

    # Finally disable logging and close all log-files
    env.disable_logging()
Example n. 18
def run_experiment():
    name = 'L2L-MNIST-CE'
    try:
        with open('bin/path.conf') as f:
            root_dir_path = f.read().strip()
    except FileNotFoundError:
        raise FileNotFoundError(
            "You have not set the root path to store your results."
            " Write the path to a path.conf text file in the bin directory"
            " before running the simulation")

    trajectory_name = 'small-mnist-full-monty'

    paths = Paths(name,
                  dict(run_num='test'),
                  root_dir_path=root_dir_path,
                  suffix="-" + trajectory_name)

    print("All output logs can be found in directory ", paths.logs_path)

    # Create an environment that handles running our simulation
    # This initializes an environment
    env = Environment(
        trajectory=trajectory_name,
        filename=paths.output_dir_path,
        file_title='{} data'.format(name),
        comment='{} data'.format(name),
        add_time=True,
        automatic_storing=True,
        log_stdout=False,  # Sends stdout to logs
    )
    create_shared_logger_data(logger_names=['bin', 'optimizers'],
                              log_levels=['INFO', 'INFO'],
                              log_to_consoles=[True, True],
                              sim_name=name,
                              log_directory=paths.logs_path)
    configure_loggers()

    # Get the trajectory from the environment
    traj = env.trajectory

    # Set JUBE params
    traj.f_add_parameter_group("JUBE_params", "Contains JUBE parameters")
    # Execution command
    traj.f_add_parameter_to_group(
        "JUBE_params", "exec", "python " +
        os.path.join(paths.simulation_path, "run_files/run_optimizee.py"))
    # Paths
    traj.f_add_parameter_to_group("JUBE_params", "paths", paths)

    optimizee_seed = 200

    optimizee_parameters = MNISTOptimizeeParameters(n_hidden=10,
                                                    seed=optimizee_seed,
                                                    use_small_mnist=True)
    ## Innerloop simulator
    optimizee = MNISTOptimizee(traj, optimizee_parameters)

    # Prepare optimizee for jube runs
    jube.prepare_optimizee(optimizee, paths.simulation_path)

    logger.info("Optimizee parameters: %s", optimizee_parameters)

    ## Outerloop optimizer initialization
    optimizer_seed = 1234
    optimizer_parameters = CrossEntropyParameters(pop_size=40,
                                                  rho=0.9,
                                                  smoothing=0.0,
                                                  temp_decay=0,
                                                  n_iteration=5000,
                                                  distribution=NoisyGaussian(
                                                      noise_magnitude=1.,
                                                      noise_decay=0.99),
                                                  stop_criterion=np.inf,
                                                  seed=optimizer_seed)

    logger.info("Optimizer parameters: %s", optimizer_parameters)

    optimizer = CrossEntropyOptimizer(
        traj,
        optimizee_create_individual=optimizee.create_individual,
        optimizee_fitness_weights=(1., ),
        parameters=optimizer_parameters,
        optimizee_bounding_func=optimizee.bounding_func)

    # Add post processing
    env.add_postprocessing(optimizer.post_process)

    # Run the simulation with all parameter combinations
    env.run(optimizee.simulate)

    ## Outerloop optimizer end
    optimizer.end(traj)

    # Finally disable logging and close all log-files
    env.disable_logging()

    return traj.v_storage_service.filename, traj.v_name, paths
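
As a closing note on the method used in the cross-entropy examples: a cross-entropy optimizer fits its sampling distribution to the best-scoring fraction of each population and draws the next population from that fit. A generic, self-contained sketch under the assumption that rho denotes the kept fraction (the library's smoothing and noise-decay handling are not reproduced):

import numpy as np

def ce_step(mean, std, score_fn, pop_size=40, rho=0.9, rng=None):
    rng = rng or np.random.default_rng(1234)
    # Sample a population around the current Gaussian.
    pop = rng.normal(mean, std, size=(pop_size, mean.size))
    scores = np.array([score_fn(x) for x in pop])
    # Keep the top rho fraction and refit the Gaussian to it.
    n_elite = max(1, int(rho * pop_size))
    elite = pop[np.argsort(scores)[-n_elite:]]
    return elite.mean(axis=0), elite.std(axis=0)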