Example #1
def main():

    filename = os.path.join('hdf5', 'FiringRate.hdf5')
    env = Environment(
        trajectory='FiringRate',
        comment='Experiment to measure the firing rate '
        'of a leaky integrate and fire neuron. '
        'Exploring different input currents, '
        'as well as refractory periods',
        add_time=False,  # We don't want to add the current time to the name,
        log_stdout=True,
        log_config='DEFAULT',
        multiproc=True,
        ncores=4,  #I think my laptop has 4 cores
        git_repository='/home/pinolej/th',
        wrap_mode='QUEUE',
        filename=filename,
        overwrite_file=True)

    traj = env.trajectory

    # Add parameters
    add_parameters(traj)

    # Let's explore
    add_exploration(traj)

    # Add the postprocessing function
    env.add_postprocessing(neuron_postproc)

    # Run the experiment
    env.run(run_neuron)

    # Finally disable logging and close all log-files
    env.disable_logging()
Example #2
File: main.py Project: MehmetTimur/pypet
def main():

    filename = os.path.join('hdf5', 'FiringRate.hdf5')
    env = Environment(trajectory='FiringRate',
                      comment='Experiment to measure the firing rate '
                            'of a leaky integrate and fire neuron. '
                            'Exploring different input currents, '
                            'as well as refractory periods',
                      add_time=False, # We don't want to add the current time to the name,
                      log_stdout=True,
                      log_config='DEFAULT',
                      multiproc=True,
                      ncores=2, #My laptop has 2 cores ;-)
                      wrap_mode='QUEUE',
                      filename=filename,
                      overwrite_file=True)

    traj = env.trajectory

    # Add parameters
    add_parameters(traj)

    # Let's explore
    add_exploration(traj)

    # Add the postprocessing function
    env.add_postprocessing(neuron_postproc)

    # Run the experiment
    env.run(run_neuron)

    # Finally disable logging and close all log-files
    env.disable_logging()
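Examples #1 and #2 rely on helper functions (add_parameters, add_exploration, run_neuron, neuron_postproc) defined elsewhere in the pypet FiringRate tutorial. The rough sketch below only illustrates the expected signatures and the standard pypet calls involved; the parameter names and values here are assumptions, not the original implementations.

from pypet import cartesian_product


def add_parameters(traj):
    # Illustrative neuron and simulation parameters (assumed names/values).
    traj.f_add_parameter('neuron.V_init', 0.0, comment='Initial membrane potential')
    traj.f_add_parameter('neuron.I', 0.0, comment='Constant input current')
    traj.f_add_parameter('neuron.ref', 5.0, comment='Refractory period in ms')
    traj.f_add_parameter('simulation.duration', 1000.0, comment='Duration in ms')


def add_exploration(traj):
    # Grid over input currents and refractory periods, as the Environment
    # comment suggests.
    explore_dict = {'neuron.I': [0.2, 0.5, 0.8],
                    'neuron.ref': [5.0, 7.5, 10.0]}
    traj.f_explore(cartesian_product(explore_dict))


def run_neuron(traj):
    # Placeholder simulation; a real implementation would integrate the
    # leaky integrate-and-fire dynamics and count spikes.
    firing_rate = 0.0
    traj.f_add_result('$.firing_rate', firing_rate, comment='Firing rate of this run')
    return firing_rate


def neuron_postproc(traj, result_list):
    # Post-processing hook; receives (run index, result) pairs and may expand
    # the trajectory with further exploration.
    pass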
Example #3
File: main.py Project: fontaine618/NAIVI
def main():
    # pypet environment
    env = Environment(trajectory=SIM_NAME,
                      comment="Experiment on density with binary covariates",
                      log_config=None,
                      multiproc=False,
                      ncores=1,
                      filename=SIM_PATH + "/results/",
                      overwrite_file=True)
    traj = env.trajectory

    # parameters (data generation)

    traj.f_add_parameter("data.N", np.int64(500), "Number of nodes")
    traj.f_add_parameter("data.K", np.int64(5),
                         "True number of latent components")
    traj.f_add_parameter("data.p_cts", np.int64(0),
                         "Number of continuous covariates")
    traj.f_add_parameter("data.p_bin", np.int64(0),
                         "Number of binary covariates")
    traj.f_add_parameter("data.var_adj", np.float64(1.),
                         "True variance in the link Probit model")
    traj.f_add_parameter("data.var_cov", np.float64(1.),
                         "True variance in the covariate model (cts and bin)")
    traj.f_add_parameter("data.missing_rate", np.float64(0.1), "Missing rate")
    traj.f_add_parameter("data.seed", np.int64(1), "Random seed")
    traj.f_add_parameter("data.alpha_mean", np.float64(-1.85),
                         "Mean of the heterogeneity parameter")

    # parameters (model)
    traj.f_add_parameter("model.K", np.int64(5),
                         "Number of latent components in the model")
    traj.f_add_parameter("model.adj_model", "Logistic", "Adjacency model")
    traj.f_add_parameter("model.bin_model", "Logistic",
                         "Binary covariate model")

    # parameters (fit)
    traj.f_add_parameter("fit.n_iter", np.int64(20),
                         "Number of VEM iterations")
    traj.f_add_parameter("fit.n_vmp", np.int64(5),
                         "Number of VMP iterations per E-step")
    traj.f_add_parameter("fit.n_gd", np.int64(5),
                         "Number of GD iterations per M-step")
    traj.f_add_parameter("fit.step_size", np.float64(0.01), "GD Step size")

    # experiment
    explore_dict = {
        "data.alpha_mean":
        np.array([-3.2, -2.8, -2.4, -2., -1.6, -1.2, -0.8, -0.4, 0.0, 0.4]),
        "data.p_bin":
        np.array([10, 100, 500]),
        "data.seed":
        np.arange(0, 100, 1)
    }
    experiment = cartesian_product(explore_dict, tuple(explore_dict.keys()))
    traj.f_explore(experiment)

    env.add_postprocessing(post_processing)
    env.run(run)
    env.disable_logging()
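The run and post_processing callables referenced above are defined elsewhere in the NAIVI project; pypet simply hands each of them the trajectory with the current run's parameter combination already set. A minimal, hypothetical sketch of that calling convention (the stored names and values are placeholders):

def run(traj):
    # Parameters are reachable through pypet's natural naming.
    N = traj.data.N
    p_bin = traj.data.p_bin
    seed = traj.data.seed
    # ... generate data, fit the model, compute metrics ...
    metrics = {'dummy_metric': 0.0}
    # Store results under the current run ('$' expands to the run name).
    traj.f_add_result('$.metrics', metrics, comment='Metrics for this run')
    return metrics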
Example #4
File: main.py Project: fontaine618/NAIVI
def main(path, name, explore_dict):
    comment = "\n".join(
        ["{}: {}".format(k, v) for k, v in explore_dict.items()])
    # pypet environment
    env = Environment(trajectory=name,
                      comment=comment,
                      log_config=None,
                      multiproc=False,
                      ncores=1,
                      filename=path + name + "/results/",
                      overwrite_file=True)
    traj = env.trajectory
    traj.f_add_parameter("path", path + name, "Path")

    # parameters (data generation)
    traj.f_add_parameter("data.N", np.int64(500), "Number of nodes")
    traj.f_add_parameter("data.K", np.int64(5),
                         "True number of latent components")
    traj.f_add_parameter("data.p_cts", np.int64(0),
                         "Number of continuous covariates")
    traj.f_add_parameter("data.p_bin", np.int64(0),
                         "Number of binary covariates")
    traj.f_add_parameter("data.var_cov", np.float64(1.),
                         "True variance in the covariate model (cts and bin)")
    traj.f_add_parameter("data.missing_rate", np.float64(0.1), "Missing rate")
    traj.f_add_parameter("data.seed", np.int64(1), "Random seed")
    traj.f_add_parameter("data.center", np.int64(1), "Ego-network center")
    traj.f_add_parameter("data.alpha_mean", np.float64(-1.85),
                         "Mean of the heterogeneity parameter")

    # parameters (model)
    traj.f_add_parameter("model.K", np.int64(5),
                         "Number of latent components in the model")

    # parameters (fit)
    traj.f_add_parameter("fit.algo", "MLE", "Inference algorithm")
    traj.f_add_parameter("fit.max_iter", np.int64(500),
                         "Number of VEM iterations")
    traj.f_add_parameter("fit.n_sample", np.int64(1),
                         "Number of samples for VIMC")
    traj.f_add_parameter("fit.eps", np.float64(1.0e-6),
                         "convergence threshold")
    traj.f_add_parameter("fit.lr", np.float64(0.01), "GD Step size")

    # experiment
    experiment = cartesian_product(explore_dict, tuple(explore_dict.keys()))
    traj.f_explore(experiment)
    env.add_postprocessing(post_processing)
    env.run(run)
    env.disable_logging()
Example #5
def main(dependent, optimizer):
    opt = optimizer.upper()
    identifier = '{:05x}'.format(np.random.randint(16**5))
    print('Identifier: ' + identifier)
    allocated_id = '07'  # dls.get_allocated_board_ids()[0]
    board_calibration_map = {
        'B291698': {
            'dac': 'dac_default.json',
            'cap': 'cap_mem_29.json'
        },
        '07': {
            'dac': 'dac_07_chip_20.json',
            'cap': 'calibration_20.json'
        },
        'B201319': {
            'dac': 'dac_B201319_chip_21.json',
            'cap': 'calibration_24.json'
        },
        'B201330': {
            'dac': 'dac_B201330_chip_22.json',
            'cap': 'calibration_22.json'
        }
    }

    dep_name = 'DEP' if dependent else 'IND'
    name = 'MAB_ANN_{}_{}_{}'.format(identifier, opt, dep_name)
    root_dir_path = os.path.expanduser('~/simulations')
    paths = Paths(name, dict(run_no=u'test'), root_dir_path=root_dir_path)

    with open(os.path.expanduser('~/LTL/bin/logging.yaml')) as f:
        l_dict = yaml.load(f)
        log_output_file = os.path.join(paths.results_path,
                                       l_dict['handlers']['file']['filename'])
        l_dict['handlers']['file']['filename'] = log_output_file
        logging.config.dictConfig(l_dict)

    print("All output logs can be found in directory " + str(paths.logs_path))

    traj_file = os.path.join(paths.output_dir_path, u'data.h5')

    # Create an environment that handles running our simulation
    # This initializes a PyPet environment
    env = Environment(
        trajectory=name,
        filename=traj_file,
        file_title=u'{} data'.format(name),
        comment=u'{} data'.format(name),
        add_time=True,
        # freeze_input=True,
        # multiproc=True,
        # use_scoop=True,
        wrap_mode=pypetconstants.WRAP_MODE_LOCK,
        automatic_storing=True,
        log_stdout=False,  # Sends stdout to logs
        log_folder=os.path.join(paths.output_dir_path, 'logs'))
    create_shared_logger_data(logger_names=['bin', 'optimizers', 'optimizees'],
                              log_levels=['INFO', 'INFO', 'INFO'],
                              log_to_consoles=[True, True, True],
                              sim_name=name,
                              log_directory=paths.logs_path)
    configure_loggers()

    # Get the trajectory from the environment
    traj = env.trajectory

    optimizee_seed = 100

    with open('../adv/' + board_calibration_map[allocated_id]['cap']) as f:
        calibrated_config = json.load(f)
    with open('../adv/' + board_calibration_map[allocated_id]['dac']) as f:
        dac_config = json.load(f)

    class Dummy(object):
        def __init__(self, connector):
            self.connector = connector

        def __enter__(self):
            return self.connector

        def __exit__(self, exc_type, exc_val, exc_tb):
            pass

    class Mgr(object):
        def __init__(self):
            self.connector = None

        def establish(self):
            return Dummy(self.connector)

    max_learning_rate = 1.

    mgr = Mgr()
    optimizee_parameters = \
        BanditParameters(n_arms=2, n_pulls=100, n_samples=40, seed=optimizee_seed,
                         max_learning_rate=max_learning_rate, learning_rule=ANNLearningRule,
                         establish_connection=mgr.establish)
    optimizee = BanditOptimizee(traj, optimizee_parameters, dp=dependent)

    # Add post processing
    optimizer = None
    pop_size = 200
    n_iteration = 60
    if opt == 'CE':
        ce_optimizer_parameters = CrossEntropyParameters(
            pop_size=pop_size,
            rho=0.06,
            smoothing=0.3,
            temp_decay=0,
            n_iteration=n_iteration,
            distribution=NoisyGaussian(noise_magnitude=.2, noise_decay=.925),
            #Gaussian(),#NoisyGaussian(noise_magnitude=1., noise_decay=0.99),
            stop_criterion=np.inf,
            seed=102)
        ce_optimizer = CrossEntropyOptimizer(
            traj,
            optimizee_create_individual=optimizee.create_individual,
            optimizee_fitness_weights=(1, ),
            parameters=ce_optimizer_parameters,
            optimizee_bounding_func=optimizee.bounding_func)
        optimizer = ce_optimizer
    elif opt == 'ES':
        es_optimizer_parameters = EvolutionStrategiesParameters(
            learning_rate=1.8,
            learning_rate_decay=.93,
            noise_std=.03,
            mirrored_sampling_enabled=True,
            fitness_shaping_enabled=True,
            pop_size=int(pop_size / 2),
            n_iteration=n_iteration,
            stop_criterion=np.inf,
            seed=102)
        optimizer = EvolutionStrategiesOptimizer(traj,
                                                 optimizee.create_individual,
                                                 (1, ),
                                                 es_optimizer_parameters,
                                                 optimizee.bounding_func)
    elif opt == 'GD':
        gd_parameters = ClassicGDParameters(learning_rate=.003,
                                            exploration_step_size=.1,
                                            n_random_steps=pop_size,
                                            n_iteration=n_iteration,
                                            stop_criterion=np.inf,
                                            seed=102)
        optimizer = GradientDescentOptimizer(traj, optimizee.create_individual,
                                             (1, ), gd_parameters,
                                             optimizee.bounding_func)
    elif opt == 'SA':
        sa_parameters = SimulatedAnnealingParameters(
            n_parallel_runs=pop_size,
            noisy_step=.1,
            temp_decay=.9,
            n_iteration=n_iteration,
            stop_criterion=np.inf,
            seed=102,
            cooling_schedule=AvailableCoolingSchedules.EXPONENTIAL_ADDAPTIVE)
        optimizer = SimulatedAnnealingOptimizer(traj,
                                                optimizee.create_individual,
                                                (1, ), sa_parameters,
                                                optimizee.bounding_func)
    elif opt == 'GS':
        n_grid_points = 5
        gs_optimizer_parameters = GridSearchParameters(
            param_grid={
                'weight_prior': (0, 1, n_grid_points),
                'learning_rate': (0, 1, n_grid_points),
                'stim_inhibition': (0, 1, n_grid_points),
                'action_inhibition': (0, 1, n_grid_points),
                'learning_rate_decay': (0, 1, n_grid_points)
            })
        gs_optimizer = GridSearchOptimizer(
            traj,
            optimizee_create_individual=optimizee.create_individual,
            optimizee_fitness_weights=(1, ),
            parameters=gs_optimizer_parameters)
        optimizer = gs_optimizer
    else:
        exit(1)
    env.add_postprocessing(optimizer.post_process)

    # Add Recorder
    recorder = Recorder(trajectory=traj,
                        optimizee_name='MAB',
                        optimizee_parameters=optimizee_parameters,
                        optimizer_name=optimizer.__class__.__name__,
                        optimizer_parameters=optimizer.get_params())
    recorder.start()

    # Run the simulation with all parameter combinations
    # optimizee.simulate(traj)
    # exit(0)
    with Connector(calibrated_config, dac_config, 3) as connector:
        mgr.connector = connector
        env.run(optimizee.simulate)
    mgr.connector.disconnect()

    ## Outerloop optimizer end
    optimizer.end(traj)
    recorder.end()

    # Finally disable logging and close all log-files
    env.disable_logging()
Example #6
def main(path_name, resolution, fixed_delay, use_pecevski, num_trials):
    name = path_name
    try:
        with open('bin/path.conf') as f:
            root_dir_path = f.read().strip()
    except FileNotFoundError:
        raise FileNotFoundError(
            "You have not set the root path to store your results."
            " Write the path to a path.conf text file in the bin directory"
            " before running the simulation")
    paths = Paths(name, dict(run_no='test'), root_dir_path=root_dir_path)

    traj_file = os.path.join(paths.output_dir_path, 'data.h5')

    # Create an environment that handles running our simulation
    # This initializes a PyPet environment
    env = Environment(
        trajectory=name,
        filename=traj_file,
        file_title='{} data'.format(name),
        comment='{} data'.format(name),
        add_time=True,
        automatic_storing=True,
        use_scoop=True,
        multiproc=True,
        wrap_mode=pypetconstants.WRAP_MODE_LOCAL,
        log_stdout=False,  # Sends stdout to logs
    )

    create_shared_logger_data(logger_names=['bin', 'optimizers'],
                              log_levels=['INFO', 'INFO'],
                              log_to_consoles=[True, True],
                              sim_name=name,
                              log_directory=paths.logs_path)
    configure_loggers()

    # Get the trajectory from the environment
    traj = env.trajectory

    # NOTE: Innerloop simulator
    optimizee = SAMOptimizee(traj,
                             use_pecevski=use_pecevski,
                             n_NEST_threads=1,
                             time_resolution=resolution,
                             fixed_delay=fixed_delay,
                             plots_directory=paths.output_dir_path,
                             num_fitness_trials=num_trials)

    # NOTE: Outerloop optimizer initialization
    parameters = GeneticAlgorithmParameters(seed=0,
                                            popsize=200,
                                            CXPB=0.5,
                                            MUTPB=1.0,
                                            NGEN=20,
                                            indpb=0.01,
                                            tournsize=20,
                                            matepar=0.5,
                                            mutpar=1.0,
                                            remutate=False)

    optimizer = GeneticAlgorithmOptimizer(
        traj,
        optimizee_create_individual=optimizee.create_individual,
        optimizee_fitness_weights=(-0.1, ),
        parameters=parameters,
        optimizee_bounding_func=optimizee.bounding_func,
        optimizee_parameter_spec=optimizee.parameter_spec,
        fitness_plot_name=path_name)

    # Add post processing
    env.add_postprocessing(optimizer.post_process)

    # Run the simulation with all parameter combinations
    env.run(optimizee.simulate)

    # NOTE: Outerloop optimizer end
    optimizer.end(traj)

    # Finally disable logging and close all log-files
    env.disable_logging()
Example #7
def main():
    name = 'LTL-MDP-GD_6_8_TD1'
    try:
        with open('path.conf') as f:
            root_dir_path = f.read().strip()
    except FileNotFoundError:
        raise FileNotFoundError(
            "You have not set the root path to store your results."
            " Write the path to a path.conf text file in the bin directory"
            " before running the simulation"
        )
    paths = Paths(name, dict(run_no='test'), root_dir_path=root_dir_path)

    print("All output logs can be found in directory ", paths.logs_path)

    traj_file = os.path.join(paths.output_dir_path, 'data.h5')

    # Create an environment that handles running our simulation
    # This initializes a PyPet environment
    env = Environment(trajectory=name, filename=traj_file, file_title=u'{} data'.format(name),
                      comment=u'{} data'.format(name),
                      add_time=True,
                      # freeze_input=True,
                      # multiproc=True,
                      # use_scoop=True,
                      wrap_mode=pypetconstants.WRAP_MODE_LOCK,
                      automatic_storing=True,
                      log_stdout=False,  # Sends stdout to logs
                      log_folder=os.path.join(paths.output_dir_path, 'logs')
                      )
    create_shared_logger_data(logger_names=['bin', 'optimizers'],
                              log_levels=['INFO', 'INFO'],
                              log_to_consoles=[True, True],
                              sim_name=name,
                              log_directory=paths.logs_path)
    configure_loggers()

    # Get the trajectory from the environment
    traj = env.trajectory

    optimizee = DLSMDPOptimizee(traj)

    ## Outerloop optimizer initialization
    parameters = ClassicGDParameters(learning_rate=0.001, exploration_step_size=0.001,
                                     n_random_steps=50, n_iteration=30,
                                     stop_criterion=np.Inf, seed=1234)
    #parameters = AdamParameters(learning_rate=0.01, exploration_step_size=0.01, n_random_steps=15, first_order_decay=0.8,
    #                             second_order_decay=0.8, n_iteration=83, stop_criterion=np.Inf, seed=99)
    # parameters = StochasticGDParameters(learning_rate=0.01, stochastic_deviation=1, stochastic_decay=0.99,
    #                                     exploration_step_size=0.01, n_random_steps=5, n_iteration=100,
    #                                     stop_criterion=np.Inf)
    #parameters = RMSPropParameters(learning_rate=0.01, exploration_step_size=0.01,
    #                               n_random_steps=5, momentum_decay=0.5,
    #                               n_iteration=100, stop_criterion=np.Inf, seed=99)

    optimizer = GradientDescentOptimizer(traj, optimizee_create_individual=optimizee.create_individual,
                                         optimizee_fitness_weights=(-1.,),
                                         parameters=parameters,
                                         optimizee_bounding_func=optimizee.bounding_func,
                                         base_point_evaluations=10)

    # Add post processing
    env.add_postprocessing(optimizer.post_process)

    # Run the simulation with all parameter combinations
    env.run(optimizee.simulate)

    ## Outerloop optimizer end
    optimizer.end(traj)

    # Finally disable logging and close all log-files
    env.disable_logging()
Example #8
def main():
    name = 'LTL-MDP-SA_6_8_TD1'
    try:
        with open('path.conf') as f:
            root_dir_path = f.read().strip()
    except FileNotFoundError:
        raise FileNotFoundError(
            "You have not set the root path to store your results."
            " Write the path to a path.conf text file in the bin directory"
            " before running the simulation"
        )
    paths = Paths(name, dict(run_no='test'), root_dir_path=root_dir_path)

    print("All output logs can be found in directory ", paths.logs_path)

    traj_file = os.path.join(paths.output_dir_path, 'data.h5')

    # Create an environment that handles running our simulation
    # This initializes a PyPet environment
    env = Environment(trajectory=name, filename=traj_file, file_title=u'{} data'.format(name),
                      comment=u'{} data'.format(name),
                      add_time=True,
                      # freeze_input=True,
                      # multiproc=True,
                      # use_scoop=True,
                      wrap_mode=pypetconstants.WRAP_MODE_LOCK,
                      automatic_storing=True,
                      log_stdout=False,  # Sends stdout to logs
                      log_folder=os.path.join(paths.output_dir_path, 'logs')
                      )
    create_shared_logger_data(logger_names=['bin', 'optimizers'],
                              log_levels=['INFO', 'INFO'],
                              log_to_consoles=[True, True],
                              sim_name=name,
                              log_directory=paths.logs_path)
    configure_loggers()

    # Get the trajectory from the environment
    traj = env.trajectory

    optimizee = DLSMDPOptimizee(traj)

    # NOTE: Outerloop optimizer initialization

    parameters = SimulatedAnnealingParameters(n_parallel_runs=50, noisy_step=.03, temp_decay=.99, n_iteration=30,
                                              stop_criterion=np.Inf, seed=np.random.randint(1e5), cooling_schedule=AvailableCoolingSchedules.QUADRATIC_ADDAPTIVE)

    optimizer = SimulatedAnnealingOptimizer(traj, optimizee_create_individual=optimizee.create_individual,
                                            optimizee_fitness_weights=(-1.,),
                                            parameters=parameters,
                                            optimizee_bounding_func=optimizee.bounding_func)

    # Add post processing
    env.add_postprocessing(optimizer.post_process)

    # Run the simulation with all parameter combinations
    env.run(optimizee.simulate)

    ## Outerloop optimizer end
    optimizer.end(traj)

    # Finally disable logging and close all log-files
    env.disable_logging()
Example #9
def _batch_run(dir='unnamed',
               batch_id='template',
               space=None,
               save_data_in_hdf5=False,
               single_method=single_run,
               process_method=null_processing,
               post_process_method=None,
               final_process_method=None,
               multiprocessing=True,
               resumable=True,
               overwrite=False,
               sim_config=None,
               params=None,
               optimization=None,
               post_kwargs={},
               run_kwargs={}):
    saved_args = locals()
    traj_name = f'{batch_id}_traj'
    parent_dir_path = f'{paths.BatchRunFolder}/{dir}'
    dir_path = os.path.join(parent_dir_path, batch_id)

    filename = f'{dir_path}/{batch_id}.hdf5'
    build_new = True
    if os.path.exists(parent_dir_path) and os.path.exists(
            dir_path) and overwrite == False:
        build_new = False
        try:
            # print('Trying to resume existing trajectory')
            env = Environment(continuable=True)
            env.resume(trajectory_name=traj_name, resume_folder=dir_path)
            print('Resumed existing trajectory')
            build_new = False
        except:
            try:
                # print('Trying to load existing trajectory')
                traj = load_trajectory(filename=filename,
                                       name=traj_name,
                                       load_all=0)
                env = Environment(trajectory=traj, multiproc=True, ncores=4)

                traj = config_traj(traj, optimization)

                traj.f_load(index=None, load_parameters=2, load_results=0)
                traj.f_expand(space)
                print('Loaded existing trajectory')
                build_new = False
            except:
                print(
                    'Neither resuming nor expanding the existing trajectory worked'
                )

    if build_new:
        if multiprocessing:
            multiproc = True
            resumable = False
            wrap_mode = pypetconstants.WRAP_MODE_QUEUE
        else:
            multiproc = False
            resumable = True
            wrap_mode = pypetconstants.WRAP_MODE_LOCK
        # print('Trying to create novel environment')
        env = Environment(
            trajectory=traj_name,
            filename=filename,
            file_title=batch_id,
            comment=f'{batch_id} batch run!',
            large_overview_tables=True,
            overwrite_file=True,
            resumable=False,
            resume_folder=dir_path,
            multiproc=multiproc,
            ncores=4,
            use_pool=True,  # Our runs are inexpensive; we can get rid of overhead by using a pool
            freeze_input=True,  # We can avoid some overhead by freezing the input to the pool
            wrap_mode=wrap_mode,
            graceful_exit=True)
        print('Created novel environment')
        traj = prepare_traj(env.traj, sim_config, params, batch_id,
                            parent_dir_path, dir_path)
        traj = config_traj(traj, optimization)
        traj.f_explore(space)

    if post_process_method is not None:
        env.add_postprocessing(post_process_method, **post_kwargs)
    env.run(single_method,
            process_method,
            save_data_in_hdf5=save_data_in_hdf5,
            save_to=dir_path,
            **run_kwargs)
    env.disable_logging()
    print('Batch run complete')
    if final_process_method is not None:
        results = final_process_method(env.traj)
        # print(results)
        return results
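A hypothetical invocation of _batch_run, just to illustrate the calling convention; the explored parameter names, their values, and the sim_config object below are assumptions, not taken from the project:

from pypet import cartesian_product

space = cartesian_product({'larva_params.odor_gain': [100.0, 300.0],
                           'sim_params.duration': [3.0, 5.0]})
results = _batch_run(dir='example_batch_runs',
                     batch_id='odor_gain_sweep',
                     space=space,
                     sim_config=my_sim_config,  # hypothetical config object built elsewhere
                     multiprocessing=True,
                     overwrite=False)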
Example #10
def main():
    name = 'LTL-MDP-ES_6_8_TD1'
    try:
        with open('path.conf') as f:
            root_dir_path = f.read().strip()
    except FileNotFoundError:
        raise FileNotFoundError(
            "You have not set the root path to store your results."
            " Write the path to a path.conf text file in the bin directory"
            " before running the simulation"
        )

    paths = Paths(name, dict(run_no='test'), root_dir_path=root_dir_path)

    print("All output logs can be found in directory ", paths.logs_path)

    traj_file = os.path.join(paths.output_dir_path, 'data.h5')
    # Create an environment that handles running our simulation
    # This initializes a PyPet environment
    env = Environment(trajectory=name, filename=traj_file, file_title=u'{} data'.format(name),
                      comment=u'{} data'.format(name),
                      add_time=True,
                      # freeze_input=True,
                      # multiproc=True,
                      # use_scoop=True,
                      wrap_mode=pypetconstants.WRAP_MODE_LOCK,
                      automatic_storing=True,
                      log_stdout=False,  # Sends stdout to logs
                      log_folder=os.path.join(paths.output_dir_path, 'logs')
                      )
    create_shared_logger_data(logger_names=['bin', 'optimizers'],
                              log_levels=['INFO', 'INFO'],
                              log_to_consoles=[True, True],
                              sim_name=name,
                              log_directory=paths.logs_path)
    configure_loggers()

    # Get the trajectory from the environment
    traj = env.trajectory

    ## Benchmark function

    optimizee = DLSMDPOptimizee(traj)

    ## Innerloop simulator

    ## Outerloop optimizer initialization
    optimizer_seed = 1234
    parameters = EvolutionStrategiesParameters(
        learning_rate=0.5,
        learning_rate_decay=0.95,
        noise_std=0.1,
        mirrored_sampling_enabled=True,
        fitness_shaping_enabled=True,
        pop_size=25,
        n_iteration=30,
        stop_criterion=np.Inf,
        seed=optimizer_seed)

    optimizer = EvolutionStrategiesOptimizer(
        traj,
        optimizee_create_individual=optimizee.create_individual,
        optimizee_fitness_weights=(-1.,),
        parameters=parameters,
        optimizee_bounding_func=optimizee.bounding_func)

    # Add post processing
    env.add_postprocessing(optimizer.post_process)

    # Run the simulation with all parameter combinations
    env.run(optimizee.simulate)

    ## Outerloop optimizer end
    optimizer.end(traj)

    # Finally disable logging and close all log-files
    env.disable_logging()
Example #11
def main():

    env = Environment(trajectory='postproc_deap',
                      overwrite_file=True,
                      log_stdout=False,
                      log_level=50,  # only display ERRORS
                      automatic_storing=True,  # Since we use post-processing, we
                      # can safely enable automatic storing, because everything will
                      # only be stored once at the very end of all runs.
                      comment='Using pypet and DEAP with less overhead'
                      )
    traj = env.traj


    # ------- Add parameters ------- #
    traj.f_add_parameter('popsize', 100, comment='Population size')
    traj.f_add_parameter('CXPB', 0.5, comment='Crossover term')
    traj.f_add_parameter('MUTPB', 0.2, comment='Mutation probability')
    traj.f_add_parameter('NGEN', 20, comment='Number of generations')

    traj.f_add_parameter('generation', 0, comment='Current generation')
    traj.f_add_parameter('ind_idx', 0, comment='Index of individual')
    traj.f_add_parameter('ind_len', 50, comment='Length of individual')

    traj.f_add_parameter('indpb', 0.005, comment='Mutation parameter')
    traj.f_add_parameter('tournsize', 3, comment='Selection parameter')

    traj.f_add_parameter('seed', 42, comment='Seed for RNG')


    # Placeholders for individuals and results that are about to be explored
    traj.f_add_derived_parameter('individual', [0 for x in range(traj.ind_len)],
                                 'An individual of the population')
    traj.f_add_result('fitnesses', [], comment='Fitnesses of all individuals')


    # ------- Create and register functions with DEAP ------- #
    creator.create("FitnessMax", base.Fitness, weights=(1.0,))
    creator.create("Individual", list, fitness=creator.FitnessMax)

    toolbox = base.Toolbox()
    # Attribute generator
    toolbox.register("attr_bool", random.randint, 0, 1)
    # Structure initializers
    toolbox.register("individual", tools.initRepeat, creator.Individual,
        toolbox.attr_bool, traj.ind_len)
    toolbox.register("population", tools.initRepeat, list, toolbox.individual)

    # Operator registering
    toolbox.register("mate", tools.cxTwoPoint)
    toolbox.register("mutate", tools.mutFlipBit, indpb=traj.indpb)
    toolbox.register("select", tools.selTournament, tournsize=traj.tournsize)


    # ------- Initialize Population and Trajectory -------- #
    random.seed(traj.seed)
    pop = toolbox.population(n=traj.popsize)

    eval_pop = [ind for ind in pop if not ind.fitness.valid]
    traj.f_explore(cartesian_product({'generation': [0],
                                     'ind_idx': range(len(eval_pop)),
                                     'individual':[list(x) for x in eval_pop]},
                                        [('ind_idx', 'individual'),'generation']))

    # ----------- Add postprocessing ------------------ #
    postproc = Postprocessing(pop, eval_pop, toolbox)  # Add links to important structures
    env.add_postprocessing(postproc)

    # ------------ Run applying post-processing ---------- #
    env.run(eval_one_max)

    # ------------ Finished all runs and print result --------------- #
    print("-- End of (successful) evolution --")
    best_ind = tools.selBest(pop, 1)[0]
    print("Best individual is %s, %s" % (best_ind, best_ind.fitness.values))
Example #12
def main():
    name = 'LTL-MDP-FACE'
    try:
        with open('path.conf') as f:
            root_dir_path = f.read().strip()
    except FileNotFoundError:
        raise FileNotFoundError(
            "You have not set the root path to store your results."
            " Write the path to a path.conf text file in the bin directory"
            " before running the simulation")
    paths = Paths(name, dict(run_no='test'), root_dir_path=root_dir_path)

    print("All output logs can be found in directory ", paths.logs_path)

    traj_file = os.path.join(paths.output_dir_path, 'data.h5')

    # Create an environment that handles running our simulation
    # This initializes a PyPet environment
    env = Environment(
        trajectory=name,
        filename=traj_file,
        file_title=u'{} data'.format(name),
        comment=u'{} data'.format(name),
        add_time=True,
        # freeze_input=True,
        # multiproc=True,
        # use_scoop=True,
        wrap_mode=pypetconstants.WRAP_MODE_LOCK,
        automatic_storing=True,
        log_stdout=False,  # Sends stdout to logs
        log_folder=os.path.join(paths.output_dir_path, 'logs'))
    create_shared_logger_data(logger_names=['bin', 'optimizers'],
                              log_levels=['INFO', 'INFO'],
                              log_to_consoles=[True, True],
                              sim_name=name,
                              log_directory=paths.logs_path)
    configure_loggers()

    # Get the trajectory from the environment
    traj = env.trajectory

    optimizee = DLSMDPOptimizee(traj)

    # NOTE: Outerloop optimizer initialization
    # TODO: Change the optimizer to the appropriate Optimizer class
    parameters = FACEParameters(min_pop_size=25,
                                max_pop_size=25,
                                n_elite=10,
                                smoothing=0.2,
                                temp_decay=0,
                                n_iteration=100,
                                distribution=Gaussian(),
                                n_expand=5,
                                stop_criterion=np.inf,
                                seed=109)
    optimizer = FACEOptimizer(
        traj,
        optimizee_create_individual=optimizee.create_individual,
        optimizee_fitness_weights=(-1.,),
        parameters=parameters,
        optimizee_bounding_func=optimizee.bounding_func)

    # Add post processing
    env.add_postprocessing(optimizer.post_process)

    # Run the simulation with all parameter combinations
    env.run(optimizee.simulate)

    ## Outerloop optimizer end
    optimizer.end(traj)

    # Finally disable logging and close all log-files
    env.disable_logging()
Example #13
def _batch_run(dir='unnamed',
               batch_id='template',
               space=None,
               save_data_in_hdf5=False,
               single_method=single_run,
               process_method=null_processing,
               post_process_method=None,
               final_process_method=None,
               multiprocessing=True,
               resumable=True,
               overwrite=False,
               sim_config=None,
               params=None,
               config=None,
               post_kwargs={},
               run_kwargs={}):
    saved_args = locals()
    # print(locals())
    traj_name = f'{batch_id}_traj'
    parent_dir_path = f'{BatchRunFolder}/{dir}'
    dir_path = os.path.join(parent_dir_path, batch_id)
    plot_path = os.path.join(dir_path, f'{batch_id}.pdf')
    data_path = os.path.join(dir_path, f'{batch_id}.csv')
    filename = f'{dir_path}/{batch_id}.hdf5'
    build_new = True
    if os.path.exists(parent_dir_path) and os.path.exists(
            dir_path) and overwrite == False:
        build_new = False
        try:
            print('Trying to resume existing trajectory')
            env = Environment(continuable=True)
            env.resume(trajectory_name=traj_name, resume_folder=dir_path)
            print('Resumed existing trajectory')
            build_new = False
        except:
            try:
                print('Trying to load existing trajectory')
                traj = load_trajectory(filename=filename,
                                       name=traj_name,
                                       load_all=0)
                env = Environment(trajectory=traj)
                traj.f_load(index=None, load_parameters=2, load_results=0)
                traj.f_expand(space)
                print('Loaded existing trajectory')
                build_new = False
            except:
                print(
                    'Neither resuming nor expanding the existing trajectory worked'
                )

    if build_new:
        if multiprocessing:
            multiproc = True
            resumable = False
            wrap_mode = pypetconstants.WRAP_MODE_QUEUE
        else:
            multiproc = False
            resumable = True
            wrap_mode = pypetconstants.WRAP_MODE_LOCK
        # try:
        print('Trying to create novel environment')
        env = Environment(
            trajectory=traj_name,
            filename=filename,
            file_title=batch_id,
            comment=f'{batch_id} batch run!',
            large_overview_tables=True,
            overwrite_file=True,
            resumable=False,
            resume_folder=dir_path,
            multiproc=multiproc,
            ncores=4,
            use_pool=True,  # Our runs are inexpensive; we can get rid of overhead by using a pool
            freeze_input=True,  # We can avoid some overhead by freezing the input to the pool
            wrap_mode=wrap_mode,
            graceful_exit=True)
        traj = env.traj
        print('Created novel environment')
        fly_params = sim_config['fly_params']
        env_params = sim_config['env_params']
        sim_params = sim_config['sim_params']
        if all(v is not None for v in [sim_params, env_params, fly_params]):
            traj = load_default_configuration(traj,
                                              sim_params=sim_params,
                                              env_params=env_params,
                                              fly_params=fly_params)
        elif params is not None:
            for p in params:
                traj.f_apar(p, 0.0)
        if config is not None:
            for k, v in config.items():
                traj.f_aconf(k, v)
        traj.f_aconf('parent_dir_path',
                     parent_dir_path,
                     comment='The parent directory')
        traj.f_aconf('dir_path',
                     dir_path,
                     comment='The directory path for saving data')
        traj.f_aconf('plot_path',
                     plot_path,
                     comment='The file path for saving plot')
        traj.f_aconf('data_path',
                     data_path,
                     comment='The file path for saving data')
        traj.f_aconf('dataset_path',
                     f'{dir_path}/{batch_id}',
                     comment='The directory path for saving datasets')
        traj.f_explore(space)
        # except:
        #     raise ValueError(f'Failed to perform batch run {batch_id}')

    if post_process_method is not None:
        env.add_postprocessing(post_process_method, **post_kwargs)
    env.run(single_method,
            process_method,
            save_data_in_hdf5=save_data_in_hdf5,
            save_to=dir_path,
            common_folder=batch_id,
            **run_kwargs)
    env.disable_logging()
    print('Batch run complete')
    if final_process_method is not None:
        return final_process_method(env.traj)
Example #14
def main():
    # pypet environment
    env = Environment(
        trajectory="missing_rate",
        comment="Test experiment with varying missing rate",
        log_config=None,
        multiproc=False,
        ncores=1,
        # use_pool=True,
        # freeze_input=True,
        # wrap_mode=pypetconstants.WRAP_MODE_QUEUE,
        # graceful_exit=True,
        filename="./simulations/results/test/",
        overwrite_file=True
    )
    traj = env.trajectory

    # parameters (data generation)

    traj.f_add_parameter(
        "data.N", np.int64(500), "Number of nodes"
    )
    traj.f_add_parameter(
        "data.K", np.int64(5), "True number of latent components"
    )
    traj.f_add_parameter(
        "data.p_cts", np.int64(10), "Number of continuous covariates"
    )
    traj.f_add_parameter(
        "data.p_bin", np.int64(0), "Number of binary covariates"
    )
    traj.f_add_parameter(
        "data.var_adj", np.float64(1.), "True variance in the link Probit model"
    )
    traj.f_add_parameter(
        "data.var_cov", np.float64(1.), "True variance in the covariate model (cts and bin)"
    )
    traj.f_add_parameter(
        "data.missing_rate", np.float64(0.2), "Missing rate"
    )
    traj.f_add_parameter(
        "data.seed", np.int64(1), "Random seed"
    )
    traj.f_add_parameter(
        "data.alpha_mean", np.float64(-1.85), "Mean of the heterogeneity parameter"
    )

    # parameters (model)
    traj.f_add_parameter(
        "model.K", np.int64(3), "Number of latent components in the model"
    )
    traj.f_add_parameter(
        "model.adj_model", "Logistic", "Adjacency model"
    )
    traj.f_add_parameter(
        "model.bin_model", "Logistic", "Binary covariate model"
    )

    # parameters (fit)
    traj.f_add_parameter(
        "fit.n_iter", np.int64(10), "Number of VEM iterations"
    )
    traj.f_add_parameter(
        "fit.n_vmp", np.int64(10), "Number of VMP iterations per E-step"
    )
    traj.f_add_parameter(
        "fit.n_gd", np.int64(10), "Number of GD iterations per M-step"
    )
    traj.f_add_parameter(
        "fit.step_size", np.float64(0.01), "GD Step size"
    )

    # experiment
    explore_dict = {
        "data.missing_rate": np.array([0.05]),
        "data.p_cts": np.array([10]),
        "data.seed": np.array([1])
    }
    experiment = cartesian_product(explore_dict, ('data.missing_rate', "data.p_cts", "data.seed"))
    traj.f_explore(experiment)

    env.add_postprocessing(post_processing)
    env.run(run)
    env.disable_logging()
Example #15
def main(path_name, resolution, fixed_delay, state_handling, use_pecevski,
         num_trials):
    name = path_name
    try:
        with open('bin/path.conf') as f:
            root_dir_path = f.read().strip()
    except FileNotFoundError:
        raise FileNotFoundError(
            "You have not set the root path to store your results."
            " Write the path to a path.conf text file in the bin directory"
            " before running the simulation")
    paths = Paths(name, dict(run_no='test'), root_dir_path=root_dir_path)

    traj_file = os.path.join(paths.output_dir_path, 'data.h5')

    # Create an environment that handles running our simulation
    # This initializes a PyPet environment
    env = Environment(
        trajectory=name,
        filename=traj_file,
        file_title='{} data'.format(name),
        comment='{} data'.format(name),
        add_time=True,
        automatic_storing=True,
        use_scoop=True,
        multiproc=True,
        wrap_mode=pypetconstants.WRAP_MODE_LOCAL,
        log_stdout=False,  # Sends stdout to logs
    )

    create_shared_logger_data(logger_names=['bin', 'optimizers'],
                              log_levels=['INFO', 'INFO'],
                              log_to_consoles=[True, True],
                              sim_name=name,
                              log_directory=paths.logs_path)
    configure_loggers()

    # Get the trajectory from the environment
    traj = env.trajectory

    # NOTE: Innerloop simulator
    optimizee = SAMGraphOptimizee(traj,
                                  n_NEST_threads=1,
                                  time_resolution=resolution,
                                  fixed_delay=fixed_delay,
                                  use_pecevski=use_pecevski,
                                  state_handling=state_handling,
                                  plots_directory=paths.output_dir_path,
                                  num_fitness_trials=num_trials)

    # Get bounds for mu and sigma calculation.
    param_spec = OrderedDict(sorted(SAMGraph.parameter_spec(4).items()))
    names = [k for k, _ in param_spec.items()]
    mu = np.array([(v_min + v_max) / 2
                   for k, (v_min, v_max) in param_spec.items()])
    sigma = np.array([(v_max - v_min) / 2
                      for k, (v_min, v_max) in param_spec.items()])

    print("Using means: {}\nUsing stds: {}".format(dict(zip(names, mu)),
                                                   dict(zip(names, sigma))))

    # NOTE: Outerloop optimizer initialization
    parameters = NaturalEvolutionStrategiesParameters(
        seed=0,
        pop_size=96,
        n_iteration=40,
        learning_rate_sigma=0.5,
        learning_rate_mu=0.5,
        mu=mu,
        sigma=sigma,
        mirrored_sampling_enabled=True,
        fitness_shaping_enabled=True,
        stop_criterion=np.Inf)

    optimizer = NaturalEvolutionStrategiesOptimizer(
        traj,
        optimizee_create_individual=optimizee.create_individual,
        optimizee_fitness_weights=(-1.0, ),
        parameters=parameters,
        optimizee_bounding_func=optimizee.bounding_func,
        fitness_plot_name=path_name)

    # Add post processing
    env.add_postprocessing(optimizer.post_process)

    # Run the simulation with all parameter combinations
    env.run(optimizee.simulate)

    # NOTE: Outerloop optimizer end
    optimizer.end(traj)

    # Finally disable logging and close all log-files
    env.disable_logging()
Example #16
def main():
    name = 'LTL-MDP-GS'
    try:
        with open('path.conf') as f:
            root_dir_path = f.read().strip()
    except FileNotFoundError:
        raise FileNotFoundError(
            "You have not set the root path to store your results."
            " Write the path to a path.conf text file in the bin directory"
            " before running the simulation")
    paths = Paths(name, dict(run_no='test'), root_dir_path=root_dir_path)

    print("All output logs can be found in directory ", paths.logs_path)

    traj_file = os.path.join(paths.output_dir_path, 'data.h5')

    # Create an environment that handles running our simulation
    # This initializes a PyPet environment
    env = Environment(
        trajectory=name,
        filename=traj_file,
        file_title=u'{} data'.format(name),
        comment=u'{} data'.format(name),
        add_time=True,
        freeze_input=True,
        multiproc=True,
        use_scoop=True,
        wrap_mode=pypetconstants.WRAP_MODE_LOCAL,
        automatic_storing=True,
        log_stdout=False,  # Sends stdout to logs
        log_folder=os.path.join(paths.output_dir_path, 'logs'))
    create_shared_logger_data(logger_names=['bin', 'optimizers'],
                              log_levels=['INFO', 'INFO'],
                              log_to_consoles=[True, True],
                              sim_name=name,
                              log_directory=paths.logs_path)
    configure_loggers()

    # Get the trajectory from the environment
    traj = env.trajectory

    # NOTE: Innerloop simulator
    optimizee = StateActionOptimizee(traj)

    # NOTE: Outerloop optimizer initialization
    n_grid_divs_per_axis = 50
    parameters = GridSearchParameters(
        param_grid={
            'gamma': (optimizee.bound[0], optimizee.bound[1],
                      n_grid_divs_per_axis),
            #'lam': (optimizee.bound[0], optimizee.bound[1], n_grid_divs_per_axis),
            'eta': (optimizee.bound[0], optimizee.bound[1],
                    n_grid_divs_per_axis),
        })
    optimizer = GridSearchOptimizer(
        traj,
        optimizee_create_individual=optimizee.create_individual,
        optimizee_fitness_weights=(-1.,),
        parameters=parameters)

    # Add post processing
    env.add_postprocessing(optimizer.post_process)

    # Run the simulation with all parameter combinations
    env.run(optimizee.simulate)

    # NOTE: Outerloop optimizer end
    optimizer.end(traj)

    # Finally disable logging and close all log-files
    env.disable_logging()
Example #17
def main():
    name = 'LTL-MDP-CE_6_8_TD1_New'
    try:
        with open('path.conf') as f:
            root_dir_path = f.read().strip()
    except FileNotFoundError:
        raise FileNotFoundError(
            "You have not set the root path to store your results."
            " Write the path to a path.conf text file in the bin directory"
            " before running the simulation")
    paths = Paths(name, dict(run_no='test'), root_dir_path=root_dir_path)

    print("All output logs can be found in directory ", paths.logs_path)

    traj_file = os.path.join(paths.output_dir_path, 'data.h5')

    # Create an environment that handles running our simulation
    # This initializes a PyPet environment
    env = Environment(
        trajectory=name,
        filename=traj_file,
        file_title=u'{} data'.format(name),
        comment=u'{} data'.format(name),
        add_time=True,
        freeze_input=True,
        multiproc=True,
        use_scoop=True,
        wrap_mode=pypetconstants.WRAP_MODE_LOCAL,
        automatic_storing=True,
        log_stdout=False,  # Sends stdout to logs
        log_folder=os.path.join(paths.output_dir_path, 'logs'))
    create_shared_logger_data(logger_names=['bin', 'optimizers'],
                              log_levels=['INFO', 'INFO'],
                              log_to_consoles=[True, True],
                              sim_name=name,
                              log_directory=paths.logs_path)
    configure_loggers()

    # Get the trajectory from the environment
    traj = env.trajectory

    # NOTE: Benchmark function
    optimizee = StateActionOptimizee(traj)

    # NOTE: Outerloop optimizer initialization
    # TODO: Change the optimizer to the appropriate Optimizer class
    parameters = CrossEntropyParameters(pop_size=75,
                                        rho=0.2,
                                        smoothing=0.0,
                                        temp_decay=0,
                                        n_iteration=75,
                                        distribution=NoisyGaussian(
                                            noise_magnitude=1,
                                            noise_decay=0.95),
                                        stop_criterion=np.inf,
                                        seed=102)
    optimizer = CrossEntropyOptimizer(
        traj,
        optimizee_create_individual=optimizee.create_individual,
        optimizee_fitness_weights=(-1., ),
        parameters=parameters,
        optimizee_bounding_func=optimizee.bounding_func)

    # Add post processing
    env.add_postprocessing(optimizer.post_process)

    # Add Recorder
    recorder = Recorder(trajectory=traj,
                        optimizee_name='SNN StateAction',
                        optimizee_parameters=['gamma', 'eta'],
                        optimizer_name=optimizer.__class__.__name__,
                        optimizer_parameters=optimizer.get_params())
    recorder.start()

    # Run the simulation with all parameter combinations
    env.run(optimizee.simulate)

    # NOTE: Outerloop optimizer end
    optimizer.end(traj)
    recorder.end()

    # Finally disable logging and close all log-files
    env.disable_logging()
Example #18
def main():

    env = Environment(
        trajectory='postproc_deap',
        overwrite_file=True,
        log_stdout=False,
        log_level=50,  # only display ERRORS
        automatic_storing=True,  # Since we use post-processing, we
        # can safely enable automatic storing, because everything will
        # only be stored once at the very end of all runs.
        comment='Using pypet and DEAP with less overhead')
    traj = env.traj

    # ------- Add parameters ------- #
    traj.f_add_parameter('popsize', 100, comment='Population size')
    traj.f_add_parameter('CXPB', 0.5, comment='Crossover term')
    traj.f_add_parameter('MUTPB', 0.2, comment='Mutation probability')
    traj.f_add_parameter('NGEN', 20, comment='Number of generations')

    traj.f_add_parameter('generation', 0, comment='Current generation')
    traj.f_add_parameter('ind_idx', 0, comment='Index of individual')
    traj.f_add_parameter('ind_len', 50, comment='Length of individual')

    traj.f_add_parameter('indpb', 0.005, comment='Mutation parameter')
    traj.f_add_parameter('tournsize', 3, comment='Selection parameter')

    traj.f_add_parameter('seed', 42, comment='Seed for RNG')

    # Placeholders for individuals and results that are about to be explored
    traj.f_add_derived_parameter('individual',
                                 [0 for x in range(traj.ind_len)],
                                 'An individual of the population')
    traj.f_add_result('fitnesses', [], comment='Fitnesses of all individuals')

    # ------- Create and register functions with DEAP ------- #
    creator.create("FitnessMax", base.Fitness, weights=(1.0, ))
    creator.create("Individual", list, fitness=creator.FitnessMax)

    toolbox = base.Toolbox()
    # Attribute generator
    toolbox.register("attr_bool", random.randint, 0, 1)
    # Structure initializers
    toolbox.register("individual", tools.initRepeat, creator.Individual,
                     toolbox.attr_bool, traj.ind_len)
    toolbox.register("population", tools.initRepeat, list, toolbox.individual)

    # Operator registering
    toolbox.register("mate", tools.cxTwoPoint)
    toolbox.register("mutate", tools.mutFlipBit, indpb=traj.indpb)
    toolbox.register("select", tools.selTournament, tournsize=traj.tournsize)

    # ------- Initialize Population and Trajectory -------- #
    random.seed(traj.seed)
    pop = toolbox.population(n=traj.popsize)

    eval_pop = [ind for ind in pop if not ind.fitness.valid]
    traj.f_explore(
        cartesian_product(
            {
                'generation': [0],
                'ind_idx': range(len(eval_pop)),
                'individual': [list(x) for x in eval_pop]
            }, [('ind_idx', 'individual'), 'generation']))

    # ----------- Add postprocessing ------------------ #
    postproc = Postprocessing(pop, eval_pop,
                              toolbox)  # Add links to important structures
    env.add_postprocessing(postproc)

    # ------------ Run applying post-processing ---------- #
    env.run(eval_one_max)

    # ------------ Finished all runs and print result --------------- #
    print("-- End of (successful) evolution --")
    best_ind = tools.selBest(pop, 1)[0]
    print("Best individual is %s, %s" % (best_ind, best_ind.fitness.values))