Example #1
File: main.py Project: MehmetTimur/pypet
def main():

    filename = os.path.join('hdf5', 'FiringRate.hdf5')
    env = Environment(trajectory='FiringRate',
                      comment='Experiment to measure the firing rate '
                            'of a leaky integrate and fire neuron. '
                            'Exploring different input currents, '
                            'as well as refractory periods',
                      add_time=False, # We don't want to add the current time to the name,
                      log_stdout=True,
                      log_config='DEFAULT',
                      multiproc=True,
                      ncores=2, #My laptop has 2 cores ;-)
                      wrap_mode='QUEUE',
                      filename=filename,
                      overwrite_file=True)

    traj = env.trajectory

    # Add parameters
    add_parameters(traj)

    # Let's explore
    add_exploration(traj)

    # Add the postprocessing function
    env.add_postprocessing(neuron_postproc)

    # Run the experiment
    env.run(run_neuron)

    # Finally disable logging and close all log-files
    env.disable_logging()
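
The pypet helpers referenced above (add_parameters, add_exploration, run_neuron, neuron_postproc) live elsewhere in the project. Below is a minimal sketch of the calling conventions pypet expects from them; the parameter names and the firing-rate formula are illustrative assumptions, not the project's actual code:

import numpy as np
from pypet import cartesian_product

def add_parameters(traj):
    # Hypothetical parameters; the real project defines its own set
    traj.f_add_parameter('neuron.I', 1.0, comment='Input current')
    traj.f_add_parameter('neuron.refractory', 5.0, comment='Refractory period (ms)')

def add_exploration(traj):
    # Explore the cartesian product of a few parameter values
    traj.f_explore(cartesian_product({'neuron.I': [0.5, 1.0, 1.5],
                                      'neuron.refractory': [2.0, 5.0]}))

def run_neuron(traj):
    # A pypet run function receives the trajectory as its first argument
    rate = traj.neuron.I / (1.0 + traj.neuron.refractory)  # placeholder computation
    traj.f_add_result('firing_rate', rate, comment='Firing rate of this run')
    return rate  # returned values are handed to post-processing

def neuron_postproc(traj, result_list):
    # Post-processing receives the trajectory and (run index, return value) pairs
    rates = [value for _run_idx, value in result_list]
    traj.f_add_result('summary.mean_rate', np.mean(rates))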
Example #2
File: plotff.py Project: nigroup/pypet
def main():

    filename = os.path.join('hdf5', 'Clustered_Network.hdf5')
    # If we pass a filename to the trajectory a new HDF5StorageService will
    # be automatically created
    traj = Trajectory(filename=filename,
                      dynamically_imported_classes=[BrianMonitorResult,
                                                    BrianParameter])

    # Let's create a fake environment to enable logging:
    env = Environment(traj, do_single_runs=False)


    # Load the trajectory, but only load the skeleton of the results
    traj.f_load(index=-1, load_parameters=2, load_derived_parameters=2, load_results=1)

    # Find the result instances related to the fano factor
    fano_dict = traj.f_get_from_runs('mean_fano_factor', fast_access=False)

    # Load the data of the fano factor results
    ffs = fano_dict.values()
    traj.f_load_items(ffs)

    # Extract the fano factor values and the corresponding R_ee values for each run
    ffs_values = [x.f_get() for x in ffs]
    Rees = traj.f_get('R_ee').f_get_range()

    # Plot average fano factor as a function of R_ee
    plt.plot(Rees, ffs_values)
    plt.xlabel('R_ee')
    plt.ylabel('Avg. Fano Factor')
    plt.show()

    # Finally disable logging and close all log-files
    env.disable_logging()
Example #3
def main():

    filename = os.path.join('hdf5', 'FiringRate.hdf5')
    env = Environment(
        trajectory='FiringRate',
        comment='Experiment to measure the firing rate '
        'of a leaky integrate and fire neuron. '
        'Exploring different input currents, '
        'as well as refractory periods',
        add_time=False,  # We don't want to add the current time to the name,
        log_stdout=True,
        log_config='DEFAULT',
        multiproc=True,
        ncores=4,  #I think my laptop has 4 cores
        git_repository='/home/pinolej/th',
        wrap_mode='QUEUE',
        filename=filename,
        overwrite_file=True)

    traj = env.trajectory

    # Add parameters
    add_parameters(traj)

    # Let's explore
    add_exploration(traj)

    # Add the postprocessing function
    env.add_postprocessing(neuron_postproc)

    # Run the experiment
    env.run(run_neuron)

    # Finally disable logging and close all log-files
    env.disable_logging()
Example #4
File: main.py Project: fontaine618/NAIVI
def main():
    # pypet environment
    env = Environment(trajectory=SIM_NAME,
                      comment="Experiment on density with binary covariates",
                      log_config=None,
                      multiproc=False,
                      ncores=1,
                      filename=SIM_PATH + "/results/",
                      overwrite_file=True)
    traj = env.trajectory

    # parameters (data generation)

    traj.f_add_parameter("data.N", np.int64(500), "Number of nodes")
    traj.f_add_parameter("data.K", np.int64(5),
                         "True number of latent components")
    traj.f_add_parameter("data.p_cts", np.int64(0),
                         "Number of continuous covariates")
    traj.f_add_parameter("data.p_bin", np.int64(0),
                         "Number of binary covariates")
    traj.f_add_parameter("data.var_adj", np.float64(1.),
                         "True variance in the link Probit model")
    traj.f_add_parameter("data.var_cov", np.float64(1.),
                         "True variance in the covariate model (cts and bin)")
    traj.f_add_parameter("data.missing_rate", np.float64(0.1), "Missing rate")
    traj.f_add_parameter("data.seed", np.int64(1), "Random seed")
    traj.f_add_parameter("data.alpha_mean", np.float64(-1.85),
                         "Mean of the heterogeneity parameter")

    # parameters (model)
    traj.f_add_parameter("model.K", np.int64(5),
                         "Number of latent components in the model")
    traj.f_add_parameter("model.adj_model", "Logistic", "Adjacency model")
    traj.f_add_parameter("model.bin_model", "Logistic",
                         "Binary covariate model")

    # parameters (fit)
    traj.f_add_parameter("fit.n_iter", np.int64(20),
                         "Number of VEM iterations")
    traj.f_add_parameter("fit.n_vmp", np.int64(5),
                         "Number of VMP iterations per E-step")
    traj.f_add_parameter("fit.n_gd", np.int64(5),
                         "Number of GD iterations per M-step")
    traj.f_add_parameter("fit.step_size", np.float64(0.01), "GD Step size")

    # experiment
    explore_dict = {
        "data.alpha_mean":
        np.array([-3.2, -2.8, -2.4, -2., -1.6, -1.2, -0.8, -0.4, 0.0, 0.4]),
        "data.p_bin":
        np.array([10, 100, 500]),
        "data.seed":
        np.arange(0, 100, 1)
    }
    experiment = cartesian_product(explore_dict, tuple(explore_dict.keys()))
    traj.f_explore(experiment)

    env.add_postprocessing(post_processing)
    env.run(run)
    env.disable_logging()
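
cartesian_product expands the exploration dictionary into aligned value lists; the optional tuple fixes the nesting order, with the right-most name varying fastest. A small illustration (the values here are invented for clarity):

from pypet import cartesian_product

small = cartesian_product({'a': [1, 2], 'b': [10, 20]}, ('a', 'b'))
# small == {'a': [1, 1, 2, 2], 'b': [10, 20, 10, 20]}
# With the 10 alpha_mean values x 3 p_bin values x 100 seeds above,
# the experiment therefore defines 3000 runs.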
Example #5
def main():
    """ Main *boilerplate* function to start simulation """
    # Now let's make use of logging
    logger = logging.getLogger()

    # Create folders for data and plots
    folder = os.path.join(os.getcwd(), 'experiments', 'ca_patterns_pypet')
    if not os.path.isdir(folder):
        os.makedirs(folder)
    filename = os.path.join(folder, 'all_patterns.hdf5')

    # Create an environment
    env = Environment(trajectory='cellular_automata',
                      multiproc=True,
                      ncores=4,
                      wrap_mode='QUEUE',
                      filename=filename,
                      overwrite_file=True)

    # extract the trajectory
    traj = env.traj

    traj.par.ncells = Parameter('ncells', 400, 'Number of cells')
    traj.par.steps = Parameter('steps', 250, 'Number of timesteps')
    traj.par.rule_number = Parameter('rule_number', 30, 'The ca rule')
    traj.par.initial_name = Parameter('initial_name', 'random',
                                      'The type of initial state')
    traj.par.seed = Parameter('seed', 100042, 'RNG Seed')

    # Explore
    exp_dict = {
        'rule_number': [10, 30, 90, 110, 184],
        'initial_name': ['single', 'random'],
    }
    # # You can uncomment the ``exp_dict`` below to see that changing the
    # # exploration scheme is now really easy:
    # exp_dict = {'rule_number' : [10, 30, 90, 110, 184],
    #             'ncells' : [100, 200, 300],
    #             'seed': [333444555, 123456]}
    exp_dict = cartesian_product(exp_dict)
    traj.f_explore(exp_dict)

    # Run the simulation
    logger.info('Starting Simulation')
    env.run(wrap_automaton)

    # Load all data
    traj.f_load(load_data=2)

    logger.info('Printing data')
    for idx, run_name in enumerate(traj.f_iter_runs()):
        # Plot all patterns
        filename = os.path.join(folder, make_filename(traj))
        plot_pattern(traj.crun.pattern, traj.rule_number, filename)
        progressbar(idx, len(traj), logger=logger)

    # Finally disable logging and close all log-files
    env.disable_logging()
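
wrap_automaton and make_filename are imported from the project's modules. A sketch of the shapes they presumably have; cellular_automaton_1d is a stand-in name, not the actual simulator:

def wrap_automaton(traj):
    # Run the elementary cellular automaton with the current parameters
    pattern = cellular_automaton_1d(traj.ncells, traj.steps, traj.rule_number,
                                    traj.initial_name, traj.seed)
    traj.f_add_result('pattern', pattern, comment='The CA evolution over time')

def make_filename(traj):
    # Build a per-run plot filename from the explored parameters
    return 'rule_%d_%s_%s.png' % (traj.rule_number, traj.initial_name, traj.v_crun)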
Example #6
def main():
    """ Main *boilerplate* function to start simulation """
    # Now let's make use of logging
    logger = logging.getLogger()

    # Create folders for data and plots
    folder = os.path.join(os.getcwd(), 'experiments', 'ca_patterns_pypet')
    if not os.path.isdir(folder):
        os.makedirs(folder)
    filename = os.path.join(folder, 'all_patterns.hdf5')

    # Create an environment
    env = Environment(trajectory='cellular_automata',
                      multiproc=True,
                      ncores=4,
                      wrap_mode='QUEUE',
                      filename=filename,
                      overwrite_file=True)

    # extract the trajectory
    traj = env.traj

    traj.v_lazy_adding = True
    traj.par.ncells = 400, 'Number of cells'
    traj.par.steps = 250, 'Number of timesteps'
    traj.par.rule_number = 30, 'The ca rule'
    traj.par.initial_name = 'random', 'The type of initial state'
    traj.par.seed = 100042, 'RNG Seed'


    # Explore
    exp_dict = {'rule_number' : [10, 30, 90, 110, 184],
                'initial_name' : ['single', 'random'],}
    # # You can uncomment the ``exp_dict`` below to see that changing the
    # # exploration scheme is now really easy:
    # exp_dict = {'rule_number' : [10, 30, 90, 110, 184],
    #             'ncells' : [100, 200, 300],
    #             'seed': [333444555, 123456]}
    exp_dict = cartesian_product(exp_dict)
    traj.f_explore(exp_dict)

    # Run the simulation
    logger.info('Starting Simulation')
    env.run(wrap_automaton)

    # Load all data
    traj.f_load(load_data=2)

    logger.info('Printing data')
    for idx, run_name in enumerate(traj.f_iter_runs()):
        # Plot all patterns
        filename = os.path.join(folder, make_filename(traj))
        plot_pattern(traj.crun.pattern, traj.rule_number, filename)
        progressbar(idx, len(traj), logger=logger)

    # Finally disable logging and close all log-files
    env.disable_logging()
Example #7
File: main.py Project: fontaine618/NAIVI
def main(path, name, explore_dict):
    comment = "\n".join(
        ["{}: {}".format(k, v) for k, v in explore_dict.items()])
    # pypet environment
    env = Environment(trajectory=name,
                      comment=comment,
                      log_config=None,
                      multiproc=False,
                      ncores=1,
                      filename=path + name + "/results/",
                      overwrite_file=True)
    traj = env.trajectory
    traj.f_add_parameter("path", path + name, "Path")

    # parameters (data generation)
    traj.f_add_parameter("data.N", np.int64(500), "Number of nodes")
    traj.f_add_parameter("data.K", np.int64(5),
                         "True number of latent components")
    traj.f_add_parameter("data.p_cts", np.int64(0),
                         "Number of continuous covariates")
    traj.f_add_parameter("data.p_bin", np.int64(0),
                         "Number of binary covariates")
    traj.f_add_parameter("data.var_cov", np.float64(1.),
                         "True variance in the covariate model (cts and bin)")
    traj.f_add_parameter("data.missing_rate", np.float64(0.1), "Missing rate")
    traj.f_add_parameter("data.seed", np.int64(1), "Random seed")
    traj.f_add_parameter("data.center", np.int64(1), "Ego-network center")
    traj.f_add_parameter("data.alpha_mean", np.float64(-1.85),
                         "Mean of the heterogeneity parameter")

    # parameters (model)
    traj.f_add_parameter("model.K", np.int64(5),
                         "Number of latent components in the model")

    # parameters (fit)
    traj.f_add_parameter("fit.algo", "MLE", "Inference algorithm")
    traj.f_add_parameter("fit.max_iter", np.int64(500),
                         "Number of VEM iterations")
    traj.f_add_parameter("fit.n_sample", np.int64(1),
                         "Number of samples for VIMC")
    traj.f_add_parameter("fit.eps", np.float64(1.0e-6),
                         "convergence threshold")
    traj.f_add_parameter("fit.lr", np.float64(0.01), "GD Step size")

    # experiment
    experiment = cartesian_product(explore_dict, tuple(explore_dict.keys()))
    traj.f_explore(experiment)
    env.add_postprocessing(post_processing)
    env.run(run)
    env.disable_logging()
Example #8
def main():
    """Main function to protect the *entry point* of the program.

    If you want to use multiprocessing under Windows you need to wrap your
    main code creating an environment into a function. Otherwise
    the newly started child processes will re-execute the code and throw
    errors (also see https://docs.python.org/2/library/multiprocessing.html#windows).

    """

    # Create an environment that handles running.
    # Let's enable multiprocessing with 4 workers.
    filename = os.path.join('hdf5', 'example_04.hdf5')
    env = Environment(
        trajectory='Example_04_MP',
        filename=filename,
        file_title='Example_04_MP',
        log_stdout=True,
        comment='Multiprocessing example!',
        multiproc=True,
        ncores=4,
        use_pool=True,  # Our runs are inexpensive, so we can get rid of overhead
        # by using a pool
        freeze_input=True,  # We can avoid some
        # overhead by freezing the input to the pool
        wrap_mode=pypetconstants.WRAP_MODE_QUEUE,
        graceful_exit=True,  # We want to exit in a data-friendly way
        # that saves all results after hitting CTRL+C, try it ;-)
        overwrite_file=True)

    # Get the trajectory from the environment
    traj = env.trajectory

    # Add both parameters
    traj.f_add_parameter('x', 1.0, comment='I am the first dimension!')
    traj.f_add_parameter('y', 1.0, comment='I am the second dimension!')

    # Explore the parameters with a cartesian product, but we want to explore a bit more
    traj.f_explore(
        cartesian_product({
            'x': [float(x) for x in range(20)],
            'y': [float(y) for y in range(20)]
        }))

    # Run the simulation
    env.run(multiply)

    # Finally disable logging and close all log-files
    env.disable_logging()
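
The multiply run function is not shown in this listing; pypet's introductory examples use essentially the following, reproduced here as a sketch:

def multiply(traj):
    # Compute the product of the two explored parameters and store it
    z = traj.x * traj.y
    traj.f_add_result('z', z, comment='Product of x and y')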
Example #9
def main():
    """Main function to protect the *entry point* of the program.

    If you want to use multiprocessing under Windows you need to wrap your
    main code creating an environment into a function. Otherwise
    the newly started child processes will re-execute the code and throw
    errors (also see https://docs.python.org/2/library/multiprocessing.html#windows).

    """

    # Create an environment that handles running.
    # Let's enable multiprocessing with 4 workers.
    filename = os.path.join('hdf5', 'example_04.hdf5')
    env = Environment(trajectory='Example_04_MP',
                      filename=filename,
                      file_title='Example_04_MP',
                      log_stdout=True,
                      comment='Multiprocessing example!',
                      multiproc=True,
                      ncores=4,
                      use_pool=True,  # Our runs are inexpensive, so we can get rid of overhead
                      # by using a pool
                      freeze_input=True,  # We can avoid some
                      # overhead by freezing the input to the pool
                      wrap_mode=pypetconstants.WRAP_MODE_QUEUE,
                      graceful_exit=True,  # We want to exit in a data-friendly way
                      # that saves all results after hitting CTRL+C, try it ;-)
                      overwrite_file=True)

    # Get the trajectory from the environment
    traj = env.trajectory

    # Add both parameters
    traj.f_add_parameter('x', 1.0, comment='I am the first dimension!')
    traj.f_add_parameter('y', 1.0, comment='I am the second dimension!')

    # Explore the parameters with a cartesian product, but we want to explore a bit more
    traj.f_explore(cartesian_product({'x':[float(x) for x in range(20)],
                                      'y':[float(y) for y in range(20)]}))

    # Run the simulation
    env.run(multiply)

    # Finally disable logging and close all log-files
    env.disable_logging()
Example #10
def main():
    filename = os.path.join('hdf5', 'FiringRate.hdf5')
    env = Environment(trajectory='FiringRatePipeline',
                      comment='Experiment to measure the firing rate '
                            'of a leaky integrate and fire neuron. '
                            'Exploring different input currents, '
                            'as well as refractory periods',
                      add_time=False, # We don't want to add the current time to the name,
                      log_stdout=True,
                      multiproc=True,
                      ncores=2, #My laptop has 2 cores ;-)
                      filename=filename,
                      overwrite_file=True)

    env.pipeline(mypipeline)

    # Finally disable logging and close all log-files
    env.disable_logging()
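
mypipeline bundles all experiment phases into one function. In pypet's pipeline mode the function receives the trajectory, adds parameters and the exploration, and returns the run function together with optional post-processing. A sketch, reusing the helper names from Example #1:

def mypipeline(traj):
    # Phase 1: parameters and exploration
    add_parameters(traj)
    add_exploration(traj)
    # Return (run function, args, kwargs) and
    # (post-processing function, args, kwargs)
    return (run_neuron, (), {}), (neuron_postproc, (), {})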
Example #11
def main():
    """Main function to protect the *entry point* of the program.

    If you want to use multiprocessing with SCOOP you need to wrap your
    main code creating an environment into a function. Otherwise
    the newly started child processes will re-execute the code and throw
    errors (also see http://scoop.readthedocs.org/en/latest/usage.html#pitfalls).

    """

    # Create an environment that handles running.
    # Let's enable multiprocessing with scoop:
    filename = os.path.join('hdf5', 'example_21.hdf5')
    env = Environment(trajectory='Example_21_SCOOP',
                      filename=filename,
                      file_title='Example_21_SCOOP',
                      log_stdout=True,
                      comment='Multiprocessing example using SCOOP!',
                      multiproc=True,
                      freeze_input=True, # We want to save overhead and freeze input
                      use_scoop=True, # Yes we want SCOOP!
                      wrap_mode=pypetconstants.WRAP_MODE_LOCAL,  # SCOOP only works with 'LOCAL'
                      # or 'NETLOCK' wrapping
                      overwrite_file=True)

    # Get the trajectory from the environment
    traj = env.trajectory

    # Add both parameters
    traj.f_add_parameter('x', 1.0, comment='I am the first dimension!')
    traj.f_add_parameter('y', 1.0, comment='I am the second dimension!')

    # Explore the parameters with a cartesian product, but we want to explore a bit more
    traj.f_explore(cartesian_product({'x':[float(x) for x in range(20)],
                                      'y':[float(y) for y in range(20)]}))
    # Run the simulation
    env.run(multiply)

    # Let's check that all runs are completed!
    assert traj.f_is_completed()

    # Finally disable logging and close all log-files
    env.disable_logging()
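
Note that a SCOOP script is not started with the plain Python interpreter; it has to be launched through the SCOOP module, e.g. python -m scoop example_21.py, so that the worker processes exist before main() runs.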
Example #12
def main():
    filename = os.path.join('hdf5', 'FiringRate.hdf5')
    env = Environment(
        trajectory='FiringRatePipeline',
        comment='Experiment to measure the firing rate '
        'of a leaky integrate and fire neuron. '
        'Exploring different input currents, '
        'as well as refractory periods',
        add_time=False,  # We don't want to add the current time to the name,
        log_stdout=True,
        multiproc=True,
        ncores=2,  #My laptop has 2 cores ;-)
        filename=filename,
        overwrite_file=True)

    env.pipeline(mypipeline)

    # Finally disable logging and close all log-files
    env.disable_logging()
Example #13
def main():
    # Create an environment that handles running
    filename = os.path.join('hdf5', 'example_18.hdf5')
    env = Environment(trajectory='Multiplication',
                      filename=filename,
                      file_title='Example_18_Many_Runs',
                      overwrite_file=True,
                      comment='Contains many runs',
                      multiproc=True,
                      use_pool=True,
                      freeze_input=True,
                      ncores=2,
                      wrap_mode='QUEUE')

    # The environment has created a trajectory container for us
    traj = env.trajectory

    # Add both parameters
    traj.f_add_parameter('x', 1, comment='I am the first dimension!')
    traj.f_add_parameter('y', 1, comment='I am the second dimension!')

    # Explore the parameters with a cartesian product, yielding 2500 runs
    traj.f_explore(cartesian_product({'x': range(50), 'y': range(50)}))

    # Run the simulation
    env.run(multiply)

    # Disable logging
    env.disable_logging()

    # turn auto loading on, since results have not been loaded yet
    traj.v_auto_load = True
    # Use the `v_idx` functionality
    traj.v_idx = 2042
    print('The result of run %d is: ' % traj.v_idx)
    # Now we can rely on the wildcards
    print(traj.res.crunset.crun.z)
    traj.v_idx = -1
    # Or we can use the shortcuts `rts_X` (run to set) and `r_X` to get particular results
    print('The result of run %d is: ' % 2044)
    print(traj.res.rts_2044.r_2044.z)
Example #14
def main():
    # Create an environment that handles running
    filename = os.path.join('hdf5', 'example_12.hdf5')
    env = Environment(
        trajectory='Multiplication',
        filename=filename,
        file_title='Example_12_Sharing_Data',
        overwrite_file=True,
        comment='The first example!',
        continuable=False,  # We have shared data in terms of a multiprocessing list,
        # so we CANNOT use the continue feature.
        multiproc=True,
        ncores=2)

    # The environment has created a trajectory container for us
    traj = env.trajectory

    # Add both parameters
    traj.f_add_parameter('x', 1, comment='I am the first dimension!')
    traj.f_add_parameter('y', 1, comment='I am the second dimension!')

    # Explore the parameters with a cartesian product
    traj.f_explore(cartesian_product({'x': [1, 2, 3, 4], 'y': [6, 7, 8]}))

    # We want a shared list where we can put all our results. We use a manager for this:
    result_list = mp.Manager().list()
    # Let's make some space for potential results
    result_list[:] = [0 for _dummy in range(len(traj))]

    # Run the simulation
    env.run(multiply, result_list)

    # Now we want to store the final list as a numpy array
    traj.f_add_result('z', np.array(result_list))

    # Finally let's print the result to see that it worked
    print(traj.z)

    # Disable logging and close all log-files
    env.disable_logging()
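
In this shared-data variant, multiply needs a second argument for the manager list; a sketch of what the example presumably uses:

def multiply(traj, result_list):
    z = traj.x * traj.y
    # Write into the slot of the current run instead of (only) the trajectory
    result_list[traj.v_idx] = z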
Example #15
def main():
    # Create an environment that handles running
    filename = os.path.join('hdf5', 'example_12.hdf5')
    env = Environment(trajectory='Multiplication',
                      filename=filename,
                      file_title='Example_12_Sharing_Data',
                      overwrite_file=True,
                      comment='The first example!',
                      continuable=False, # We have shared data in terms of a multiprocessing list,
                      # so we CANNOT use the continue feature.
                      multiproc=True,
                      ncores=2)

    # The environment has created a trajectory container for us
    traj = env.trajectory

    # Add both parameters
    traj.f_add_parameter('x', 1, comment='I am the first dimension!')
    traj.f_add_parameter('y', 1, comment='I am the second dimension!')

    # Explore the parameters with a cartesian product
    traj.f_explore(cartesian_product({'x':[1,2,3,4], 'y':[6,7,8]}))

    # We want a shared list where we can put all our results. We use a manager for this:
    result_list = mp.Manager().list()
    # Let's make some space for potential results
    result_list[:] = [0 for _dummy in range(len(traj))]

    # Run the simulation
    env.run(multiply, result_list)

    # Now we want to store the final list as a numpy array
    traj.f_add_result('z', np.array(result_list))

    # Finally let's print the result to see that it worked
    print(traj.z)

    # Disable logging and close all log-files
    env.disable_logging()
Example #16
def _batch_run(dir='unnamed',
               batch_id='template',
               space=None,
               save_data_in_hdf5=False,
               single_method=single_run,
               process_method=null_processing,
               post_process_method=None,
               final_process_method=None,
               multiprocessing=True,
               resumable=True,
               overwrite=False,
               sim_config=None,
               params=None,
               optimization=None,
               post_kwargs={},
               run_kwargs={}):
    saved_args = locals()
    traj_name = f'{batch_id}_traj'
    parent_dir_path = f'{paths.BatchRunFolder}/{dir}'
    dir_path = os.path.join(parent_dir_path, batch_id)

    filename = f'{dir_path}/{batch_id}.hdf5'
    build_new = True
    if (os.path.exists(parent_dir_path) and os.path.exists(dir_path)
            and not overwrite):
        build_new = False
        try:
            # print('Trying to resume existing trajectory')
            env = Environment(continuable=True)
            env.resume(trajectory_name=traj_name, resume_folder=dir_path)
            print('Resumed existing trajectory')
            build_new = False
        except Exception:
            try:
                # print('Trying to load existing trajectory')
                traj = load_trajectory(filename=filename,
                                       name=traj_name,
                                       load_all=0)
                env = Environment(trajectory=traj, multiproc=True, ncores=4)

                traj = config_traj(traj, optimization)

                traj.f_load(index=None, load_parameters=2, load_results=0)
                traj.f_expand(space)
                print('Loaded existing trajectory')
                build_new = False
            except Exception:
                print(
                    'Neither resuming nor expanding the existing trajectory worked'
                )

    if build_new:
        if multiprocessing:
            multiproc = True
            resumable = False
            wrap_mode = pypetconstants.WRAP_MODE_QUEUE
        else:
            multiproc = False
            resumable = True
            wrap_mode = pypetconstants.WRAP_MODE_LOCK
        # print('Trying to create novel environment')
        env = Environment(
            trajectory=traj_name,
            filename=filename,
            file_title=batch_id,
            comment=f'{batch_id} batch run!',
            large_overview_tables=True,
            overwrite_file=True,
            resumable=False,
            resume_folder=dir_path,
            multiproc=multiproc,
            ncores=4,
            use_pool=True,  # Our runs are inexpensive, so we can get rid of overhead by using a pool
            freeze_input=True,  # We can avoid some overhead by freezing the input to the pool
            wrap_mode=wrap_mode,
            graceful_exit=True)
        print('Created novel environment')
        traj = prepare_traj(env.traj, sim_config, params, batch_id,
                            parent_dir_path, dir_path)
        traj = config_traj(traj, optimization)
        traj.f_explore(space)

    if post_process_method is not None:
        env.add_postprocessing(post_process_method, **post_kwargs)
    env.run(single_method,
            process_method,
            save_data_in_hdf5=save_data_in_hdf5,
            save_to=dir_path,
            **run_kwargs)
    env.disable_logging()
    print('Batch run complete')
    if final_process_method is not None:
        results = final_process_method(env.traj)
        # print(results)
        return results
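
A hypothetical invocation of _batch_run; every name below (the parameter-space keys and my_sim_config) is illustrative, not taken from the source project:

space = cartesian_product({'larva.speed': [0.5, 1.0],
                           'larva.turn_rate': [0.1, 0.2]})
results = _batch_run(dir='exploration',
                     batch_id='speed_vs_turning',
                     space=space,
                     sim_config=my_sim_config,  # assumed config object
                     multiprocessing=True)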
Example #17
def main():
    # pypet environment
    env = Environment(
        trajectory="missing_rate",
        comment="Test experiment with varying missing rate",
        log_config=None,
        multiproc=False,
        ncores=1,
        # use_pool=True,
        # freeze_input=True,
        # wrap_mode=pypetconstants.WRAP_MODE_QUEUE,
        # graceful_exit=True,
        filename="./simulations/results/test/",
        overwrite_file=True
    )
    traj = env.trajectory

    # parameters (data generation)

    traj.f_add_parameter(
        "data.N", np.int64(500), "Number of nodes"
    )
    traj.f_add_parameter(
        "data.K", np.int64(5), "True number of latent components"
    )
    traj.f_add_parameter(
        "data.p_cts", np.int64(10), "Number of continuous covariates"
    )
    traj.f_add_parameter(
        "data.p_bin", np.int64(0), "Number of binary covariates"
    )
    traj.f_add_parameter(
        "data.var_adj", np.float64(1.), "True variance in the link Probit model"
    )
    traj.f_add_parameter(
        "data.var_cov", np.float64(1.), "True variance in the covariate model (cts and bin)"
    )
    traj.f_add_parameter(
        "data.missing_rate", np.float64(0.2), "Missing rate"
    )
    traj.f_add_parameter(
        "data.seed", np.int64(1), "Random seed"
    )
    traj.f_add_parameter(
        "data.alpha_mean", np.float64(-1.85), "Mean of the heterogeneity parameter"
    )

    # parameters (model)
    traj.f_add_parameter(
        "model.K", np.int64(3), "Number of latent components in the model"
    )
    traj.f_add_parameter(
        "model.adj_model", "Logistic", "Adjacency model"
    )
    traj.f_add_parameter(
        "model.bin_model", "Logistic", "Binary covariate model"
    )

    # parameters (fit)
    traj.f_add_parameter(
        "fit.n_iter", np.int64(10), "Number of VEM iterations"
    )
    traj.f_add_parameter(
        "fit.n_vmp", np.int64(10), "Number of VMP iterations per E-step"
    )
    traj.f_add_parameter(
        "fit.n_gd", np.int64(10), "Number of GD iterations per M-step"
    )
    traj.f_add_parameter(
        "fit.step_size", np.float64(0.01), "GD Step size"
    )

    # experiment
    explore_dict = {
        "data.missing_rate": np.array([0.05]),
        "data.p_cts": np.array([10]),
        "data.seed": np.array([1])
    }
    experiment = cartesian_product(explore_dict, ('data.missing_rate', "data.p_cts", "data.seed"))
    traj.f_explore(experiment)

    env.add_postprocessing(post_processing)
    env.run(run)
    env.disable_logging()
Example #18
def main():

    filename = os.path.join('hdf5', 'example_05.hdf5')
    env = Environment(trajectory='Example_05_Euler_Integration',
                      filename=filename,
                      file_title='Example_05_Euler_Integration',
                      overwrite_file=True,
                      comment='Go for Euler!')


    traj = env.trajectory
    trajectory_name = traj.v_name

    # 1st a) phase parameter addition
    add_parameters(traj)

    # 1st b) phase preparation
    # We will add the differential equation (well, its source code only) as a derived parameter
    traj.f_add_derived_parameter(FunctionParameter, 'diff_eq', diff_lorenz,
                                 comment='Source code of our equation!')

    # We want to explore some initial conditions
    traj.f_explore({'initial_conditions' : [
        np.array([0.01,0.01,0.01]),
        np.array([2.02,0.02,0.02]),
        np.array([42.0,4.2,0.42])
    ]})
    # 3 different conditions are enough for an illustrative example

    # 2nd phase let's run the experiment
    # We pass `euler_scheme` as our top-level simulation function and
    # the Lorenz equation 'diff_lorenz' as an additional argument
    env.run(euler_scheme, diff_lorenz)

    # We don't have a 3rd phase of post-processing here

    # 4th phase analysis.
    # I would recommend doing post-processing completely independently of the simulation,
    # but for simplicity let's do it here.

    # Let's assume that we start all over again and load the entire trajectory new.
    # Yet, there is an error within this approach; do you spot it?
    del traj
    traj = Trajectory(filename=filename)

    # We will only fully load parameters and derived parameters.
    # Results will be loaded manually later on.
    try:
        # However, this will fail because our trajectory does not know how to
        # build the FunctionParameter. You have seen this coming, right?
        traj.f_load(name=trajectory_name, load_parameters=2, load_derived_parameters=2,
                    load_results=1)
    except ImportError as e:

        print('That didn\'t work, I am sorry: %s ' % str(e))

        # Ok, let's try again but this time with adding our parameter to the imports
        traj = Trajectory(filename=filename,
                           dynamically_imported_classes=FunctionParameter)

        # Now it works:
        traj.f_load(name=trajectory_name, load_parameters=2, load_derived_parameters=2,
                    load_results=1)


    #For the fun of it, let's print the source code
    print('\n ---------- The source code of your function ---------- \n %s' % traj.diff_eq)

    # Let's get the exploration array:
    initial_conditions_exploration_array = traj.f_get('initial_conditions').f_get_range()
    # Now let's plot our simulated equations for the different initial conditions:
    # We will iterate through the run names
    for idx, run_name in enumerate(traj.f_get_run_names()):

        #Get the result of run idx from the trajectory
        euler_result = traj.results.f_get(run_name).euler_evolution
        # Now we manually need to load the result. Actually the results are not so large and we
        # could load them all at once. But for demonstration we act as if they were huge:
        traj.f_load_item(euler_result)
        euler_data = euler_result.data

        #Plot fancy 3d plot
        fig = plt.figure(idx)
        ax = fig.gca(projection='3d')
        x = euler_data[:,0]
        y = euler_data[:,1]
        z = euler_data[:,2]
        ax.plot(x, y, z, label='Initial Conditions: %s' % str(initial_conditions_exploration_array[idx]))
        plt.legend()
        plt.show()

        # Now we free the data again (because we assume it's huuuuuuge):
        del euler_data
        euler_result.f_empty()

    # You have to click through the images to stop the example_05 module!

    # Finally disable logging and close all log-files
    env.disable_logging()
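
diff_lorenz is the right-hand side of the Lorenz system handed to the Euler integrator. A sketch of the assumed signature; the actual example reads sigma, beta and rho from the trajectory:

import numpy as np

def diff_lorenz(value_array, sigma=10.0, beta=8.0/3.0, rho=28.0):
    # dx/dt = sigma*(y-x), dy/dt = x*(rho-z) - y, dz/dt = x*y - beta*z
    x, y, z = value_array
    return np.array([sigma * (y - x),
                     x * (rho - z) - y,
                     x * y - beta * z])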
Example #19
def main():
    # Get current directory
    traj_dir = os.getcwd()
    # Read output path (if provided)
    if len(sys.argv) > 1:
        # Only use specified folder if it exists
        if os.path.isdir(sys.argv[1]):
            # Get name of directory
            traj_dir = os.path.dirname(sys.argv[1])
            # Convert to full path
            traj_dir = os.path.abspath(traj_dir)
    # Add time stamp (final '' is to make sure there is a trailing slash)
    traj_dir = os.path.join(traj_dir,
                            datetime.now().strftime("%Y_%m_%d_%Hh%Mm%Ss"), '')
    # Create directory with time stamp
    os.makedirs(traj_dir)
    # Change current directory to the one containing the trajectory files
    os.chdir(traj_dir)
    print('Trajectory and results will be stored to: {0}'.format(traj_dir))

    # Create an environment that handles running.
    # Let's enable multiprocessing with scoop:
    env = Environment(
        trajectory='traj',
        comment='',
        add_time=False,
        log_config='DEFAULT',
        log_stdout=True,  # log everything that is printed; this will make the log file HUGE
        filename=traj_dir,  # filename or just a folder (the name will be automatic in this case)
        multiproc=True,
        use_scoop=True,
        wrap_mode=pypetconstants.WRAP_MODE_LOCAL,
        memory_cap=10,
        swap_cap=1)
    traj = env.trajectory

    # -------------------------------------------------------------------
    # Add config parameters (those that DO NOT influence the final result of the experiment)
    traj.f_add_config('parallel_target_analysis',
                      False,
                      comment='Analyse targets in parallel')
    # -------------------------------------------------------------------
    # Parameters characterizing the initial topology of the network
    traj.f_add_parameter('topology.initial.model', 'BA')
    #traj.f_add_parameter('topology.initial.model', 'WS')
    traj.f_add_parameter('topology.initial.nodes_n',
                         5,
                         comment='Number of nodes')
    #traj.f_add_parameter('topology.initial.WS_k', 4, comment='Number of neighbours (and mean degree) in the Watts-Strogatz model')
    #traj.f_add_parameter('topology.initial.WS_p', 0.0, comment='Rewiring probability in the Watts-Strogatz model')
    traj.f_add_parameter(
        'topology.initial.BA_m', 1,
        comment='Number of edges to attach from a new node to existing '
                'nodes in the Barabási–Albert model')

    # -------------------------------------------------------------------
    # Parameters characterizing the coupling between the nodes
    traj.f_add_parameter(
        'node_coupling.initial.model', 'linear',
        comment='Linear coupling model: the input to each target node is '
                'the weighted sum of the outputs of its source nodes')
    traj.f_add_parameter('node_coupling.initial.weight_distribution', 'fixed')
    traj.f_add_parameter('node_coupling.initial.fixed_coupling', 0.1)

    # -------------------------------------------------------------------
    # Parameters characterizing the delay
    traj.f_add_parameter('delay.initial.distribution', 'uniform')
    traj.f_add_parameter('delay.initial.delay_links_n_max',
                         1,
                         comment='Maximum number of delay links')
    traj.f_add_parameter('delay.initial.delay_min', 1, comment='')
    traj.f_add_parameter('delay.initial.delay_max', 1, comment='')
    traj.f_add_parameter('delay.initial.delay_self', 1, comment='')

    # -------------------------------------------------------------------
    # Parameters characterizing the estimator
    traj.f_add_parameter('estimation.history_source',
                         1,
                         comment='Embedding length for the source')
    traj.f_add_parameter('estimation.history_target',
                         14,
                         comment='Embedding length for the target')

    # -------------------------------------------------------------------
    # Parameters characterizing the dynamics of the nodes
    traj.f_add_parameter('node_dynamics.model', 'AR_gaussian_discrete')
    #traj.f_add_parameter('node_dynamics.model', 'boolean_random')
    traj.f_add_parameter('node_dynamics.samples_n',
                         100,
                         comment='Number of samples (observations) to record')
    traj.f_add_parameter(
        'node_dynamics.samples_transient_n',
        1000 * traj.topology.initial.nodes_n,
        comment='Number of initial samples (observations) to skip to leave '
                'out the transient')
    traj.f_add_parameter('node_dynamics.replications',
                         1,
                         comment='Number of replications (trials) to record')
    traj.f_add_parameter('node_dynamics.noise_std',
                         1,
                         comment='Standard deviation of Gaussian noise')
    #traj.f_add_parameter('node_dynamics.RBN_r', 0.5, comment='Activity level (i.e. probability of state "1") in Boolean dynamics')
    #traj.f_add_parameter('node_dynamics.noise_flip_p', 0.005, comment='Probability of flipping bit in Boolean dynamics')

    # -------------------------------------------------------------------
    # Parameters characterizing the repetitions of the same run
    traj.f_add_parameter(
        'repetition_i', 0,
        comment='Index of the current repetition')  # Normally starts from 0

    # -------------------------------------------------------------------
    # Define parameter combinations to explore (a trajectory in
    # the parameter space)
    # The second argument, the tuple, specifies the order of the cartesian product,
    # The variable on the right most side changes fastest and defines the
    # 'inner for-loop' of the cartesian product
    explore_dict = cartesian_product(
        {
            'node_coupling.initial.weight_distribution': ['fixed'],
            'repetition_i': np.arange(0, 10000, 1).tolist(),
            'topology.initial.nodes_n': np.arange(100, 100 + 1, 30).tolist(),
            'node_dynamics.samples_n': np.array([100]).tolist(),
            #'topology.initial.WS_p': np.around(np.logspace(-2.2, 0, 10), decimals=4).tolist(),
        },
        (
            'node_coupling.initial.weight_distribution',
            'node_dynamics.samples_n',
            'topology.initial.nodes_n',
            #'topology.initial.WS_p',
            'repetition_i',
        ))
    print(explore_dict)
    traj.f_explore(explore_dict)
    # Run the experiment
    env.run(compute_bTE_all_pairs)
    # Check that all runs are completed
    assert traj.f_is_completed()
    # Finally disable logging and close all log-files
    env.disable_logging()
Example #20
def main():
    name = 'LTL-MDP-GD_6_8_TD1'
    try:
        with open('path.conf') as f:
            root_dir_path = f.read().strip()
    except FileNotFoundError:
        raise FileNotFoundError(
            "You have not set the root path to store your results."
            " Write the path to a path.conf text file in the bin directory"
            " before running the simulation"
        )
    paths = Paths(name, dict(run_no='test'), root_dir_path=root_dir_path)

    print("All output logs can be found in directory ", paths.logs_path)

    traj_file = os.path.join(paths.output_dir_path, 'data.h5')

    # Create an environment that handles running our simulation
    # This initializes a PyPet environment
    env = Environment(trajectory=name, filename=traj_file, file_title=u'{} data'.format(name),
                      comment=u'{} data'.format(name),
                      add_time=True,
                      # freeze_input=True,
                      # multiproc=True,
                      # use_scoop=True,
                      wrap_mode=pypetconstants.WRAP_MODE_LOCK,
                      automatic_storing=True,
                      log_stdout=False,  # Sends stdout to logs
                      log_folder=os.path.join(paths.output_dir_path, 'logs')
                      )
    create_shared_logger_data(logger_names=['bin', 'optimizers'],
                              log_levels=['INFO', 'INFO'],
                              log_to_consoles=[True, True],
                              sim_name=name,
                              log_directory=paths.logs_path)
    configure_loggers()

    # Get the trajectory from the environment
    traj = env.trajectory

    optimizee = DLSMDPOptimizee(traj)

    ## Outerloop optimizer initialization
    parameters = ClassicGDParameters(learning_rate=0.001, exploration_step_size=0.001,
                                     n_random_steps=50, n_iteration=30,
                                     stop_criterion=np.Inf, seed=1234)
    #parameters = AdamParameters(learning_rate=0.01, exploration_step_size=0.01, n_random_steps=15, first_order_decay=0.8,
    #                             second_order_decay=0.8, n_iteration=83, stop_criterion=np.Inf, seed=99)
    # parameters = StochasticGDParameters(learning_rate=0.01, stochastic_deviation=1, stochastic_decay=0.99,
    #                                     exploration_step_size=0.01, n_random_steps=5, n_iteration=100,
    #                                     stop_criterion=np.Inf)
    #parameters = RMSPropParameters(learning_rate=0.01, exploration_step_size=0.01,
    #                               n_random_steps=5, momentum_decay=0.5,
    #                               n_iteration=100, stop_criterion=np.Inf, seed=99)

    optimizer = GradientDescentOptimizer(traj, optimizee_create_individual=optimizee.create_individual,
                                         optimizee_fitness_weights=(-1.,),
                                         parameters=parameters,
                                         optimizee_bounding_func=optimizee.bounding_func,
                                         base_point_evaluations=10)

    # Add post processing
    env.add_postprocessing(optimizer.post_process)

    # Run the simulation with all parameter combinations
    env.run(optimizee.simulate)

    ## Outerloop optimizer end
    optimizer.end(traj)

    # Finally disable logging and close all log-files
    env.disable_logging()
Example #21
def main():

    filename = os.path.join('hdf5', 'example_06.hdf5')
    env = Environment(trajectory='Example_06_Euler_Integration',
                      filename=filename,
                      file_title='Example_06_Euler_Integration',
                      overwrite_file=True,
                      comment='Go for Euler!')


    traj = env.trajectory

    # 1st a) phase parameter addition
    # Remember we have some control flow in the `add_parameters` function, the default parameter
    # set we choose is the `'diff_lorenz'` one, but we want to deviate from that and use the
    # `'diff_roessler'`.
    # In order to do that we can preset the corresponding name parameter to change the
    # control flow:
    traj.f_preset_parameter('diff_name', 'diff_roessler') # If you erase this line, you will get
                                                          # again the lorenz attractor
    add_parameters(traj)

    # 1st b) phase preparation
    # Let's check which function we want to use
    if traj.diff_name=='diff_lorenz':
        diff_eq = diff_lorenz
    elif traj.diff_name=='diff_roessler':
        diff_eq = diff_roessler
    else:
        raise ValueError('I don\'t know what %s is.' % traj.diff_name)
    # And add the source code of the function as a derived parameter.
    traj.f_add_derived_parameter(FunctionParameter, 'diff_eq', diff_eq,
                                     comment='Source code of our equation!')

    # We want to explore some initial conditions
    traj.f_explore({'initial_conditions' : [
        np.array([0.01,0.01,0.01]),
        np.array([2.02,0.02,0.02]),
        np.array([42.0,4.2,0.42])
    ]})
    # 3 different conditions are enough for now

    # 2nd phase let's run the experiment
    # We pass 'euler_scheme' as our top-level simulation function and
    # the Roessler function as an additional argument
    env.run(euler_scheme, diff_eq)

    # Again no post-processing

    # 4th phase analysis.
    # I would recommend doing the analysis completely independently of the simulation,
    # but for simplicity let's do it here.
    # We won't reload the trajectory this time but simply update the skeleton
    traj.f_load_skeleton()

    #For the fun of it, let's print the source code
    print('\n ---------- The source code of your function ---------- \n %s' % traj.diff_eq)

    # Let's get the exploration array:
    initial_conditions_exploration_array = traj.f_get('initial_conditions').f_get_range()
    # Now let's plot our simulated equations for the different initial conditions.
    # We will iterate through the run names
    for idx, run_name in enumerate(traj.f_get_run_names()):

        # Get the result of run idx from the trajectory
        euler_result = traj.results.f_get(run_name).euler_evolution
        # Now we manually need to load the result. Actually the results are not so large and we
        # could load them all at once, but for demonstration we act as if they were huge:
        traj.f_load_item(euler_result)
        euler_data = euler_result.data

        # Plot fancy 3d plot
        fig = plt.figure(idx)
        ax = fig.gca(projection='3d')
        x = euler_data[:,0]
        y = euler_data[:,1]
        z = euler_data[:,2]
        ax.plot(x, y, z, label='Initial Conditions: %s' % str(initial_conditions_exploration_array[idx]))
        plt.legend()
        plt.show()

        # Now we free the data again (because we assume it's huuuuuuge):
        del euler_data
        euler_result.f_empty()

    # Finally disable logging and close all log-files
    env.disable_logging()
Example #22
def _batch_run(dir='unnamed',
               batch_id='template',
               space=None,
               save_data_in_hdf5=False,
               single_method=single_run,
               process_method=null_processing,
               post_process_method=None,
               final_process_method=None,
               multiprocessing=True,
               resumable=True,
               overwrite=False,
               sim_config=None,
               params=None,
               config=None,
               post_kwargs={},
               run_kwargs={}):
    saved_args = locals()
    # print(locals())
    traj_name = f'{batch_id}_traj'
    parent_dir_path = f'{BatchRunFolder}/{dir}'
    dir_path = os.path.join(parent_dir_path, batch_id)
    plot_path = os.path.join(dir_path, f'{batch_id}.pdf')
    data_path = os.path.join(dir_path, f'{batch_id}.csv')
    filename = f'{dir_path}/{batch_id}.hdf5'
    build_new = True
    if (os.path.exists(parent_dir_path) and os.path.exists(dir_path)
            and not overwrite):
        build_new = False
        try:
            print('Trying to resume existing trajectory')
            env = Environment(continuable=True)
            env.resume(trajectory_name=traj_name, resume_folder=dir_path)
            print('Resumed existing trajectory')
            build_new = False
        except Exception:
            try:
                print('Trying to load existing trajectory')
                traj = load_trajectory(filename=filename,
                                       name=traj_name,
                                       load_all=0)
                env = Environment(trajectory=traj)
                traj.f_load(index=None, load_parameters=2, load_results=0)
                traj.f_expand(space)
                print('Loaded existing trajectory')
                build_new = False
            except Exception:
                print(
                    'Neither resuming nor expanding the existing trajectory worked'
                )

    if build_new:
        if multiprocessing:
            multiproc = True
            resumable = False
            wrap_mode = pypetconstants.WRAP_MODE_QUEUE
        else:
            multiproc = False
            resumable = True
            wrap_mode = pypetconstants.WRAP_MODE_LOCK
        # try:
        print('Trying to create novel environment')
        env = Environment(
            trajectory=traj_name,
            filename=filename,
            file_title=batch_id,
            comment=f'{batch_id} batch run!',
            large_overview_tables=True,
            overwrite_file=True,
            resumable=False,
            resume_folder=dir_path,
            multiproc=multiproc,
            ncores=4,
            use_pool=True,  # Our runs are inexpensive, so we can get rid of overhead by using a pool
            freeze_input=True,  # We can avoid some overhead by freezing the input to the pool
            wrap_mode=wrap_mode,
            graceful_exit=True)
        traj = env.traj
        print('Created novel environment')
        fly_params, env_params, sim_params = sim_config[
            'fly_params'], sim_config['env_params'], sim_config['sim_params']
        if all(v is not None for v in [sim_params, env_params, fly_params]):
            traj = load_default_configuration(traj,
                                              sim_params=sim_params,
                                              env_params=env_params,
                                              fly_params=fly_params)
        elif params is not None:
            for p in params:
                traj.f_apar(p, 0.0)
        if config is not None:
            for k, v in config.items():
                traj.f_aconf(k, v)
        traj.f_aconf('parent_dir_path',
                     parent_dir_path,
                     comment='The parent directory')
        traj.f_aconf('dir_path',
                     dir_path,
                     comment='The directory path for saving data')
        traj.f_aconf('plot_path',
                     plot_path,
                     comment='The file path for saving plot')
        traj.f_aconf('data_path',
                     data_path,
                     comment='The file path for saving data')
        traj.f_aconf('dataset_path',
                     f'{dir_path}/{batch_id}',
                     comment='The directory path for saving datasets')
        traj.f_explore(space)
        # except:
        #     raise ValueError(f'Failed to perform batch run {batch_id}')

    if post_process_method is not None:
        env.add_postprocessing(post_process_method, **post_kwargs)
    env.run(single_method,
            process_method,
            save_data_in_hdf5=save_data_in_hdf5,
            save_to=dir_path,
            common_folder=batch_id,
            **run_kwargs)
    env.disable_logging()
    print('Batch run complete')
    if final_process_method is not None:
        return final_process_method(env.traj)
Example #23
def main():
    """Main function to protect the *entry point* of the program.

    If you want to use multiprocessing with SCOOP you need to wrap your
    main code creating an environment into a function. Otherwise
    the newly started child processes will re-execute the code and throw
    errors (also see http://scoop.readthedocs.org/en/latest/usage.html#pitfalls).

    """

    # Load settings from file
    settings_file = 'pypet_settings.pkl'
    settings = load_obj(settings_file)
    # Print settings dictionary
    print('\nSettings dictionary:')
    for key, value in settings.items():
        print(key, ' : ', value)
    print('\nParameters to explore:')
    for key, value in settings.items():
        if isinstance(value, list):
            print(key, ' : ', value)

    # Create new folder to store results
    traj_dir = os.getcwd()
    # Read output path (if provided)
    if len(sys.argv) > 1:
        # Only use specified folder if it exists
        if os.path.isdir(sys.argv[1]):
            # Get name of directory
            traj_dir = os.path.dirname(sys.argv[1])
            # Convert to full path
            traj_dir = os.path.abspath(traj_dir)
    # Add time stamp (final '' is to make sure there is a trailing slash)
    traj_dir = os.path.join(traj_dir,
                            datetime.now().strftime("%Y_%m_%d_%Hh%Mm%Ss"), '')
    # Create directory with time stamp
    os.makedirs(traj_dir)
    # Change current directory to the one containing the trajectory files
    os.chdir(traj_dir)
    print('Trajectory and results will be stored in: {0}'.format(traj_dir))

    # Create an environment that handles running.
    # Let's enable multiprocessing with scoop:
    env = Environment(
        trajectory='traj',
        comment='',
        add_time=False,
        log_config='DEFAULT',
        log_stdout=True,  # log everything that is printed; this will make the log file HUGE
        filename=traj_dir,  # filename or just a folder (the name will be automatic in this case)
        multiproc=False,
        #use_pool=True,
        #ncores=10,
        #freeze_input=True,
        use_scoop=False,
        #wrap_mode=pypetconstants.WRAP_MODE_LOCAL,
        memory_cap=1,
        swap_cap=1
        #cpu_cap=30

        #,git_repository='' #path to the root git folder. The commit code will be added in the trajectory
        #,git_fail=True #no automatic commits
        #,sumatra_project='' #path to sumatra root folder,
        #graceful_exit=True
    )

    traj = env.trajectory

    # Add config parameters (those that DO NOT influence the final result of the experiment)
    traj.f_add_config('parallel_target_analysis', True)
    #traj.f_add_config('debug', False)
    #traj.f_add_config('max_mem_frac', 0.7)

    # Set up trajectory parameters
    param_to_explore = {}
    for key, val in settings.items():
        if isinstance(val, list):
            param_to_explore[key] = val
            traj.f_add_parameter(key, val[0])
        else:
            traj.f_add_parameter(key, val)

    # Define parameter combinations to explore (a trajectory in
    # the parameter space). The second argument, the tuple, specifies the order
    #  of the cartesian product.
    # The variable on the right most side changes fastest and defines the
    # 'inner for-loop' of the cartesian product
    explore_dict = cartesian_product(param_to_explore,
                                     tuple(param_to_explore.keys()))
    # explore_dict = cartesian_product(
    #     {
    #         'network_inference.algorithm': ['bMI_greedy', 'bTE_greedy', 'mTE_greedy'],
    #         #'node_coupling.initial.weight_distribution': ['fixed'],
    #         'repetition_i': np.arange(0, 5, 1).tolist(),
    #         'topology.initial.nodes_n': np.arange(50, 50+1, 300).tolist(),
    #         'node_dynamics.samples_n': np.array([1000, 10000]).tolist(),
    #         'network_inference.p_value': np.array([0.001]).tolist(),
    #         #'node_coupling.initial.self_coupling': np.arange(-0.5, 0.5 + 0.001, 0.1).tolist(),
    #         #'node_coupling.initial.total_cross_coupling': np.arange(-1., 1 + 0.001, 0.2).tolist(),
    #         #'topology.initial.WS_p': np.around(np.logspace(-2.2, 0, 10), decimals=4).tolist(),
    #     },
    #     (
    #         'network_inference.algorithm',
    #         #'node_coupling.initial.weight_distribution',
    #         'network_inference.p_value',
    #         'node_dynamics.samples_n',
    #         'topology.initial.nodes_n',
    #         #'topology.initial.WS_p',
    #         #'node_coupling.initial.self_coupling',
    #         #'node_coupling.initial.total_cross_coupling',
    #         'repetition_i',
    #     )
    # )
    traj.f_explore(explore_dict)
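
    # For reference, pypet's `cartesian_product` (importable from
    # `pypet.utils.explore`) builds the full factorial combination of all
    # exploration lists. A minimal sketch of its ordering semantics, with the
    # last name in the tuple varying fastest:
    #
    #     cartesian_product({'x': [1, 2, 3], 'y': [42., 43.]}, ('x', 'y'))
    #     # -> {'x': [1, 1, 2, 2, 3, 3], 'y': [42., 43., 42., 43., 42., 43.]}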

    # Store trajectory parameters to disk
    pypet_utils.print_traj_leaves(traj, 'parameters', 'traj_parameters.txt')

    # Run the experiment
    env.run(information_network_inference)
    # env.run(bTE_on_existing_links)

    # Check that all runs are completed
    assert traj.f_is_completed()

    # Finally disable logging and close all log-files
    env.disable_logging()
示例#24
0
# The environment has created a trajectory container for us
traj = env.trajectory

# Add both parameters
traj.v_lazy_adding = True
traj.par.x = 1, 'I am the first dimension!'
traj.par.y = 1, 'I am the second dimension!'
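
# (With `v_lazy_adding` enabled, these assignments are shorthand for explicit
#  calls such as `traj.f_add_parameter('x', 1, comment='I am the first dimension!')`;
#  assigning a `(value, comment)` tuple creates the parameter on the fly.)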

# Explore just two points
traj.f_explore({'x': [3, 4]})

# So far everything was as in the first example. However now we add links:
traj.f_add_link('mylink1', traj.f_get('x'))
# Note the `f_get` here to make sure we get the parameter instance, not the value 1
# This allows us now to access x differently:
print('x=' + str(traj.mylink1))
# We can also avoid fast access and recover the original parameter instance:
print(str(traj.f_get('mylink1')))
# Dotted names are allowed as well; they create new groups on the fly:
traj.f_add_link('parameters.mynewgroup.mylink2', traj.f_get('y'))



# And, of course, we can also use the links during run:
env.run(multiply)
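
# (`multiply` is defined elsewhere in this example. A minimal sketch of how it
#  presumably uses the links; the names match the snippet above, but the body
#  is an assumption:
#      def multiply(traj):
#          z = traj.mylink1 * traj.mynewgroup.mylink2  # same values as traj.x * traj.y
#          traj.f_add_result('z', z, comment='Product of the two linked parameters')
#  )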

# Finally disable logging and close all log-files
env.disable_logging()

示例#25
0
              backup_filename=True,
              move_data=True,
              delete_other_trajectory=True)

# And that's it, now we can take a look at the new trajectory and print all x,y,z triplets.
# But before that we need to load the data we computed during the runs from disk.
# We choose load_parameters=2 and load_results=2 since we want to load all data and not only
# the skeleton
traj1.f_load(load_parameters=2, load_results=2)

for run_name in traj1.f_get_run_names():
    # We can make the trajectory believe it is a single run. All parameters will
    # be treated as if they were set in that specific run. And we can use the `crun` wildcard.
    traj1.f_set_crun(run_name)
    x = traj1.x
    y = traj1.y
    # We need to specify the current run, because there exists more than one z value
    z = traj1.crun.z
    print('%s: x=%f, y=%f, z=%f' % (run_name, x, y, z))

# Don't forget to reset your trajectory to the default settings, to release its
# belief that it is the last run.
traj1.f_restore_default()
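
# (As an alternative sketch: `traj1.f_get_from_runs('z', fast_access=True)`
#  collects all z values at once into an ordered dictionary keyed by run name,
#  without having to set the current run.)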

# As you can see duplicate parameter space points have been removed.
# If you wish you can take a look at the files and backup files in
# the experiments/example_03/HDF5 directory

# Finally, disable logging and close log files
env1.disable_logging()
示例#26
0
def main(dependent, optimizer):
    opt = optimizer.upper()
    identifier = '{:05x}'.format(np.random.randint(16**5))
    print('Identifier: ' + identifier)
    allocated_id = '07'  # dls.get_allocated_board_ids()[0]
    board_calibration_map = {
        'B291698': {
            'dac': 'dac_default.json',
            'cap': 'cap_mem_29.json'
        },
        '07': {
            'dac': 'dac_07_chip_20.json',
            'cap': 'calibration_20.json'
        },
        'B201319': {
            'dac': 'dac_B201319_chip_21.json',
            'cap': 'calibration_24.json'
        },
        'B201330': {
            'dac': 'dac_B201330_chip_22.json',
            'cap': 'calibration_22.json'
        }
    }

    dep_name = 'DEP' if dependent else 'IND'
    name = 'MAB_ANN_{}_{}_{}'.format(identifier, opt, dep_name)
    root_dir_path = os.path.expanduser('~/simulations')
    paths = Paths(name, dict(run_no=u'test'), root_dir_path=root_dir_path)

    with open(os.path.expanduser('~/LTL/bin/logging.yaml')) as f:
        l_dict = yaml.safe_load(f)  # safe_load: a bare yaml.load(f) is deprecated in PyYAML >= 5.1
        log_output_file = os.path.join(paths.results_path,
                                       l_dict['handlers']['file']['filename'])
        l_dict['handlers']['file']['filename'] = log_output_file
        logging.config.dictConfig(l_dict)

    print("All output logs can be found in directory " + str(paths.logs_path))

    traj_file = os.path.join(paths.output_dir_path, u'data.h5')

    # Create an environment that handles running our simulation
    # This initializes a PyPet environment
    env = Environment(
        trajectory=name,
        filename=traj_file,
        file_title=u'{} data'.format(name),
        comment=u'{} data'.format(name),
        add_time=True,
        # freeze_input=True,
        # multiproc=True,
        # use_scoop=True,
        wrap_mode=pypetconstants.WRAP_MODE_LOCK,
        automatic_storing=True,
        log_stdout=False,  # Sends stdout to logs
        log_folder=os.path.join(paths.output_dir_path, 'logs'))
    create_shared_logger_data(logger_names=['bin', 'optimizers', 'optimizees'],
                              log_levels=['INFO', 'INFO', 'INFO'],
                              log_to_consoles=[True, True, True],
                              sim_name=name,
                              log_directory=paths.logs_path)
    configure_loggers()

    # Get the trajectory from the environment
    traj = env.trajectory

    optimizee_seed = 100

    with open('../adv/' + board_calibration_map[allocated_id]['cap']) as f:
        calibrated_config = json.load(f)
    with open('../adv/' + board_calibration_map[allocated_id]['dac']) as f:
        dac_config = json.load(f)

    class Dummy(object):
        """Minimal context manager that simply hands back the wrapped connector."""
        def __init__(self, connector):
            self.connector = connector

        def __enter__(self):
            return self.connector

        def __exit__(self, exc_type, exc_val, exc_tb):
            pass

    class Mgr(object):
        """Stub connection manager; the actual connector is injected before the run."""
        def __init__(self):
            self.connector = None

        def establish(self):
            return Dummy(self.connector)

    max_learning_rate = 1.

    mgr = Mgr()
    optimizee_parameters = \
        BanditParameters(n_arms=2, n_pulls=100, n_samples=40, seed=optimizee_seed,
                         max_learning_rate=max_learning_rate, learning_rule=ANNLearningRule,
                         establish_connection=mgr.establish)
    optimizee = BanditOptimizee(traj, optimizee_parameters, dp=dependent)

    # Select and configure the outer-loop optimizer
    optimizer = None
    pop_size = 200
    n_iteration = 60
    if opt == 'CE':
        ce_optimizer_parameters = CrossEntropyParameters(
            pop_size=pop_size,
            rho=0.06,
            smoothing=0.3,
            temp_decay=0,
            n_iteration=n_iteration,
            distribution=NoisyGaussian(noise_magnitude=.2, noise_decay=.925),
            #Gaussian(),#NoisyGaussian(noise_magnitude=1., noise_decay=0.99),
            stop_criterion=np.inf,
            seed=102)
        ce_optimizer = CrossEntropyOptimizer(
            traj,
            optimizee_create_individual=optimizee.create_individual,
            optimizee_fitness_weights=(1, ),
            parameters=ce_optimizer_parameters,
            optimizee_bounding_func=optimizee.bounding_func)
        optimizer = ce_optimizer
    elif opt == 'ES':
        es_optimizer_parameters = EvolutionStrategiesParameters(
            learning_rate=1.8,
            learning_rate_decay=.93,
            noise_std=.03,
            mirrored_sampling_enabled=True,
            fitness_shaping_enabled=True,
            pop_size=int(pop_size / 2),
            n_iteration=n_iteration,
            stop_criterion=np.inf,
            seed=102)
        optimizer = EvolutionStrategiesOptimizer(traj,
                                                 optimizee.create_individual,
                                                 (1, ),
                                                 es_optimizer_parameters,
                                                 optimizee.bounding_func)
    elif opt == 'GD':
        gd_parameters = ClassicGDParameters(learning_rate=.003,
                                            exploration_step_size=.1,
                                            n_random_steps=pop_size,
                                            n_iteration=n_iteration,
                                            stop_criterion=np.inf,
                                            seed=102)
        optimizer = GradientDescentOptimizer(traj, optimizee.create_individual,
                                             (1, ), gd_parameters,
                                             optimizee.bounding_func)
    elif opt == 'SA':
        sa_parameters = SimulatedAnnealingParameters(
            n_parallel_runs=pop_size,
            noisy_step=.1,
            temp_decay=.9,
            n_iteration=n_iteration,
            stop_criterion=np.inf,
            seed=102,
            cooling_schedule=AvailableCoolingSchedules.EXPONENTIAL_ADDAPTIVE)
        optimizer = SimulatedAnnealingOptimizer(traj,
                                                optimizee.create_individual,
                                                (1, ), sa_parameters,
                                                optimizee.bounding_func)
    elif opt == 'GS':
        n_grid_points = 5
        gs_optimizer_parameters = GridSearchParameters(
            param_grid={
                'weight_prior': (0, 1, n_grid_points),
                'learning_rate': (0, 1, n_grid_points),
                'stim_inhibition': (0, 1, n_grid_points),
                'action_inhibition': (0, 1, n_grid_points),
                'learning_rate_decay': (0, 1, n_grid_points)
            })
        gs_optimizer = GridSearchOptimizer(
            traj,
            optimizee_create_individual=optimizee.create_individual,
            optimizee_fitness_weights=(1, ),
            parameters=gs_optimizer_parameters)
        optimizer = gs_optimizer
    else:
        raise ValueError('Unknown optimizer: {}'.format(opt))
    env.add_postprocessing(optimizer.post_process)

    # Add Recorder
    recorder = Recorder(trajectory=traj,
                        optimizee_name='MAB',
                        optimizee_parameters=optimizee_parameters,
                        optimizer_name=optimizer.__class__.__name__,
                        optimizer_parameters=optimizer.get_params())
    recorder.start()

    # Run the simulation with all parameter combinations
    # optimizee.simulate(traj)
    # exit(0)
    with Connector(calibrated_config, dac_config, 3) as connector:
        mgr.connector = connector
        env.run(optimizee.simulate)
    mgr.connector.disconnect()

    ## Outerloop optimizer end
    optimizer.end(traj)
    recorder.end()

    # Finally disable logging and close all log-files
    env.disable_logging()
示例#27
0
def main(path_name, resolution, fixed_delay, use_pecevski, num_trials):
    name = path_name
    try:
        with open('bin/path.conf') as f:
            root_dir_path = f.read().strip()
    except FileNotFoundError:
        raise FileNotFoundError(
            "You have not set the root path to store your results."
            " Write the path to a path.conf text file in the bin directory"
            " before running the simulation")
    paths = Paths(name, dict(run_no='test'), root_dir_path=root_dir_path)

    traj_file = os.path.join(paths.output_dir_path, 'data.h5')

    # Create an environment that handles running our simulation
    # This initializes a PyPet environment
    env = Environment(
        trajectory=name,
        filename=traj_file,
        file_title='{} data'.format(name),
        comment='{} data'.format(name),
        add_time=True,
        automatic_storing=True,
        use_scoop=True,
        multiproc=True,
        wrap_mode=pypetconstants.WRAP_MODE_LOCAL,
        log_stdout=False,  # Sends stdout to logs
    )

    create_shared_logger_data(logger_names=['bin', 'optimizers'],
                              log_levels=['INFO', 'INFO'],
                              log_to_consoles=[True, True],
                              sim_name=name,
                              log_directory=paths.logs_path)
    configure_loggers()

    # Get the trajectory from the environment
    traj = env.trajectory

    # NOTE: Innerloop simulator
    optimizee = SAMOptimizee(traj,
                             use_pecevski=use_pecevski,
                             n_NEST_threads=1,
                             time_resolution=resolution,
                             fixed_delay=fixed_delay,
                             plots_directory=paths.output_dir_path,
                             num_fitness_trials=num_trials)

    # NOTE: Outerloop optimizer initialization
    parameters = GeneticAlgorithmParameters(seed=0,
                                            popsize=200,
                                            CXPB=0.5,
                                            MUTPB=1.0,
                                            NGEN=20,
                                            indpb=0.01,
                                            tournsize=20,
                                            matepar=0.5,
                                            mutpar=1.0,
                                            remutate=False)

    optimizer = GeneticAlgorithmOptimizer(
        traj,
        optimizee_create_individual=optimizee.create_individual,
        optimizee_fitness_weights=(-0.1, ),
        parameters=parameters,
        optimizee_bounding_func=optimizee.bounding_func,
        optimizee_parameter_spec=optimizee.parameter_spec,
        fitness_plot_name=path_name)

    # Add post processing
    env.add_postprocessing(optimizer.post_process)

    # Run the simulation with all parameter combinations
    env.run(optimizee.simulate)

    # NOTE: Outerloop optimizer end
    optimizer.end(traj)

    # Finally disable logging and close all log-files
    env.disable_logging()
示例#28
0
def main():

    filename = os.path.join('hdf5', 'example_06.hdf5')
    env = Environment(trajectory='Example_06_Euler_Integration',
                      filename=filename,
                      file_title='Example_06_Euler_Integration',
                      overwrite_file=True,
                      comment='Go for Euler!')

    traj = env.trajectory

    # 1st a) phase parameter addition
    # Remember we have some control flow in the `add_parameters` function, the default parameter
    # set we choose is the `'diff_lorenz'` one, but we want to deviate from that and use the
    # `'diff_roessler'`.
    # In order to do that we can preset the corresponding name parameter to change the
    # control flow:
    traj.f_preset_parameter('diff_name', 'diff_roessler')
    # (If you erase this line, you will get the Lorenz attractor again)
    add_parameters(traj)
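
    # (For context, a minimal sketch of the branching inside `add_parameters`,
    #  which is defined elsewhere in this example; the body shown here is an
    #  assumption:
    #      traj.f_add_parameter('diff_name', 'diff_lorenz')  # consumed by the preset above
    #      if traj.diff_name == 'diff_lorenz':
    #          ...  # add the Lorenz parameters
    #      elif traj.diff_name == 'diff_roessler':
    #          ...  # add the Roessler parameters
    #  )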

    # 1st b) phase preparation
    # Let's check which function we want to use
    if traj.diff_name == 'diff_lorenz':
        diff_eq = diff_lorenz
    elif traj.diff_name == 'diff_roessler':
        diff_eq = diff_roessler
    else:
        raise ValueError('I don\'t know what %s is.' % traj.diff_name)
    # And add the source code of the function as a derived parameter.
    traj.f_add_derived_parameter(FunctionParameter,
                                 'diff_eq',
                                 diff_eq,
                                 comment='Source code of our equation!')

    # We want to explore some initial conditions
    traj.f_explore({
        'initial_conditions': [
            np.array([0.01, 0.01, 0.01]),
            np.array([2.02, 0.02, 0.02]),
            np.array([42.0, 4.2, 0.42])
        ]
    })
    # 3 different conditions are enough for now

    # 2nd phase let's run the experiment
    # We pass 'euler_scheme' as our top-level simulation function and
    # the Roessler function as an additional argument
    env.run(euler_scheme, diff_eq)

    # Again no post-processing

    # 4th phase analysis.
    # I would recommend doing the analysis completely independently of the
    # simulation, but for simplicity let's do it here.
    # We won't reload the trajectory this time but simply update the skeleton
    traj.f_load_skeleton()

    # For the fun of it, let's print the source code
    print('\n ---------- The source code of your function ---------- \n %s' %
          traj.diff_eq)

    # Let's get the exploration array:
    initial_conditions_exploration_array = traj.f_get(
        'initial_conditions').f_get_range()
    # Now let's plot our simulated equations for the different initial conditions.
    # We will iterate through the run names
    for idx, run_name in enumerate(traj.f_get_run_names()):

        # Get the result of run idx from the trajectory
        euler_result = traj.results.f_get(run_name).euler_evolution
        # Now we manually need to load the result. Actually the results are not so large and we
        # could load them all at once, but for demonstration we do as if they were huge:
        traj.f_load_item(euler_result)
        euler_data = euler_result.data

        # Plot fancy 3d plot
        fig = plt.figure(idx)
        ax = fig.add_subplot(projection='3d')  # fig.gca(projection=...) was removed in Matplotlib 3.6
        x = euler_data[:, 0]
        y = euler_data[:, 1]
        z = euler_data[:, 2]
        ax.plot(x,
                y,
                z,
                label='Initial Conditions: %s' %
                str(initial_conditions_exploration_array[idx]))
        plt.legend()
        plt.show()

        # Now we free the data again (because we assume it's huuuuuuge):
        del euler_data
        euler_result.f_empty()

    # Finally disable logging and close all log-files
    env.disable_logging()
示例#29
0
                load_results=0,
                load_derived_parameters=0,
                force=True)
    # Turn on auto loading
    traj.v_auto_load = True
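
    # (With auto loading enabled, accessing data that is not yet in memory,
    #  e.g. `traj.results.runs.run_00000000`, triggers loading from disk
    #  transparently; the concrete name here is just for illustration.)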

    # Ensure trajectory was not already assembled
    if not traj.f_is_completed():
        # Save a backup version of the original trajectory
        traj_backup_fullpath = os.path.join(
            traj_dir, traj_filename + '.backup' +
            datetime.now().strftime("%Y_%m_%d_%Hh%Mm%Ss"))
        shutil.copy(traj_fullpath, traj_backup_fullpath)

        # Create a pypet Environment object and link it to the trajectory
        env = Environment(trajectory=traj)

        # Run assembly of trajectory
        env.run(assemble)

        print('\nFinished assembling files in folder: {0}'.format(traj_dir))
        print('Parameters:')
        print_leaves(traj, 'parameters')
        print('----------------------------------------------------------\n')

        # Finally disable logging
        env.disable_logging()
    else:
        print('Folder skipped: trajectory already completed: {0}'.format(
            traj_dir))
示例#30
0
def main():
    name = 'LTL-MDP-FACE'
    try:
        with open('path.conf') as f:
            root_dir_path = f.read().strip()
    except FileNotFoundError:
        raise FileNotFoundError(
            "You have not set the root path to store your results."
            " Write the path to a path.conf text file in the bin directory"
            " before running the simulation")
    paths = Paths(name, dict(run_no='test'), root_dir_path=root_dir_path)

    print("All output logs can be found in directory ", paths.logs_path)

    traj_file = os.path.join(paths.output_dir_path, 'data.h5')

    # Create an environment that handles running our simulation
    # This initializes a PyPet environment
    env = Environment(
        trajectory=name,
        filename=traj_file,
        file_title=u'{} data'.format(name),
        comment=u'{} data'.format(name),
        add_time=True,
        # freeze_input=True,
        # multiproc=True,
        # use_scoop=True,
        wrap_mode=pypetconstants.WRAP_MODE_LOCK,
        automatic_storing=True,
        log_stdout=False,  # Sends stdout to logs
        log_folder=os.path.join(paths.output_dir_path, 'logs'))
    create_shared_logger_data(logger_names=['bin', 'optimizers'],
                              log_levels=['INFO', 'INFO'],
                              log_to_consoles=[True, True],
                              sim_name=name,
                              log_directory=paths.logs_path)
    configure_loggers()

    # Get the trajectory from the environment
    traj = env.trajectory

    optimizee = DLSMDPOptimizee(traj)

    # NOTE: Outerloop optimizer initialization
    # TODO: Change the optimizer to the appropriate Optimizer class
    parameters = FACEParameters(min_pop_size=25,
                                max_pop_size=25,
                                n_elite=10,
                                smoothing=0.2,
                                temp_decay=0,
                                n_iteration=100,
                                distribution=Gaussian(),
                                n_expand=5,
                                stop_criterion=np.inf,
                                seed=109)
    optimizer = FACEOptimizer(
        traj,
        optimizee_create_individual=optimizee.create_individual,
        optimizee_fitness_weights=(-1.,),  # note the trailing comma: (-1.) would be a plain float, not a tuple
        parameters=parameters,
        optimizee_bounding_func=optimizee.bounding_func)

    # Add post processing
    env.add_postprocessing(optimizer.post_process)

    # Run the simulation with all parameter combinations
    env.run(optimizee.simulate)

    ## Outerloop optimizer end
    optimizer.end(traj)

    # Finally disable logging and close all log-files
    env.disable_logging()
示例#31
0
def main(path_name, resolution, fixed_delay, state_handling, use_pecevski,
         num_trials):
    name = path_name
    try:
        with open('bin/path.conf') as f:
            root_dir_path = f.read().strip()
    except FileNotFoundError:
        raise FileNotFoundError(
            "You have not set the root path to store your results."
            " Write the path to a path.conf text file in the bin directory"
            " before running the simulation")
    paths = Paths(name, dict(run_no='test'), root_dir_path=root_dir_path)

    traj_file = os.path.join(paths.output_dir_path, 'data.h5')

    # Create an environment that handles running our simulation
    # This initializes a PyPet environment
    env = Environment(
        trajectory=name,
        filename=traj_file,
        file_title='{} data'.format(name),
        comment='{} data'.format(name),
        add_time=True,
        automatic_storing=True,
        use_scoop=True,
        multiproc=True,
        wrap_mode=pypetconstants.WRAP_MODE_LOCAL,
        log_stdout=False,  # Sends stdout to logs
    )

    create_shared_logger_data(logger_names=['bin', 'optimizers'],
                              log_levels=['INFO', 'INFO'],
                              log_to_consoles=[True, True],
                              sim_name=name,
                              log_directory=paths.logs_path)
    configure_loggers()

    # Get the trajectory from the environment
    traj = env.trajectory

    # NOTE: Innerloop simulator
    optimizee = SAMGraphOptimizee(traj,
                                  n_NEST_threads=1,
                                  time_resolution=resolution,
                                  fixed_delay=fixed_delay,
                                  use_pecevski=use_pecevski,
                                  state_handling=state_handling,
                                  plots_directory=paths.output_dir_path,
                                  num_fitness_trials=num_trials)

    # Get bounds for mu and sigma calculation.
    param_spec = OrderedDict(sorted(SAMGraph.parameter_spec(4).items()))
    names = [k for k, _ in param_spec.items()]
    mu = np.array([(v_min + v_max) / 2
                   for k, (v_min, v_max) in param_spec.items()])
    sigma = np.array([(v_max - v_min) / 2
                      for k, (v_min, v_max) in param_spec.items()])
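
    # For example, a bound (v_min, v_max) = (0., 1.) yields mu = 0.5 and
    # sigma = 0.5, i.e. each search dimension starts centred on the interval
    # midpoint with a spread spanning the whole interval.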

    print("Using means: {}\nUsing stds: {}".format(dict(zip(names, mu)),
                                                   dict(zip(names, sigma))))

    # NOTE: Outerloop optimizer initialization
    parameters = NaturalEvolutionStrategiesParameters(
        seed=0,
        pop_size=96,
        n_iteration=40,
        learning_rate_sigma=0.5,
        learning_rate_mu=0.5,
        mu=mu,
        sigma=sigma,
        mirrored_sampling_enabled=True,
        fitness_shaping_enabled=True,
        stop_criterion=np.inf)  # np.Inf alias was removed in NumPy 2.0

    optimizer = NaturalEvolutionStrategiesOptimizer(
        traj,
        optimizee_create_individual=optimizee.create_individual,
        optimizee_fitness_weights=(-1.0, ),
        parameters=parameters,
        optimizee_bounding_func=optimizee.bounding_func,
        fitness_plot_name=path_name)

    # Add post processing
    env.add_postprocessing(optimizer.post_process)

    # Run the simulation with all parameter combinations
    env.run(optimizee.simulate)

    # NOTE: Outerloop optimizer end
    optimizer.end(traj)

    # Finally disable logging and close all log-files
    env.disable_logging()
示例#32
0
def main():
    name = 'LTL-MDP-ES_6_8_TD1'
    try:
        with open('path.conf') as f:
            root_dir_path = f.read().strip()
    except FileNotFoundError:
        raise FileNotFoundError(
            "You have not set the root path to store your results."
            " Write the path to a path.conf text file in the bin directory"
            " before running the simulation"
        )

    paths = Paths(name, dict(run_no='test'), root_dir_path=root_dir_path)

    print("All output logs can be found in directory ", paths.logs_path)

    traj_file = os.path.join(paths.output_dir_path, 'data.h5')
    # Create an environment that handles running our simulation
    # This initializes a PyPet environment
    env = Environment(trajectory=name, filename=traj_file, file_title=u'{} data'.format(name),
                      comment=u'{} data'.format(name),
                      add_time=True,
                      # freeze_input=True,
                      # multiproc=True,
                      # use_scoop=True,
                      wrap_mode=pypetconstants.WRAP_MODE_LOCK,
                      automatic_storing=True,
                      log_stdout=False,  # Sends stdout to logs
                      log_folder=os.path.join(paths.output_dir_path, 'logs')
                      )
    create_shared_logger_data(logger_names=['bin', 'optimizers'],
                              log_levels=['INFO', 'INFO'],
                              log_to_consoles=[True, True],
                              sim_name=name,
                              log_directory=paths.logs_path)
    configure_loggers()

    # Get the trajectory from the environment
    traj = env.trajectory

    ## Benchmark function

    optimizee = DLSMDPOptimizee(traj)

    ## Innerloop simulator

    ## Outerloop optimizer initialization
    optimizer_seed = 1234
    parameters = EvolutionStrategiesParameters(
        learning_rate=0.5,
        learning_rate_decay=0.95,
        noise_std=0.1,
        mirrored_sampling_enabled=True,
        fitness_shaping_enabled=True,
        pop_size=25,
        n_iteration=30,
        stop_criterion=np.inf,
        seed=optimizer_seed)

    optimizer = EvolutionStrategiesOptimizer(
        traj,
        optimizee_create_individual=optimizee.create_individual,
        optimizee_fitness_weights=(-1.,),
        parameters=parameters,
        optimizee_bounding_func=optimizee.bounding_func)

    # Add post processing
    env.add_postprocessing(optimizer.post_process)

    # Run the simulation with all parameter combinations
    env.run(optimizee.simulate)

    ## Outerloop optimizer end
    optimizer.end(traj)

    # Finally disable logging and close all log-files
    env.disable_logging()
示例#34
0
def main():
    name = 'LTL-MDP-GS'
    try:
        with open('path.conf') as f:
            root_dir_path = f.read().strip()
    except FileNotFoundError:
        raise FileNotFoundError(
            "You have not set the root path to store your results."
            " Write the path to a path.conf text file in the bin directory"
            " before running the simulation")
    paths = Paths(name, dict(run_no='test'), root_dir_path=root_dir_path)

    print("All output logs can be found in directory ", paths.logs_path)

    traj_file = os.path.join(paths.output_dir_path, 'data.h5')

    # Create an environment that handles running our simulation
    # This initializes a PyPet environment
    env = Environment(
        trajectory=name,
        filename=traj_file,
        file_title=u'{} data'.format(name),
        comment=u'{} data'.format(name),
        add_time=True,
        freeze_input=True,
        multiproc=True,
        use_scoop=True,
        wrap_mode=pypetconstants.WRAP_MODE_LOCAL,
        automatic_storing=True,
        log_stdout=False,  # Sends stdout to logs
        log_folder=os.path.join(paths.output_dir_path, 'logs'))
    create_shared_logger_data(logger_names=['bin', 'optimizers'],
                              log_levels=['INFO', 'INFO'],
                              log_to_consoles=[True, True],
                              sim_name=name,
                              log_directory=paths.logs_path)
    configure_loggers()

    # Get the trajectory from the environment
    traj = env.trajectory

    # NOTE: Innerloop simulator
    optimizee = StateActionOptimizee(traj)

    # NOTE: Outerloop optimizer initialization
    n_grid_divs_per_axis = 50
    parameters = GridSearchParameters(
        param_grid={
            'gamma': (optimizee.bound[0], optimizee.bound[1],
                      n_grid_divs_per_axis),
            #'lam': (optimizee.bound[0], optimizee.bound[1], n_grid_divs_per_axis),
            'eta': (optimizee.bound[0], optimizee.bound[1],
                    n_grid_divs_per_axis),
        })
    optimizer = GridSearchOptimizer(
        traj,
        optimizee_create_individual=optimizee.create_individual,
        optimizee_fitness_weights=(-1.,),  # trailing comma needed: (-1.) is a plain float
        parameters=parameters)

    # Add post processing
    env.add_postprocessing(optimizer.post_process)

    # Run the simulation with all parameter combinations
    env.run(optimizee.simulate)

    # NOTE: Outerloop optimizer end
    optimizer.end(traj)

    # Finally disable logging and close all log-files
    env.disable_logging()
示例#35
0
def main():
    name = 'LTL-MDP-CE_6_8_TD1_New'
    try:
        with open('path.conf') as f:
            root_dir_path = f.read().strip()
    except FileNotFoundError:
        raise FileNotFoundError(
            "You have not set the root path to store your results."
            " Write the path to a path.conf text file in the bin directory"
            " before running the simulation")
    paths = Paths(name, dict(run_no='test'), root_dir_path=root_dir_path)

    print("All output logs can be found in directory ", paths.logs_path)

    traj_file = os.path.join(paths.output_dir_path, 'data.h5')

    # Create an environment that handles running our simulation
    # This initializes a PyPet environment
    env = Environment(
        trajectory=name,
        filename=traj_file,
        file_title=u'{} data'.format(name),
        comment=u'{} data'.format(name),
        add_time=True,
        freeze_input=True,
        multiproc=True,
        use_scoop=True,
        wrap_mode=pypetconstants.WRAP_MODE_LOCAL,
        automatic_storing=True,
        log_stdout=False,  # Sends stdout to logs
        log_folder=os.path.join(paths.output_dir_path, 'logs'))
    create_shared_logger_data(logger_names=['bin', 'optimizers'],
                              log_levels=['INFO', 'INFO'],
                              log_to_consoles=[True, True],
                              sim_name=name,
                              log_directory=paths.logs_path)
    configure_loggers()

    # Get the trajectory from the environment
    traj = env.trajectory

    # NOTE: Benchmark function
    optimizee = StateActionOptimizee(traj)

    # NOTE: Outerloop optimizer initialization
    # TODO: Change the optimizer to the appropriate Optimizer class
    parameters = CrossEntropyParameters(pop_size=75,
                                        rho=0.2,
                                        smoothing=0.0,
                                        temp_decay=0,
                                        n_iteration=75,
                                        distribution=NoisyGaussian(
                                            noise_magnitude=1,
                                            noise_decay=0.95),
                                        stop_criterion=np.inf,
                                        seed=102)
    optimizer = CrossEntropyOptimizer(
        traj,
        optimizee_create_individual=optimizee.create_individual,
        optimizee_fitness_weights=(-1., ),
        parameters=parameters,
        optimizee_bounding_func=optimizee.bounding_func)

    # Add post processing
    env.add_postprocessing(optimizer.post_process)

    # Add Recorder
    recorder = Recorder(trajectory=traj,
                        optimizee_name='SNN StateAction',
                        optimizee_parameters=['gamma', 'eta'],
                        optimizer_name=optimizer.__class__.__name__,
                        optimizer_parameters=optimizer.get_params())
    recorder.start()

    # Run the simulation with all parameter combinations
    env.run(optimizee.simulate)

    # NOTE: Outerloop optimizer end
    optimizer.end(traj)
    recorder.end()

    # Finally disable logging and close all log-files
    env.disable_logging()
示例#36
0
def main():
    name = 'LTL-MDP-SA_6_8_TD1'
    try:
        with open('path.conf') as f:
            root_dir_path = f.read().strip()
    except FileNotFoundError:
        raise FileNotFoundError(
            "You have not set the root path to store your results."
            " Write the path to a path.conf text file in the bin directory"
            " before running the simulation"
        )
    paths = Paths(name, dict(run_no='test'), root_dir_path=root_dir_path)

    print("All output logs can be found in directory ", paths.logs_path)

    traj_file = os.path.join(paths.output_dir_path, 'data.h5')

    # Create an environment that handles running our simulation
    # This initializes a PyPet environment
    env = Environment(trajectory=name, filename=traj_file, file_title=u'{} data'.format(name),
                      comment=u'{} data'.format(name),
                      add_time=True,
                      # freeze_input=True,
                      # multiproc=True,
                      # use_scoop=True,
                      wrap_mode=pypetconstants.WRAP_MODE_LOCK,
                      automatic_storing=True,
                      log_stdout=False,  # Sends stdout to logs
                      log_folder=os.path.join(paths.output_dir_path, 'logs')
                      )
    create_shared_logger_data(logger_names=['bin', 'optimizers'],
                              log_levels=['INFO', 'INFO'],
                              log_to_consoles=[True, True],
                              sim_name=name,
                              log_directory=paths.logs_path)
    configure_loggers()

    # Get the trajectory from the environment
    traj = env.trajectory

    optimizee = DLSMDPOptimizee(traj)

    # NOTE: Outerloop optimizer initialization

    parameters = SimulatedAnnealingParameters(
        n_parallel_runs=50,
        noisy_step=.03,
        temp_decay=.99,
        n_iteration=30,
        stop_criterion=np.inf,  # np.Inf alias was removed in NumPy 2.0
        seed=np.random.randint(int(1e5)),
        cooling_schedule=AvailableCoolingSchedules.QUADRATIC_ADDAPTIVE)

    optimizer = SimulatedAnnealingOptimizer(traj, optimizee_create_individual=optimizee.create_individual,
                                            optimizee_fitness_weights=(-1.,),
                                            parameters=parameters,
                                            optimizee_bounding_func=optimizee.bounding_func)

    # Add post processing
    env.add_postprocessing(optimizer.post_process)

    # Run the simulation with all parameter combinations
    env.run(optimizee.simulate)

    ## Outerloop optimizer end
    optimizer.end(traj)

    # Finally disable logging and close all log-files
    env.disable_logging()