Example #1
File: main.py  Project: fontaine618/NAIVI
def main():
    # pypet environment
    env = Environment(trajectory=SIM_NAME,
                      comment="Experiment on density with binary covariates",
                      log_config=None,
                      multiproc=False,
                      ncores=1,
                      filename=SIM_PATH + "/results/",
                      overwrite_file=True)
    traj = env.trajectory

    # parameters (data generation)

    traj.f_add_parameter("data.N", np.int64(500), "Number of nodes")
    traj.f_add_parameter("data.K", np.int64(5),
                         "True number of latent components")
    traj.f_add_parameter("data.p_cts", np.int64(0),
                         "Number of continuous covariates")
    traj.f_add_parameter("data.p_bin", np.int64(0),
                         "Number of binary covariates")
    traj.f_add_parameter("data.var_adj", np.float64(1.),
                         "True variance in the link Probit model")
    traj.f_add_parameter("data.var_cov", np.float64(1.),
                         "True variance in the covariate model (cts and bin)")
    traj.f_add_parameter("data.missing_rate", np.float64(0.1), "Missing rate")
    traj.f_add_parameter("data.seed", np.int64(1), "Random seed")
    traj.f_add_parameter("data.alpha_mean", np.float64(-1.85),
                         "Mean of the heterogeneity parameter")

    # parameters (model)
    traj.f_add_parameter("model.K", np.int64(5),
                         "Number of latent components in the model")
    traj.f_add_parameter("model.adj_model", "Logistic", "Adjacency model")
    traj.f_add_parameter("model.bin_model", "Logistic",
                         "Binary covariate model")

    # parameters (fit)
    traj.f_add_parameter("fit.n_iter", np.int64(20),
                         "Number of VEM iterations")
    traj.f_add_parameter("fit.n_vmp", np.int64(5),
                         "Number of VMP iterations per E-step")
    traj.f_add_parameter("fit.n_gd", np.int64(5),
                         "Number of GD iterations per M-step")
    traj.f_add_parameter("fit.step_size", np.float64(0.01), "GD Step size")

    # experiment
    explore_dict = {
        "data.alpha_mean":
        np.array([-3.2, -2.8, -2.4, -2., -1.6, -1.2, -0.8, -0.4, 0.0, 0.4]),
        "data.p_bin":
        np.array([10, 100, 500]),
        "data.seed":
        np.arange(0, 100, 1)
    }
    experiment = cartesian_product(explore_dict, tuple(explore_dict.keys()))
    traj.f_explore(experiment)

    env.add_postprocessing(post_processing)
    env.run(run)
    env.disable_logging()
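
A note on the exploration step used throughout these examples: pypet's `cartesian_product` expands a dict of value lists into the full grid, returned as parallel lists of equal length. A small illustration with hypothetical values:

from pypet import cartesian_product

explore = cartesian_product({'x': [1, 2], 'y': [10, 20]})
# Every (x, y) combination as parallel lists, e.g.:
# {'x': [1, 1, 2, 2], 'y': [10, 20, 10, 20]}

The optional second argument (used in Example #1 above) fixes the order in which the dimensions are combined.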
Example #2
def main():
    batch = get_batch()

    filename = 'saga_%s.hdf5' % str(batch)
    env = Environment(trajectory='Example_22_Euler_Integration_%s' % str(batch),
                      filename=filename,
                      file_title='Example_22_Euler_Integration',
                      comment='Go for Euler!',
                      overwrite_file=True,
                      multiproc=True,  # Yes we can use multiprocessing within each batch!
                      ncores=4)

    traj = env.trajectory
    trajectory_name = traj.v_name

    # 1st a) phase parameter addition
    add_parameters(traj)

    # 1st b) phase preparation
    # We will add the differential equation (well, its source code only) as a derived parameter
    traj.f_add_derived_parameter(FunctionParameter,'diff_eq', diff_lorenz,
                                 comment='Source code of our equation!')

    # explore the trajectory
    explore_batch(traj, batch)

    # 2nd phase let's run the experiment
    # We pass `euler_scheme` as our top-level simulation function and
    # the Lorenz equation 'diff_lorenz' as an additional argument
    env.run(euler_scheme, diff_lorenz)
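
`get_batch` is defined elsewhere in this project; a minimal sketch of such a helper, assuming (hypothetically) that the batch id arrives as the first command-line argument:

import sys

def get_batch():
    # Hypothetical helper: read the batch id from the command line,
    # defaulting to batch 0 when no argument is given.
    return int(sys.argv[1]) if len(sys.argv) > 1 else 0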
Example #3
File: the_task.py  Project: nigroup/pypet
def main():
    batch = get_batch()

    filename = 'saga_%s.hdf5' % str(batch)
    env = Environment(
        trajectory='Example_22_Euler_Integration_%s' % str(batch),
        filename=filename,
        file_title='Example_22_Euler_Integration',
        comment='Go for Euler!',
        overwrite_file=True,
        multiproc=True,  # Yes we can use multiprocessing within each batch!
        ncores=4)

    traj = env.trajectory
    trajectory_name = traj.v_name

    # 1st a) phase parameter addition
    add_parameters(traj)

    # 1st b) phase preparation
    # We will add the differential equation (well, its source code only) as a derived parameter
    traj.f_add_derived_parameter(FunctionParameter,
                                 'diff_eq',
                                 diff_lorenz,
                                 comment='Source code of our equation!')

    # explore the trajectory
    explore_batch(traj, batch)

    # 2nd phase let's run the experiment
    # We pass `euler_scheme` as our top-level simulation function and
    # the Lorenz equation 'diff_lorenz' as an additional argument
    env.run(euler_scheme, diff_lorenz)
Example #4
File: main.py  Project: MehmetTimur/pypet
def main():

    filename = os.path.join('hdf5', 'FiringRate.hdf5')
    env = Environment(trajectory='FiringRate',
                      comment='Experiment to measure the firing rate '
                            'of a leaky integrate and fire neuron. '
                            'Exploring different input currents, '
                            'as well as refractory periods',
                      add_time=False, # We don't want to add the current time to the name,
                      log_stdout=True,
                      log_config='DEFAULT',
                      multiproc=True,
                      ncores=2, #My laptop has 2 cores ;-)
                      wrap_mode='QUEUE',
                      filename=filename,
                      overwrite_file=True)

    traj = env.trajectory

    # Add parameters
    add_parameters(traj)

    # Let's explore
    add_exploration(traj)

    # Add the postprocessing function
    env.add_postprocessing(neuron_postproc)

    # Run the experiment
    env.run(run_neuron)

    # Finally disable logging and close all log-files
    env.disable_logging()
Example #5
def main():

    filename = os.path.join('hdf5', 'FiringRate.hdf5')
    env = Environment(
        trajectory='FiringRate',
        comment='Experiment to measure the firing rate '
        'of a leaky integrate and fire neuron. '
        'Exploring different input currents, '
        'as well as refractory periods',
        add_time=False,  # We don't want to add the current time to the name,
        log_stdout=True,
        log_config='DEFAULT',
        multiproc=True,
        ncores=4,  #I think my laptop has 4 cores
        git_repository='/home/pinolej/th',
        wrap_mode='QUEUE',
        filename=filename,
        overwrite_file=True)

    traj = env.trajectory

    # Add parameters
    add_parameters(traj)

    # Let's explore
    add_exploration(traj)

    # Add the postprocessing function
    env.add_postprocessing(neuron_postproc)

    # Run the experiment
    env.run(run_neuron)

    # Finally disable logging and close all log-files
    env.disable_logging()
Example #6
def main():
    """ Main *boilerplate* function to start simulation """
    # Now let's make use of logging
    logger = logging.getLogger()

    # Create folders for data and plots
    folder = os.path.join(os.getcwd(), 'experiments', 'ca_patterns_pypet')
    if not os.path.isdir(folder):
        os.makedirs(folder)
    filename = os.path.join(folder, 'all_patterns.hdf5')

    # Create an environment
    env = Environment(trajectory='cellular_automata',
                      multiproc=True,
                      ncores=4,
                      wrap_mode='QUEUE',
                      filename=filename,
                      overwrite_file=True)

    # extract the trajectory
    traj = env.traj

    traj.par.ncells = Parameter('ncells', 400, 'Number of cells')
    traj.par.steps = Parameter('steps', 250, 'Number of timesteps')
    traj.par.rule_number = Parameter('rule_number', 30, 'The ca rule')
    traj.par.initial_name = Parameter('initial_name', 'random',
                                      'The type of initial state')
    traj.par.seed = Parameter('seed', 100042, 'RNG Seed')

    # Explore
    exp_dict = {
        'rule_number': [10, 30, 90, 110, 184],
        'initial_name': ['single', 'random'],
    }
    # # You can uncomment the ``exp_dict`` below to see that changing the
    # # exploration scheme is now really easy:
    # exp_dict = {'rule_number' : [10, 30, 90, 110, 184],
    #             'ncells' : [100, 200, 300],
    #             'seed': [333444555, 123456]}
    exp_dict = cartesian_product(exp_dict)
    traj.f_explore(exp_dict)

    # Run the simulation
    logger.info('Starting Simulation')
    env.run(wrap_automaton)

    # Load all data
    traj.f_load(load_data=2)

    logger.info('Printing data')
    for idx, run_name in enumerate(traj.f_iter_runs()):
        # Plot all patterns
        filename = os.path.join(folder, make_filename(traj))
        plot_pattern(traj.crun.pattern, traj.rule_number, filename)
        progressbar(idx, len(traj), logger=logger)

    # Finally disable logging and close all log-files
    env.disable_logging()
Example #7
def main():
    """ Main *boilerplate* function to start simulation """
    # Now let's make use of logging
    logger = logging.getLogger()

    # Create folders for data and plots
    folder = os.path.join(os.getcwd(), 'experiments', 'ca_patterns_pypet')
    if not os.path.isdir(folder):
        os.makedirs(folder)
    filename = os.path.join(folder, 'all_patterns.hdf5')

    # Create an environment
    env = Environment(trajectory='cellular_automata',
                      multiproc=True,
                      ncores=4,
                      wrap_mode='QUEUE',
                      filename=filename,
                      overwrite_file=True)

    # extract the trajectory
    traj = env.traj

    traj.v_lazy_adding = True
    traj.par.ncells = 400, 'Number of cells'
    traj.par.steps = 250, 'Number of timesteps'
    traj.par.rule_number = 30, 'The ca rule'
    traj.par.initial_name = 'random', 'The type of initial state'
    traj.par.seed = 100042, 'RNG Seed'


    # Explore
    exp_dict = {'rule_number' : [10, 30, 90, 110, 184],
                'initial_name' : ['single', 'random'],}
    # # You can uncomment the ``exp_dict`` below to see that changing the
    # # exploration scheme is now really easy:
    # exp_dict = {'rule_number' : [10, 30, 90, 110, 184],
    #             'ncells' : [100, 200, 300],
    #             'seed': [333444555, 123456]}
    exp_dict = cartesian_product(exp_dict)
    traj.f_explore(exp_dict)

    # Run the simulation
    logger.info('Starting Simulation')
    env.run(wrap_automaton)

    # Load all data
    traj.f_load(load_data=2)

    logger.info('Printing data')
    for idx, run_name in enumerate(traj.f_iter_runs()):
        # Plot all patterns
        filename = os.path.join(folder, make_filename(traj))
        plot_pattern(traj.crun.pattern, traj.rule_number, filename)
        progressbar(idx, len(traj), logger=logger)

    # Finally disable logging and close all log-files
    env.disable_logging()
Example #8
def main(name,
         explore_dict,
         postprocess=False,
         ncores=1,
         testrun=False,
         commit=None):

    if not testrun:
        if commit is None:
            raise Exception("Non testrun needs a commit")

    filename = os.path.join(os.getcwd(), 'data/', name + '.hdf5')

    # if not the first run, tr2 will be merged later
    label = 'tr1'

    # if only post processing, can't use the same label
    # (generates HDF5 error)
    if postprocess:
        label += '_postprocess-%.6d' % random.randint(0, 999999)

    env = Environment(
        trajectory=label,
        add_time=False,
        filename=filename,
        continuable=False,  # ??
        lazy_debug=False,  # ??
        multiproc=True,
        ncores=ncores,
        use_pool=False,  # likely not working w/ brian2
        wrap_mode='QUEUE',  # ??
        overwrite_file=False)

    tr = env.trajectory

    add_params(tr)

    if not testrun:
        tr.f_add_parameter('mconfig.git.sha1', str(commit))
        tr.f_add_parameter('mconfig.git.message', commit.message)

    tr.f_explore(explore_dict)

    def run_sim(tr):
        try:
            run_net(tr)
        except TimeoutError:
            print("Unable to plot, must run analysis manually")

        post_process(tr)

    if postprocess:
        env.run(post_process)
    else:
        env.run(run_sim)
Example #9
File: main.py  Project: fontaine618/NAIVI
def main(path, name, explore_dict):
    comment = "\n".join(
        ["{}: {}".format(k, v) for k, v in explore_dict.items()])
    # pypet environment
    env = Environment(trajectory=name,
                      comment=comment,
                      log_config=None,
                      multiproc=False,
                      ncores=1,
                      filename=path + name + "/results/",
                      overwrite_file=True)
    traj = env.trajectory
    traj.f_add_parameter("path", path + name, "Path")

    # parameters (data generation)
    traj.f_add_parameter("data.N", np.int64(500), "Number of nodes")
    traj.f_add_parameter("data.K", np.int64(5),
                         "True number of latent components")
    traj.f_add_parameter("data.p_cts", np.int64(0),
                         "Number of continuous covariates")
    traj.f_add_parameter("data.p_bin", np.int64(0),
                         "Number of binary covariates")
    traj.f_add_parameter("data.var_cov", np.float64(1.),
                         "True variance in the covariate model (cts and bin)")
    traj.f_add_parameter("data.missing_rate", np.float64(0.1), "Missing rate")
    traj.f_add_parameter("data.seed", np.int64(1), "Random seed")
    traj.f_add_parameter("data.center", np.int64(1), "Ego-network center")
    traj.f_add_parameter("data.alpha_mean", np.float64(-1.85),
                         "Mean of the heterogeneity parameter")

    # parameters (model)
    traj.f_add_parameter("model.K", np.int64(5),
                         "Number of latent components in the model")

    # parameters (fit)
    traj.f_add_parameter("fit.algo", "MLE", "Inference algorithm")
    traj.f_add_parameter("fit.max_iter", np.int64(500),
                         "Number of VEM iterations")
    traj.f_add_parameter("fit.n_sample", np.int64(1),
                         "Number of samples for VIMC")
    traj.f_add_parameter("fit.eps", np.float64(1.0e-6),
                         "convergence threshold")
    traj.f_add_parameter("fit.lr", np.float64(0.01), "GD Step size")

    # experiment
    experiment = cartesian_product(explore_dict, tuple(explore_dict.keys()))
    traj.f_explore(experiment)
    env.add_postprocessing(post_processing)
    env.run(run)
    env.disable_logging()
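
A hypothetical invocation of this `main`, sketching the shape of `explore_dict` it expects: the keys must name parameters declared above, and the values are the lists handed to `cartesian_product`:

import numpy as np

# Hypothetical call: explore three missing rates across five seeds.
main(path="./",
     name="missing_rate_experiment",
     explore_dict={
         "data.missing_rate": np.array([0.1, 0.25, 0.5]),
         "data.seed": np.arange(5),
     })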
Example #10
def main():
    """Main function to protect the *entry point* of the program.

    If you want to use multiprocessing under Windows you need to wrap the
    main code that creates an environment in a function. Otherwise
    the newly started child processes will re-execute the code and throw
    errors (also see https://docs.python.org/2/library/multiprocessing.html#windows).

    """

    # Create an environment that handles running.
    # Let's enable multiprocessing with 4 workers.
    filename = os.path.join('hdf5', 'example_04.hdf5')
    env = Environment(
        trajectory='Example_04_MP',
        filename=filename,
        file_title='Example_04_MP',
        log_stdout=True,
        comment='Multiprocessing example!',
        multiproc=True,
        ncores=4,
        use_pool=True,  # Our runs are inexpensive, so we can get rid of overhead
        # by using a pool
        freeze_input=True,  # We can avoid some
        # overhead by freezing the input to the pool
        wrap_mode=pypetconstants.WRAP_MODE_QUEUE,
        graceful_exit=True,  # We want to exit in a data friendly way
        # that saves all results after hitting CTRL+C, try it ;-)
        overwrite_file=True)

    # Get the trajectory from the environment
    traj = env.trajectory

    # Add both parameters
    traj.f_add_parameter('x', 1.0, comment='I am the first dimension!')
    traj.f_add_parameter('y', 1.0, comment='I am the second dimension!')

    # Explore the parameters with a cartesian product, but we want to explore a bit more
    traj.f_explore(
        cartesian_product({
            'x': [float(x) for x in range(20)],
            'y': [float(y) for y in range(20)]
        }))

    # Run the simulation
    env.run(multiply)

    # Finally disable logging and close all log-files
    env.disable_logging()
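
The docstring above explains why the environment must live inside a function; the corresponding entry-point guard (standard Python, assumed rather than shown in this snippet) looks like:

if __name__ == '__main__':
    # On Windows, child processes re-import this module; the guard
    # keeps them from re-running the experiment themselves.
    main()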
Example #11
def main():
    """Main function to protect the *entry point* of the program.

    If you want to use multiprocessing under Windows you need to wrap the
    main code that creates an environment in a function. Otherwise
    the newly started child processes will re-execute the code and throw
    errors (also see https://docs.python.org/2/library/multiprocessing.html#windows).

    """

    # Create an environment that handles running.
    # Let's enable multiprocessing with 4 workers.
    filename = os.path.join('hdf5', 'example_04.hdf5')
    env = Environment(trajectory='Example_04_MP',
                      filename=filename,
                      file_title='Example_04_MP',
                      log_stdout=True,
                      comment='Multiprocessing example!',
                      multiproc=True,
                      ncores=4,
                      use_pool=True,  # Our runs are inexpensive, so we can get rid of overhead
                      # by using a pool
                      freeze_input=True,  # We can avoid some
                      # overhead by freezing the input to the pool
                      wrap_mode=pypetconstants.WRAP_MODE_QUEUE,
                      graceful_exit=True,  # We want to exit in a data friendly way
                      # that saves all results after hitting CTRL+C, try it ;-)
                      overwrite_file=True)

    # Get the trajectory from the environment
    traj = env.trajectory

    # Add both parameters
    traj.f_add_parameter('x', 1.0, comment='I am the first dimension!')
    traj.f_add_parameter('y', 1.0, comment='I am the second dimension!')

    # Explore the parameters with a cartesian product, but we want to explore a bit more
    traj.f_explore(cartesian_product({'x':[float(x) for x in range(20)],
                                      'y':[float(y) for y in range(20)]}))

    # Run the simulation
    env.run(multiply)

    # Finally disable logging and close all log-files
    env.disable_logging()
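
`multiply` is the top-level run function handed to `env.run`; a minimal sketch of what such a function might look like (an assumption modeled on the pypet tutorial, not necessarily the original):

def multiply(traj):
    # Receives the trajectory holding one explored (x, y) combination,
    # computes the product, and stores it as a result of this run.
    z = traj.x * traj.y
    traj.f_add_result('z', z, comment='Product of x and y')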
Example #12
def main():
    """Main function to protect the *entry point* of the program.

    If you want to use multiprocessing with SCOOP you need to wrap the
    main code that creates an environment in a function. Otherwise
    the newly started child processes will re-execute the code and throw
    errors (also see http://scoop.readthedocs.org/en/latest/usage.html#pitfalls).

    """

    # Create an environment that handles running.
    # Let's enable multiprocessing with scoop:
    filename = os.path.join('hdf5', 'example_21.hdf5')
    env = Environment(trajectory='Example_21_SCOOP',
                      filename=filename,
                      file_title='Example_21_SCOOP',
                      log_stdout=True,
                      comment='Multiprocessing example using SCOOP!',
                      multiproc=True,
                      freeze_input=True, # We want to save overhead and freeze input
                      use_scoop=True, # Yes we want SCOOP!
                      wrap_mode=pypetconstants.WRAP_MODE_LOCAL,  # SCOOP only works with 'LOCAL'
                      # or 'NETLOCK' wrapping
                      overwrite_file=True)

    # Get the trajectory from the environment
    traj = env.trajectory

    # Add both parameters
    traj.f_add_parameter('x', 1.0, comment='I am the first dimension!')
    traj.f_add_parameter('y', 1.0, comment='I am the second dimension!')

    # Explore the parameters with a cartesian product, but we want to explore a bit more
    traj.f_explore(cartesian_product({'x':[float(x) for x in range(20)],
                                      'y':[float(y) for y in range(20)]}))
    # Run the simulation
    env.run(multiply)

    # Let's check that all runs are completed!
    assert traj.f_is_completed()

    # Finally disable logging and close all log-files
    env.disable_logging()
Example #13
def main():
    # Create an environment that handles running
    filename = os.path.join('hdf5','example_18.hdf5')
    env = Environment(trajectory='Multiplication',
                      filename=filename,
                      file_title='Example_18_Many_Runs',
                      overwrite_file=True,
                      comment='Contains many runs',
                      multiproc=True,
                      use_pool=True,
                      freeze_input=True,
                      ncores=2,
                      wrap_mode='QUEUE')

    # The environment has created a trajectory container for us
    traj = env.trajectory

    # Add both parameters
    traj.f_add_parameter('x', 1, comment='I am the first dimension!')
    traj.f_add_parameter('y', 1, comment='I am the second dimension!')

    # Explore the parameters with a cartesian product, yielding 2500 runs
    traj.f_explore(cartesian_product({'x': range(50), 'y': range(50)}))

    # Run the simulation
    env.run(multiply)

    # Disable logging
    env.disable_logging()

    # Turn auto loading on, since results have not been loaded yet
    traj.v_auto_load = True
    # Use the `v_idx` functionality
    traj.v_idx = 2042
    print('The result of run %d is: ' % traj.v_idx)
    # Now we can rely on the wildcards
    print(traj.res.crunset.crun.z)
    traj.v_idx = -1
    # Or we can use the shortcuts `rts_X` (run to set) and `r_X` to get particular results
    print('The result of run %d is: ' % 2044)
    print(traj.res.rts_2044.r_2044.z)
Example #14
def main(inputargs=None):
    if inputargs is None:
        inputargs = sys.argv[1:] if len(sys.argv) > 1 else ""
    args = docopt(__doc__, argv=inputargs)
    wavpath = path.join(modulePath, "resources", "tone_in_noise")
    stimuli = [path.join(wavpath, i) for i in glob.glob(path.join(wavpath, "*.wav"))]
    outfile = path.realpath(path.expanduser(args["--out"]))
    env = Environment(trajectory='tone-in-noise',
                      filename=outfile,
                      overwrite_file=True,
                      file_title="Tone in noise at different SNR",
                      comment="some comment",
                      large_overview_tables=False,
                      # freeze_input=True,
                      # use_pool=True,
                      multiproc=True,
                      ncores=3,
                      graceful_exit=True,
                      #wrap_mode=pypetconstants.WRAP_MODE_QUEUE,
                      )

    traj = env.trajectory
    traj.f_add_parameter('periphery', 'verhulst', comment="which periphery was used")
    traj.f_add_parameter('brainstem', 'nelsoncarney04', comment="which brainstem model was used")
    traj.f_add_parameter('weighting', "--no-cf-weighting ", comment="weighted CFs")
    traj.f_add_parameter('wavfile', '', comment="Which wav file to run")
    traj.f_add_parameter('level', 80, comment="stimulus level, spl")
    traj.f_add_parameter('neuropathy', "none", comment="")

    parameter_dict = {
        "periphery" : ['verhulst', 'zilany'],
        "brainstem" : ['nelsoncarney04', 'carney2015'],
        "weighting" : [cf_weighting, ""],
        "wavfile"   : stimuli,
        "level"     : [80],
        "neuropathy": ["none", "moderate", "severe", "ls-moderate", "ls-severe"]
    }

    traj.f_explore(cartesian_product(parameter_dict))
    env.run(tone_in_noise)
    return 0
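
`docopt(__doc__, ...)` parses the module docstring, so the script must carry a usage string that defines `--out`; a hypothetical sketch of what that docstring might contain:

"""Run the tone-in-noise experiment.

Usage:
    script.py --out=<file>

Options:
    --out=<file>    Path of the output HDF5 file.
"""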
Example #15
def main():
    # Create an environment that handles running
    filename = os.path.join('hdf5', 'example_12.hdf5')
    env = Environment(
        trajectory='Multiplication',
        filename=filename,
        file_title='Example_12_Sharing_Data',
        overwrite_file=True,
        comment='The first example!',
        continuable=False,  # We have shared data in terms of a multiprocessing list,
        # so we CANNOT use the continue feature.
        multiproc=True,
        ncores=2)

    # The environment has created a trajectory container for us
    traj = env.trajectory

    # Add both parameters
    traj.f_add_parameter('x', 1, comment='I am the first dimension!')
    traj.f_add_parameter('y', 1, comment='I am the second dimension!')

    # Explore the parameters with a cartesian product
    traj.f_explore(cartesian_product({'x': [1, 2, 3, 4], 'y': [6, 7, 8]}))

    # We want a shared list in which we can put all our results. We use a manager for this:
    result_list = mp.Manager().list()
    # Let's make some space for potential results
    result_list[:] = [0 for _dummy in range(len(traj))]

    # Run the simulation
    env.run(multiply, result_list)

    # Now we want to store the final list as numpy array
    traj.f_add_result('z', np.array(result_list))

    # Finally let's print the result to see that it worked
    print(traj.z)

    #Disable logging and close all log-files
    env.disable_logging()
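
Because `env.run(multiply, result_list)` forwards extra arguments to the run function, `multiply` here must accept the shared list; a sketch of that variant, assuming (as in the pypet sharing-data example) that `traj.v_idx` gives the current run index:

def multiply(traj, result_list):
    # Stores the product in the trajectory as usual, and additionally
    # writes it into the multiprocessing-managed list at this run's index.
    z = traj.x * traj.y
    traj.f_add_result('z', z, comment='Product of x and y')
    result_list[traj.v_idx] = z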
Example #16
def main():
    # Create an environment that handles running
    filename = os.path.join('hdf5', 'example_12.hdf5')
    env = Environment(trajectory='Multiplication',
                      filename=filename,
                      file_title='Example_12_Sharing_Data',
                      overwrite_file=True,
                      comment='The first example!',
                      continuable=False, # We have shared data in terms of a multiprocessing list,
                      # so we CANNOT use the continue feature.
                      multiproc=True,
                      ncores=2)

    # The environment has created a trajectory container for us
    traj = env.trajectory

    # Add both parameters
    traj.f_add_parameter('x', 1, comment='I am the first dimension!')
    traj.f_add_parameter('y', 1, comment='I am the second dimension!')

    # Explore the parameters with a cartesian product
    traj.f_explore(cartesian_product({'x':[1,2,3,4], 'y':[6,7,8]}))

    # We want a shared list in which we can put all our results. We use a manager for this:
    result_list = mp.Manager().list()
    # Let's make some space for potential results
    result_list[:] = [0 for _dummy in range(len(traj))]

    # Run the simulation
    env.run(multiply, result_list)

    # Now we want to store the final list as numpy array
    traj.f_add_result('z', np.array(result_list))

    # Finally let's print the result to see that it worked
    print(traj.z)

    #Disable logging and close all log-files
    env.disable_logging()
Example #17
def main(inputargs):
    args = docopt(__doc__, argv=inputargs)
    wavpath = path.join(modulePath, "resources", "tone_in_noise")
    stimuli = [path.join(wavpath, i) for i in glob.glob(path.join(wavpath, "*.wav"))]
    outfile = path.realpath(path.expanduser(args["--out"]))
    env = Environment(trajectory='tone-in-noise',
                      filename=outfile,
                      overwrite_file=True,
                      file_title="Tone in noise at different SNR",
                      comment="some comment",
                      large_overview_tables=False,
                      # freeze_input=True,
                      # use_pool=True,
                      multiproc=True,
                      ncores=3,
                      graceful_exit=True,
                      #wrap_mode=pypetconstants.WRAP_MODE_QUEUE,
                      )

    traj = env.trajectory
    traj.f_add_parameter('periphery', 'verhulst', comment="which periphery was used")
    traj.f_add_parameter('brainstem', 'nelsoncarney04', comment="which brainstem model was used")
    traj.f_add_parameter('weighting', "--no-cf-weighting ", comment="weighted CFs")
    traj.f_add_parameter('wavfile', '', comment="Which wav file to run")
    traj.f_add_parameter('level', 80, comment="stimulus level, spl")
    traj.f_add_parameter('neuropathy', "none", comment="")

    parameter_dict = {
        "periphery" : ['verhulst', 'zilany'],
        "brainstem" : ['nelsoncarney04', 'carney2015'],
        "weighting" : [cf_weighting, ""],
        "wavfile"   : stimuli,
        "level"     : [80],
        "neuropathy": ["none", "moderate", "severe", "ls-moderate", "ls-severe"]
    }

    traj.f_explore(cartesian_product(parameter_dict))
    env.run(tone_in_noise)
    return 0
Example #18
def main():
    name = 'LTL-MDP-GS'
    try:
        with open('path.conf') as f:
            root_dir_path = f.read().strip()
    except FileNotFoundError:
        raise FileNotFoundError(
            "You have not set the root path to store your results."
            " Write the path to a path.conf text file in the bin directory"
            " before running the simulation")
    paths = Paths(name, dict(run_no='test'), root_dir_path=root_dir_path)

    print("All output logs can be found in directory ", paths.logs_path)

    traj_file = os.path.join(paths.output_dir_path, 'data.h5')

    # Create an environment that handles running our simulation
    # This initializes a PyPet environment
    env = Environment(
        trajectory=name,
        filename=traj_file,
        file_title=u'{} data'.format(name),
        comment=u'{} data'.format(name),
        add_time=True,
        freeze_input=True,
        multiproc=True,
        use_scoop=True,
        wrap_mode=pypetconstants.WRAP_MODE_LOCAL,
        automatic_storing=True,
        log_stdout=False,  # Sends stdout to logs
        log_folder=os.path.join(paths.output_dir_path, 'logs'))
    create_shared_logger_data(logger_names=['bin', 'optimizers'],
                              log_levels=['INFO', 'INFO'],
                              log_to_consoles=[True, True],
                              sim_name=name,
                              log_directory=paths.logs_path)
    configure_loggers()

    # Get the trajectory from the environment
    traj = env.trajectory

    # NOTE: Innerloop simulator
    optimizee = StateActionOptimizee(traj)

    # NOTE: Outerloop optimizer initialization
    n_grid_divs_per_axis = 50
    parameters = GridSearchParameters(
        param_grid={
            'gamma': (optimizee.bound[0], optimizee.bound[1],
                      n_grid_divs_per_axis),
            #'lam': (optimizee.bound[0], optimizee.bound[1], n_grid_divs_per_axis),
            'eta': (optimizee.bound[0], optimizee.bound[1],
                    n_grid_divs_per_axis),
        })
    optimizer = GridSearchOptimizer(
        traj,
        optimizee_create_individual=optimizee.create_individual,
        optimizee_fitness_weights=(-1.,),
        parameters=parameters)

    # Add post processing
    env.add_postprocessing(optimizer.post_process)

    # Run the simulation with all parameter combinations
    env.run(optimizee.simulate)

    # NOTE: Outerloop optimizer end
    optimizer.end(traj)

    # Finally disable logging and close all log-files
    env.disable_logging()
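
The `path.conf` file read at the top of this example is just a one-line text file holding the root output directory, e.g. (hypothetical path):

/home/user/ltl_results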
Example #19
# The environment has created a trajectory container for us
traj = env.trajectory

# Add both parameters
traj.v_lazy_adding = True
traj.par.x = 1, 'I am the first dimension!'
traj.par.y = 1, 'I am the second dimension!'

# Explore just two points
traj.f_explore({'x': [3, 4]})

# So far everything was as in the first example. However, now we add links:
traj.f_add_link('mylink1', traj.f_get('x'))
# Note the `f_get` here to ensure to get the parameter instance, not the value 1
# This allows us now to access x differently:
print('x=' + str(traj.mylink1))
# We can try to avoid fast access as well, and recover the original parameter
print(str(traj.f_get('mylink1')))
# Dot notation is also allowed; it creates new groups on the fly
traj.f_add_link('parameters.mynewgroup.mylink2', traj.f_get('y'))



# And, of course, we can also use the links during run:
env.run(multiply)

# Finally disable logging and close all log-files
env.disable_logging()

Example #20
traj2 = env2.trajectory

# Add both parameters
traj1.f_add_parameter('x', 1.0, comment='I am the first dimension!')
traj1.f_add_parameter('y', 1.0, comment='I am the second dimension!')
traj2.f_add_parameter('x', 1.0, comment='I am the first dimension!')
traj2.f_add_parameter('y', 1.0, comment='I am the second dimension!')

# Explore the parameters with a cartesian product for the first trajectory:
traj1.f_explore(cartesian_product({'x':[1.0,2.0,3.0,4.0], 'y':[6.0,7.0,8.0]}))
# Let's explore slightly differently for the second:
traj2.f_explore(cartesian_product({'x':[3.0,4.0,5.0,6.0], 'y':[7.0,8.0,9.0]}))


# Run the simulations with all parameter combinations
env1.run(multiply)
env2.run(multiply)

# Now we merge them together into traj1
# We want to remove duplicate entries
# like the parameter space point x=3.0, y=7.0.
# Several points have been explored by both trajectories and we need them only once.
# Therefore, we set remove_duplicates=True (Note this takes O(N1*N2)!).
# We also want to backup both trajectories, but we let the system choose the filename.
# Accordingly we choose backup_filename=True instead of providing a filename.
# We want to move the hdf5 nodes from one trajectory to the other.
# Thus we set move_data=True.
# Finally, we want to delete the other trajectory afterwards since we already have a backup.
traj1.f_merge(traj2,
              remove_duplicates=True,
              backup_filename=True,
              move_data=True,
              delete_other_trajectory=True)
Example #21
    multiproc=True,
    ncores=ncores,
    use_pool=False,  # likely not working w/ brian2
    wrap_mode='QUEUE',  # ??
    overwrite_file=False)

tr = env.trajectory

add_params(tr)

if not args.testrun:
    tr.f_add_parameter('mconfig.git.sha1', str(commit))
    tr.f_add_parameter('mconfig.git.message', commit.message)

tr.f_explore(explore_dict)


def run_sim(tr):
    try:
        run_net(tr)
    except TimeoutError:
        print("Unable to plot, must run analysis manually")

    post_process(tr)


if args.postprocess:
    env.run(post_process)
else:
    env.run(run_sim)
Example #22
def main(dependent, optimizer):
    opt = optimizer.upper()
    identifier = '{:05x}'.format(np.random.randint(16**5))
    print('Identifier: ' + identifier)
    allocated_id = '07'  # dls.get_allocated_board_ids()[0]
    board_calibration_map = {
        'B291698': {
            'dac': 'dac_default.json',
            'cap': 'cap_mem_29.json'
        },
        '07': {
            'dac': 'dac_07_chip_20.json',
            'cap': 'calibration_20.json'
        },
        'B201319': {
            'dac': 'dac_B201319_chip_21.json',
            'cap': 'calibration_24.json'
        },
        'B201330': {
            'dac': 'dac_B201330_chip_22.json',
            'cap': 'calibration_22.json'
        }
    }

    dep_name = 'DEP' if dependent else 'IND'
    name = 'MAB_ANN_{}_{}_{}'.format(identifier, opt, dep_name)
    root_dir_path = os.path.expanduser('~/simulations')
    paths = Paths(name, dict(run_no=u'test'), root_dir_path=root_dir_path)

    with open(os.path.expanduser('~/LTL/bin/logging.yaml')) as f:
        l_dict = yaml.safe_load(f)
        log_output_file = os.path.join(paths.results_path,
                                       l_dict['handlers']['file']['filename'])
        l_dict['handlers']['file']['filename'] = log_output_file
        logging.config.dictConfig(l_dict)

    print("All output logs can be found in directory " + str(paths.logs_path))

    traj_file = os.path.join(paths.output_dir_path, u'data.h5')

    # Create an environment that handles running our simulation
    # This initializes a PyPet environment
    env = Environment(
        trajectory=name,
        filename=traj_file,
        file_title=u'{} data'.format(name),
        comment=u'{} data'.format(name),
        add_time=True,
        # freeze_input=True,
        # multiproc=True,
        # use_scoop=True,
        wrap_mode=pypetconstants.WRAP_MODE_LOCK,
        automatic_storing=True,
        log_stdout=False,  # Sends stdout to logs
        log_folder=os.path.join(paths.output_dir_path, 'logs'))
    create_shared_logger_data(logger_names=['bin', 'optimizers', 'optimizees'],
                              log_levels=['INFO', 'INFO', 'INFO'],
                              log_to_consoles=[True, True, True],
                              sim_name=name,
                              log_directory=paths.logs_path)
    configure_loggers()

    # Get the trajectory from the environment
    traj = env.trajectory

    optimizee_seed = 100

    with open('../adv/' + board_calibration_map[allocated_id]['cap']) as f:
        calibrated_config = json.load(f)
    with open('../adv/' + board_calibration_map[allocated_id]['dac']) as f:
        dac_config = json.load(f)

    class Dummy(object):
        def __init__(self, connector):
            self.connector = connector

        def __enter__(self):
            return self.connector

        def __exit__(self, exc_type, exc_val, exc_tb):
            pass

    class Mgr(object):
        def __init__(self):
            self.connector = None

        def establish(self):
            return Dummy(self.connector)

    max_learning_rate = 1.

    mgr = Mgr()
    optimizee_parameters = \
        BanditParameters(n_arms=2, n_pulls=100, n_samples=40, seed=optimizee_seed,
                         max_learning_rate=max_learning_rate, learning_rule=ANNLearningRule,
                         establish_connection=mgr.establish)
    optimizee = BanditOptimizee(traj, optimizee_parameters, dp=dependent)

    # Add post processing
    optimizer = None
    pop_size = 200
    n_iteration = 60
    if opt == 'CE':
        ce_optimizer_parameters = CrossEntropyParameters(
            pop_size=pop_size,
            rho=0.06,
            smoothing=0.3,
            temp_decay=0,
            n_iteration=n_iteration,
            distribution=NoisyGaussian(noise_magnitude=.2, noise_decay=.925),
            #Gaussian(),#NoisyGaussian(noise_magnitude=1., noise_decay=0.99),
            stop_criterion=np.inf,
            seed=102)
        ce_optimizer = CrossEntropyOptimizer(
            traj,
            optimizee_create_individual=optimizee.create_individual,
            optimizee_fitness_weights=(1, ),
            parameters=ce_optimizer_parameters,
            optimizee_bounding_func=optimizee.bounding_func)
        optimizer = ce_optimizer
    elif opt == 'ES':
        es_optimizer_parameters = EvolutionStrategiesParameters(
            learning_rate=1.8,
            learning_rate_decay=.93,
            noise_std=.03,
            mirrored_sampling_enabled=True,
            fitness_shaping_enabled=True,
            pop_size=int(pop_size / 2),
            n_iteration=n_iteration,
            stop_criterion=np.inf,
            seed=102)
        optimizer = EvolutionStrategiesOptimizer(traj,
                                                 optimizee.create_individual,
                                                 (1, ),
                                                 es_optimizer_parameters,
                                                 optimizee.bounding_func)
    elif opt == 'GD':
        gd_parameters = ClassicGDParameters(learning_rate=.003,
                                            exploration_step_size=.1,
                                            n_random_steps=pop_size,
                                            n_iteration=n_iteration,
                                            stop_criterion=np.inf,
                                            seed=102)
        optimizer = GradientDescentOptimizer(traj, optimizee.create_individual,
                                             (1, ), gd_parameters,
                                             optimizee.bounding_func)
    elif opt == 'SA':
        sa_parameters = SimulatedAnnealingParameters(
            n_parallel_runs=pop_size,
            noisy_step=.1,
            temp_decay=.9,
            n_iteration=n_iteration,
            stop_criterion=np.inf,
            seed=102,
            cooling_schedule=AvailableCoolingSchedules.EXPONENTIAL_ADDAPTIVE)
        optimizer = SimulatedAnnealingOptimizer(traj,
                                                optimizee.create_individual,
                                                (1, ), sa_parameters,
                                                optimizee.bounding_func)
    elif opt == 'GS':
        n_grid_points = 5
        gs_optimizer_parameters = GridSearchParameters(
            param_grid={
                'weight_prior': (0, 1, n_grid_points),
                'learning_rate': (0, 1, n_grid_points),
                'stim_inhibition': (0, 1, n_grid_points),
                'action_inhibition': (0, 1, n_grid_points),
                'learning_rate_decay': (0, 1, n_grid_points)
            })
        gs_optimizer = GridSearchOptimizer(
            traj,
            optimizee_create_individual=optimizee.create_individual,
            optimizee_fitness_weights=(1, ),
            parameters=gs_optimizer_parameters)
        optimizer = gs_optimizer
    else:
        exit(1)
    env.add_postprocessing(optimizer.post_process)

    # Add Recorder
    recorder = Recorder(trajectory=traj,
                        optimizee_name='MAB',
                        optimizee_parameters=optimizee_parameters,
                        optimizer_name=optimizer.__class__.__name__,
                        optimizer_parameters=optimizer.get_params())
    recorder.start()

    # Run the simulation with all parameter combinations
    # optimizee.simulate(traj)
    # exit(0)
    with Connector(calibrated_config, dac_config, 3) as connector:
        mgr.connector = connector
        env.run(optimizee.simulate)
    mgr.connector.disconnect()

    ## Outerloop optimizer end
    optimizer.end(traj)
    recorder.end()

    # Finally disable logging and close all log-files
    env.disable_logging()
Example #23
                  lazy_debug=False,  # ??
                  multiproc=True,     
                  ncores=ncores,
                  use_pool=False, # likely not working w/ brian2
                  wrap_mode='QUEUE', # ??
                  overwrite_file=False)


tr = env.trajectory

add_params(tr)

if not args.testrun:
    tr.f_add_parameter('mconfig.git.sha1', str(commit))
    tr.f_add_parameter('mconfig.git.message', commit.message)

tr.f_explore(explore_dict)


def run_sim(tr):
    run_model(tr)
    #post_process(tr)
    
env.run(run_sim)
Example #24
def main():
    name = 'LTL-MDP-FACE'
    try:
        with open('path.conf') as f:
            root_dir_path = f.read().strip()
    except FileNotFoundError:
        raise FileNotFoundError(
            "You have not set the root path to store your results."
            " Write the path to a path.conf text file in the bin directory"
            " before running the simulation")
    paths = Paths(name, dict(run_no='test'), root_dir_path=root_dir_path)

    print("All output logs can be found in directory ", paths.logs_path)

    traj_file = os.path.join(paths.output_dir_path, 'data.h5')

    # Create an environment that handles running our simulation
    # This initializes a PyPet environment
    env = Environment(
        trajectory=name,
        filename=traj_file,
        file_title=u'{} data'.format(name),
        comment=u'{} data'.format(name),
        add_time=True,
        # freeze_input=True,
        # multiproc=True,
        # use_scoop=True,
        wrap_mode=pypetconstants.WRAP_MODE_LOCK,
        automatic_storing=True,
        log_stdout=False,  # Sends stdout to logs
        log_folder=os.path.join(paths.output_dir_path, 'logs'))
    create_shared_logger_data(logger_names=['bin', 'optimizers'],
                              log_levels=['INFO', 'INFO'],
                              log_to_consoles=[True, True],
                              sim_name=name,
                              log_directory=paths.logs_path)
    configure_loggers()

    # Get the trajectory from the environment
    traj = env.trajectory

    optimizee = DLSMDPOptimizee(traj)

    # NOTE: Outerloop optimizer initialization
    # TODO: Change the optimizer to the appropriate Optimizer class
    parameters = FACEParameters(min_pop_size=25,
                                max_pop_size=25,
                                n_elite=10,
                                smoothing=0.2,
                                temp_decay=0,
                                n_iteration=100,
                                distribution=Gaussian(),
                                n_expand=5,
                                stop_criterion=np.inf,
                                seed=109)
    optimizer = FACEOptimizer(
        traj,
        optimizee_create_individual=optimizee.create_individual,
        optimizee_fitness_weights=(-1.,),
        parameters=parameters,
        optimizee_bounding_func=optimizee.bounding_func)

    # Add post processing
    env.add_postprocessing(optimizer.post_process)

    # Run the simulation with all parameter combinations
    env.run(optimizee.simulate)

    ## Outerloop optimizer end
    optimizer.end(traj)

    # Finally disable logging and close all log-files
    env.disable_logging()
Example #25
def main():

    filename = os.path.join('hdf5', 'example_06.hdf5')
    env = Environment(trajectory='Example_06_Euler_Integration',
                      filename=filename,
                      file_title='Example_06_Euler_Integration',
                      overwrite_file=True,
                      comment = 'Go for Euler!')


    traj = env.trajectory

    # 1st a) phase parameter addition
    # Remember we have some control flow in the `add_parameters` function, the default parameter
    # set we choose is the `'diff_lorenz'` one, but we want to deviate from that and use the
    # `'diff_roessler'`.
    # In order to do that we can preset the corresponding name parameter to change the
    # control flow:
    traj.f_preset_parameter('diff_name', 'diff_roessler') # If you erase this line, you will get
                                                          # the Lorenz attractor again
    add_parameters(traj)

    # 1st b) phase preparation
    # Let's check which function we want to use
    if traj.diff_name=='diff_lorenz':
        diff_eq = diff_lorenz
    elif traj.diff_name=='diff_roessler':
        diff_eq = diff_roessler
    else:
        raise ValueError('I don\'t know what %s is.' % traj.diff_name)
    # And add the source code of the function as a derived parameter.
    traj.f_add_derived_parameter(FunctionParameter, 'diff_eq', diff_eq,
                                     comment='Source code of our equation!')

    # We want to explore some initial conditions
    traj.f_explore({'initial_conditions' : [
        np.array([0.01,0.01,0.01]),
        np.array([2.02,0.02,0.02]),
        np.array([42.0,4.2,0.42])
    ]})
    # 3 different conditions are enough for now

    # 2nd phase let's run the experiment
    # We pass 'euler_scheme' as our top-level simulation function and
    # the Roessler function as an additional argument
    env.run(euler_scheme, diff_eq)

    # Again no post-processing

    # 4th phase analysis.
    # I would recommend doing the analysis completely independently of the simulation,
    # but for simplicity let's do it here.
    # We won't reload the trajectory this time but simply update the skeleton
    traj.f_load_skeleton()

    #For the fun of it, let's print the source code
    print('\n ---------- The source code of your function ---------- \n %s' % traj.diff_eq)

    # Let's get the exploration array:
    initial_conditions_exploration_array = traj.f_get('initial_conditions').f_get_range()
    # Now let's plot our simulated equations for the different initial conditions.
    # We will iterate through the run names
    for idx, run_name in enumerate(traj.f_get_run_names()):

        # Get the result of run idx from the trajectory
        euler_result = traj.results.f_get(run_name).euler_evolution
        # Now we need to load the result manually. Actually the results are not so large and we
        # could load them all at once, but for demonstration we do as if they were huge:
        traj.f_load_item(euler_result)
        euler_data = euler_result.data

        # Plot fancy 3d plot
        fig = plt.figure(idx)
        ax = fig.add_subplot(projection='3d')
        x = euler_data[:,0]
        y = euler_data[:,1]
        z = euler_data[:,2]
        ax.plot(x, y, z, label='Initial Conditions: %s' % str(initial_conditions_exploration_array[idx]))
        plt.legend()
        plt.show()

        # Now we free the data again (because we assume it's huuuuuuge):
        del euler_data
        euler_result.f_empty()

    # Finally disable logging and close all log-files
    env.disable_logging()
Example #26
                  use_pool=False, # likely not working w/ brian2
                  wrap_mode='QUEUE', # ??
                  overwrite_file=False, # ??
                  git_repository='./',
                  git_fail=True)


tr = env.trajectory

add_params(tr)
tr.f_add_parameter('mconfig.git.sha1', str(commit))
tr.f_add_parameter('mconfig.git.message', commit.message)

tr.f_explore(explore_dict)

env.run(run_net)


if not first_run:

    print("\nSimulation successful. Now merging...\n")

    tr1 = Trajectory(name='tr1',
                      add_time=False,
                      filename=filename,
                      dynamic_imports=[Brian2MonitorResult,
                                       Brian2Parameter])

    tr1.f_load(load_parameters=2, load_derived_parameters=2,
               load_results=2)
Example #27
                                  # 'T3bF' : [0.367, 0.249]
# Define the observable in the par. space
def scan(traj):
    import imp
    imp.reload(P5p_anomaly)
    P5p_anomaly.Lmb_corr_par['A0'] = traj.A0
    P5p_anomaly.Lmb_corr_par['A1'] = traj.A1
    # P5p_anomaly.Lmb_corr_par['A2'][1]=traj.A2bF
    # P5p_anomaly.Lmb_corr_par['T1'][1]=traj.T1bF
    # P5p_anomaly.Lmb_corr_par['T2'][1]=traj.T2bF
    # P5p_anomaly.Lmb_corr_par['T3'][1]=traj.T3bF
    return P5p_anomaly.P5p_binned()


# Find the maximum and minimum value for each bin
Result = env.run(scan)
print(Result)
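
`env.run` returns a list of `(run index, result)` tuples, so the per-bin maximum and minimum announced in the comment above can be extracted along these lines (a sketch, assuming each `scan` result is a sequence of bin values):

import numpy as np

values = np.array([res for _idx, res in Result])  # shape: (runs, bins)
bin_max = values.max(axis=0)  # maximum of each bin over the scanned grid
bin_min = values.min(axis=0)  # minimum of each bin over the scanned grid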

'''
def ManualScan():
    import imp
    P5p_anomaly.Lmb_corr_par['A0'] = [0.002, 0.465, 1.222]
    P5p_anomaly.Lmb_corr_par['A1'] = [-0.038, -0.074, 0.179]
    res = [P5p_anomaly.P5p_binned()]
    imp.reload(P5p_anomaly)
    P5p_anomaly.Lmb_corr_par['A0'] = [0.002, 0.715, 1.724]
    P5p_anomaly.Lmb_corr_par['A1'] = [0.012, -0.038, 0.137]
    res += [P5p_anomaly.P5p_binned()]
    return res

Result = ManualScan()
'''
Example #28
def main():

    env = Environment(trajectory='postproc_deap',
                      overwrite_file=True,
                      log_stdout=False,
                      log_level=50,  # only display ERRORS
                      automatic_storing=True,  # Since we use post-processing, we
                      # can safely enable automatic storing, because everything will
                      # only be stored once at the very end of all runs.
                      comment='Using pypet and DEAP with less overhead'
                      )
    traj = env.traj


    # ------- Add parameters ------- #
    traj.f_add_parameter('popsize', 100, comment='Population size')
    traj.f_add_parameter('CXPB', 0.5, comment='Crossover term')
    traj.f_add_parameter('MUTPB', 0.2, comment='Mutation probability')
    traj.f_add_parameter('NGEN', 20, comment='Number of generations')

    traj.f_add_parameter('generation', 0, comment='Current generation')
    traj.f_add_parameter('ind_idx', 0, comment='Index of individual')
    traj.f_add_parameter('ind_len', 50, comment='Length of individual')

    traj.f_add_parameter('indpb', 0.005, comment='Mutation parameter')
    traj.f_add_parameter('tournsize', 3, comment='Selection parameter')

    traj.f_add_parameter('seed', 42, comment='Seed for RNG')


    # Placeholders for individuals and results that are about to be explored
    traj.f_add_derived_parameter('individual', [0 for x in range(traj.ind_len)],
                                 'An individual of the population')
    traj.f_add_result('fitnesses', [], comment='Fitnesses of all individuals')


    # ------- Create and register functions with DEAP ------- #
    creator.create("FitnessMax", base.Fitness, weights=(1.0,))
    creator.create("Individual", list, fitness=creator.FitnessMax)

    toolbox = base.Toolbox()
    # Attribute generator
    toolbox.register("attr_bool", random.randint, 0, 1)
    # Structure initializers
    toolbox.register("individual", tools.initRepeat, creator.Individual,
        toolbox.attr_bool, traj.ind_len)
    toolbox.register("population", tools.initRepeat, list, toolbox.individual)

    # Operator registering
    toolbox.register("mate", tools.cxTwoPoint)
    toolbox.register("mutate", tools.mutFlipBit, indpb=traj.indpb)
    toolbox.register("select", tools.selTournament, tournsize=traj.tournsize)


    # ------- Initialize Population and Trajectory -------- #
    random.seed(traj.seed)
    pop = toolbox.population(n=traj.popsize)

    eval_pop = [ind for ind in pop if not ind.fitness.valid]
    traj.f_explore(cartesian_product({'generation': [0],
                                     'ind_idx': range(len(eval_pop)),
                                     'individual':[list(x) for x in eval_pop]},
                                        [('ind_idx', 'individual'),'generation']))

    # ----------- Add postprocessing ------------------ #
    postproc = Postprocessing(pop, eval_pop, toolbox)  # Add links to important structures
    env.add_postprocessing(postproc)

    # ------------ Run applying post-processing ---------- #
    env.run(eval_one_max)

    # ------------ Finished all runs and print result --------------- #
    print("-- End of (successful) evolution --")
    best_ind = tools.selBest(pop, 1)[0]
    print("Best individual is %s, %s" % (best_ind, best_ind.fitness.values))
Example #29
def main():
    name = 'LTL-MDP-SA_6_8_TD1'
    try:
        with open('path.conf') as f:
            root_dir_path = f.read().strip()
    except FileNotFoundError:
        raise FileNotFoundError(
            "You have not set the root path to store your results."
            " Write the path to a path.conf text file in the bin directory"
            " before running the simulation"
        )
    paths = Paths(name, dict(run_no='test'), root_dir_path=root_dir_path)

    print("All output logs can be found in directory ", paths.logs_path)

    traj_file = os.path.join(paths.output_dir_path, 'data.h5')

    # Create an environment that handles running our simulation
    # This initializes a PyPet environment
    env = Environment(trajectory=name, filename=traj_file, file_title=u'{} data'.format(name),
                      comment=u'{} data'.format(name),
                      add_time=True,
                      # freeze_input=True,
                      # multiproc=True,
                      # use_scoop=True,
                      wrap_mode=pypetconstants.WRAP_MODE_LOCK,
                      automatic_storing=True,
                      log_stdout=False,  # Sends stdout to logs
                      log_folder=os.path.join(paths.output_dir_path, 'logs')
                      )
    create_shared_logger_data(logger_names=['bin', 'optimizers'],
                              log_levels=['INFO', 'INFO'],
                              log_to_consoles=[True, True],
                              sim_name=name,
                              log_directory=paths.logs_path)
    configure_loggers()

    # Get the trajectory from the environment
    traj = env.trajectory

    optimizee = DLSMDPOptimizee(traj)

    # NOTE: Outerloop optimizer initialization

    parameters = SimulatedAnnealingParameters(n_parallel_runs=50, noisy_step=.03, temp_decay=.99, n_iteration=30,
                                              stop_criterion=np.Inf, seed=np.random.randint(1e5), cooling_schedule=AvailableCoolingSchedules.QUADRATIC_ADDAPTIVE)

    optimizer = SimulatedAnnealingOptimizer(traj, optimizee_create_individual=optimizee.create_individual,
                                            optimizee_fitness_weights=(-1.,),
                                            parameters=parameters,
                                            optimizee_bounding_func=optimizee.bounding_func)

    # Add post processing
    env.add_postprocessing(optimizer.post_process)

    # Run the simulation with all parameter combinations
    env.run(optimizee.simulate)

    ## Outerloop optimizer end
    optimizer.end(traj)

    # Finally disable logging and close all log-files
    env.disable_logging()
Example #30
def main(path_name, resolution, fixed_delay, use_pecevski, num_trials):
    name = path_name
    try:
        with open('bin/path.conf') as f:
            root_dir_path = f.read().strip()
    except FileNotFoundError:
        raise FileNotFoundError(
            "You have not set the root path to store your results."
            " Write the path to a path.conf text file in the bin directory"
            " before running the simulation")
    paths = Paths(name, dict(run_no='test'), root_dir_path=root_dir_path)

    traj_file = os.path.join(paths.output_dir_path, 'data.h5')

    # Create an environment that handles running our simulation
    # This initializes a PyPet environment
    env = Environment(
        trajectory=name,
        filename=traj_file,
        file_title='{} data'.format(name),
        comment='{} data'.format(name),
        add_time=True,
        automatic_storing=True,
        use_scoop=True,
        multiproc=True,
        wrap_mode=pypetconstants.WRAP_MODE_LOCAL,
        log_stdout=False,  # Do not capture stdout into the log files
    )

    create_shared_logger_data(logger_names=['bin', 'optimizers'],
                              log_levels=['INFO', 'INFO'],
                              log_to_consoles=[True, True],
                              sim_name=name,
                              log_directory=paths.logs_path)
    configure_loggers()

    # Get the trajectory from the environment
    traj = env.trajectory

    # NOTE: Innerloop simulator
    optimizee = SAMOptimizee(traj,
                             use_pecevski=use_pecevski,
                             n_NEST_threads=1,
                             time_resolution=resolution,
                             fixed_delay=fixed_delay,
                             plots_directory=paths.output_dir_path,
                             num_fitness_trials=num_trials)

    # NOTE: Outerloop optimizer initialization
    parameters = GeneticAlgorithmParameters(seed=0,
                                            popsize=200,
                                            CXPB=0.5,
                                            MUTPB=1.0,
                                            NGEN=20,
                                            indpb=0.01,
                                            tournsize=20,
                                            matepar=0.5,
                                            mutpar=1.0,
                                            remutate=False)

    optimizer = GeneticAlgorithmOptimizer(
        traj,
        optimizee_create_individual=optimizee.create_individual,
        optimizee_fitness_weights=(-0.1, ),
        parameters=parameters,
        optimizee_bounding_func=optimizee.bounding_func,
        optimizee_parameter_spec=optimizee.parameter_spec,
        fitness_plot_name=path_name)

    # Add post processing
    env.add_postprocessing(optimizer.post_process)

    # Run the simulation with all parameter combinations
    env.run(optimizee.simulate)

    # NOTE: Outerloop optimizer end
    optimizer.end(traj)

    # Finally disable logging and close all log-files
    env.disable_logging()
Example #31
''' Experiment settings: set acquisition strategies, etc. '''
seeds = list(range(1))
run_settings = {
    'initialization_seed': seeds,
    'xi_acquisition_function': ['PCD'] * len(seeds)
}
#run_settings = {'initialization_seed':seeds*10,
#                                  'xi_acquisition_function':['PCD','EXT','RAND','EI','EXR']*len(seeds)}
#run_settings = {'initialization_seed':seeds*7,
#                                  'xi_acquisition_function':['PCD']*7*len(seeds),'m':[2,4,8,12,20,25,30]*len(seeds)}
traj.f_explore(run_settings)
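# Note (added for clarity): a plain dict passed to f_explore is zipped
# index-by-index rather than expanded into a cartesian product: run i uses
# initialization_seed[i] together with xi_acquisition_function[i], so all
# lists must have equal length.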
''' Run experiment '''
start = time.time()
#env.run(six_hump_camel)
#env.run(levy)
env.run(hartmann6d)
#env.run(ackley)

print("The session completed!")
print("Total time: " + str(time.time() - start) + " seconds.")
''' End logging '''
if should_log:
    sys.stdout = orig_stdout
    log_file.close()
''' ---------------'''

### This is for debugging ###
GP_model = env.run(six_hump_camel)
GP_model = GP_model[0][1]
GP_model.theta = [0.01, 0.124, 0.1]
GP_model.update_model(optimize_theta=False)
Example #32
                load_results=0,
                load_derived_parameters=0,
                force=True)
    # Turn on auto loading
    traj.v_auto_load = True

    # Ensure trajectory was not already assembled
    if not traj.f_is_completed():
        # Save a backup version of the original trajectory
        traj_backup_fullpath = os.path.join(
            traj_dir, traj_filename + '.backup' +
            datetime.now().strftime("%Y_%m_%d_%Hh%Mm%Ss"))
        shutil.copy(traj_fullpath, traj_backup_fullpath)

        # Create a pypet Environment object and link it to the trajectory
        env = Environment(trajectory=traj)

        # Run assembly of trajectory
        env.run(assemble)

        print('\nFinished assembling files in folder: {0}'.format(traj_dir))
        print('Parameters:')
        print_leaves(traj, 'parameters')
        print('----------------------------------------------------------\n')

        # Finally disable logging
        env.disable_logging()
    else:
        print('Folder skipped: trajectory already completed: {0}'.format(
            traj_dir))
def main():
    name = 'LTL-MDP-GD_6_8_TD1'
    try:
        with open('path.conf') as f:
            root_dir_path = f.read().strip()
    except FileNotFoundError:
        raise FileNotFoundError(
            "You have not set the root path to store your results."
            " Write the path to a path.conf text file in the bin directory"
            " before running the simulation"
        )
    paths = Paths(name, dict(run_no='test'), root_dir_path=root_dir_path)

    print("All output logs can be found in directory ", paths.logs_path)

    traj_file = os.path.join(paths.output_dir_path, 'data.h5')

    # Create an environment that handles running our simulation
    # This initializes a PyPet environment
    env = Environment(trajectory=name, filename=traj_file, file_title=u'{} data'.format(name),
                      comment=u'{} data'.format(name),
                      add_time=True,
                      # freeze_input=True,
                      # multiproc=True,
                      # use_scoop=True,
                      wrap_mode=pypetconstants.WRAP_MODE_LOCK,
                      automatic_storing=True,
                      log_stdout=False,  # Do not capture stdout into the log files
                      log_folder=os.path.join(paths.output_dir_path, 'logs')
                      )
    create_shared_logger_data(logger_names=['bin', 'optimizers'],
                              log_levels=['INFO', 'INFO'],
                              log_to_consoles=[True, True],
                              sim_name=name,
                              log_directory=paths.logs_path)
    configure_loggers()

    # Get the trajectory from the environment
    traj = env.trajectory

    optimizee = DLSMDPOptimizee(traj)

    ## Outerloop optimizer initialization
    parameters = ClassicGDParameters(learning_rate=0.001, exploration_step_size=0.001,
                                     n_random_steps=50, n_iteration=30,
                                     stop_criterion=np.inf, seed=1234)
    #parameters = AdamParameters(learning_rate=0.01, exploration_step_size=0.01, n_random_steps=15, first_order_decay=0.8,
    #                             second_order_decay=0.8, n_iteration=83, stop_criterion=np.Inf, seed=99)
    # parameters = StochasticGDParameters(learning_rate=0.01, stochastic_deviation=1, stochastic_decay=0.99,
    #                                     exploration_step_size=0.01, n_random_steps=5, n_iteration=100,
    #                                     stop_criterion=np.Inf)
    #parameters = RMSPropParameters(learning_rate=0.01, exploration_step_size=0.01,
    #                               n_random_steps=5, momentum_decay=0.5,
    #                               n_iteration=100, stop_criterion=np.Inf, seed=99)

    optimizer = GradientDescentOptimizer(traj, optimizee_create_individual=optimizee.create_individual,
                                         optimizee_fitness_weights=(-1.,),
                                         parameters=parameters,
                                         optimizee_bounding_func=optimizee.bounding_func,
                                         base_point_evaluations=10)

    # Add post processing
    env.add_postprocessing(optimizer.post_process)

    # Run the simulation with all parameter combinations
    env.run(optimizee.simulate)

    ## Outerloop optimizer end
    optimizer.end(traj)

    # Finally disable logging and close all log-files
    env.disable_logging()
Example #34
def main(path_name, resolution, fixed_delay, state_handling, use_pecevski,
         num_trials):
    name = path_name
    try:
        with open('bin/path.conf') as f:
            root_dir_path = f.read().strip()
    except FileNotFoundError:
        raise FileNotFoundError(
            "You have not set the root path to store your results."
            " Write the path to a path.conf text file in the bin directory"
            " before running the simulation")
    paths = Paths(name, dict(run_no='test'), root_dir_path=root_dir_path)

    traj_file = os.path.join(paths.output_dir_path, 'data.h5')

    # Create an environment that handles running our simulation
    # This initializes a PyPet environment
    env = Environment(
        trajectory=name,
        filename=traj_file,
        file_title='{} data'.format(name),
        comment='{} data'.format(name),
        add_time=True,
        automatic_storing=True,
        use_scoop=True,
        multiproc=True,
        wrap_mode=pypetconstants.WRAP_MODE_LOCAL,
        log_stdout=False,  # Do not capture stdout into the log files
    )

    create_shared_logger_data(logger_names=['bin', 'optimizers'],
                              log_levels=['INFO', 'INFO'],
                              log_to_consoles=[True, True],
                              sim_name=name,
                              log_directory=paths.logs_path)
    configure_loggers()

    # Get the trajectory from the environment
    traj = env.trajectory

    # NOTE: Innerloop simulator
    optimizee = SAMGraphOptimizee(traj,
                                  n_NEST_threads=1,
                                  time_resolution=resolution,
                                  fixed_delay=fixed_delay,
                                  use_pecevski=use_pecevski,
                                  state_handling=state_handling,
                                  plots_directory=paths.output_dir_path,
                                  num_fitness_trials=num_trials)

    # Get bounds for mu and sigma calculation.
    param_spec = OrderedDict(sorted(SAMGraph.parameter_spec(4).items()))
    names = [k for k, _ in param_spec.items()]
    mu = np.array([(v_min + v_max) / 2
                   for k, (v_min, v_max) in param_spec.items()])
    sigma = np.array([(v_max - v_min) / 2
                      for k, (v_min, v_max) in param_spec.items()])

    print("Using means: {}\nUsing stds: {}".format(dict(zip(names, mu)),
                                                   dict(zip(names, sigma))))

    # NOTE: Outerloop optimizer initialization
    parameters = NaturalEvolutionStrategiesParameters(
        seed=0,
        pop_size=96,
        n_iteration=40,
        learning_rate_sigma=0.5,
        learning_rate_mu=0.5,
        mu=mu,
        sigma=sigma,
        mirrored_sampling_enabled=True,
        fitness_shaping_enabled=True,
        stop_criterion=np.inf)

    optimizer = NaturalEvolutionStrategiesOptimizer(
        traj,
        optimizee_create_individual=optimizee.create_individual,
        optimizee_fitness_weights=(-1.0, ),
        parameters=parameters,
        optimizee_bounding_func=optimizee.bounding_func,
        fitness_plot_name=path_name)

    # Add post processing
    env.add_postprocessing(optimizer.post_process)

    # Run the simulation with all parameter combinations
    env.run(optimizee.simulate)

    # NOTE: Outerloop optimizer end
    optimizer.end(traj)

    # Finally disable logging and close all log-files
    env.disable_logging()
示例#35
0
def _batch_run(dir='unnamed',
               batch_id='template',
               space=None,
               save_data_in_hdf5=False,
               single_method=single_run,
               process_method=null_processing,
               post_process_method=None,
               final_process_method=None,
               multiprocessing=True,
               resumable=True,
               overwrite=False,
               sim_config=None,
               params=None,
               config=None,
               post_kwargs={},
               run_kwargs={}):
    saved_args = locals()
    # print(locals())
    traj_name = f'{batch_id}_traj'
    parent_dir_path = f'{BatchRunFolder}/{dir}'
    dir_path = os.path.join(parent_dir_path, batch_id)
    plot_path = os.path.join(dir_path, f'{batch_id}.pdf')
    data_path = os.path.join(dir_path, f'{batch_id}.csv')
    filename = f'{dir_path}/{batch_id}.hdf5'
    build_new = True
    if os.path.exists(parent_dir_path) and os.path.exists(dir_path) and not overwrite:
        build_new = False
        try:
            print('Trying to resume existing trajectory')
            env = Environment(continuable=True)
            env.resume(trajectory_name=traj_name, resume_folder=dir_path)
            print('Resumed existing trajectory')
        except Exception:
            try:
                print('Trying to load existing trajectory')
                traj = load_trajectory(filename=filename,
                                       name=traj_name,
                                       load_all=0)
                env = Environment(trajectory=traj)
                traj.f_load(index=None, load_parameters=2, load_results=0)
                traj.f_expand(space)
                print('Loaded existing trajectory')
                build_new = False
            except Exception:
                print('Neither resuming nor expanding the existing trajectory worked')

    if build_new:
        if multiprocessing:
            multiproc = True
            resumable = False
            wrap_mode = pypetconstants.WRAP_MODE_QUEUE
        else:
            multiproc = False
            resumable = True
            wrap_mode = pypetconstants.WRAP_MODE_LOCK
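
        # Background (added note): with multiproc=True, pypet must serialize HDF5
        # access. WRAP_MODE_QUEUE ships results through a queue to a single
        # storing process, while WRAP_MODE_LOCK lets each worker acquire a lock
        # and store its own data. Resuming appears incompatible with the queue
        # wrapping, which is why this code disables `resumable` in that branch.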
        # try:
        print('Trying to create novel environment')
        env = Environment(
            trajectory=traj_name,
            filename=filename,
            file_title=batch_id,
            comment=f'{batch_id} batch run!',
            large_overview_tables=True,
            overwrite_file=True,
            resumable=resumable,
            resume_folder=dir_path,
            multiproc=multiproc,
            ncores=4,
            use_pool=True,  # Our runs are inexpensive, so we can cut overhead by using a pool
            freeze_input=True,  # We can avoid some overhead by freezing the input to the pool
            wrap_mode=wrap_mode,
            graceful_exit=True)
        traj = env.traj
        print('Created novel environment')
        fly_params = sim_config['fly_params']
        env_params = sim_config['env_params']
        sim_params = sim_config['sim_params']
        if all(v is not None for v in [sim_params, env_params, fly_params]):
            traj = load_default_configuration(traj,
                                              sim_params=sim_params,
                                              env_params=env_params,
                                              fly_params=fly_params)
        elif params is not None:
            for p in params:
                traj.f_apar(p, 0.0)
        if config is not None:
            for k, v in config.items():
                traj.f_aconf(k, v)
        traj.f_aconf('parent_dir_path',
                     parent_dir_path,
                     comment='The parent directory')
        traj.f_aconf('dir_path',
                     dir_path,
                     comment='The directory path for saving data')
        traj.f_aconf('plot_path',
                     plot_path,
                     comment='The file path for saving plot')
        traj.f_aconf('data_path',
                     data_path,
                     comment='The file path for saving data')
        traj.f_aconf('dataset_path',
                     f'{dir_path}/{batch_id}',
                     comment='The directory path for saving datasets')
        traj.f_explore(space)
        # except:
        #     raise ValueError(f'Failed to perform batch run {batch_id}')

    if post_process_method is not None:
        env.add_postprocessing(post_process_method, **post_kwargs)
    env.run(single_method,
            process_method,
            save_data_in_hdf5=save_data_in_hdf5,
            save_to=dir_path,
            common_folder=batch_id,
            **run_kwargs)
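    # Note (added for clarity): pypet forwards extra positional and keyword
    # arguments of env.run() to the top-level run function, so process_method,
    # save_data_in_hdf5, save_to, etc. are passed to single_method on every run.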
    env.disable_logging()
    print('Batch run complete')
    if final_process_method is not None:
        return final_process_method(env.traj)
# Explore the parameters with a cartesian product for the first trajectory:
traj1.f_explore(
    cartesian_product({
        'x': [1.0, 2.0, 3.0, 4.0],
        'y': [6.0, 7.0, 8.0]
    }))
# Let's explore slightly differently for the second:
traj2.f_explore(
    cartesian_product({
        'x': [3.0, 4.0, 5.0, 6.0],
        'y': [7.0, 8.0, 9.0]
    }))

# Run the simulations with all parameter combinations
env1.run(multiply)
env2.run(multiply)

# Now we merge them together into traj1
# We want to remove duplicate entries
# like the parameter space point x=3.0, y=7.0.
# Several points have been explored by both trajectories and we need them only once.
# Therefore, we set remove_duplicates=True (Note this takes O(N1*N2)!).
# We also want to backup both trajectories, but we let the system choose the filename.
# Accordingly we choose backup_filename=True instead of providing a filename.
# We want to move the hdf5 nodes from one trajectory to the other.
# Thus we set move_data=True.
# Finally, we want to delete the other trajectory afterwards since we already have a backup.
traj1.f_merge(traj2,
              remove_duplicates=True,
              backup_filename=True,
              move_data=True,
              delete_other_trajectory=True)
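
# Quick sanity check (our addition, assuming pypet's len() counts runs):
# after merging, traj1 holds the union of both explored grids, with overlapping
# points such as x=3.0, y=7.0 kept only once.
print('Number of runs after merging: %d' % len(traj1))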
Example #37
                  filename=filename,
                  file_title='Example_14_Links',
                  overwrite_file=True,
                  comment='How to use links')

# The environment has created a trajectory container for us
traj = env.trajectory

# Add both parameters
traj.par.x = Parameter('x', 1, 'I am the first dimension!')
traj.par.y = Parameter('y', 1, 'I am the second dimension!')

# Explore just two points
traj.f_explore({'x': [3, 4]})

# So far everything was as in the first example. However now we add links:
traj.f_add_link('mylink1', traj.f_get('x'))
# Note the `f_get` here to ensure to get the parameter instance, not the value 1
# This allows us now to access x differently:
print('x=' + str(traj.mylink1))
# We can also avoid fast access and recover the original parameter instance:
print(str(traj.f_get('mylink1')))
# And also colon notation is allowed that creates new groups on the fly
traj.f_add_link('parameters.mynewgroup.mylink2', traj.f_get('y'))
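
# Quick check (our addition, relying on pypet's natural naming and fast access):
# the new link resolves to the same value as traj.y
print('y via link = ' + str(traj.parameters.mynewgroup.mylink2))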

# And, of course, we can also use the links during run:
env.run(multiply)

# Finally disable logging and close all log-files
env.disable_logging()
def main():

    filename = os.path.join('hdf5', 'example_05.hdf5')
    env = Environment(trajectory='Example_05_Euler_Integration',
                      filename=filename,
                      file_title='Example_05_Euler_Integration',
                      overwrite_file=True,
                      comment='Go for Euler!')


    traj = env.trajectory
    trajectory_name = traj.v_name

    # 1st a) phase parameter addition
    add_parameters(traj)

    # 1st b) phase preparation
    # We will add the differential equation (well, its source code only) as a derived parameter
    traj.f_add_derived_parameter(FunctionParameter,'diff_eq', diff_lorenz,
                                 comment='Source code of our equation!')

    # We want to explore some initial conditions
    traj.f_explore({'initial_conditions' : [
        np.array([0.01,0.01,0.01]),
        np.array([2.02,0.02,0.02]),
        np.array([42.0,4.2,0.42])
    ]})
    # 3 different conditions are enough for an illustrative example

    # 2nd phase let's run the experiment
    # We pass `euler_scheme` as our top-level simulation function and
    # the Lorenz equation 'diff_lorenz' as an additional argument
    env.run(euler_scheme, diff_lorenz)

    # We don't have a 3rd phase of post-processing here

    # 4th phase analysis.
    # I would recommend doing post-processing completely independently of the simulation,
    # but for simplicity let's do it here.

    # Let's assume that we start all over again and load the entire trajectory new.
    # Yet, there is an error within this approach, do you spot it?
    del traj
    traj = Trajectory(filename=filename)

    # We will only fully load parameters and derived parameters.
    # Results will be loaded manually later on.
    try:
        # However, this will fail because our trajectory does not know how to
        # build the FunctionParameter. You have seen this coming, right?
        traj.f_load(name=trajectory_name, load_parameters=2, load_derived_parameters=2,
                    load_results=1)
    except ImportError as e:

        print('That didn\'t work, I am sorry: %s ' % str(e))

        # Ok, let's try again, but this time adding our parameter to the imports
        traj = Trajectory(filename=filename,
                          dynamically_imported_classes=FunctionParameter)

        # Now it works:
        traj.f_load(name=trajectory_name, load_parameters=2, load_derived_parameters=2,
                    load_results=1)


    #For the fun of it, let's print the source code
    print('\n ---------- The source code of your function ---------- \n %s' % traj.diff_eq)

    # Let's get the exploration array:
    initial_conditions_exploration_array = traj.f_get('initial_conditions').f_get_range()
    # Now let's plot our simulated equations for the different initial conditions:
    # We will iterate through the run names
    for idx, run_name in enumerate(traj.f_get_run_names()):

        #Get the result of run idx from the trajectory
        euler_result = traj.results.f_get(run_name).euler_evolution
        # Now we need to load the result manually. Actually the results are not so large and we
        # could load them all at once, but for demonstration we pretend they are huge:
        traj.f_load_item(euler_result)
        euler_data = euler_result.data

        #Plot fancy 3d plot
        fig = plt.figure(idx)
        ax = fig.add_subplot(projection='3d')  # fig.gca(projection='3d') was removed in newer Matplotlib
        x = euler_data[:,0]
        y = euler_data[:,1]
        z = euler_data[:,2]
        ax.plot(x, y, z, label='Initial Conditions: %s' % str(initial_conditions_exploration_array[idx]))
        plt.legend()
        plt.show()

        # Now we free the data again (because we assume its huuuuuuge):
        del euler_data
        euler_result.f_empty()

    # You have to click through the images to stop the example_05 module!

    # Finally disable logging and close all log-files
    env.disable_logging()
Example #39
def main():
    """Main function to protect the *entry point* of the program.

    If you want to use multiprocessing with SCOOP you need to wrap your
    main code creating an environment into a function. Otherwise
    the newly started child processes will re-execute the code and throw
    errors (also see http://scoop.readthedocs.org/en/latest/usage.html#pitfalls).

    """

    # Load settings from file
    settings_file = 'pypet_settings.pkl'
    settings = load_obj(settings_file)
    # Print settings dictionary
    print('\nSettings dictionary:')
    for key, value in settings.items():
        print(key, ' : ', value)
    print('\nParameters to explore:')
    for key, value in settings.items():
        if isinstance(value, list):
            print(key, ' : ', value)

    # Create new folder to store results
    traj_dir = os.getcwd()
    # Read output path (if provided)
    if len(sys.argv) > 1:
        # Only use specified folder if it exists
        if os.path.isdir(sys.argv[1]):
            # Get name of directory
            traj_dir = os.path.dirname(sys.argv[1])
            # Convert to full path
            traj_dir = os.path.abspath(traj_dir)
    # Add time stamp (final '' is to make sure there is a trailing slash)
    traj_dir = os.path.join(traj_dir,
                            datetime.now().strftime("%Y_%m_%d_%Hh%Mm%Ss"), '')
    # Create directory with time stamp
    os.makedirs(traj_dir)
    # Change current directory to the one containing the trajectory files
    os.chdir(traj_dir)
    print('Trajectory and results will be stored in: {0}'.format(traj_dir))

    # Create an environment that handles running.
    # Let's enable multiprocessing with scoop:
    env = Environment(
        trajectory='traj',
        comment='',
        add_time=False,
        log_config='DEFAULT',
        log_stdout=True,  # log everything that is printed; this can make the log file HUGE
        filename=traj_dir,  # filename or just a folder (the name is chosen automatically in this case)
        multiproc=False,
        #use_pool=True,
        #ncores=10,
        #freeze_input=True,
        use_scoop=False,
        #wrap_mode=pypetconstants.WRAP_MODE_LOCAL,
        memory_cap=1,
        swap_cap=1
        #cpu_cap=30

        #,git_repository='' #path to the root git folder. The commit code will be added in the trajectory
        #,git_fail=True #no automatic commits
        #,sumatra_project='' #path to sumatra root folder,
        #graceful_exit=True
    )

    traj = env.trajectory

    # Add config parameters (those that DO NOT influence the final result of the experiment)
    traj.f_add_config('parallel_target_analysis', True)
    #traj.f_add_config('debug', False)
    #traj.f_add_config('max_mem_frac', 0.7)

    # Set up trajectory parameters
    param_to_explore = {}
    for key, val in settings.items():
        if isinstance(val, list):
            param_to_explore[key] = val
            traj.f_add_parameter(key, val[0])
        else:
            traj.f_add_parameter(key, val)

    # Define parameter combinations to explore (a trajectory in
    # the parameter space). The second argument, the tuple, specifies the order
    #  of the cartesian product.
    # The variable on the right most side changes fastest and defines the
    # 'inner for-loop' of the cartesian product
    explore_dict = cartesian_product(param_to_explore,
                                     tuple(param_to_explore.keys()))
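    # Worked illustration (added; values are hypothetical):
    # cartesian_product({'a': [1, 2], 'b': ['x', 'y']}, ('a', 'b'))
    # returns {'a': [1, 1, 2, 2], 'b': ['x', 'y', 'x', 'y']} -- the right-most
    # parameter 'b' changes fastest, as described above.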
    # explore_dict = cartesian_product(
    #     {
    #         'network_inference.algorithm': ['bMI_greedy', 'bTE_greedy', 'mTE_greedy'],
    #         #'node_coupling.initial.weight_distribution': ['fixed'],
    #         'repetition_i': np.arange(0, 5, 1).tolist(),
    #         'topology.initial.nodes_n': np.arange(50, 50+1, 300).tolist(),
    #         'node_dynamics.samples_n': np.array([1000, 10000]).tolist(),
    #         'network_inference.p_value': np.array([0.001]).tolist(),
    #         #'node_coupling.initial.self_coupling': np.arange(-0.5, 0.5 + 0.001, 0.1).tolist(),
    #         #'node_coupling.initial.total_cross_coupling': np.arange(-1., 1 + 0.001, 0.2).tolist(),
    #         #'topology.initial.WS_p': np.around(np.logspace(-2.2, 0, 10), decimals=4).tolist(),
    #     },
    #     (
    #         'network_inference.algorithm',
    #         #'node_coupling.initial.weight_distribution',
    #         'network_inference.p_value',
    #         'node_dynamics.samples_n',
    #         'topology.initial.nodes_n',
    #         #'topology.initial.WS_p',
    #         #'node_coupling.initial.self_coupling',
    #         #'node_coupling.initial.total_cross_coupling',
    #         'repetition_i',
    #     )
    # )
    traj.f_explore(explore_dict)

    # Store trajectory parameters to disk
    pypet_utils.print_traj_leaves(traj, 'parameters', 'traj_parameters.txt')

    # Run the experiment
    env.run(information_network_inference)
    # env.run(bTE_on_existing_links)

    # Check that all runs are completed
    assert traj.f_is_completed()

    # Finally disable logging and close all log-files
    env.disable_logging()
Example #40
def main():
    name = 'LTL-MDP-CE_6_8_TD1_New'
    try:
        with open('path.conf') as f:
            root_dir_path = f.read().strip()
    except FileNotFoundError:
        raise FileNotFoundError(
            "You have not set the root path to store your results."
            " Write the path to a path.conf text file in the bin directory"
            " before running the simulation")
    paths = Paths(name, dict(run_no='test'), root_dir_path=root_dir_path)

    print("All output logs can be found in directory ", paths.logs_path)

    traj_file = os.path.join(paths.output_dir_path, 'data.h5')

    # Create an environment that handles running our simulation
    # This initializes a PyPet environment
    env = Environment(
        trajectory=name,
        filename=traj_file,
        file_title=u'{} data'.format(name),
        comment=u'{} data'.format(name),
        add_time=True,
        freeze_input=True,
        multiproc=True,
        use_scoop=True,
        wrap_mode=pypetconstants.WRAP_MODE_LOCAL,
        automatic_storing=True,
        log_stdout=False,  # Do not capture stdout into the log files
        log_folder=os.path.join(paths.output_dir_path, 'logs'))
    create_shared_logger_data(logger_names=['bin', 'optimizers'],
                              log_levels=['INFO', 'INFO'],
                              log_to_consoles=[True, True],
                              sim_name=name,
                              log_directory=paths.logs_path)
    configure_loggers()

    # Get the trajectory from the environment
    traj = env.trajectory

    # NOTE: Benchmark function
    optimizee = StateActionOptimizee(traj)

    # NOTE: Outerloop optimizer initialization
    # TODO: Change the optimizer to the appropriate Optimizer class
    parameters = CrossEntropyParameters(pop_size=75,
                                        rho=0.2,
                                        smoothing=0.0,
                                        temp_decay=0,
                                        n_iteration=75,
                                        distribution=NoisyGaussian(
                                            noise_magnitude=1,
                                            noise_decay=0.95),
                                        stop_criterion=np.inf,
                                        seed=102)
    optimizer = CrossEntropyOptimizer(
        traj,
        optimizee_create_individual=optimizee.create_individual,
        optimizee_fitness_weights=(-1., ),
        parameters=parameters,
        optimizee_bounding_func=optimizee.bounding_func)

    # Add post processing
    env.add_postprocessing(optimizer.post_process)

    # Add Recorder
    recorder = Recorder(trajectory=traj,
                        optimizee_name='SNN StateAction',
                        optimizee_parameters=['gamma', 'eta'],
                        optimizer_name=optimizer.__class__.__name__,
                        optimizer_parameters=optimizer.get_params())
    recorder.start()

    # Run the simulation with all parameter combinations
    env.run(optimizee.simulate)

    # NOTE: Outerloop optimizer end
    optimizer.end(traj)
    recorder.end()

    # Finally disable logging and close all log-files
    env.disable_logging()
Example #41
def main():
    name = 'LTL-MDP-ES_6_8_TD1'
    try:
        with open('path.conf') as f:
            root_dir_path = f.read().strip()
    except FileNotFoundError:
        raise FileNotFoundError(
            "You have not set the root path to store your results."
            " Write the path to a path.conf text file in the bin directory"
            " before running the simulation"
        )

    paths = Paths(name, dict(run_no='test'), root_dir_path=root_dir_path)

    print("All output logs can be found in directory ", paths.logs_path)

    traj_file = os.path.join(paths.output_dir_path, 'data.h5')
    # Create an environment that handles running our simulation
    # This initializes a PyPet environment
    env = Environment(trajectory=name, filename=traj_file, file_title=u'{} data'.format(name),
                      comment=u'{} data'.format(name),
                      add_time=True,
                      # freeze_input=True,
                      # multiproc=True,
                      # use_scoop=True,
                      wrap_mode=pypetconstants.WRAP_MODE_LOCK,
                      automatic_storing=True,
                      log_stdout=False,  # Do not capture stdout into the log files
                      log_folder=os.path.join(paths.output_dir_path, 'logs')
                      )
    create_shared_logger_data(logger_names=['bin', 'optimizers'],
                              log_levels=['INFO', 'INFO'],
                              log_to_consoles=[True, True],
                              sim_name=name,
                              log_directory=paths.logs_path)
    configure_loggers()

    # Get the trajectory from the environment
    traj = env.trajectory

    ## Benchmark function / innerloop simulator
    optimizee = DLSMDPOptimizee(traj)

    ## Outerloop optimizer initialization
    optimizer_seed = 1234
    parameters = EvolutionStrategiesParameters(
        learning_rate=0.5,
        learning_rate_decay=0.95,
        noise_std=0.1,
        mirrored_sampling_enabled=True,
        fitness_shaping_enabled=True,
        pop_size=25,
        n_iteration=30,
        stop_criterion=np.inf,
        seed=optimizer_seed)

    optimizer = EvolutionStrategiesOptimizer(
        traj,
        optimizee_create_individual=optimizee.create_individual,
        optimizee_fitness_weights=(-1.,),
        parameters=parameters,
        optimizee_bounding_func=optimizee.bounding_func)

    # Add post processing
    env.add_postprocessing(optimizer.post_process)

    # Run the simulation with all parameter combinations
    env.run(optimizee.simulate)

    ## Outerloop optimizer end
    optimizer.end(traj)

    # Finally disable logging and close all log-files
    env.disable_logging()