Example #1
def main():
    """ Main *boilerplate* function to start simulation """
    # Now let's make use of logging
    logger = logging.getLogger()

    # Create folders for data and plots
    folder = os.path.join(os.getcwd(), 'experiments', 'ca_patterns_pypet')
    if not os.path.isdir(folder):
        os.makedirs(folder)
    filename = os.path.join(folder, 'all_patterns.hdf5')

    # Create an environment
    env = Environment(trajectory='cellular_automata',
                      multiproc=True,
                      ncores=4,
                      wrap_mode='QUEUE',
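                      # 'QUEUE' wrapping funnels all storage operations through
                      # a single queue process, since concurrent HDF5 writes
                      # from several processes are not safe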
                      filename=filename,
                      overwrite_file=True)

    # extract the trajectory
    traj = env.traj

    traj.par.ncells = Parameter('ncells', 400, 'Number of cells')
    traj.par.steps = Parameter('steps', 250, 'Number of timesteps')
    traj.par.rule_number = Parameter('rule_number', 30, 'The ca rule')
    traj.par.initial_name = Parameter('initial_name', 'random',
                                      'The type of initial state')
    traj.par.seed = Parameter('seed', 100042, 'RNG Seed')

    # Explore
    exp_dict = {
        'rule_number': [10, 30, 90, 110, 184],
        'initial_name': ['single', 'random'],
    }
    # # You can uncomment the ``exp_dict`` below to see that changing the
    # # exploration scheme is now really easy:
    # exp_dict = {'rule_number' : [10, 30, 90, 110, 184],
    #             'ncells' : [100, 200, 300],
    #             'seed': [333444555, 123456]}
    exp_dict = cartesian_product(exp_dict)
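    # cartesian_product expands the dict into equal-length value lists that
    # cover every combination (here 5 rules x 2 initial states = 10 runs)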
    traj.f_explore(exp_dict)

    # Run the simulation
    logger.info('Starting Simulation')
    env.run(wrap_automaton)

    # Load all data
    traj.f_load(load_data=2)

    logger.info('Printing data')
    for idx, run_name in enumerate(traj.f_iter_runs()):
        # Plot all patterns
        filename = os.path.join(folder, make_filename(traj))
        plot_pattern(traj.crun.pattern, traj.rule_number, filename)
        progressbar(idx, len(traj), logger=logger)

    # Finally disable logging and close all log-files
    env.disable_logging()
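The run function wrap_automaton is defined elsewhere in the original project. A minimal, self-contained sketch of the shape pypet expects (the CA update logic below is illustrative, not the project's code):

import numpy as np

def wrap_automaton(traj):
    # pypet calls this once per run, with one explored combination set on traj
    rng = np.random.RandomState(traj.seed)
    if traj.initial_name == 'random':
        state = (rng.rand(traj.ncells) < 0.5).astype(np.uint8)
    else:  # 'single': one live cell in the middle
        state = np.zeros(traj.ncells, dtype=np.uint8)
        state[traj.ncells // 2] = 1
    # Decode the Wolfram rule number into an 8-entry lookup table
    rule = np.array([(traj.rule_number >> i) & 1 for i in range(8)], dtype=np.uint8)
    pattern = [state]
    for _ in range(traj.steps - 1):
        hood = (np.roll(state, 1) << 2) | (state << 1) | np.roll(state, -1)
        state = rule[hood]
        pattern.append(state)
    # Results stored during a run end up under results.runs.crun
    traj.f_add_result('pattern', np.array(pattern), comment='CA time evolution')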
Example #2
def main():
    # pypet environment
    env = Environment(
        trajectory=SIM_NAME,
        comment="Experiment on network size with binary covariates",
        log_config=None,
        multiproc=False,
        ncores=1,
        filename=SIM_PATH + "/results/",
        overwrite_file=True)
    traj = env.trajectory

    # parameters (data generation)

    traj.f_add_parameter("data.N", np.int64(500), "Number of nodes")
    traj.f_add_parameter("data.K", np.int64(5),
                         "True number of latent components")
    traj.f_add_parameter("data.p_cts", np.int64(0),
                         "Number of continuous covariates")
    traj.f_add_parameter("data.p_bin", np.int64(0),
                         "Number of binary covariates")
    traj.f_add_parameter("data.var_adj", np.float64(1.),
                         "True variance in the link Probit model")
    traj.f_add_parameter("data.var_cov", np.float64(1.),
                         "True variance in the covariate model (cts and bin)")
    traj.f_add_parameter("data.missing_rate", np.float64(0.1), "Missing rate")
    traj.f_add_parameter("data.seed", np.int64(1), "Random seed")
    traj.f_add_parameter("data.alpha_mean", np.float64(-1.85),
                         "Mean of the heterogeneity parameter")

    # parameters (model)
    traj.f_add_parameter("model.K", np.int64(5),
                         "Number of latent components in the model")
    traj.f_add_parameter("model.adj_model", "Logistic", "Adjacency model")
    traj.f_add_parameter("model.bin_model", "Logistic",
                         "Binary covariate model")

    # parameters (fit)
    traj.f_add_parameter("fit.n_iter", np.int64(20),
                         "Number of VEM iterations")
    traj.f_add_parameter("fit.n_vmp", np.int64(5),
                         "Number of VMP iterations per E-step")
    traj.f_add_parameter("fit.n_gd", np.int64(5),
                         "Number of GD iterations per M-step")
    traj.f_add_parameter("fit.step_size", np.float64(0.01), "GD Step size")

    # experiment
    explore_dict = {
        "data.N": np.array([50, 100, 200, 500, 1000, 2000]),
        "data.p_bin": np.array([10, 100, 500]),
        "data.seed": np.arange(0, 100, 1)
    }
    experiment = cartesian_product(explore_dict, tuple(explore_dict.keys()))
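    # Full cartesian product over N, p_bin and seed: 6 x 3 x 100 = 1,800 runs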
    traj.f_explore(experiment)

    env.add_postprocessing(post_processing)
    env.run(run)
    env.disable_logging()
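The post_processing handler is not shown here. A pypet postprocessing function receives the trajectory and a list of (run index, return value) tuples from the completed runs; a minimal sketch under that assumption:

def post_processing(traj, result_list):
    # Iterate over whatever the run function returned for each run
    for run_index, result in result_list:
        traj.v_idx = run_index  # switch natural naming to this run's parameters
        print('N=%d, p_bin=%d -> %s' % (traj.data.N, traj.data.p_bin, result))
    traj.v_idx = -1  # reset the trajectory to its global view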
Example #3
    def make_env(self, **kwargs):

        self.mode.__dict__.update(kwargs)
        filename = 'log_testing.hdf5'
        self.filename = make_temp_dir(filename)
        self.traj_name = make_trajectory_name(self)
        self.env = Environment(trajectory=self.traj_name,
                               filename=self.filename, **self.mode.__dict__)
        self.traj = self.env.v_traj
Example #4
    def test_hdf5_settings_and_context(self):

        filename = make_temp_dir('hdfsettings.hdf5')
        with Environment('testraj',
                         filename=filename,
                         add_time=True,
                         comment='',
                         dynamic_imports=None,
                         log_config=None,
                         multiproc=False,
                         ncores=3,
                         wrap_mode=pypetconstants.WRAP_MODE_LOCK,
                         continuable=False,
                         use_hdf5=True,
                         complevel=4,
                         complib='zlib',
                         shuffle=True,
                         fletcher32=True,
                         pandas_format='t',
                         pandas_append=True,
                         purge_duplicate_comments=True,
                         summary_tables=True,
                         small_overview_tables=True,
                         large_overview_tables=True,
                         results_per_run=19,
                         derived_parameters_per_run=17) as env:

            traj = env.v_trajectory

            traj.f_store()

            hdf5file = pt.openFile(filename=filename)

            table = hdf5file.root._f_getChild(traj.v_name)._f_getChild(
                'overview')._f_getChild('hdf5_settings')

            row = table[0]

            self.assertTrue(row['complevel'] == 4)

            self.assertTrue(row['complib'] == compat.tobytes('zlib'))

            self.assertTrue(row['shuffle'])
            self.assertTrue(row['fletcher32'])
            self.assertTrue(row['pandas_format'] == compat.tobytes('t'))

            for attr_name, table_name in HDF5StorageService.NAME_TABLE_MAPPING.items():
                self.assertTrue(row[table_name])

            self.assertTrue(row['purge_duplicate_comments'])
            self.assertTrue(row['results_per_run'] == 19)
            self.assertTrue(row['derived_parameters_per_run'] == 17)

            hdf5file.close()
Example #5
def main(name,
         explore_dict,
         postprocess=False,
         ncores=1,
         testrun=False,
         commit=None):

    if not testrun and commit is None:
        raise Exception("A non-test run requires a commit")

    filename = os.path.join(os.getcwd(), 'data/', name + '.hdf5')

    # if not the first run, tr2 will be merged later
    label = 'tr1'

    # if only post processing, can't use the same label
    # (generates HDF5 error)
    if postprocess:
        label += '_postprocess-%.6d' % random.randint(0, 999999)

    env = Environment(
        trajectory=label,
        add_time=False,
        filename=filename,
        continuable=False,  # ??
        lazy_debug=False,  # ??
        multiproc=True,
        ncores=ncores,
        use_pool=False,  # likely not working w/ brian2
        wrap_mode='QUEUE',  # ??
        overwrite_file=False)

    tr = env.trajectory

    add_params(tr)

    if not testrun:
        tr.f_add_parameter('mconfig.git.sha1', str(commit))
        tr.f_add_parameter('mconfig.git.message', commit.message)

    tr.f_explore(explore_dict)

    def run_sim(tr):
        try:
            run_net(tr)
        except TimeoutError:
            print("Unable to plot, must run analysis manually")

        post_process(tr)

    if postprocess:
        env.run(post_process)
    else:
        env.run(run_sim)
Example #6
def fail_on_diff():
    try:
        Environment(trajectory='fail',
                    filename=os.path.join('fail', 'HDF5'),
                    file_title='failing',
                    git_repository='.', git_message='Im a message!',
                    git_fail=True)
        raise RuntimeError('You should not be here!')
    except GitDiffError as exc:
        print('I expected the GitDiffError: `%s`' % repr(exc))
Example #7
File: main.py  Project: fontaine618/NAIVI
def main(path, name, explore_dict):
    comment = "\n".join(
        ["{}: {}".format(k, v) for k, v in explore_dict.items()])
    # pypet environment
    env = Environment(trajectory=name,
                      comment=comment,
                      log_config=None,
                      multiproc=False,
                      ncores=1,
                      filename=path + name + "/results/",
                      overwrite_file=True)
    traj = env.trajectory
    traj.f_add_parameter("path", path + name, "Path")

    # parameters (data generation)
    traj.f_add_parameter("data.N", np.int64(500), "Number of nodes")
    traj.f_add_parameter("data.K", np.int64(5),
                         "True number of latent components")
    traj.f_add_parameter("data.p_cts", np.int64(0),
                         "Number of continuous covariates")
    traj.f_add_parameter("data.p_bin", np.int64(0),
                         "Number of binary covariates")
    traj.f_add_parameter("data.var_cov", np.float64(1.),
                         "True variance in the covariate model (cts and bin)")
    traj.f_add_parameter("data.missing_rate", np.float64(0.1), "Missing rate")
    traj.f_add_parameter("data.seed", np.int64(1), "Random seed")
    traj.f_add_parameter("data.center", np.int64(1), "Ego-network center")
    traj.f_add_parameter("data.alpha_mean", np.float64(-1.85),
                         "Mean of the heterogeneity parameter")

    # parameters (model)
    traj.f_add_parameter("model.K", np.int64(5),
                         "Number of latent components in the model")

    # parameters (fit)
    traj.f_add_parameter("fit.algo", "MLE", "Inference algorithm")
    traj.f_add_parameter("fit.max_iter", np.int64(500),
                         "Number of VEM iterations")
    traj.f_add_parameter("fit.n_sample", np.int64(1),
                         "Number of samples for VIMC")
    traj.f_add_parameter("fit.eps", np.float64(1.0e-6),
                         "convergence threshold")
    traj.f_add_parameter("fit.lr", np.float64(0.01), "GD Step size")

    # experiment
    experiment = cartesian_product(explore_dict, tuple(explore_dict.keys()))
    traj.f_explore(experiment)
    env.add_postprocessing(post_processing)
    env.run(run)
    env.disable_logging()
Example #8
def main(fail=False):
    try:
        sumatra_project = '.'

        if fail:
            print('There had better not be any diffs.')

        # Create an environment that handles running
        with Environment(trajectory='Example1_Quick_And_Not_So_Dirty',
                         filename=os.path.join(
                             'experiments',
                             'HDF5',
                         ),
                         file_title='Example1_Quick_And_Not_So_Dirty',
                         comment='The first example!',
                         complib='blosc',
                         small_overview_tables=False,
                         git_repository='.',
                         git_message='Im a message!',
                         git_fail=fail,
                         sumatra_project=sumatra_project,
                         sumatra_reason='Testing!') as env:

            # Get the trajectory from the environment
            traj = env.v_trajectory

            # Add both parameters
            traj.f_add_parameter('x', 1, comment='Im the first dimension!')
            traj.f_add_parameter('y', 1, comment='Im the second dimension!')

            # Explore the parameters with a cartesian product:
            traj.f_explore(cartesian_product({'x': [1, 2, 3], 'y': [6, 7, 8]}))

            # Run the simulation
            env.f_run(multiply)

            # Check that git information was added to the trajectory
            assert 'config.git.hexsha' in traj
            assert 'config.git.committed_date' in traj
            assert 'config.git.message' in traj
            assert 'config.git.name_rev' in traj

            print("Python git test successful")

            # traj.f_expand({'x':[3,3],'y':[42,43]})
            #
            # env.f_run(multiply)
    except Exception as exc:
        print(repr(exc))
        sys.exit(1)
Example #9
def main():
    """Main function to protect the *entry point* of the program.

    If you want to use multiprocessing under Windows you need to wrap your
    main code creating an environment into a function. Otherwise
    the newly started child processes will re-execute the code and throw
    errors (also see https://docs.python.org/2/library/multiprocessing.html#windows).

    """

    # Create an environment that handles running.
    # Let's enable multiprocessing with 2 workers.
    filename = os.path.join('hdf5', 'example_04.hdf5')
    env = Environment(
        trajectory='Example_04_MP',
        filename=filename,
        file_title='Example_04_MP',
        log_stdout=True,
        comment='Multiprocessing example!',
        multiproc=True,
        ncores=4,
        use_pool=True,  # Our runs are inexpensive, so we can get rid of
        # overhead by using a pool
        freeze_input=True,  # We can avoid some overhead by freezing the
        # input to the pool
        wrap_mode=pypetconstants.WRAP_MODE_QUEUE,
        graceful_exit=True,  # We want to exit in a data-friendly way
        # that saves all results after hitting CTRL+C, try it ;-)
        overwrite_file=True)

    # Get the trajectory from the environment
    traj = env.trajectory

    # Add both parameters
    traj.f_add_parameter('x', 1.0, comment='I am the first dimension!')
    traj.f_add_parameter('y', 1.0, comment='I am the second dimension!')

    # Explore the parameters with a cartesian product, but we want to explore a bit more
    traj.f_explore(
        cartesian_product({
            'x': [float(x) for x in range(20)],
            'y': [float(y) for y in range(20)]
        }))

    # Run the simulation
    env.run(multiply)

    # Finally disable logging and close all log-files
    env.disable_logging()
Example #10
def get_runtime(length):
    filename = os.path.join('tmp', 'hdf5', 'many_runs.hdf5')

    with Environment(filename=filename,
                     log_levels=20,
                     report_progress=(0.0000002, 'progress', 50),
                     overwrite_file=True,
                     purge_duplicate_comments=False,
                     log_stdout=False,
                     summary_tables=False,
                     small_overview_tables=False) as env:

        traj = env.v_traj

        traj.par.f_apar('x', 0, 'parameter')

        traj.f_explore({'x': range(length)})

        max_run = 100

        for idx in range(len(traj)):
            if idx > max_run:
                traj.f_get_run_information(idx, copy=False)['completed'] = 1
        traj.f_store()

        if not os.path.isdir('./tmp'):
            os.mkdir('tmp')
        graphviz = CustomOutput()
        graphviz.output_file = './tmp/run_profile_storage_%d.png' % len(traj)
        service_filter = GlobbingFilter(include=['*storageservice.*'])

        config = Config(groups=True, verbose=True)
        config.trace_filter = service_filter

        print('RUN PROFILE')
        with PyCallGraph(config=config, output=graphviz):
            # start = time.time()
            # env.f_run(job)
            # end = time.time()
            for irun in range(100):
                traj._make_single_run(irun + len(traj) // 2)
                # Measure start time
                traj._set_start()
                traj.f_ares('$set.$', 42, comment='A result')
                traj._set_finish()
                traj._store_final(store_data=2)
                traj._finalize_run()
            print('STARTING_to_PLOT')
        print('DONE RUN PROFILE')
Example #11
    def test_hdf5_store_load_result(self):
        traj_name = make_trajectory_name(self)
        file_name = make_temp_dir(
            os.path.join('brian2', 'tests', 'hdf5',
                         'test_%s.hdf5' % traj_name))
        env = Environment(trajectory=traj_name,
                          filename=file_name,
                          log_config=get_log_config(),
                          dynamic_imports=[Brian2Result],
                          add_time=False,
                          storage_service=HDF5StorageService)
        traj = env.v_trajectory
        traj.v_standard_result = Brian2Result
        traj.f_add_result('brian2.single.millivolts_single_a',
                          10 * mvolt,
                          comment='single value a')
        traj.f_add_result('brian2.single.millivolts_single_c',
                          11 * mvolt,
                          comment='single value b')

        traj.f_add_result('brian2.array.millivolts_array_a', [11, 12] * mvolt,
                          comment='array')
        traj.f_add_result('mV1', 42.0 * mV)
        # results can hold much more than a single data item:
        traj.f_add_result('ampere1',
                          1 * mA,
                          44,
                          test=300 * mV,
                          test2=[1, 2, 3],
                          test3=np.array([1, 2, 3]) * mA,
                          comment='Result keeping track of many things')
        traj.f_add_result('integer', 16)
        traj.f_add_result('kHz05', 0.5 * kHz)
        traj.f_add_result('nested_array',
                          np.array([[6., 7., 8.], [9., 10., 11.]]) * ms)
        traj.f_add_result('b2a', np.array([1., 2.]) * mV)

        traj.f_add_result('nounit',
                          Quantity(np.array([[6., 7., 8.], [9., 10., 11.]])))

        traj.f_store()

        traj2 = load_trajectory(filename=file_name,
                                name=traj_name,
                                dynamic_imports=[Brian2Result],
                                load_data=2)

        self.compare_trajectories(traj, traj2)
Example #12
def test_run():

    global filename

    np.random.seed()
    trajname = 'profiling'
    filename = make_temp_dir(os.path.join('hdf5', 'test%s.hdf5' % trajname))

    env = Environment(trajectory=trajname,
                      filename=filename,
                      file_title=trajname,
                      log_stdout=False,
                      results_per_run=5,
                      derived_parameters_per_run=5,
                      multiproc=False,
                      ncores=1,
                      wrap_mode='LOCK',
                      use_pool=False,
                      overwrite_file=True)

    traj = env.v_trajectory

    traj.v_standard_parameter = Parameter

    ## Create some parameters
    param_dict = {}
    create_param_dict(param_dict)
    ### Add some parameter:
    add_params(traj, param_dict)

    traj.f_add_parameter('TEST', 'test_run')
    ### Explore
    explore(traj)

    ### Make a test run
    simple_arg = -13
    simple_kwarg = 13.0
    env.f_run(simple_calculations, simple_arg, simple_kwarg=simple_kwarg)

    size = os.path.getsize(filename)
    size_in_mb = size / 1000000.
    print('Size is %sMB' % str(size_in_mb))
Example #13
def main():
    """Main function to protect the *entry point* of the program.

    If you want to use multiprocessing with SCOOP you need to wrap your
    main code creating an environment into a function. Otherwise
    the newly started child processes will re-execute the code and throw
    errors (also see http://scoop.readthedocs.org/en/latest/usage.html#pitfalls).

    """

    # Create an environment that handles running.
    # Let's enable multiprocessing with scoop:
    filename = os.path.join('hdf5', 'example_21.hdf5')
    env = Environment(trajectory='Example_21_SCOOP',
                      filename=filename,
                      file_title='Example_21_SCOOP',
                      log_stdout=True,
                      comment='Multiprocessing example using SCOOP!',
                      multiproc=True,
                      freeze_input=True, # We want to save overhead and freeze input
                      use_scoop=True, # Yes we want SCOOP!
                      wrap_mode=pypetconstants.WRAP_MODE_LOCAL,  # SCOOP only works with 'LOCAL'
                      # or 'NETLOCK' wrapping
                      overwrite_file=True)

    # Get the trajectory from the environment
    traj = env.trajectory

    # Add both parameters
    traj.f_add_parameter('x', 1.0, comment='I am the first dimension!')
    traj.f_add_parameter('y', 1.0, comment='I am the second dimension!')

    # Explore the parameters with a cartesian product, but we want to explore a bit more
    traj.f_explore(cartesian_product({'x':[float(x) for x in range(20)],
                                      'y':[float(y) for y in range(20)]}))
    # Run the simulation
    env.run(multiply)

    # Let's check that all runs are completed!
    assert traj.f_is_completed()

    # Finally disable logging and close all log-files
    env.disable_logging()
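Note that a SCOOP script is launched through the scoop module rather than executed directly, e.g. (script name assumed):

python -m scoop example_21.py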
Example #14
def main():
    filename = os.path.join('hdf5', 'FiringRate.hdf5')
    env = Environment(
        trajectory='FiringRatePipeline',
        comment='Experiment to measure the firing rate '
        'of a leaky integrate and fire neuron. '
        'Exploring different input currents, '
        'as well as refractory periods',
        add_time=False,  # We don't want to add the current time to the name,
        log_stdout=True,
        multiproc=True,
        ncores=2,  # My laptop has 2 cores ;-)
        filename=filename,
        overwrite_file=True)

    env.pipeline(mypipeline)

    # Finally disable logging and close all log-files
    env.disable_logging()
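The pipeline function mypipeline is defined elsewhere. Following pypet's pipeline contract, it adds parameters and the exploration itself and returns the run function and an optional postprocessing handler, each with args and kwargs; a self-contained sketch with toy stand-ins:

from pypet import cartesian_product

def run_neuron(traj):
    # Toy stand-in for the LIF simulation
    traj.f_add_result('firing_rate', traj.I / traj.ref, comment='toy rate')

def neuron_postproc(traj, result_list):
    # Toy stand-in for collecting the per-run results
    print('%d runs finished' % len(result_list))

def mypipeline(traj):
    # A pipeline adds parameters and the exploration itself ...
    traj.f_add_parameter('I', 1.0, comment='Input current')
    traj.f_add_parameter('ref', 5.0, comment='Refractory period (ms)')
    traj.f_explore(cartesian_product({'I': [0.5, 1.0], 'ref': [3.0, 5.0]}))
    # ... and hands back the run function plus postprocessing
    return (run_neuron, (), {}), (neuron_postproc, (), {})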
Example #15
    def test_overwrite_annotations_and_results(self):

        filename = make_temp_dir('overwrite.hdf5')

        env = Environment(trajectory='testoverwrite',
                          filename=filename,
                          log_config=get_log_config(),
                          overwrite_file=True)

        traj = env.v_traj

        traj.f_add_parameter('grp.x', 5, comment='hi')
        traj.grp.v_comment = 'hi'
        traj.grp.v_annotations['a'] = 'b'

        traj.f_store()

        traj.f_remove_child('parameters', recursive=True)

        traj.f_load(load_data=2)

        self.assertTrue(traj.x == 5)
        self.assertTrue(traj.grp.v_comment == 'hi')
        self.assertTrue(traj.grp.v_annotations['a'] == 'b')

        traj.f_get('x').f_unlock()
        traj.grp.x = 22
        traj.f_get('x').v_comment = 'hu'
        traj.grp.v_annotations['a'] = 'c'
        traj.grp.v_comment = 'hu'

        traj.f_store_item(traj.f_get('x'), store_data=3)
        traj.f_store_item(traj.grp, store_data=3)

        traj.f_remove_child('parameters', recursive=True)

        traj.f_load(load_data=2)

        self.assertTrue(traj.x == 22)
        self.assertTrue(traj.grp.v_comment == 'hu')
        self.assertTrue(traj.grp.v_annotations['a'] == 'c')

        env.f_disable_logging()
Example #16
    def test_net(self):
        env = Environment(
            trajectory='Test_' + repr(time.time()).replace('.', '_'),
            filename=make_temp_dir(
                os.path.join('experiments', 'tests', 'briantests', 'HDF5',
                             'briantest.hdf5')),
            file_title='test',
            log_config=get_log_config(),
            dynamic_imports=[
                'pypet.brian2.parameter.Brian2Parameter', Brian2MonitorResult
            ],
            multiproc=False)
        traj = env.v_traj
        traj.f_add_parameter(Brian2Parameter,
                             'v0',
                             0.0 * mV,
                             comment='Input bias')
        traj.f_explore({'v0': [11 * mV, 13 * mV, 15 * mV]})
        env.f_run(run_network)
        self.get_data(traj)
Example #17
def main():
    # Create an environment that handles running
    filename = os.path.join('hdf5', 'example_18.hdf5')
    env = Environment(trajectory='Multiplication',
                      filename=filename,
                      file_title='Example_18_Many_Runs',
                      overwrite_file=True,
                      comment='Contains many runs',
                      multiproc=True,
                      use_pool=True,
                      freeze_input=True,
                      ncores=2,
                      wrap_mode='QUEUE')

    # The environment has created a trajectory container for us
    traj = env.trajectory

    # Add both parameters
    traj.f_add_parameter('x', 1, comment='I am the first dimension!')
    traj.f_add_parameter('y', 1, comment='I am the second dimension!')

    # Explore the parameters with a cartesian product, yielding 2500 runs
    traj.f_explore(cartesian_product({'x': range(50), 'y': range(50)}))

    # Run the simulation
    env.run(multiply)

    # Disable logging
    env.disable_logging()

    # turn auto loading on, since results have not been loaded, yet
    traj.v_auto_load = True
    # Use the `v_idx` functionality
    traj.v_idx = 2042
    print('The result of run %d is: ' % traj.v_idx)
    # Now we can rely on the wildcards
    print(traj.res.crunset.crun.z)
    traj.v_idx = -1
    # Or we can use the shortcuts `rts_X` (run to set) and `r_X` to get particular results
    print('The result of run %d is: ' % 2044)
    print(traj.res.rts_2044.r_2044.z)
Example #18
def main():
    # Create an environment that handles running
    filename = os.path.join('hdf5', 'example_12.hdf5')
    env = Environment(
        trajectory='Multiplication',
        filename=filename,
        file_title='Example_12_Sharing_Data',
        overwrite_file=True,
        comment='The first example!',
        continuable=False,  # We have shared data in terms of a
        # multiprocessing list, so we CANNOT use the continue feature.
        multiproc=True,
        ncores=2)

    # The environment has created a trajectory container for us
    traj = env.trajectory

    # Add both parameters
    traj.f_add_parameter('x', 1, comment='I am the first dimension!')
    traj.f_add_parameter('y', 1, comment='I am the second dimension!')

    # Explore the parameters with a cartesian product
    traj.f_explore(cartesian_product({'x': [1, 2, 3, 4], 'y': [6, 7, 8]}))

    # We want a shared list to put all our results in; we use a manager for this:
    result_list = mp.Manager().list()
    # Let's make some space for potential results
    result_list[:] = [0 for _dummy in range(len(traj))]

    # Run the simulation
    env.run(multiply, result_list)

    # Now we want to store the final list as numpy array
    traj.f_add_result('z', np.array(result_list))

    # Finally let's print the result to see that it worked
    print(traj.z)

    #Disable logging and close all log-files
    env.disable_logging()
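For reference, env.run(multiply, result_list) forwards the extra argument to the run function after the trajectory. A sketch of a matching multiply, assuming each run writes into its own slot of the shared list:

def multiply(traj, result_list):
    z = traj.x * traj.y
    result_list[traj.v_idx] = z  # v_idx is the index of the current run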
Example #19
def main(inputargs=None):
    if inputargs is None:
        inputargs = sys.argv[1:] if len(sys.argv) > 1 else ""
    args = docopt(__doc__, argv=inputargs)
    wavpath = path.join(modulePath, "resources", "tone_in_noise")
    stimuli = [path.join(wavpath, i) for i in glob.glob(path.join(wavpath, "*.wav"))]
    outfile = path.realpath(path.expanduser(args["--out"]))
    env = Environment(trajectory='tone-in-noise',
                      filename=outfile,
                      overwrite_file=True,
                      file_title="Tone in noise at different SNR",
                      comment="some comment",
                      large_overview_tables=False,  # a bare string "False" would be truthy
                      # freeze_input=True,
                      # use_pool=True,
                      multiproc=True,
                      ncores=3,
                      graceful_exit=True,
                      #wrap_mode=pypetconstants.WRAP_MODE_QUEUE,
                      )

    traj = env.trajectory
    traj.f_add_parameter('periphery', 'verhulst', comment="which periphery was used")
    traj.f_add_parameter('brainstem', 'nelsoncarney04', comment="which brainstem model was used")
    traj.f_add_parameter('weighting', "--no-cf-weighting ", comment="weighted CFs")
    traj.f_add_parameter('wavfile', '', comment="Which wav file to run")
    traj.f_add_parameter('level', 80, comment="stimulus level, spl")
    traj.f_add_parameter('neuropathy', "none", comment="")

    parameter_dict = {
        "periphery" : ['verhulst', 'zilany'],
        "brainstem" : ['nelsoncarney04', 'carney2015'],
        "weighting" : [cf_weighting, ""],
        "wavfile"   : stimuli,
        "level"     : [80],
        "neuropathy": ["none", "moderate", "severe", "ls-moderate", "ls-severe"]
    }

    traj.f_explore(cartesian_product(parameter_dict))
    env.run(tone_in_noise)
    return 0
Example #20
def main():
    filename = os.path.join('tmp', 'hdf5', 'many_runs.hdf5')
    with Environment(filename=filename,
                     log_levels=0,
                     report_progress=(2, 'progress', 50),
                     overwrite_file=True) as env:

        traj = env.v_traj

        traj.par.x = BrianParameter('', 0 * ms, 'parameter')

        traj.f_explore({'x': [x * ms for x in range(1000)]})

        traj.f_store()

        # env.f_run(job)

        dicts = [traj.f_get_run_information(x) for x in range(len(traj))]
        runtimes = [
            dic['finish_timestamp'] - dic['timestamp'] for dic in dicts
        ]
Example #21
    def setUp(self):
        self.set_mode()
        self.logfolder = make_temp_dir(os.path.join('experiments',
                                                    'tests',
                                                    'Log'))

        random.seed()
        self.trajname = make_trajectory_name(self)
        self.filename = make_temp_dir(os.path.join('experiments',
                                                   'tests',
                                                   'HDF5',
                                                   'test%s.hdf5' % self.trajname))

        env = Environment(trajectory=self.trajname, filename=self.filename,
                          file_title=self.trajname,
                          log_stdout=False,
                          port=self.url,
                          log_config=get_log_config(),
                          results_per_run=5,
                          derived_parameters_per_run=5,
                          multiproc=self.multiproc,
                          ncores=self.ncores,
                          wrap_mode=self.mode,
                          use_pool=self.use_pool,
                          fletcher32=self.fletcher32,
                          complevel=self.complevel,
                          complib=self.complib,
                          shuffle=self.shuffle,
                          pandas_append=self.pandas_append,
                          pandas_format=self.pandas_format,
                          encoding=self.encoding)

        traj = env.v_trajectory

        self.param_dict = {}
        create_param_dict(self.param_dict)
        add_params(traj, self.param_dict)

        self.traj = traj
        self.env = env
Example #22
def main():

    filename = os.path.join('hdf5', 'Clustered_Network.hdf5')
    # If we pass a filename to the trajectory a new HDF5StorageService will
    # be automatically created
    traj = Trajectory(filename=filename,
                      dynamically_imported_classes=[
                          BrianDurationParameter, BrianMonitorResult,
                          BrianParameter
                      ])

    # Let's create a fake environment to enable logging:
    env = Environment(traj, do_single_runs=False)

    # Load the trajectory, but only load the skeleton of the results
    traj.f_load(index=-1,
                load_parameters=2,
                load_derived_parameters=2,
                load_results=1)

    # Find the result instances related to the fano factor
    fano_dict = traj.f_get_from_runs('mean_fano_factor', fast_access=False)

    # Load the data of the fano factor results
    ffs = fano_dict.values()
    traj.f_load_items(ffs)

    # Extract the fano factor values and the R_ee value of each run
    ffs_values = [x.f_get() for x in ffs]
    Rees = traj.f_get('R_ee').f_get_range()

    # Plot average fano factor as a function of R_ee
    plt.plot(Rees, ffs_values)
    plt.xlabel('R_ee')
    plt.ylabel('Avg. Fano Factor')
    plt.show()

    # Finally disable logging and close all log-files
    env.f_disable_logging()
Example #23
    def test_hdf5_store_load_parameter(self):
        traj_name = make_trajectory_name(self)
        file_name = make_temp_dir(
            os.path.join('brian2', 'tests', 'hdf5',
                         'test_%s.hdf5' % traj_name))
        env = Environment(trajectory=traj_name,
                          filename=file_name,
                          log_config=get_log_config(),
                          dynamic_imports=[Brian2Parameter],
                          add_time=False,
                          storage_service=HDF5StorageService)
        traj = env.v_trajectory
        traj.v_standard_parameter = Brian2Parameter
        traj.f_add_parameter('brian2.single.millivolts',
                             10 * mvolt,
                             comment='single value')

        #traj.f_add_parameter('brian2.array.millivolts', [11, 12]*mvolt, comment='array')
        #traj.f_add_parameter('mV1', 42.0*mV)
        #traj.f_add_parameter('ampere1', 1*mA)
        #traj.f_add_parameter('integer', 16)
        #traj.f_add_parameter('kHz05', 0.5*kHz)
        #traj.f_add_parameter('nested_array', np.array([[6.,7.,8.],[9.,10.,11.]]) * ms)
        #traj.f_add_parameter('b2a', np.array([1., 2.]) * mV)

        # We also need to check if explorations work with hdf5 store!
        #explore_dict = {'ampere1': [1*mA, 2*mA, 3*mA],
        #                'integer': [42,43,44],
        #                'b2a': [np.array([1., 2.]) * mV, np.array([1., 4.]) * mV,
        #                       np.array([1., 2.]) * mV]}
        #traj.f_explore(explore_dict)

        traj.f_store()

        traj2 = load_trajectory(filename=file_name,
                                name=traj_name,
                                dynamic_imports=[Brian2Parameter],
                                load_data=2)
        self.compare_trajectories(traj, traj2)
Example #24
def get_runtime(length):
    filename = os.path.join('tmp', 'hdf5', 'many_runs.hdf5')

    with Environment(
            filename=filename,
            log_levels=50,
            report_progress=(0.0002, 'progress', 50),
            overwrite_file=True,
            purge_duplicate_comments=False,
            log_stdout=False,
            multiproc=False,
            ncores=2,
            use_pool=True,
            wrap_mode='PIPE',  #freeze_input=True,
            summary_tables=False,
            small_overview_tables=False) as env:

        traj = env.v_traj

        traj.par.f_apar('x', 0, 'parameter')

        traj.f_explore({'x': range(length)})

        # traj.v_full_copy = False

        max_run = 1000

        for idx in range(len(traj)):
            if idx > max_run:
                traj.f_get_run_information(idx, copy=False)['completed'] = 1
        start = time.time()
        env.f_run(job)
        end = time.time()
        # dicts = [traj.f_get_run_information(x) for x in range(min(len(traj), max_run))]
    total = end - start
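    # Average wall time per executed run (runs beyond max_run were marked
    # completed above and are therefore skipped), plus the projected total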
    avg_per_run = total / float(min(len(traj), max_run))
    return avg_per_run, avg_per_run * len(traj)
Example #25
    def test_maximum_overview_size(self):

        filename = make_temp_dir('maxisze.hdf5')

        env = Environment(trajectory='Testmigrate',
                          filename=filename,
                          log_config=get_log_config(),
                          add_time=True)

        traj = env.v_trajectory
        for irun in range(pypetconstants.HDF5_MAX_OVERVIEW_TABLE_LENGTH):
            traj.f_add_parameter('f%d.x' % irun, 5)

        traj.f_store()

        store = ptcompat.open_file(filename, mode='r+')
        table = ptcompat.get_child(store.root,
                                   traj.v_name).overview.parameters_overview
        self.assertEqual(table.nrows,
                         pypetconstants.HDF5_MAX_OVERVIEW_TABLE_LENGTH)
        store.close()

        for irun in range(pypetconstants.HDF5_MAX_OVERVIEW_TABLE_LENGTH,
                          2 * pypetconstants.HDF5_MAX_OVERVIEW_TABLE_LENGTH):
            traj.f_add_parameter('f%d.x' % irun, 5)

        traj.f_store()

        store = ptcompat.open_file(filename, mode='r+')
        table = ptcompat.get_child(store.root,
                                   traj.v_name).overview.parameters_overview
        self.assertEqual(table.nrows,
                         pypetconstants.HDF5_MAX_OVERVIEW_TABLE_LENGTH)
        store.close()

        env.f_disable_logging()
Example #26
def main():
    name = 'LTL-MDP-CE_6_8_TD1_New'
    try:
        with open('path.conf') as f:
            root_dir_path = f.read().strip()
    except FileNotFoundError:
        raise FileNotFoundError(
            "You have not set the root path to store your results."
            " Write the path to a path.conf text file in the bin directory"
            " before running the simulation")
    paths = Paths(name, dict(run_no='test'), root_dir_path=root_dir_path)

    print("All output logs can be found in directory ", paths.logs_path)

    traj_file = os.path.join(paths.output_dir_path, 'data.h5')

    # Create an environment that handles running our simulation
    # This initializes a PyPet environment
    env = Environment(
        trajectory=name,
        filename=traj_file,
        file_title=u'{} data'.format(name),
        comment=u'{} data'.format(name),
        add_time=True,
        freeze_input=True,
        multiproc=True,
        use_scoop=True,
        wrap_mode=pypetconstants.WRAP_MODE_LOCAL,
        automatic_storing=True,
        log_stdout=False,  # Sends stdout to logs
        log_folder=os.path.join(paths.output_dir_path, 'logs'))
    create_shared_logger_data(logger_names=['bin', 'optimizers'],
                              log_levels=['INFO', 'INFO'],
                              log_to_consoles=[True, True],
                              sim_name=name,
                              log_directory=paths.logs_path)
    configure_loggers()

    # Get the trajectory from the environment
    traj = env.trajectory

    # NOTE: Benchmark function
    optimizee = StateActionOptimizee(traj)

    # NOTE: Outerloop optimizer initialization
    # TODO: Change the optimizer to the appropriate Optimizer class
    parameters = CrossEntropyParameters(pop_size=75,
                                        rho=0.2,
                                        smoothing=0.0,
                                        temp_decay=0,
                                        n_iteration=75,
                                        distribution=NoisyGaussian(
                                            noise_magnitude=1,
                                            noise_decay=0.95),
                                        stop_criterion=np.inf,
                                        seed=102)
    optimizer = CrossEntropyOptimizer(
        traj,
        optimizee_create_individual=optimizee.create_individual,
        optimizee_fitness_weights=(-1., ),
        parameters=parameters,
        optimizee_bounding_func=optimizee.bounding_func)

    # Add post processing
    env.add_postprocessing(optimizer.post_process)

    # Add Recorder
    recorder = Recorder(trajectory=traj,
                        optimizee_name='SNN StateAction',
                        optimizee_parameters=['gamma', 'eta'],
                        optimizer_name=optimizer.__class__.__name__,
                        optimizer_parameters=optimizer.get_params())
    recorder.start()

    # Run the simulation with all parameter combinations
    env.run(optimizee.simulate)

    # NOTE: Outerloop optimizer end
    optimizer.end(traj)
    recorder.end()

    # Finally disable logging and close all log-files
    env.disable_logging()
Example #27
# Let's reuse the simple multiplication example
def multiply(traj):
    """Sophisticated simulation of multiplication"""
    z = traj.x * traj.y
    traj.f_add_result(
        'z',
        z=z,
        comment='I am the product of two reals!',
    )


# Create 2 environments that handle running
filename = os.path.join('hdf5', 'example_03.hdf5')
env1 = Environment(
    trajectory='Traj1',
    filename=filename,
    file_title='Example_03',
    add_time=True,  # Add the time of trajectory creation to its name
    comment='I will be increased!')

env2 = Environment(
    trajectory='Traj2',
    filename=filename,
    file_title='Example_03',
    log_config=None,  # One environment keeping log files
    # is enough
    add_time=True,
    comment='I am going to be merged into some other trajectory!')

# Get the trajectories from the environment
traj1 = env1.trajectory
traj2 = env2.trajectory
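The comments suggest traj2 is later merged into traj1. A sketch of how this typically continues, reusing multiply from above and assuming cartesian_product is imported from pypet (parameter values assumed):

for traj in (traj1, traj2):
    traj.f_add_parameter('x', 1, comment='First dimension')
    traj.f_add_parameter('y', 1, comment='Second dimension')
traj1.f_explore(cartesian_product({'x': [1, 2], 'y': [3, 4]}))
traj2.f_explore(cartesian_product({'x': [2, 3], 'y': [4, 5]}))
env1.run(multiply)
env2.run(multiply)
# Merge the second trajectory into the first, dropping duplicate runs
traj1.f_merge(traj2, remove_duplicates=True)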
Example #28
filename = os.path.join(os.getcwd(), 'data/', name + '.hdf5')

# if not the first run, tr2 will be merged later
label = 'tr1'

# if only post processing, can't use the same label
# (generates HDF5 error)
if args.postprocess:
    label += '_postprocess-%.6d' % random.randint(0, 999999)

env = Environment(
    trajectory=label,
    add_time=False,
    filename=filename,
    continuable=False,  # ??
    lazy_debug=False,  # ??
    multiproc=True,
    ncores=ncores,
    use_pool=False,  # likely not working w/ brian2
    wrap_mode='QUEUE',  # ??
    overwrite_file=False)

tr = env.trajectory

add_params(tr)

if not args.testrun:
    tr.f_add_parameter('mconfig.git.sha1', str(commit))
    tr.f_add_parameter('mconfig.git.message', commit.message)

tr.f_explore(explore_dict)
Example #29
def main(path_name, resolution, fixed_delay, state_handling, use_pecevski,
         num_trials):
    name = path_name
    try:
        with open('bin/path.conf') as f:
            root_dir_path = f.read().strip()
    except FileNotFoundError:
        raise FileNotFoundError(
            "You have not set the root path to store your results."
            " Write the path to a path.conf text file in the bin directory"
            " before running the simulation")
    paths = Paths(name, dict(run_no='test'), root_dir_path=root_dir_path)

    traj_file = os.path.join(paths.output_dir_path, 'data.h5')

    # Create an environment that handles running our simulation
    # This initializes a PyPet environment
    env = Environment(
        trajectory=name,
        filename=traj_file,
        file_title='{} data'.format(name),
        comment='{} data'.format(name),
        add_time=True,
        automatic_storing=True,
        use_scoop=True,
        multiproc=True,
        wrap_mode=pypetconstants.WRAP_MODE_LOCAL,
        log_stdout=False,  # Sends stdout to logs
    )

    create_shared_logger_data(logger_names=['bin', 'optimizers'],
                              log_levels=['INFO', 'INFO'],
                              log_to_consoles=[True, True],
                              sim_name=name,
                              log_directory=paths.logs_path)
    configure_loggers()

    # Get the trajectory from the environment
    traj = env.trajectory

    # NOTE: Innerloop simulator
    optimizee = SAMGraphOptimizee(traj,
                                  n_NEST_threads=1,
                                  time_resolution=resolution,
                                  fixed_delay=fixed_delay,
                                  use_pecevski=use_pecevski,
                                  state_handling=state_handling,
                                  plots_directory=paths.output_dir_path,
                                  num_fitness_trials=num_trials)

    # Get bounds for mu and sigma calculation.
    param_spec = OrderedDict(sorted(SAMGraph.parameter_spec(4).items()))
    names = [k for k, _ in param_spec.items()]
    mu = np.array([(v_min + v_max) / 2
                   for k, (v_min, v_max) in param_spec.items()])
    sigma = np.array([(v_max - v_min) / 2
                      for k, (v_min, v_max) in param_spec.items()])

    print("Using means: {}\nUsing stds: {}".format(dict(zip(names, mu)),
                                                   dict(zip(names, sigma))))

    # NOTE: Outerloop optimizer initialization
    parameters = NaturalEvolutionStrategiesParameters(
        seed=0,
        pop_size=96,
        n_iteration=40,
        learning_rate_sigma=0.5,
        learning_rate_mu=0.5,
        mu=mu,
        sigma=sigma,
        mirrored_sampling_enabled=True,
        fitness_shaping_enabled=True,
        stop_criterion=np.inf)

    optimizer = NaturalEvolutionStrategiesOptimizer(
        traj,
        optimizee_create_individual=optimizee.create_individual,
        optimizee_fitness_weights=(-1.0, ),
        parameters=parameters,
        optimizee_bounding_func=optimizee.bounding_func,
        fitness_plot_name=path_name)

    # Add post processing
    env.add_postprocessing(optimizer.post_process)

    # Run the simulation with all parameter combinations
    env.run(optimizee.simulate)

    # NOTE: Outerloop optimizer end
    optimizer.end(traj)

    # Finally disable logging and close all log-files
    env.disable_logging()
Example #30
def main(dependent, optimizer):
    opt = optimizer.upper()
    identifier = '{:05x}'.format(np.random.randint(16**5))
    print('Identifier: ' + identifier)
    allocated_id = '07'  # dls.get_allocated_board_ids()[0]
    board_calibration_map = {
        'B291698': {
            'dac': 'dac_default.json',
            'cap': 'cap_mem_29.json'
        },
        '07': {
            'dac': 'dac_07_chip_20.json',
            'cap': 'calibration_20.json'
        },
        'B201319': {
            'dac': 'dac_B201319_chip_21.json',
            'cap': 'calibration_24.json'
        },
        'B201330': {
            'dac': 'dac_B201330_chip_22.json',
            'cap': 'calibration_22.json'
        }
    }

    dep_name = 'DEP' if dependent else 'IND'
    name = 'MAB_ANN_{}_{}_{}'.format(identifier, opt, dep_name)
    root_dir_path = os.path.expanduser('~/simulations')
    paths = Paths(name, dict(run_no=u'test'), root_dir_path=root_dir_path)

    with open(os.path.expanduser('~/LTL/bin/logging.yaml')) as f:
        l_dict = yaml.safe_load(f)
        log_output_file = os.path.join(paths.results_path,
                                       l_dict['handlers']['file']['filename'])
        l_dict['handlers']['file']['filename'] = log_output_file
        logging.config.dictConfig(l_dict)

    print("All output logs can be found in directory " + str(paths.logs_path))

    traj_file = os.path.join(paths.output_dir_path, u'data.h5')

    # Create an environment that handles running our simulation
    # This initializes a PyPet environment
    env = Environment(
        trajectory=name,
        filename=traj_file,
        file_title=u'{} data'.format(name),
        comment=u'{} data'.format(name),
        add_time=True,
        # freeze_input=True,
        # multiproc=True,
        # use_scoop=True,
        wrap_mode=pypetconstants.WRAP_MODE_LOCK,
        automatic_storing=True,
        log_stdout=False,  # Sends stdout to logs
        log_folder=os.path.join(paths.output_dir_path, 'logs'))
    create_shared_logger_data(logger_names=['bin', 'optimizers', 'optimizees'],
                              log_levels=['INFO', 'INFO', 'INFO'],
                              log_to_consoles=[True, True, True],
                              sim_name=name,
                              log_directory=paths.logs_path)
    configure_loggers()

    # Get the trajectory from the environment
    traj = env.trajectory

    optimizee_seed = 100

    with open('../adv/' + board_calibration_map[allocated_id]['cap']) as f:
        calibrated_config = json.load(f)
    with open('../adv/' + board_calibration_map[allocated_id]['dac']) as f:
        dac_config = json.load(f)

    class Dummy(object):
        def __init__(self, connector):
            self.connector = connector

        def __enter__(self):
            return self.connector

        def __exit__(self, exc_type, exc_val, exc_tb):
            pass

    class Mgr(object):
        def __init__(self):
            self.connector = None

        def establish(self):
            return Dummy(self.connector)

    max_learning_rate = 1.

    mgr = Mgr()
    optimizee_parameters = \
        BanditParameters(n_arms=2, n_pulls=100, n_samples=40, seed=optimizee_seed,
                         max_learning_rate=max_learning_rate, learning_rule=ANNLearningRule,
                         establish_connection=mgr.establish)
    optimizee = BanditOptimizee(traj, optimizee_parameters, dp=dependent)

    # Add post processing
    optimizer = None
    pop_size = 200
    n_iteration = 60
    if opt == 'CE':
        ce_optimizer_parameters = CrossEntropyParameters(
            pop_size=pop_size,
            rho=0.06,
            smoothing=0.3,
            temp_decay=0,
            n_iteration=n_iteration,
            distribution=NoisyGaussian(noise_magnitude=.2, noise_decay=.925),
            #Gaussian(),#NoisyGaussian(noise_magnitude=1., noise_decay=0.99),
            stop_criterion=np.inf,
            seed=102)
        ce_optimizer = CrossEntropyOptimizer(
            traj,
            optimizee_create_individual=optimizee.create_individual,
            optimizee_fitness_weights=(1, ),
            parameters=ce_optimizer_parameters,
            optimizee_bounding_func=optimizee.bounding_func)
        optimizer = ce_optimizer
    elif opt == 'ES':
        es_optimizer_parameters = EvolutionStrategiesParameters(
            learning_rate=1.8,
            learning_rate_decay=.93,
            noise_std=.03,
            mirrored_sampling_enabled=True,
            fitness_shaping_enabled=True,
            pop_size=int(pop_size / 2),
            n_iteration=n_iteration,
            stop_criterion=np.inf,
            seed=102)
        optimizer = EvolutionStrategiesOptimizer(traj,
                                                 optimizee.create_individual,
                                                 (1, ),
                                                 es_optimizer_parameters,
                                                 optimizee.bounding_func)
    elif opt == 'GD':
        gd_parameters = ClassicGDParameters(learning_rate=.003,
                                            exploration_step_size=.1,
                                            n_random_steps=pop_size,
                                            n_iteration=n_iteration,
                                            stop_criterion=np.inf,
                                            seed=102)
        optimizer = GradientDescentOptimizer(traj, optimizee.create_individual,
                                             (1, ), gd_parameters,
                                             optimizee.bounding_func)
    elif opt == 'SA':
        sa_parameters = SimulatedAnnealingParameters(
            n_parallel_runs=pop_size,
            noisy_step=.1,
            temp_decay=.9,
            n_iteration=n_iteration,
            stop_criterion=np.inf,
            seed=102,
            cooling_schedule=AvailableCoolingSchedules.EXPONENTIAL_ADDAPTIVE)
        optimizer = SimulatedAnnealingOptimizer(traj,
                                                optimizee.create_individual,
                                                (1, ), sa_parameters,
                                                optimizee.bounding_func)
    elif opt == 'GS':
        n_grid_points = 5
        gs_optimizer_parameters = GridSearchParameters(
            param_grid={
                'weight_prior': (0, 1, n_grid_points),
                'learning_rate': (0, 1, n_grid_points),
                'stim_inhibition': (0, 1, n_grid_points),
                'action_inhibition': (0, 1, n_grid_points),
                'learning_rate_decay': (0, 1, n_grid_points)
            })
        gs_optimizer = GridSearchOptimizer(
            traj,
            optimizee_create_individual=optimizee.create_individual,
            optimizee_fitness_weights=(1, ),
            parameters=gs_optimizer_parameters)
        optimizer = gs_optimizer
    else:
        exit(1)
    env.add_postprocessing(optimizer.post_process)

    # Add Recorder
    recorder = Recorder(trajectory=traj,
                        optimizee_name='MAB',
                        optimizee_parameters=optimizee_parameters,
                        optimizer_name=optimizer.__class__.__name__,
                        optimizer_parameters=optimizer.get_params())
    recorder.start()

    # Run the simulation with all parameter combinations
    # optimizee.simulate(traj)
    # exit(0)
    with Connector(calibrated_config, dac_config, 3) as connector:
        mgr.connector = connector
        env.run(optimizee.simulate)
    mgr.connector.disconnect()

    ## Outerloop optimizer end
    optimizer.end(traj)
    recorder.end()

    # Finally disable logging and close all log-files
    env.disable_logging()