Example #1
    def test_without_pre_run(self):
        runner = NetworkRunner()
        components = [TheNeurons(), TheConnection(), TheSimulation()]
        analyser = [TheMonitors()]
        nm = NetworkManager(network_runner=runner, component_list=components,
                            analyser_list=analyser)


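        # Build the environment: results go to a temporary HDF5 file and the
        # exploration is run with multiprocessing on two cores.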
        env = Environment(trajectory='Test_'+repr(time.time()).replace('.','_'),
                          filename=make_temp_dir(os.path.join(
                              'experiments',
                              'tests',
                              'briantests',
                              'HDF5',
                               'briantest.hdf5')),
                          file_title='test',
                          log_config=get_log_config(),
                          dynamic_imports=['pypet.brian2.parameter.BrianParameter',
                                                        BrianMonitorResult, BrianResult],
                          multiproc=True,
                          ncores=2)
        traj = env.v_traj
        nm.add_parameters(traj)
        traj.f_explore({'v01': [11*mV, 13*mV]})
        env.f_run(nm.run_network)
        self.check_data(traj)
Example #2
File: plotff.py Project: nigroup/pypet
def main():

    filename = os.path.join('hdf5', 'Clustered_Network.hdf5')
    # If we pass a filename to the trajectory a new HDF5StorageService will
    # be automatically created
    traj = Trajectory(filename=filename,
                    dynamically_imported_classes=[BrianMonitorResult,
                                                  BrianParameter])

    # Let's create a fake environment to enable logging:
    env = Environment(traj, do_single_runs=False)


    # Load the trajectory, but only load the skeleton of the results
    traj.f_load(index=-1, load_parameters=2, load_derived_parameters=2, load_results=1)

    # Find the result instances related to the fano factor
    fano_dict = traj.f_get_from_runs('mean_fano_factor', fast_access=False)

    # Load the data of the fano factor results
    ffs = fano_dict.values()
    traj.f_load_items(ffs)

    # Extract the fano factor values and the R_ee values for each run
    ffs_values = [x.f_get() for x in ffs]
    Rees = traj.f_get('R_ee').f_get_range()

    # Plot average fano factor as a function of R_ee
    plt.plot(Rees, ffs_values)
    plt.xlabel('R_ee')
    plt.ylabel('Avg. Fano Factor')
    plt.show()

    # Finally disable logging and close all log-files
    env.disable_logging()
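
For orientation, `f_get_from_runs` above returns an ordered dictionary mapping run names to the located result instances. A quick sanity check on `fano_dict` (a sketch, reusing the names from `main()`):

    for run_name, ff_result in fano_dict.items():
        print(run_name, ff_result.v_full_name)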
Example #3
    def test_logging_stdout(self):
        filename = 'teststdoutlog.hdf5'
        filename = make_temp_dir(filename)
        folder = make_temp_dir('logs')
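        # log_stdout=('STDOUT', 50) redirects everything printed to stdout into a
        # logger named 'STDOUT' at level 50 (CRITICAL), so it ends up in the log file.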
        env = Environment(trajectory=make_trajectory_name(self),
                          filename=filename, log_config=get_log_config(),
                          # log_levels=logging.CRITICAL, # needed for the test
                          log_stdout=('STDOUT', 50), #log_folder=folder
                          )

        env.f_run(log_error)
        traj = env.v_traj
        path = get_log_path(traj)

        mainstr = 'sTdOuTLoGGinG'
        print(mainstr)
        env.f_disable_logging()

        mainfilename = os.path.join(path, 'LOG.txt')
        with open(mainfilename, mode='r') as mainf:
            full_text = mainf.read()

        self.assertTrue(mainstr in full_text)
        self.assertTrue('4444444' not in full_text)
        self.assertTrue('DEBUG' not in full_text)
Example #4
def main():
    batch = get_batch()

    filename = 'saga_%s.hdf5' % str(batch)
    env = Environment(trajectory='Example_22_Euler_Integration_%s' % str(batch),
                      filename=filename,
                      file_title='Example_22_Euler_Integration',
                      comment='Go for Euler!',
                      overwrite_file=True,
                      multiproc=True,  # Yes we can use multiprocessing within each batch!
                      ncores=4)

    traj = env.trajectory
    trajectory_name = traj.v_name

    # 1st a) phase parameter addition
    add_parameters(traj)

    # 1st b) phase preparation
    # We will add the differential equation (well, its source code only) as a derived parameter
    traj.f_add_derived_parameter(FunctionParameter,'diff_eq', diff_lorenz,
                                 comment='Source code of our equation!')

    # explore the trajectory
    explore_batch(traj, batch)

    # 2nd phase let's run the experiment
    # We pass `euler_scheme` as our top-level simulation function and
    # the Lorenz equation 'diff_lorenz' as an additional argument
    env.run(euler_scheme, diff_lorenz)
Example #5
File: the_task.py Project: nigroup/pypet
def main():
    batch = get_batch()

    filename = 'saga_%s.hdf5' % str(batch)
    env = Environment(
        trajectory='Example_22_Euler_Integration_%s' % str(batch),
        filename=filename,
        file_title='Example_22_Euler_Integration',
        comment='Go for Euler!',
        overwrite_file=True,
        multiproc=True,  # Yes we can use multiprocessing within each batch!
        ncores=4)

    traj = env.trajectory
    trajectory_name = traj.v_name

    # 1st a) phase parameter addition
    add_parameters(traj)

    # 1st b) phase preparation
    # We will add the differential equation (well, its source code only) as a derived parameter
    traj.f_add_derived_parameter(FunctionParameter,
                                 'diff_eq',
                                 diff_lorenz,
                                 comment='Source code of our equation!')

    # explore the trajectory
    explore_batch(traj, batch)

    # 2nd phase let's run the experiment
    # We pass `euler_scheme` as our top-level simulation function and
    # the Lorenz equation 'diff_lorenz' as an additional argument
    env.run(euler_scheme, diff_lorenz)
Example #6
    def test_maximum_overview_size(self):

        filename = make_temp_dir('maxisze.hdf5')

        env = Environment(trajectory='Testmigrate', filename=filename,

                          log_config=get_log_config())

        traj = env.v_trajectory
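        # Exactly HDF5_MAX_OVERVIEW_TABLE_LENGTH parameters are added; the overview
        # table is capped at this length, which both assertions below check.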
        for irun in range(pypetconstants.HDF5_MAX_OVERVIEW_TABLE_LENGTH):
            traj.f_add_parameter('f%d.x' % irun, 5)

        traj.f_store()


        store = ptcompat.open_file(filename, mode='r+')
        table = ptcompat.get_child(store.root,traj.v_name).overview.parameters_overview
        self.assertEqual(table.nrows, pypetconstants.HDF5_MAX_OVERVIEW_TABLE_LENGTH)
        store.close()

        for irun in range(pypetconstants.HDF5_MAX_OVERVIEW_TABLE_LENGTH,
                  2*pypetconstants.HDF5_MAX_OVERVIEW_TABLE_LENGTH):
            traj.f_add_parameter('f%d.x' % irun, 5)

        traj.f_store()

        store = ptcompat.open_file(filename, mode='r+')
        table = ptcompat.get_child(store.root,traj.v_name).overview.parameters_overview
        self.assertEqual(table.nrows, pypetconstants.HDF5_MAX_OVERVIEW_TABLE_LENGTH)
        store.close()

        env.f_disable_logging()
Example #7
File: main.py Project: lsolanka/pypet
def main():

    env = Environment(trajectory='FiringRate',
                      comment='Experiment to measure the firing rate '
                            'of a leaky integrate and fire neuron. '
                            'Exploring different input currents, '
                            'as well as refractory periods',
                      add_time=False, # We don't want to add the current time to the name,
                      log_folder='./logs/',
                      log_level=logging.INFO,
                      log_stdout=True,
                      multiproc=True,
                      ncores=2, #My laptop has 2 cores ;-)
                      wrap_mode='QUEUE',
                      filename='./hdf5/', # We only pass a folder here, so the name is chosen
                      # automatically to be the same as the Trajectory
                      )

    traj = env.v_trajectory

    # Add parameters
    add_parameters(traj)

    # Let's explore
    add_exploration(traj)

    # Add the postprocessing function
    env.f_add_postprocessing(neuron_postproc)

    # Run the experiment
    env.f_run(run_neuron)
Example #8
    def test_without_pre_run(self):
        runner = NetworkRunner()
        components = [TheNeurons(), TheConnection(), TheSimulation()]
        analyser = [TheMonitors()]
        nm = NetworkManager(network_runner=runner,
                            component_list=components,
                            analyser_list=analyser)

        env = Environment(
            trajectory='Test_' + repr(time.time()).replace('.', '_'),
            filename=make_temp_dir(
                os.path.join('experiments', 'tests', 'briantests', 'HDF5',
                             'briantest.hdf5')),
            file_title='test',
            log_config=get_log_config(),
            dynamic_imports=[
                'pypet.brian2.parameter.BrianParameter', BrianMonitorResult,
                BrianResult
            ],
            multiproc=True,
            ncores=2)
        traj = env.v_traj
        nm.add_parameters(traj)
        traj.f_explore({'v01': [11 * mV, 13 * mV]})
        env.f_run(nm.run_network)
        self.check_data(traj)
Example #9
    def test_maximum_overview_size(self):

        filename = make_temp_dir('maxisze.hdf5')

        env = Environment(trajectory='Testmigrate', filename=filename,

                          log_config=get_log_config(), add_time=True)

        traj = env.v_trajectory
        for irun in range(pypetconstants.HDF5_MAX_OVERVIEW_TABLE_LENGTH):
            traj.f_add_parameter('f%d.x' % irun, 5)

        traj.f_store()


        store = pt.open_file(filename, mode='r+')
        table = store.root._f_get_child(traj.v_name).overview.parameters_overview
        self.assertEqual(table.nrows, pypetconstants.HDF5_MAX_OVERVIEW_TABLE_LENGTH)
        store.close()

        for irun in range(pypetconstants.HDF5_MAX_OVERVIEW_TABLE_LENGTH,
                  2*pypetconstants.HDF5_MAX_OVERVIEW_TABLE_LENGTH):
            traj.f_add_parameter('f%d.x' % irun, 5)

        traj.f_store()

        store = pt.open_file(filename, mode='r+')
        table = store.root._f_get_child(traj.v_name).overview.parameters_overview
        self.assertEqual(table.nrows, pypetconstants.HDF5_MAX_OVERVIEW_TABLE_LENGTH)
        store.close()

        env.f_disable_logging()
Example #10
def main():
    """ Main *boilerplate* function to start simulation """
    # Now let's make use of logging
    logger = logging.getLogger()

    # Create folders for data and plots
    folder = os.path.join(os.getcwd(), 'experiments', 'ca_patterns_pypet')
    if not os.path.isdir(folder):
        os.makedirs(folder)
    filename = os.path.join(folder, 'all_patterns.hdf5')

    # Create an environment
    env = Environment(trajectory='cellular_automata',
                      multiproc=True,
                      ncores=4,
                      wrap_mode='QUEUE',
                      filename=filename,
                      overwrite_file=True)

    # extract the trajectory
    traj = env.traj

    traj.v_lazy_adding = True
    traj.par.ncells = 400, 'Number of cells'
    traj.par.steps = 250, 'Number of timesteps'
    traj.par.rule_number = 30, 'The ca rule'
    traj.par.initial_name = 'random', 'The type of initial state'
    traj.par.seed = 100042, 'RNG Seed'


    # Explore
    exp_dict = {'rule_number' : [10, 30, 90, 110, 184],
                'initial_name' : ['single', 'random'],}
    # # You can uncomment the ``exp_dict`` below to see that changing the
    # # exploration scheme is now really easy:
    # exp_dict = {'rule_number' : [10, 30, 90, 110, 184],
    #             'ncells' : [100, 200, 300],
    #             'seed': [333444555, 123456]}
    exp_dict = cartesian_product(exp_dict)
    traj.f_explore(exp_dict)

    # Run the simulation
    logger.info('Starting Simulation')
    env.run(wrap_automaton)

    # Load all data
    traj.f_load(load_data=2)

    logger.info('Printing data')
    for idx, run_name in enumerate(traj.f_iter_runs()):
        # Plot all patterns
        filename = os.path.join(folder, make_filename(traj))
        plot_pattern(traj.crun.pattern, traj.rule_number, filename)
        progressbar(idx, len(traj), logger=logger)

    # Finally disable logging and close all log-files
    env.disable_logging()
Example #11
    def make_env(self, **kwargs):

        self.mode.__dict__.update(kwargs)
        filename = 'log_testing.hdf5'
        self.filename = make_temp_dir(filename)
        self.traj_name = make_trajectory_name(self)
        self.env = Environment(trajectory=self.traj_name,
                               filename=self.filename, **self.mode.__dict__)
        self.traj = self.env.v_traj
Example #12
def main():

    filename = os.path.join('hdf5', 'FiringRate.hdf5')
    env = Environment(
        trajectory='FiringRate',
        comment='Experiment to measure the firing rate '
        'of a leaky integrate and fire neuron. '
        'Exploring different input currents, '
        'as well as refractory periods',
        add_time=False,  # We don't want to add the current time to the name,
        log_stdout=True,
        log_config='DEFAULT',
        multiproc=True,
        ncores=4,  #I think my laptop has 4 cores
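        # Commit the code in this git repository before the run starts; git info
        # (e.g. the hexsha) is stored in the trajectory's config.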
        git_repository='/home/pinolej/th',
        wrap_mode='QUEUE',
        filename=filename,
        overwrite_file=True)

    traj = env.trajectory

    # Add parameters
    add_parameters(traj)

    # Let's explore
    add_exploration(traj)

    # Add the postprocessing function
    env.add_postprocessing(neuron_postproc)

    # Run the experiment
    env.run(run_neuron)

    # Finally disable logging and close all log-files
    env.disable_logging()
Example #13
File: main.py Project: fontaine618/NAIVI
def main():
    # pypet environment
    env = Environment(trajectory=SIM_NAME,
                      comment="Experiment on density with binary covariates",
                      log_config=None,
                      multiproc=False,
                      ncores=1,
                      filename=SIM_PATH + "/results/",
                      overwrite_file=True)
    traj = env.trajectory

    # parameters (data generation)

    traj.f_add_parameter("data.N", np.int64(500), "Number of nodes")
    traj.f_add_parameter("data.K", np.int64(5),
                         "True number of latent components")
    traj.f_add_parameter("data.p_cts", np.int64(0),
                         "Number of continuous covariates")
    traj.f_add_parameter("data.p_bin", np.int64(0),
                         "Number of binary covariates")
    traj.f_add_parameter("data.var_adj", np.float64(1.),
                         "True variance in the link Probit model")
    traj.f_add_parameter("data.var_cov", np.float64(1.),
                         "True variance in the covariate model (cts and bin)")
    traj.f_add_parameter("data.missing_rate", np.float64(0.1), "Missing rate")
    traj.f_add_parameter("data.seed", np.int64(1), "Random seed")
    traj.f_add_parameter("data.alpha_mean", np.float64(-1.85),
                         "Mean of the heterogeneity parameter")

    # parameters (model)
    traj.f_add_parameter("model.K", np.int64(5),
                         "Number of latent components in the model")
    traj.f_add_parameter("model.adj_model", "Logistic", "Adjacency model")
    traj.f_add_parameter("model.bin_model", "Logistic",
                         "Binary covariate model")

    # parameters (fit)
    traj.f_add_parameter("fit.n_iter", np.int64(20),
                         "Number of VEM iterations")
    traj.f_add_parameter("fit.n_vmp", np.int64(5),
                         "Number of VMP iterations per E-step")
    traj.f_add_parameter("fit.n_gd", np.int64(5),
                         "Number of GD iterations per M-step")
    traj.f_add_parameter("fit.step_size", np.float64(0.01), "GD Step size")

    # experiment
    explore_dict = {
        "data.alpha_mean":
        np.array([-3.2, -2.8, -2.4, -2., -1.6, -1.2, -0.8, -0.4, 0.0, 0.4]),
        "data.p_bin":
        np.array([10, 100, 500]),
        "data.seed":
        np.arange(0, 100, 1)
    }
    experiment = cartesian_product(explore_dict, tuple(explore_dict.keys()))
    traj.f_explore(experiment)

    env.add_postprocessing(post_processing)
    env.run(run)
    env.disable_logging()
Example #14
def get_runtime(length):
    filename = os.path.join('tmp', 'hdf5', 'many_runs_improved.hdf5')
    start = time.time()
    with Environment(filename=filename,
                     log_levels=50,
                     report_progress=(2, 'progress', 50),
                     overwrite_file=True,
                     purge_duplicate_comments=False,
                     summary_tables=False,
                     small_overview_tables=False) as env:

        traj = env.v_traj

        traj.par.x = 0, 'parameter'

        traj.f_explore({'x': range(length)})

        max_run = 1000000000

        for idx in range(len(traj)):
            if idx > max_run:
                traj.f_get_run_information(idx, copy=False)['completed'] = 1

        env.f_run(job)
        end = time.time()
        dicts = [
            traj.f_get_run_information(x)
            for x in range(min(len(traj), max_run))
        ]
    total = end - start
    return total / float(len(traj)), total
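
This benchmark has no entry point in the snippet; a hypothetical driver (the lengths are arbitrary; `get_runtime` returns seconds per run and the total wall-clock time) could be:

if __name__ == '__main__':
    for length in (100, 1000, 10000):
        per_run, total = get_runtime(length)
        print('%d runs: %.5f s/run, %.2f s total' % (length, per_run, total))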
Example #16
    def test_fail_on_wrong_kwarg(self):
        with self.assertRaises(ValueError):
            filename = 'testsfail.hdf5'
            env = Environment(filename=make_temp_dir(filename),
                              log_stdout=True,
                              log_config=get_log_config(),
                              logger_names=('STDERROR', 'STDOUT'),
                              foo='bar')
Example #17
def test_run():

    global filename

    np.random.seed()
    trajname = 'profiling'
    filename = make_temp_dir(os.path.join('hdf5', 'test%s.hdf5' % trajname))

    env = Environment(trajectory=trajname,
                      filename=filename,
                      file_title=trajname,
                      log_stdout=False,
                      results_per_run=5,
                      derived_parameters_per_run=5,
                      multiproc=False,
                      ncores=1,
                      wrap_mode='LOCK',
                      use_pool=False,
                      overwrite_file=True)

    traj = env.v_trajectory

    traj.v_standard_parameter = Parameter

    ## Create some parameters
    param_dict = {}
    create_param_dict(param_dict)
    ### Add some parameter:
    add_params(traj, param_dict)


    traj.f_add_parameter('TEST', 'test_run')
    ###Explore
    explore(traj)

    ### Make a test run
    simple_arg = -13
    simple_kwarg = 13.0
    env.f_run(simple_calculations, simple_arg, simple_kwarg=simple_kwarg)

    size = os.path.getsize(filename)
    size_in_mb = size / 1000000.
    print('Size is %sMB' % str(size_in_mb))
Example #18
    def test_parsing(self):

        filename = make_temp_dir('config_test.hdf5')
        env = Environment(filename=filename, config=self.parser)

        traj = env.v_traj
        self.assertTrue(traj.v_auto_load)
        self.assertEqual(traj.v_storage_service.filename, filename)

        self.assertEqual(traj.x, 42)
        self.assertEqual(traj.f_get('y').v_comment, 'This is the second variable')
        self.assertTrue(traj.testconfig)

        self.assertTrue(env._logging_manager.log_config is not None)
        self.assertTrue(env._logging_manager._sp_config is not None)

        env.f_disable_logging()
Example #19
File: main.py Project: MehmetTimur/pypet
def main():

    filename = os.path.join('hdf5', 'FiringRate.hdf5')
    env = Environment(trajectory='FiringRate',
                      comment='Experiment to measure the firing rate '
                            'of a leaky integrate and fire neuron. '
                            'Exploring different input currents, '
                            'as well as refractory periods',
                      add_time=False, # We don't want to add the current time to the name,
                      log_stdout=True,
                      log_config='DEFAULT',
                      multiproc=True,
                      ncores=2, #My laptop has 2 cores ;-)
                      wrap_mode='QUEUE',
                      filename=filename,
                      overwrite_file=True)

    traj = env.trajectory

    # Add parameters
    add_parameters(traj)

    # Let's explore
    add_exploration(traj)

    # Add the postprocessing function
    env.add_postprocessing(neuron_postproc)

    # Run the experiment
    env.run(run_neuron)

    # Finally disable logging and close all log-files
    env.disable_logging()
Example #20
def main():
    """Main function to protect the *entry point* of the program.

    If you want to use multiprocessing under Windows you need to wrap your
    main code creating an environment into a function. Otherwise
    the newly started child processes will re-execute the code and throw
    errors (also see https://docs.python.org/2/library/multiprocessing.html#windows).

    """

    # Create an environment that handles running.
    # Let's enable multiprocessing with 2 workers.
    filename = os.path.join('hdf5', 'example_04.hdf5')
    env = Environment(trajectory='Example_04_MP',
                      filename=filename,
                      file_title='Example_04_MP',
                      log_stdout=True,
                      comment='Multiprocessing example!',
                      multiproc=True,
                      ncores=4,
                      use_pool=True,  # Our runs are inexpensive we can get rid of overhead
                      # by using a pool
                      freeze_input=True,  # We can avoid some
                      # overhead by freezing the input to the pool
                      wrap_mode=pypetconstants.WRAP_MODE_QUEUE,
                      graceful_exit=True,  # We want to exit in a data friendly way
                      # that safes all results after hitting CTRL+C, try it ;-)
                      overwrite_file=True)

    # Get the trajectory from the environment
    traj = env.trajectory

    # Add both parameters
    traj.f_add_parameter('x', 1.0, comment='I am the first dimension!')
    traj.f_add_parameter('y', 1.0, comment='I am the second dimension!')

    # Explore the parameters with a cartesian product, but we want to explore a bit more
    traj.f_explore(cartesian_product({'x':[float(x) for x in range(20)],
                                      'y':[float(y) for y in range(20)]}))

    # Run the simulation
    env.run(multiply)

    # Finally disable logging and close all log-files
    env.disable_logging()
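
As the docstring above explains, multiprocessing under Windows needs a protected entry point; the usual guard (assuming this module defines `main()` as above):

if __name__ == '__main__':
    main()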
Example #21
def main():

    env = Environment(trajectory='FiringRatePipeline',
                      comment='Experiment to measure the firing rate '
                            'of a leaky integrate and fire neuron. '
                            'Exploring different input currents, '
                            'as well as refractory periods',
                      add_time=False, # We don't want to add the current time to the name,
                      log_folder='./logs/',
                      log_level=logging.INFO,
                      log_stdout=True,
                      multiproc=True,
                      ncores=2, #My laptop has 2 cores ;-)
                      filename='./hdf5/', # We only pass a folder here, so the name is chosen
                      # automatically to be the same as the Trajectory
                      )

    env.f_pipeline(mypipeline)
Example #23
def main():
    filename = os.path.join('hdf5', 'FiringRate.hdf5')
    env = Environment(trajectory='FiringRatePipeline',
                      comment='Experiment to measure the firing rate '
                            'of a leaky integrate and fire neuron. '
                            'Exploring different input currents, '
                            'as well as refractory periods',
                      add_time=False, # We don't want to add the current time to the name,
                      log_stdout=True,
                      multiproc=True,
                      ncores=2, #My laptop has 2 cores ;-)
                      filename=filename,
                      overwrite_file=True)

    env.pipeline(mypipeline)

    # Finally disable logging and close all log-files
    env.disable_logging()
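
`mypipeline` itself is not shown in this snippet. A pipeline function receives the freshly created trajectory, prepares it, and returns the run function plus, optionally, a post-processing function. A minimal sketch, assuming the `add_parameters`, `add_exploration`, `run_neuron`, and `neuron_postproc` helpers from the neighbouring FiringRate examples:

def mypipeline(traj):
    """Prepares `traj` and hands back the run and post-processing functions."""
    add_parameters(traj)
    add_exploration(traj)
    return (run_neuron, (), {}), (neuron_postproc, (), {})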
Example #24
    def test_net(self):
        env = Environment(trajectory='Test_'+repr(time.time()).replace('.','_'),
                          filename=make_temp_dir(os.path.join(
                              'experiments',
                              'tests',
                              'briantests',
                              'HDF5',
                              'briantest.hdf5')),
                          file_title='test',
                          log_config=get_log_config(),
                          dynamic_imports=['pypet.brian2.parameter.Brian2Parameter',
                                           Brian2MonitorResult],
                          multiproc=False)
        traj = env.v_traj
        traj.f_add_parameter(Brian2Parameter, 'v0', 0.0*mV,
                             comment='Input bias')
        traj.f_explore({'v0': [11*mV, 13*mV, 15*mV]})
        env.f_run(run_network)
        self.get_data(traj)
Example #25
    def test_overwrite_annotations_and_results(self):

        filename = make_temp_dir('overwrite.hdf5')

        env = Environment(trajectory='testoverwrite',
                          filename=filename,
                          log_config=get_log_config(),
                          overwrite_file=True)

        traj = env.v_traj

        traj.f_add_parameter('grp.x', 5, comment='hi')
        traj.grp.v_comment = 'hi'
        traj.grp.v_annotations['a'] = 'b'

        traj.f_store()

        traj.f_remove_child('parameters', recursive=True)

        traj.f_load(load_data=2)

        self.assertTrue(traj.x == 5)
        self.assertTrue(traj.grp.v_comment == 'hi')
        self.assertTrue(traj.grp.v_annotations['a'] == 'b')

        traj.f_get('x').f_unlock()
        traj.grp.x = 22
        traj.f_get('x').v_comment = 'hu'
        traj.grp.v_annotations['a'] = 'c'
        traj.grp.v_comment = 'hu'

        traj.f_store_item(traj.f_get('x'), store_data=3)
        traj.f_store_item(traj.grp, store_data=3)

        traj.f_remove_child('parameters', recursive=True)

        traj.f_load(load_data=2)

        self.assertTrue(traj.x == 22)
        self.assertTrue(traj.grp.v_comment == 'hu')
        self.assertTrue(traj.grp.v_annotations['a'] == 'c')

        env.f_disable_logging()
Example #26
    def test_hdf5_settings_and_context(self):

        filename = make_temp_dir('hdfsettings.hdf5')
        with Environment('testraj',
                         filename=filename,
                         add_time=True,
                         comment='',
                         dynamic_imports=None,
                         log_config=None,
                         multiproc=False,
                         ncores=3,
                         wrap_mode=pypetconstants.WRAP_MODE_LOCK,
                         continuable=False,
                         use_hdf5=True,
                         complevel=4,
                         complib='zlib',
                         shuffle=True,
                         fletcher32=True,
                         pandas_format='t',
                         pandas_append=True,
                         purge_duplicate_comments=True,
                         summary_tables=True,
                         small_overview_tables=True,
                         large_overview_tables=True,
                         results_per_run=19,
                         derived_parameters_per_run=17) as env:

            traj = env.v_trajectory

            traj.f_store()

            hdf5file = pt.open_file(filename=filename)

            table = hdf5file.root._f_get_child(traj.v_name)._f_get_child(
                'overview')._f_get_child('hdf5_settings')

            row = table[0]

            self.assertTrue(row['complevel'] == 4)

            self.assertTrue(row['complib'] == compat.tobytes('zlib'))

            self.assertTrue(row['shuffle'])
            self.assertTrue(row['fletcher32'])
            self.assertTrue(row['pandas_format'] == compat.tobytes('t'))

            for attr_name, table_name in HDF5StorageService.NAME_TABLE_MAPPING.items():
                self.assertTrue(row[table_name])

            self.assertTrue(row['purge_duplicate_comments'])
            self.assertTrue(row['results_per_run'] == 19)
            self.assertTrue(row['derived_parameters_per_run'] == 17)

            hdf5file.close()
Example #27
    def test_net(self):
        env = Environment(
            trajectory='Test_' + repr(time.time()).replace('.', '_'),
            filename=make_temp_dir(
                os.path.join('experiments', 'tests', 'briantests', 'HDF5',
                             'briantest.hdf5')),
            file_title='test',
            log_config=get_log_config(),
            dynamic_imports=[
                'pypet.brian2.parameter.Brian2Parameter', Brian2MonitorResult
            ],
            multiproc=False)
        traj = env.v_traj
        traj.f_add_parameter(Brian2Parameter,
                             'v0',
                             0.0 * mV,
                             comment='Input bias')
        traj.f_explore({'v0': [11 * mV, 13 * mV, 15 * mV]})
        env.f_run(run_network)
        self.get_data(traj)
Example #28
def fail_on_diff():
    try:
        Environment(trajectory='fail',
                 filename=os.path.join('fail',
                                       'HDF5',),
                  file_title='failing',
                  git_repository='.', git_message='Im a message!',
                  git_fail=True)
        raise RuntimeError('You should not be here!')
    except GitDiffError as exc:
        print('I expected the GitDiffError: `%s`' % repr(exc))
Example #29
    def test_overwrite_annotations_and_results(self):

        filename = make_temp_dir('overwrite.hdf5')

        env = Environment(trajectory='testoverwrite', filename=filename,
                          log_config=get_log_config())

        traj = env.v_traj

        traj.f_add_parameter('grp.x', 5, comment='hi')
        traj.grp.v_comment='hi'
        traj.grp.v_annotations['a'] = 'b'

        traj.f_store()

        traj.f_remove_child('parameters', recursive=True)

        traj.f_load(load_data=2)

        self.assertTrue(traj.x == 5)
        self.assertTrue(traj.grp.v_comment == 'hi')
        self.assertTrue(traj.grp.v_annotations['a'] == 'b')

        traj.f_get('x').f_unlock()
        traj.grp.x = 22
        traj.f_get('x').v_comment='hu'
        traj.grp.v_annotations['a'] = 'c'
        traj.grp.v_comment = 'hu'

        traj.f_store_item(traj.f_get('x'), store_data=3)
        traj.f_store_item(traj.grp, store_data=3)

        traj.f_remove_child('parameters', recursive=True)

        traj.f_load(load_data=2)

        self.assertTrue(traj.x == 22)
        self.assertTrue(traj.grp.v_comment == 'hu')
        self.assertTrue(traj.grp.v_annotations['a'] == 'c')

        env.f_disable_logging()
Example #30
def main(inputargs=None):
    if inputargs is None:
        inputargs = sys.argv[1:] if len(sys.argv) > 1 else ""
    args = docopt(__doc__, argv=inputargs)
    wavpath = path.join(modulePath, "resources", "tone_in_noise")
    stimuli = [path.join(wavpath, i) for i in glob.glob(path.join(wavpath, "*.wav"))]
    outfile = path.realpath(path.expanduser(args["--out"]))
    env = Environment(trajectory='tone-in-noise',
                      filename=outfile,
                      overwrite_file=True,
                      file_title="Tone in noise at different SNR",
                      comment="some comment",
                      large_overview_tables="False",
                      # freeze_input=True,
                      # use_pool=True,
                      multiproc=True,
                      ncores=3,
                      graceful_exit=True,
                      #wrap_mode=pypetconstants.WRAP_MODE_QUEUE,
                      )

    traj = env.trajectory
    traj.f_add_parameter('periphery', 'verhulst', comment="which periphery was used")
    traj.f_add_parameter('brainstem', 'nelsoncarney04', comment="which brainstem model was used")
    traj.f_add_parameter('weighting', "--no-cf-weighting ", comment="weighted CFs")
    traj.f_add_parameter('wavfile', '', comment="Which wav file to run")
    traj.f_add_parameter('level', 80, comment="stimulus level, spl")
    traj.f_add_parameter('neuropathy', "none", comment="")

    parameter_dict = {
        "periphery" : ['verhulst', 'zilany'],
        "brainstem" : ['nelsoncarney04', 'carney2015'],
        "weighting" : [cf_weighting, ""],
        "wavfile"   : stimuli,
        "level"     : [80],
        "neuropathy": ["none", "moderate", "severe", "ls-moderate", "ls-severe"]
    }

    traj.f_explore(cartesian_product(parameter_dict))
    env.run(tone_in_noise)
    return 0
Example #31
def main():
    # Create an environment that handles running
    filename = os.path.join('hdf5','example_18.hdf5')
    env = Environment(trajectory='Multiplication',
                      filename=filename,
                      file_title='Example_18_Many_Runs',
                      overwrite_file=True,
                      comment='Contains many runs',
                      multiproc=True,
                      use_pool=True,
                      freeze_input=True,
                      ncores=2,
                      wrap_mode='QUEUE')

    # The environment has created a trajectory container for us
    traj = env.trajectory

    # Add both parameters
    traj.f_add_parameter('x', 1, comment='I am the first dimension!')
    traj.f_add_parameter('y', 1, comment='I am the second dimension!')

    # Explore the parameters with a cartesian product, yielding 2500 runs
    traj.f_explore(cartesian_product({'x': range(50), 'y': range(50)}))

    # Run the simulation
    env.run(multiply)

    # Disable logging
    env.disable_logging()

    # turn auto loading on, since results have not been loaded, yet
    traj.v_auto_load = True
    # Use the `v_idx` functionality
    traj.v_idx = 2042
    print('The result of run %d is: ' % traj.v_idx)
    # Now we can rely on the wildcards
    print(traj.res.crunset.crun.z)
    traj.v_idx = -1
    # Or we can use the shortcuts `rts_X` (run to set) and `r_X` to get particular results
    print('The result of run %d is: ' % 2044)
    print(traj.res.rts_2044.r_2044.z)
Example #32
    def test_throw_warning_if_old_kw_is_used(self):
        pass

        filename = make_temp_dir('hdfwarning.hdf5')

        with warnings.catch_warnings(record=True) as w:

            env = Environment(trajectory='test', filename=filename,
                              dynamically_imported_classes=[],
                              log_config=get_log_config())

        with warnings.catch_warnings(record=True) as w:
            traj = Trajectory(dynamically_imported_classes=[])

        traj = env.v_trajectory
        traj.f_store()

        with warnings.catch_warnings(record=True) as w:
            traj.f_load(dynamically_imported_classes=[])

        env.f_disable_logging()
Example #34
File: main.py Project: fontaine618/NAIVI
def main(path, name, explore_dict):
    comment = "\n".join(
        ["{}: {}".format(k, v) for k, v in explore_dict.items()])
    # pypet environment
    env = Environment(trajectory=name,
                      comment=comment,
                      log_config=None,
                      multiproc=False,
                      ncores=1,
                      filename=path + name + "/results/",
                      overwrite_file=True)
    traj = env.trajectory
    traj.f_add_parameter("path", path + name, "Path")

    # parameters (data generation)
    traj.f_add_parameter("data.N", np.int64(500), "Number of nodes")
    traj.f_add_parameter("data.K", np.int64(5),
                         "True number of latent components")
    traj.f_add_parameter("data.p_cts", np.int64(0),
                         "Number of continuous covariates")
    traj.f_add_parameter("data.p_bin", np.int64(0),
                         "Number of binary covariates")
    traj.f_add_parameter("data.var_cov", np.float64(1.),
                         "True variance in the covariate model (cts and bin)")
    traj.f_add_parameter("data.missing_rate", np.float64(0.1), "Missing rate")
    traj.f_add_parameter("data.seed", np.int64(1), "Random seed")
    traj.f_add_parameter("data.center", np.int64(1), "Ego-network center")
    traj.f_add_parameter("data.alpha_mean", np.float64(-1.85),
                         "Mean of the heterogeneity parameter")

    # parameters (model)
    traj.f_add_parameter("model.K", np.int64(5),
                         "Number of latent components in the model")

    # parameters (fit)
    traj.f_add_parameter("fit.algo", "MLE", "Inference algorithm")
    traj.f_add_parameter("fit.max_iter", np.int64(500),
                         "Number of VEM iterations")
    traj.f_add_parameter("fit.n_sample", np.int64(1),
                         "Number of samples for VIMC")
    traj.f_add_parameter("fit.eps", np.float64(1.0e-6),
                         "convergence threshold")
    traj.f_add_parameter("fit.lr", np.float64(0.01), "GD Step size")

    # experiment
    experiment = cartesian_product(explore_dict, tuple(explore_dict.keys()))
    traj.f_explore(experiment)
    env.add_postprocessing(post_processing)
    env.run(run)
    env.disable_logging()
Example #35
def main():
    # Create an environment that handles running
    filename = os.path.join('hdf5', 'example_12.hdf5')
    env = Environment(trajectory='Multiplication',
                      filename=filename,
                      file_title='Example_12_Sharing_Data',
                      overwrite_file=True,
                      comment='The first example!',
                      continuable=False, # We have shared data in terms of a multiprocessing list,
                      # so we CANNOT use the continue feature.
                      multiproc=True,
                      ncores=2)

    # The environment has created a trajectory container for us
    traj = env.trajectory

    # Add both parameters
    traj.f_add_parameter('x', 1, comment='I am the first dimension!')
    traj.f_add_parameter('y', 1, comment='I am the second dimension!')

    # Explore the parameters with a cartesian product
    traj.f_explore(cartesian_product({'x':[1,2,3,4], 'y':[6,7,8]}))

    # We want a shared list where we can put all our results in. We use a manager for this:
    result_list = mp.Manager().list()
    # Let's make some space for potential results
    result_list[:] = [0 for _dummy in range(len(traj))]

    # Run the simulation
    env.run(multiply, result_list)

    # Now we want to store the final list as numpy array
    traj.f_add_result('z', np.array(result_list))

    # Finally let's print the result to see that it worked
    print(traj.z)

    #Disable logging and close all log-files
    env.disable_logging()
Example #36
def main(inputargs):
    args = docopt(__doc__, argv=inputargs)
    wavpath = path.join(modulePath, "resources", "tone_in_noise")
    stimuli = [path.join(wavpath, i) for i in glob.glob(path.join(wavpath, "*.wav"))]
    outfile = path.realpath(path.expanduser(args["--out"]))
    env = Environment(trajectory='tone-in-noise',
                      filename=outfile,
                      overwrite_file=True,
                      file_title="Tone in noise at different SNR",
                      comment="some comment",
                      large_overview_tables="False",
                      # freeze_input=True,
                      # use_pool=True,
                      multiproc=True,
                      ncores=3,
                      graceful_exit=True,
                      #wrap_mode=pypetconstants.WRAP_MODE_QUEUE,
                      )

    traj = env.trajectory
    traj.f_add_parameter('periphery', 'verhulst', comment="which periphery was used")
    traj.f_add_parameter('brainstem', 'nelsoncarney04', comment="which brainstem model was used")
    traj.f_add_parameter('weighting', "--no-cf-weighting ", comment="weighted CFs")
    traj.f_add_parameter('wavfile', '', comment="Which wav file to run")
    traj.f_add_parameter('level', 80, comment="stimulus level, spl")
    traj.f_add_parameter('neuropathy', "none", comment="")

    parameter_dict = {
        "periphery" : ['verhulst', 'zilany'],
        "brainstem" : ['nelsoncarney04', 'carney2015'],
        "weighting" : [cf_weighting, ""],
        "wavfile"   : stimuli,
        "level"     : [80],
        "neuropathy": ["none", "moderate", "severe", "ls-moderate", "ls-severe"]
    }

    traj.f_explore(cartesian_product(parameter_dict))
    env.run(tone_in_noise)
    return 0
Example #37
def main():
    """ Main *boilerplate* function to start simulation """
    # Now let's make use of logging
    logger = logging.getLogger()

    # Create folders for data and plots
    folder = os.path.join(os.getcwd(), 'experiments', 'ca_patterns_pypet')
    if not os.path.isdir(folder):
        os.makedirs(folder)
    filename = os.path.join(folder, 'all_patterns.hdf5')

    # Create an environment
    env = Environment(trajectory='cellular_automata',
                      multiproc=True,
                      ncores=4,
                      wrap_mode='QUEUE',
                      filename=filename,
                      overwrite_file=True)

    # extract the trajectory
    traj = env.traj

    traj.par.ncells = Parameter('ncells', 400, 'Number of cells')
    traj.par.steps = Parameter('steps', 250, 'Number of timesteps')
    traj.par.rule_number = Parameter('rule_number', 30, 'The ca rule')
    traj.par.initial_name = Parameter('initial_name', 'random',
                                      'The type of initial state')
    traj.par.seed = Parameter('seed', 100042, 'RNG Seed')

    # Explore
    exp_dict = {
        'rule_number': [10, 30, 90, 110, 184],
        'initial_name': ['single', 'random'],
    }
    # # You can uncomment the ``exp_dict`` below to see that changing the
    # # exploration scheme is now really easy:
    # exp_dict = {'rule_number' : [10, 30, 90, 110, 184],
    #             'ncells' : [100, 200, 300],
    #             'seed': [333444555, 123456]}
    exp_dict = cartesian_product(exp_dict)
    traj.f_explore(exp_dict)

    # Run the simulation
    logger.info('Starting Simulation')
    env.run(wrap_automaton)

    # Load all data
    traj.f_load(load_data=2)

    logger.info('Printing data')
    for idx, run_name in enumerate(traj.f_iter_runs()):
        # Plot all patterns
        filename = os.path.join(folder, make_filename(traj))
        plot_pattern(traj.crun.pattern, traj.rule_number, filename)
        progressbar(idx, len(traj), logger=logger)

    # Finally disable logging and close all log-files
    env.disable_logging()
Example #38
def main(fail=False):
    try:
        sumatra_project = '.'

        if fail:
            print('There better be not any diffs.')

        # Create an environment that handles running
        with Environment(trajectory='Example1_Quick_And_Not_So_Dirty',
                         filename=os.path.join(
                             'experiments',
                             'HDF5',
                         ),
                         file_title='Example1_Quick_And_Not_So_Dirty',
                         comment='The first example!',
                         complib='blosc',
                         small_overview_tables=False,
                         git_repository='.',
                         git_message='Im a message!',
                         git_fail=fail,
                         sumatra_project=sumatra_project,
                         sumatra_reason='Testing!') as env:

            # Get the trajectory from the environment
            traj = env.v_trajectory

            # Add both parameters
            traj.f_add_parameter('x', 1, comment='Im the first dimension!')
            traj.f_add_parameter('y', 1, comment='Im the second dimension!')

            # Explore the parameters with a cartesian product:
            traj.f_explore(cartesian_product({'x': [1, 2, 3], 'y': [6, 7, 8]}))

            # Run the simulation
            env.f_run(multiply)

            # Check that git information was added to the trajectory
            assert 'config.git.hexsha' in traj
            assert 'config.git.committed_date' in traj
            assert 'config.git.message' in traj
            assert 'config.git.name_rev' in traj

            print("Python git test successful")

            # traj.f_expand({'x':[3,3],'y':[42,43]})
            #
            # env.f_run(multiply)
    except Exception as exc:
        print(repr(exc))
        sys.exit(1)
Example #39
def get_runtime(length):
    filename = os.path.join('tmp', 'hdf5', 'many_runs.hdf5')

    with Environment(filename=filename,
                     log_levels=20,
                     report_progress=(0.0000002, 'progress', 50),
                     overwrite_file=True,
                     purge_duplicate_comments=False,
                     log_stdout=False,
                     summary_tables=False,
                     small_overview_tables=False) as env:

        traj = env.v_traj

        traj.par.f_apar('x', 0, 'parameter')

        traj.f_explore({'x': range(length)})

        max_run = 100

        for idx in range(len(traj)):
            if idx > max_run:
                traj.f_get_run_information(idx, copy=False)['completed'] = 1
        traj.f_store()

        if not os.path.isdir('./tmp'):
            os.mkdir('tmp')
        graphviz = CustomOutput()
        graphviz.output_file = './tmp/run_profile_storage_%d.png' % len(traj)
        service_filter = GlobbingFilter(include=['*storageservice.*'])

        config = Config(groups=True, verbose=True)
        config.trace_filter = service_filter

        print('RUN PROFILE')
        with PyCallGraph(config=config, output=graphviz):
            # start = time.time()
            # env.f_run(job)
            # end = time.time()
            for irun in range(100):
                traj._make_single_run(irun + len(traj) // 2)
                # Measure start time
                traj._set_start()
                traj.f_ares('$set.$', 42, comment='A result')
                traj._set_finish()
                traj._store_final(store_data=2)
                traj._finalize_run()
            print('STARTING_to_PLOT')
        print('DONE RUN PROFILE')
Example #40
    def test_hdf5_store_load_result(self):
        traj_name = make_trajectory_name(self)
        file_name = make_temp_dir(
            os.path.join('brian2', 'tests', 'hdf5',
                         'test_%s.hdf5' % traj_name))
        env = Environment(trajectory=traj_name,
                          filename=file_name,
                          log_config=get_log_config(),
                          dynamic_imports=[Brian2Result],
                          add_time=False,
                          storage_service=HDF5StorageService)
        traj = env.v_trajectory
        traj.v_standard_result = Brian2Result
        traj.f_add_result('brian2.single.millivolts_single_a',
                          10 * mvolt,
                          comment='single value a')
        traj.f_add_result('brian2.single.millivolts_single_c',
                          11 * mvolt,
                          comment='single value b')

        traj.f_add_result('brian2.array.millivolts_array_a', [11, 12] * mvolt,
                          comment='array')
        traj.f_add_result('mV1', 42.0 * mV)
        # results can hold much more than a single data item:
        traj.f_add_result('ampere1',
                          1 * mA,
                          44,
                          test=300 * mV,
                          test2=[1, 2, 3],
                          test3=np.array([1, 2, 3]) * mA,
                          comment='Result keeping track of many things')
        traj.f_add_result('integer', 16)
        traj.f_add_result('kHz05', 0.5 * kHz)
        traj.f_add_result('nested_array',
                          np.array([[6., 7., 8.], [9., 10., 11.]]) * ms)
        traj.f_add_result('b2a', np.array([1., 2.]) * mV)

        traj.f_add_result('nounit',
                          Quantity(np.array([[6., 7., 8.], [9., 10., 11.]])))

        traj.f_store()

        traj2 = load_trajectory(filename=file_name,
                                name=traj_name,
                                dynamic_imports=[Brian2Result],
                                load_data=2)

        self.compare_trajectories(traj, traj2)
Example #41
def main(name,
         explore_dict,
         postprocess=False,
         ncores=1,
         testrun=False,
         commit=None):

    if not testrun:
        if commit is None:
            raise Exception("Non testrun needs a commit")

    filename = os.path.join(os.getcwd(), 'data/', name + '.hdf5')

    # if not the first run, tr2 will be merged later
    label = 'tr1'

    # if only post processing, can't use the same label
    # (generates HDF5 error)
    if postprocess:
        label += '_postprocess-%.6d' % random.randint(0, 999999)

    env = Environment(
        trajectory=label,
        add_time=False,
        filename=filename,
        continuable=False,  # ??
        lazy_debug=False,  # ??
        multiproc=True,
        ncores=ncores,
        use_pool=False,  # likely not working w/ brian2
        wrap_mode='QUEUE',  # ??
        overwrite_file=False)

    tr = env.trajectory

    add_params(tr)

    if not testrun:
        tr.f_add_parameter('mconfig.git.sha1', str(commit))
        tr.f_add_parameter('mconfig.git.message', commit.message)

    tr.f_explore(explore_dict)

    def run_sim(tr):
        try:
            run_net(tr)
        except TimeoutError:
            print("Unable to plot, must run analysis manually")

        post_process(tr)

    if postprocess:
        env.run(post_process)
    else:
        env.run(run_sim)
Example #42
def main():
    """Main function to protect the *entry point* of the program.

    If you want to use multiprocessing under Windows you need to wrap your
    main code creating an environment into a function. Otherwise
    the newly started child processes will re-execute the code and throw
    errors (also see https://docs.python.org/2/library/multiprocessing.html#windows).

    """

    # Create an environment that handles running.
    # Let's enable multiprocessing with 2 workers.
    filename = os.path.join('hdf5', 'example_04.hdf5')
    env = Environment(
        trajectory='Example_04_MP',
        filename=filename,
        file_title='Example_04_MP',
        log_stdout=True,
        comment='Multiprocessing example!',
        multiproc=True,
        ncores=4,
        use_pool=True,  # Our runs are inexpensive we can get rid of overhead
        # by using a pool
        freeze_input=True,  # We can avoid some
        # overhead by freezing the input to the pool
        wrap_mode=pypetconstants.WRAP_MODE_QUEUE,
        graceful_exit=True,  # We want to exit in a data friendly way
        # that safes all results after hitting CTRL+C, try it ;-)
        overwrite_file=True)

    # Get the trajectory from the environment
    traj = env.trajectory

    # Add both parameters
    traj.f_add_parameter('x', 1.0, comment='I am the first dimension!')
    traj.f_add_parameter('y', 1.0, comment='I am the second dimension!')

    # Explore the parameters with a cartesian product, but we want to explore a bit more
    traj.f_explore(
        cartesian_product({
            'x': [float(x) for x in range(20)],
            'y': [float(y) for y in range(20)]
        }))

    # Run the simulation
    env.run(multiply)

    # Finally disable logging and close all log-files
    env.disable_logging()
Example #43
def main():
    filename = os.path.join('tmp', 'hdf5', 'many_runs.hdf5')
    with Environment(filename=filename,
                     log_levels=0,
                     report_progress=(2, 'progress', 50),
                     overwrite_file=True) as env:

        traj = env.v_traj

        traj.par.x = BrianParameter('', 0 * ms, 'parameter')

        traj.f_explore({'x': [x * ms for x in range(1000)]})

        traj.f_store()

        # env.f_run(job)

        dicts = [traj.f_get_run_information(x) for x in range(len(traj))]
        runtimes = [
            dic['finish_timestamp'] - dic['timestamp'] for dic in dicts
        ]
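
To boil those per-run timestamp differences down to one number, the block above could end by averaging them (a sketch continuing inside the `with` block):

        print('mean runtime: %.4f s' % (sum(runtimes) / len(runtimes)))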
Example #44
    def setUp(self):
        self.set_mode()
        self.logfolder = make_temp_dir(os.path.join('experiments',
                                                      'tests',
                                                      'Log'))

        random.seed()
        self.trajname = make_trajectory_name(self)
        self.filename = make_temp_dir(os.path.join('experiments',
                                                    'tests',
                                                    'HDF5',
                                                    'test%s.hdf5' % self.trajname))

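        # The environment is parameterized by the test's mode settings: wrap mode,
        # pool usage, and the HDF5 filter options (compression, checksums) below.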
        env = Environment(trajectory=self.trajname, filename=self.filename,
                          file_title=self.trajname,
                          log_stdout=False,
                          port=self.url,
                          log_config=get_log_config(),
                          results_per_run=5,
                          derived_parameters_per_run=5,
                          multiproc=self.multiproc,
                          ncores=self.ncores,
                          wrap_mode=self.mode,
                          use_pool=self.use_pool,
                          fletcher32=self.fletcher32,
                          complevel=self.complevel,
                          complib=self.complib,
                          shuffle=self.shuffle,
                          pandas_append=self.pandas_append,
                          pandas_format=self.pandas_format,
                          encoding=self.encoding)

        traj = env.v_trajectory

        self.param_dict = {}
        create_param_dict(self.param_dict)
        add_params(traj, self.param_dict)

        self.traj = traj
        self.env = env
Example #45
    def test_hdf5_store_load_parameter(self):
        traj_name = make_trajectory_name(self)
        file_name = make_temp_dir(
            os.path.join('brian2', 'tests', 'hdf5',
                         'test_%s.hdf5' % traj_name))
        env = Environment(trajectory=traj_name,
                          filename=file_name,
                          log_config=get_log_config(),
                          dynamic_imports=[Brian2Parameter],
                          add_time=False,
                          storage_service=HDF5StorageService)
        traj = env.v_trajectory
        traj.v_standard_parameter = Brian2Parameter
        traj.f_add_parameter('brian2.single.millivolts',
                             10 * mvolt,
                             comment='single value')

        #traj.f_add_parameter('brian2.array.millivolts', [11, 12]*mvolt, comment='array')
        #traj.f_add_parameter('mV1', 42.0*mV)
        #traj.f_add_parameter('ampere1', 1*mA)
        #traj.f_add_parameter('integer', 16)
        #traj.f_add_parameter('kHz05', 0.5*kHz)
        #traj.f_add_parameter('nested_array', np.array([[6.,7.,8.],[9.,10.,11.]]) * ms)
        #traj.f_add_parameter('b2a', np.array([1., 2.]) * mV)

        # We also need to check if explorations work with hdf5 store!
        #explore_dict = {'ampere1': [1*mA, 2*mA, 3*mA],
        #                'integer': [42,43,44],
        #                'b2a': [np.array([1., 2.]) * mV, np.array([1., 4.]) * mV,
        #                       np.array([1., 2.]) * mV]}
        #traj.f_explore(explore_dict)

        traj.f_store()

        traj2 = load_trajectory(filename=file_name,
                                name=traj_name,
                                dynamic_imports=[Brian2Parameter],
                                load_data=2)
        self.compare_trajectories(traj, traj2)
Example #46
def get_runtime(length):
    filename = os.path.join('tmp', 'hdf5', 'many_runs.hdf5')

    with Environment(
            filename=filename,
            log_levels=50,
            report_progress=(0.0002, 'progress', 50),
            overwrite_file=True,
            purge_duplicate_comments=False,
            log_stdout=False,
            multiproc=False,
            ncores=2,
            use_pool=True,
            wrap_mode='PIPE',  #freeze_input=True,
            summary_tables=False,
            small_overview_tables=False) as env:

        traj = env.v_traj

        traj.par.f_apar('x', 0, 'parameter')

        traj.f_explore({'x': range(length)})

        # traj.v_full_copy = False

        max_run = 1000

        for idx in range(len(traj)):
            if idx > max_run:
                traj.f_get_run_information(idx, copy=False)['completed'] = 1
        start = time.time()
        env.f_run(job)
        end = time.time()
        # dicts = [traj.f_get_run_information(x) for x in range(min(len(traj), max_run))]
    total = end - start
    return total / float(min(len(traj), max_run)), total / float(
        min(len(traj), max_run)) * len(traj)
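A hypothetical call (the length is illustrative and `job` must be defined) returns the average seconds per run plus an extrapolated total:

# Illustrative usage only; the argument value is arbitrary.
per_run, projected_total = get_runtime(1000)
print('%.4f s per run, %.1f s projected in total' % (per_run, projected_total))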
Example #47
def main():
    filename = os.path.join('hdf5', 'FiringRate.hdf5')
    env = Environment(
        trajectory='FiringRatePipeline',
        comment='Experiment to measure the firing rate '
        'of a leaky integrate and fire neuron. '
        'Exploring different input currents, '
        'as well as refractory periods',
        add_time=False,  # We don't want to add the current time to the name,
        log_stdout=True,
        multiproc=True,
        ncores=2,  #My laptop has 2 cores ;-)
        filename=filename,
        overwrite_file=True)

    env.pipeline(mypipeline)

    # Finally disable logging and close all log-files
    env.disable_logging()
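`env.pipeline` expects a pipeline function that prepares the trajectory and hands back the run function, optionally together with a post-processing function. A hypothetical sketch, in which `run_neuron`, `neuron_postproc`, and the parameter are placeholder names, not taken from the original example:

# Hypothetical pipeline sketch; every name below is an assumption.
def run_neuron(traj):
    """Placeholder run function."""
    traj.f_add_result('fired', traj.I > 1.0, comment='Did the neuron fire?')

def neuron_postproc(traj, result_list):
    """Placeholder post-processing function."""
    pass

def mypipeline(traj):
    traj.f_add_parameter('I', 0.5, comment='Input current')
    traj.f_explore({'I': [0.5, 1.0, 1.5]})
    # A pipeline returns (runfunc, args, kwargs) and, optionally, a second
    # tuple for post-processing.
    return (run_neuron, (), {}), (neuron_postproc, (), {})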
Example #48
def main():
    """Main function to protect the *entry point* of the program.

    If you want to use multiprocessing with SCOOP you need to wrap your
    main code creating an environment into a function. Otherwise
    the newly started child processes will re-execute the code and throw
    errors (also see http://scoop.readthedocs.org/en/latest/usage.html#pitfalls).

    """

    # Create an environment that handles running.
    # Let's enable multiprocessing with scoop:
    filename = os.path.join('hdf5', 'example_21.hdf5')
    env = Environment(trajectory='Example_21_SCOOP',
                      filename=filename,
                      file_title='Example_21_SCOOP',
                      log_stdout=True,
                      comment='Multiprocessing example using SCOOP!',
                      multiproc=True,
                      freeze_input=True, # We want to save overhead and freeze input
                      use_scoop=True, # Yes we want SCOOP!
                      wrap_mode=pypetconstants.WRAP_MODE_LOCAL,  # SCOOP only works with 'LOCAL'
                      # or 'NETLOCK' wrapping
                      overwrite_file=True)

    # Get the trajectory from the environment
    traj = env.trajectory

    # Add both parameters
    traj.f_add_parameter('x', 1.0, comment='I am the first dimension!')
    traj.f_add_parameter('y', 1.0, comment='I am the second dimension!')

    # Explore the parameters with a cartesian product, but we want to explore a bit more
    traj.f_explore(cartesian_product({'x':[float(x) for x in range(20)],
                                      'y':[float(y) for y in range(20)]}))
    # Run the simulation
    env.run(multiply)

    # Let's check that all runs are completed!
    assert traj.f_is_completed()

    # Finally disable logging and close all log-files
    env.disable_logging()
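Note that a SCOOP-based script is usually launched through the SCOOP module itself, e.g. `python -m scoop my_script.py`; started directly with plain `python`, no SCOOP worker pool is set up (see the SCOOP documentation referenced in the docstring above).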
Example #49
def main():
    # Create an environment that handles running
    filename = os.path.join('hdf5', 'example_12.hdf5')
    env = Environment(
        trajectory='Multiplication',
        filename=filename,
        file_title='Example_12_Sharing_Data',
        overwrite_file=True,
        comment='The first example!',
        continuable=False,  # We have shared data in terms of a multiprocessing list,
        # so we CANNOT use the continue feature.
        multiproc=True,
        ncores=2)

    # The environment has created a trajectory container for us
    traj = env.trajectory

    # Add both parameters
    traj.f_add_parameter('x', 1, comment='I am the first dimension!')
    traj.f_add_parameter('y', 1, comment='I am the second dimension!')

    # Explore the parameters with a cartesian product
    traj.f_explore(cartesian_product({'x': [1, 2, 3, 4], 'y': [6, 7, 8]}))

    # We want a shared list to put all our results in. We use a manager for this:
    result_list = mp.Manager().list()
    # Let's make some space for potential results
    result_list[:] = [0 for _dummy in range(len(traj))]

    # Run the simulation
    env.run(multiply, result_list)

    # Now we want to store the final list as numpy array
    traj.f_add_result('z', np.array(result_list))

    # Finally let's print the result to see that it worked
    print(traj.z)

    #Disable logging and close all log-files
    env.disable_logging()
Example #50
class LoggingTest(TrajectoryComparator):

    tags = 'integration', 'environment', 'logging'

    def setUp(self):
        root = logging.getLogger()
        for logger in itools.chain(root.manager.loggerDict.values(), [root]):
            if hasattr(logger, 'handlers'):
                for handler in logger.handlers:
                    if hasattr(handler, 'flush'):
                        handler.flush()
                    if hasattr(handler, 'close'):
                        handler.close()
                logger.handlers = []
            if hasattr(logger, 'setLevel'):
                logger.setLevel(logging.NOTSET)
        self.set_mode()

    def tearDown(self):
        super(LoggingTest, self).tearDown()

    def set_mode(self):
        self.mode = Dummy()
        self.mode.wrap_mode = 'LOCK'
        self.mode.multiproc = False
        self.mode.ncores = 1
        self.mode.use_pool = True
        self.mode.pandas_format = 'fixed'
        self.mode.pandas_append = False
        self.mode.complib = 'blosc'
        self.mode.complevel = 9
        self.mode.shuffle = True
        self.mode.fletcher32 = False
        self.mode.encoding = 'utf8'
        self.mode.log_stdout = False
        self.mode.log_config = get_log_config()


    def make_env(self, **kwargs):

        self.mode.__dict__.update(kwargs)
        filename = 'log_testing.hdf5'
        self.filename = make_temp_dir(filename)
        self.traj_name = make_trajectory_name(self)
        self.env = Environment(trajectory=self.traj_name,
                               filename=self.filename, **self.mode.__dict__)
        self.traj = self.env.v_traj


    def add_params(self, traj):

        traj.v_lazy_adding = True
        traj.par.p1 = 42, 'Hey'
        traj.f_apar('g1.p2', 145, comment='Test')


    def explore(self, traj):
        traj.f_explore({'p1': range(7)})

    @unittest.skipIf(platform.system() == 'Windows', 'Log file creation might fail under windows.')
    def test_logfile_creation_normal(self):
        # if not self.multiproc:
        #     return
        self.make_env()
        self.add_params(self.traj)
        self.explore(self.traj)

        self.env.f_run(log_wo_error_levels)
        self.env.f_disable_logging()

        traj = self.env.v_traj

        log_path = get_log_path(traj)

        if self.mode.multiproc:
            if self.mode.use_pool:
                length = self.mode.ncores * 2
            else:
                length = 2 * len(traj)
            if self.mode.wrap_mode == 'LOCK':
                length += 2
            elif self.mode.wrap_mode == 'QUEUE':
                length += 4
            else:
                raise RuntimeError('You shall not pass!')
        else:
            length = 2


        file_list = os.listdir(log_path)

        self.assertEqual(len(file_list), length)  # as many files as expected:
        # per-run/process logs plus the main LOG.txt and ERROR.txt
        total_error_count = 0
        total_store_count = 0
        total_info_count = 0
        total_retry_count = 0
        for file in file_list:
            with open(os.path.join(log_path, file), mode='r') as fh:
                text = fh.read()
            count = text.count('INFO_Test!')
            total_info_count += count
            error_count = text.count('ERROR_Test!')
            total_error_count += error_count
            store_count = text.count('STORE_Test!')
            total_store_count += store_count
            retry_count = text.count('Retry')
            total_retry_count += retry_count
            if 'LOG.txt' == file:
                if self.mode.multiproc:
                    self.assertEqual(count,0)
                    self.assertEqual(store_count, 0)
                else:
                    self.assertEqual(count, len(traj))
                    self.assertEqual(store_count, len(traj))
            elif 'ERROR' in file:
                full_path = os.path.join(log_path, file)
                filesize = os.path.getsize(full_path)
                with open(full_path) as fh:
                    text = fh.read()
                if 'Retry' not in text:
                    self.assertEqual(filesize, 0)
            elif 'Queue' in file:
                self.assertEqual(store_count, len(traj))
            elif 'LOG' in file:
                if self.mode.multiproc and self.mode.use_pool:
                    self.assertGreaterEqual(count, 0, '%d < 0 for file %s' % (count, file))
                else:
                    self.assertEqual(count, 1)
                    if self.mode.wrap_mode == 'QUEUE':
                        self.assertEqual(store_count, 0)
                    else:
                        self.assertEqual(store_count, 1)
            else:
                self.assertTrue(False, 'There is a file in the log folder that does not '
                                       'belong there: %s' % str(file))
        self.assertEqual(total_store_count, len(traj))
        self.assertEqual(total_error_count, 0)
        self.assertEqual(total_info_count, len(traj))
        self.assertLess(total_retry_count, len(traj))

    def test_throw_error_when_specifying_config_and_old_method(self):
        with self.assertRaises(ValueError):
            self.make_env(log_config=None, logger_names='test')

    def test_disable(self):
        # if not self.multiproc:
        #     return
        self.make_env(log_config=None)
        traj = self.env.v_traj

        log_path = get_log_path(traj)

        self.assertFalse(os.path.isdir(log_path))
        self.assertTrue(self.env._logging_manager._sp_config is None)
        self.assertTrue(self.env._logging_manager._mp_config is None)
        self.assertTrue(self.env._logging_manager.log_config is None)

        self.add_params(self.traj)
        self.explore(self.traj)

        self.env.f_run(log_all_levels)

        self.assertFalse(os.path.isdir(log_path))
        self.assertTrue(self.env._logging_manager._sp_config is None)
        self.assertTrue(self.env._logging_manager._mp_config is None)
        self.assertTrue(self.env._logging_manager.log_config is None)

        self.env.f_disable_logging()
        # pypet_path = os.path.abspath(os.path.dirname(pypet.pypetlogging))
        # init_path = os.path.join(pypet_path, 'logging')
        # log_config = os.path.join(init_path, 'default.ini')


    @unittest.skipIf(platform.system() == 'Windows', 'Log file creation might fail under windows.')
    def test_logfile_creation_with_errors(self):
         # if not self.multiproc:
        #     return
        self.make_env()
        self.add_params(self.traj)
        self.explore(self.traj)

        self.env.f_run(log_all_levels)
        if self.mode.multiproc:
            logging.getLogger('pypet.test').error('ttt')
        self.env.f_disable_logging()

        traj = self.env.v_traj
        log_path = get_log_path(traj)

        if self.mode.multiproc:
            if self.mode.use_pool:
                length = self.mode.ncores * 2
            else:
                length = 2 * len(traj)
            if self.mode.wrap_mode == 'LOCK':
                length += 2
            elif self.mode.wrap_mode == 'QUEUE':
                length += 4
            else:
                raise RuntimeError('You shall not pass!')
        else:
            length = 2

        file_list = os.listdir(log_path)

        self.assertEqual(len(file_list), length)  # as many files as expected:
        # per-run/process logs plus the main LOG.txt and ERROR.txt

        total_error_count = 0
        total_store_count = 0
        total_info_count = 0
        total_retry_count = 0
        for file in file_list:
            with open(os.path.join(log_path, file), mode='r') as fh:
                text = fh.read()
            count = text.count('INFO_Test!')
            total_info_count += count
            error_count = text.count('ERROR_Test!')
            total_error_count += error_count
            store_count = text.count('STORE_Test!')
            total_store_count += store_count
            retry_count = text.count('Retry')
            total_retry_count += retry_count
            if 'LOG.txt' == file:
                if self.mode.multiproc:
                    self.assertEqual(count,0)
                    self.assertEqual(store_count, 0)
                else:
                    self.assertEqual(count, len(traj))
                    self.assertEqual(store_count, len(traj))
            elif 'ERROR.txt' == file:
                self.assertEqual(count, 0)
                if self.mode.multiproc:
                    self.assertEqual(error_count,0)
                    self.assertEqual(store_count, 0)
                else:
                    self.assertEqual(error_count, len(traj))
                    self.assertEqual(store_count, len(traj))

            elif 'Queue' in file and 'ERROR' in file:
                self.assertEqual(store_count, len(traj))
            elif 'Queue' in file and 'LOG' in file:
                self.assertEqual(store_count, len(traj))
            elif 'LOG' in file:
                if self.mode.multiproc and self.mode.use_pool:
                    self.assertGreaterEqual(count, 0)
                    self.assertGreaterEqual(error_count, 0)
                else:
                    self.assertEqual(count, 1)
                    self.assertEqual(error_count, 1)
                    if self.mode.wrap_mode == 'QUEUE':
                        self.assertEqual(store_count, 0)
                    else:
                        self.assertEqual(store_count, 1)
            elif 'ERROR' in file:
                if self.mode.multiproc and self.mode.use_pool:
                    self.assertEqual(count, 0)
                    self.assertGreaterEqual(error_count, 1)
                else:
                    self.assertEqual(count, 0)
                    self.assertEqual(error_count, 1)
                    if self.mode.wrap_mode == 'QUEUE':
                        self.assertEqual(store_count, 0)
                    else:
                        self.assertEqual(store_count, 1)
            else:
                self.assertTrue(False, 'There is a file in the log folder that does not '
                                       'belong there: %s' % str(file))
        self.assertEqual(total_store_count, 2*len(traj))
        self.assertEqual(total_error_count, 2*len(traj))
        self.assertEqual(total_info_count, len(traj))
        self.assertLess(total_retry_count, len(traj))

    def test_file_renaming(self):
        traj_name = 'test'
        traj = Trajectory('test', add_time=False)
        traj.f_add_parameter('x', 42)
        traj.f_explore({'x': [1,2,3]})
        rename_string = '$traj_$set_$run'
        solution_1 = 'test_run_set_ALL_run_ALL'
        solution_2 = 'test_run_set_00000_run_00000000'
        renaming_1 = rename_log_file(rename_string, traj)
        self.assertEqual(renaming_1, solution_1)
        traj.v_idx = 0
        renaming_2 = rename_log_file(rename_string, traj)
        self.assertEqual(renaming_2, solution_2)


    @unittest.skipIf(platform.system() == 'Windows', 'Log file creation might fail under windows.')
    def test_logfile_old_way_creation_with_errors(self):
         # if not self.multiproc:
        #     return
        del self.mode.__dict__['log_config']
        self.make_env(logger_names=('', 'pypet'), log_level=logging.ERROR,
                      log_folder=make_temp_dir('logs'))
        self.add_params(self.traj)
        self.explore(self.traj)

        self.env.f_run(log_all_levels)
        if self.mode.multiproc:
            logging.getLogger('pypet.test').error('ttt')
        self.env.f_disable_logging()

        traj = self.env.v_traj
        log_path = get_log_path(traj)

        if self.mode.multiproc:
            if self.mode.use_pool:
                length = self.mode.ncores * 2
            else:
                length = 2 * len(traj)
            if self.mode.wrap_mode == 'LOCK':
                length += 2
            elif self.mode.wrap_mode == 'QUEUE':
                length += 4
            else:
                raise RuntimeError('You shall not pass!')
        else:
            length = 2

        file_list = os.listdir(log_path)

        self.assertEqual(len(file_list), length)  # as many files as expected:
        # per-run/process logs plus the main LOG.txt and ERROR.txt

        total_error_count = 0
        total_store_count = 0
        total_info_count = 0
        total_retry_count = 0

        for file in file_list:
            with open(os.path.join(log_path, file), mode='r') as fh:
                text = fh.read()
            count = text.count('INFO_Test!')
            total_info_count += count
            error_count = text.count('ERROR_Test!')
            total_error_count += error_count
            store_count = text.count('STORE_Test!')
            total_store_count += store_count
            retry_count = text.count('Retry')
            total_retry_count += retry_count
            if 'LOG.txt' == file:
                if self.mode.multiproc:
                    self.assertEqual(count,0)
                    self.assertEqual(store_count, 0)
                else:
                    self.assertEqual(count, 0)
                    self.assertGreaterEqual(store_count, len(traj))
            elif 'ERROR.txt' == file:
                self.assertEqual(count, 0)
                if self.mode.multiproc:
                    self.assertEqual(error_count,0)
                    self.assertEqual(store_count, 0)
                else:
                    self.assertGreaterEqual(error_count, len(traj))
                    self.assertGreaterEqual(store_count, len(traj))

            elif 'Queue' in file and 'ERROR' in file:
                self.assertGreaterEqual(store_count, len(traj))
            elif 'Queue' in file and 'LOG' in file:
                self.assertGreaterEqual(store_count, len(traj))
            elif 'LOG' in file:
                if self.mode.multiproc and self.mode.use_pool:
                    self.assertEqual(count, 0)
                    self.assertGreaterEqual(error_count, 0)
                else:
                    self.assertEqual(count, 0)
                    self.assertGreaterEqual(error_count, 1)
                    if self.mode.wrap_mode == 'QUEUE':
                        self.assertEqual(store_count, 0)
                    else:
                        self.assertGreaterEqual(store_count, 1)
            elif 'ERROR' in file:
                if self.mode.multiproc and self.mode.use_pool:
                    self.assertEqual(count, 0)
                    self.assertGreaterEqual(error_count, 0)
                else:
                    self.assertEqual(count, 0)
                    self.assertGreaterEqual(error_count, 1)
                    if self.mode.wrap_mode == 'QUEUE':
                        self.assertEqual(store_count, 0)
                    else:
                        self.assertGreaterEqual(store_count, 1)
            else:
                self.assertTrue(False, 'There is a file in the log folder that does not '
                                       'belong there: %s' % str(file))
        self.assertGreaterEqual(total_store_count, 2*len(traj))
        self.assertGreaterEqual(total_error_count, 2*len(traj))
        self.assertEqual(total_info_count, 0)
        self.assertLess(total_retry_count, len(traj))

    @unittest.skipIf(platform.system() == 'Windows', 'Log file creation might fail under windows.')
    def test_logfile_old_way_disabling_mp_log(self):
         # if not self.multiproc:
        #     return
        del self.mode.__dict__['log_config']
        self.make_env(logger_names=('', 'pypet'), log_level=logging.ERROR,
                      log_folder=make_temp_dir('logs'), log_multiproc=False)
        self.add_params(self.traj)
        self.explore(self.traj)

        self.env.f_run(log_all_levels)
        if self.mode.multiproc:
            logging.getLogger('pypet.test').error('ttt')
        self.env.f_disable_logging()

        traj = self.env.v_traj
        log_path = get_log_path(traj)

        # With log_multiproc=False only the main process writes log files,
        # so we expect just the two main files regardless of multiprocessing.
        length = 2

        file_list = os.listdir(log_path)

        self.assertEqual(len(file_list), length)  # only the two main log
        # files should exist


    @unittest.skipIf(platform.system() == 'Windows', 'Log file creation might fail under windows.')
    def test_logging_stdout(self):
        filename = 'teststdoutlog.hdf5'
        filename = make_temp_dir(filename)
        folder = make_temp_dir('logs')
        env = Environment(trajectory=make_trajectory_name(self),
                          filename=filename, log_config=get_log_config(),
                          # log_levels=logging.CRITICAL, # needed for the test
                          log_stdout=('STDOUT', 50), #log_folder=folder
                          )

        env.f_run(log_error)
        traj = env.v_traj
        path = get_log_path(traj)

        mainstr = 'sTdOuTLoGGinG'
        print(mainstr)
        env.f_disable_logging()

        mainfilename = os.path.join(path, 'LOG.txt')
        with open(mainfilename, mode='r') as mainf:
            full_text = mainf.read()

        self.assertTrue(mainstr in full_text)
        self.assertTrue('4444444' not in full_text)
        self.assertTrue('DEBUG' not in full_text)


    def test_logging_show_progress(self):
        self.make_env(log_config=get_log_config(),
                      # log_folder=make_temp_dir('logs'),
                      # log_levels=logging.ERROR,
                      report_progress=(3, 'progress', 40))
        self.add_params(self.traj)
        self.explore(self.traj)

        self.env.f_run(log_all_levels)
        self.env.f_disable_logging()

        traj = self.env.v_traj

        path = get_log_path(traj)
        mainfilename = os.path.join(path, 'LOG.txt')
        with open(mainfilename, mode='r') as mainf:
            full_text = mainf.read()

        progress = 'PROGRESS: Finished'
        self.assertTrue(progress in full_text)
        bar = '[=='
        self.assertTrue(bar in full_text)


    def test_logging_show_progress_print(self):
        self.make_env(log_config=get_log_config(), log_stdout=('prostdout', 50),
                      report_progress=(3, 'print'))
        self.add_params(self.traj)
        self.explore(self.traj)

        self.env.f_run(log_all_levels)
        self.env.f_disable_logging()

        path = get_log_path(self.env.v_traj)
        mainfilename = os.path.join(path, 'LOG.txt')
        with open(mainfilename, mode='r') as mainf:
            full_text = mainf.read()

        progress = 'prostdout CRITICAL PROGRESS: Finished'
        self.assertTrue(progress in full_text)
        bar = '[=='
        self.assertIn(bar, full_text)
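The helper run functions used throughout these tests (`log_wo_error_levels`, `log_all_levels`, `log_error`) are not included in the snippets; hypothetical sketches, inferred purely from the marker strings the tests count, might look like the following. The 'STORE_Test!' marker is presumably emitted during storage rather than by these helpers.

# Hypothetical sketches; the bodies are assumptions inferred from the
# marker strings counted above, not code from the original test module.
import logging

def log_wo_error_levels(traj):
    logging.getLogger('pypet.test').info('INFO_Test!')

def log_all_levels(traj):
    log_wo_error_levels(traj)
    logging.getLogger('pypet.test').error('ERROR_Test!')

def log_error(traj):
    logging.getLogger('pypet.test').debug('4444444')  # filtered out by level
    logging.getLogger('pypet.test').error('ERROR_Test!')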
Example #51
import os

from pypet import Environment, cartesian_product


# Let's reuse the simple multiplication example
def multiply(traj):
    """Sophisticated simulation of multiplication"""
    z = traj.x * traj.y
    traj.f_add_result('z', z=z, comment='I am the product of two reals!')



# Create 2 environments that handle running
filename = os.path.join('hdf5', 'example_03.hdf5')
env1 = Environment(trajectory='Traj1',
                   filename=filename,
                   file_title='Example_03',
                   add_time=True,  # Add the time of trajectory creation to its name
                   comment='I will be increased!')

env2 = Environment(trajectory='Traj2',
                   filename=filename,
                   file_title='Example_03', log_config=None, # One environment keeping log files
                   # is enough
                   add_time=True,
                   comment = 'I am going to be merged into some other trajectory!')

# Get the trajectories from the environment
traj1 = env1.trajectory
traj2 = env2.trajectory

# Add both parameters
Example #52
def main():

    filename = os.path.join('hdf5', 'example_06.hdf5')
    env = Environment(trajectory='Example_06_Euler_Integration',
                      filename=filename,
                      file_title='Example_06_Euler_Integration',
                      comment = 'Go for Euler!')


    traj = env.v_trajectory

    # 1st a) phase parameter addition
    # Remember we have some control flow in the `add_parameters` function, the default parameter
    # set we choose is the `'diff_lorenz'` one, but we want to deviate from that and use the
    # `'diff_roessler'`.
    # In order to do that we can preset the corresponding name parameter to change the
    # control flow:
    traj.f_preset_parameter('diff_name', 'diff_roessler') # If you erase this line, you
                                                          # will get the Lorenz attractor again
    add_parameters(traj)

    # 1st b) phase preparation
    # Let's check which function we want to use
    if traj.diff_name=='diff_lorenz':
        diff_eq = diff_lorenz
    elif traj.diff_name=='diff_roessler':
        diff_eq = diff_roessler
    else:
        raise ValueError('I don\'t know what %s is.' % traj.diff_name)
    # And add the source code of the function as a derived parameter.
    traj.f_add_derived_parameter(FunctionParameter, 'diff_eq', diff_eq,
                                     comment='Source code of our equation!')

    # We want to explore some initial conditions
    traj.f_explore({'initial_conditions' : [
        np.array([0.01,0.01,0.01]),
        np.array([2.02,0.02,0.02]),
        np.array([42.0,4.2,0.42])
    ]})
    # 3 different conditions are enough for now

    # 2nd phase let's run the experiment
    # We pass 'euler_scheme' as our top-level simulation function and
    # the Roessler function as an additional argument
    env.f_run(euler_scheme, diff_eq)

    # Again no post-processing

    # 4th phase analysis.
    # I would recommend doing the analysis completely independently of the simulation,
    # but for simplicity let's do it here.
    # We won't reload the trajectory this time but simply update the skeleton
    traj.f_load_skeleton()

    #For the fun of it, let's print the source code
    print('\n ---------- The source code of your function ---------- \n %s' % traj.diff_eq)

    # Let's get the exploration array:
    initial_conditions_exploration_array = traj.f_get('initial_conditions').f_get_range()
    # Now let's plot our simulated equations for the different initial conditions.
    # We will iterate through the run names
    for idx, run_name in enumerate(traj.f_get_run_names()):

        # Get the result of run idx from the trajectory
        euler_result = traj.results.f_get(run_name).euler_evolution
        # Now we manually need to load the result. Actually the results are not so large and we
        # could load them all at once, but for demonstration we pretend they are huge:
        traj.f_load_item(euler_result)
        euler_data = euler_result.data

        # Plot fancy 3d plot
        fig = plt.figure(idx)
        ax = fig.gca(projection='3d')
        x = euler_data[:,0]
        y = euler_data[:,1]
        z = euler_data[:,2]
        ax.plot(x, y, z, label='Initial Conditions: %s' % str(initial_conditions_exploration_array[idx]))
        plt.legend()
        plt.show()

        # Now we free the data again (because we assume it's huuuuuuge):
        del euler_data
        euler_result.f_empty()

    # Finally disable logging and close all log-files
    env.f_disable_logging()
Example #53
# Let's reuse the simple multiplication example
def multiply(traj):
    """Sophisticated simulation of multiplication"""
    z = traj.x * traj.y
    traj.f_add_result('z', z=z, comment='I am the product of two reals!')



# Create an environment that handles running.
# Let's enable multiprocessing with 2 workers.
env = Environment(trajectory='Example_04_MP',
                  filename='experiments/example_04/HDF5/example_04.hdf5',
                  file_title='Example_04_MP',
                  log_folder='experiments/example_04/LOGS/',
                  comment = 'Multiprocessing example!',
                  multiproc=True,
                  ncores=2,
                  use_pool=True,
                  wrap_mode=pypetconstants.WRAP_MODE_LOCK)

# Get the trajectory from the environment
traj = env.v_trajectory

# Add both parameters
traj.f_add_parameter('x', 1.0, comment='I am the first dimension!')
traj.f_add_parameter('y', 1.0, comment='I am the second dimension!')

# Explore the parameters with a cartesian product, but we want to explore a bit more
traj.f_explore(cartesian_product({'x':[float(x) for x in range(15)],
                                  'y':[float(y) for y in range(15)]}))
Example #54
import os  # For path names working under Windows and Linux

from pypet import Environment, cartesian_product
from pypet import pypetconstants


def multiply(traj):
    """Sophisticated simulation of multiplication"""
    z=traj.x * traj.y
    traj.f_add_result('z', z, comment='I am the product of two reals!')


# Create an environment that handles running
filename = os.path.join('hdf5', 'example_10.hdf5')
env = Environment(trajectory='Example10', filename=filename,
                  file_title='Example10',
                  comment='Another example!')

# Get the trajectory from the environment
traj = env.v_trajectory

# Add both parameters
traj.f_add_parameter('x', 1, comment='I am the first dimension!')
traj.f_add_parameter('y', 1, comment='I am the second dimension!')

# Explore the parameters with a cartesian product:
x_length = 12
y_length = 12
traj.f_explore(cartesian_product({'x': range(x_length), 'y': range(y_length)}))

# Run the simulation
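The snippet breaks off here; judging from the other multiplication examples in this collection, it presumably continues along these lines:

# Presumed continuation (not part of the original snippet):
env.f_run(multiply)
env.f_disable_logging()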
Example #55
# (The original snippet begins mid-docstring; the function head below is
# reconstructed from the matching multiplication examples in this collection.)
def multiply(traj, result_list):
    """Sophisticated simulation of multiplication.

    :param traj:

        Trajectory containing
        the parameters in a particular combination,
        it also serves as a container for results.

    """
    z = traj.x * traj.y
    result_list[traj.v_idx] = z



# Create an environment that handles running
env = Environment(trajectory='Multiplication',
                  filename='experiments/example_01/HDF5/example_01.hdf5',
                  file_title='Example_01_First_Steps',
                  log_folder='experiments/example_01/LOGS/',
                  comment='The first example!',
                  continuable=False, # We have shared data in terms of a multiprocessing list,
                  # so we CANNOT use the continue feature.
                  multiproc=True,
                  ncores=2)

# The environment has created a trajectory container for us
traj = env.v_trajectory

# Add both parameters
traj.f_add_parameter('x', 1, comment='I am the first dimension!')
traj.f_add_parameter('y', 1, comment='I am the second dimension!')

# Explore the parameters with a cartesian product
traj.f_explore(cartesian_product({'x':[1,2,3,4], 'y':[6,7,8]}))
Example #56
def main():

    filename = os.path.join('hdf5', 'example_05.hdf5')
    env = Environment(trajectory='Example_05_Euler_Integration',
                      filename=filename,
                      file_title='Example_05_Euler_Integration',
                      comment='Go for Euler!')


    traj = env.v_trajectory
    trajectory_name = traj.v_name

    # 1st a) phase parameter addition
    add_parameters(traj)

    # 1st b) phase preparation
    # We will add the differential equation (well, its source code only) as a derived parameter
    traj.f_add_derived_parameter(FunctionParameter,'diff_eq', diff_lorenz,
                                 comment='Source code of our equation!')

    # We want to explore some initial conditions
    traj.f_explore({'initial_conditions' : [
        np.array([0.01,0.01,0.01]),
        np.array([2.02,0.02,0.02]),
        np.array([42.0,4.2,0.42])
    ]})
    # 3 different conditions are enough for an illustrative example

    # 2nd phase let's run the experiment
    # We pass `euler_scheme` as our top-level simulation function and
    # the Lorenz equation 'diff_lorenz' as an additional argument
    env.f_run(euler_scheme, diff_lorenz)

    # We don't have a 3rd phase of post-processing here

    # 4th phase analysis.
    # I would recommend doing post-processing completely independently of the simulation,
    # but for simplicity let's do it here.

    # Let's assume that we start all over again and load the entire trajectory new.
    # Yet, there is an error in this approach. Do you spot it?
    del traj
    traj = Trajectory(filename=filename)

    # We will only fully load parameters and derived parameters.
    # Results will be loaded manually later on.
    try:
        # However, this will fail because our trajectory does not know how to
        # build the FunctionParameter. You have seen this coming, right?
        traj.f_load(name=trajectory_name, load_parameters=2, load_derived_parameters=2,
                    load_results=1)
    except ImportError as e:

        print('That didn\'t work, I am sorry: %s' % str(e))

        # Ok, let's try again but this time with adding our parameter to the imports
        traj = Trajectory(filename=filename,
                           dynamically_imported_classes=FunctionParameter)

        # Now it works:
        traj.f_load(name=trajectory_name, load_parameters=2, load_derived_parameters=2,
                    load_results=1)


    #For the fun of it, let's print the source code
    print('\n ---------- The source code of your function ---------- \n %s' % traj.diff_eq)

    # Let's get the exploration array:
    initial_conditions_exploration_array = traj.f_get('initial_conditions').f_get_range()
    # Now let's plot our simulated equations for the different initial conditions:
    # We will iterate through the run names
    for idx, run_name in enumerate(traj.f_get_run_names()):

        #Get the result of run idx from the trajectory
        euler_result = traj.results.f_get(run_name).euler_evolution
        # Now we manually need to load the result. Actually the results are not so large and we
        # could load them all at once. But for demonstration we pretend they are huge:
        traj.f_load_item(euler_result)
        euler_data = euler_result.data

        #Plot fancy 3d plot
        fig = plt.figure(idx)
        ax = fig.gca(projection='3d')
        x = euler_data[:,0]
        y = euler_data[:,1]
        z = euler_data[:,2]
        ax.plot(x, y, z, label='Initial Conditions: %s' % str(initial_conditions_exploration_array[idx]))
        plt.legend()
        plt.show()

        # Now we free the data again (because we assume it's huuuuuuge):
        del euler_data
        euler_result.f_empty()

    # You have to click through the images to stop the example_05 module!

    # Finally disable logging and close all log-files
    env.f_disable_logging()
Example #57
# (The original snippet begins mid-docstring; the function head below is
# reconstructed from the matching multiplication examples in this collection.)
def multiply(traj):
    """Sophisticated simulation of multiplication.

    :param traj:

        Trajectory containing
        the parameters in a particular combination,
        it also serves as a container for results.

    """
    z = traj.mylink1 * traj.mylink2  # We can now also use the different names
    # created by the links
    traj.res = Result('runs.$.z', z, 'Result of our simulation!')


# Create an environment that handles running
filename = os.path.join('hdf5','example_14.hdf5')
env = Environment(trajectory='Multiplication',
                  filename=filename,
                  file_title='Example_14_Links',
                  overwrite_file=True,
                  comment='How to use links')

# The environment has created a trajectory container for us
traj = env.trajectory

# Add both parameters
traj.v_lazy_adding = True
traj.par.x = 1, 'I am the first dimension!'
traj.par.y = 1, 'I am the second dimension!'

# Explore just two points
traj.f_explore({'x': [3, 4]})

# So far everything was as in the first example. However, now we add links:
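The snippet is cut off before the links are actually created; a minimal sketch of the announced link creation, assuming pypet's `f_add_link` API and that both links should point at the two parameters used by `multiply` above:

# Sketch of the announced link creation (assumed, not part of the snippet):
traj.f_add_link('mylink1', traj.f_get('x'))
traj.f_add_link('mylink2', traj.f_get('y'))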
Example #58
    traj.f_add_parameter('location', 'mars', comment='location determining g-force')

def run_simulation(traj):
    sim = FanSimulator(traj.N, vent_radius=traj.vent_radius, vmax=traj.vmax,
                       dt=traj.dt, location=traj.location)
    sim.init_positions()
    sim.init_velocities()
    sim.incline_and_vary_jet(incline=traj.incline, jitter=traj.jitter)
    sim.loop()
    sim.plot(save=True, equal=False)
    traj.f_add_result('positions', sim.positions, comment='End positions of particles')
    traj.f_add_result('t', sim.t, comment='duration of flight')

env = Environment(trajectory='FanSimulation', filename='./pypet/',
                  large_overview_tables=True,
                  add_time=True,
                  multiproc=False,
                  ncores=6,
                  log_config='DEFAULT')

traj = env.v_trajectory

add_parameters(traj, dt=1e-2)

explore_dict = {'vent_radius':[0.1, 0.5, 1.0],
                'vmax':[10, 50, 100],
                'incline':[0.1, 1.0, 5.0]}

to_explore = cartesian_product(explore_dict)
traj.f_explore(to_explore)

env.f_run(run_simulation)