Example #1
0
    def test_without_pre_run(self):
        """Build a network manager without a pre-run stage and check the data."""
        network_runner = NetworkRunner()
        component_list = [TheNeurons(), TheConnection(), TheSimulation()]
        analyser_list = [TheMonitors()]
        manager = NetworkManager(network_runner=network_runner,
                                 component_list=component_list,
                                 analyser_list=analyser_list)

        # Unique trajectory name derived from the current timestamp.
        traj_name = 'Test_' + repr(time.time()).replace('.', '_')
        storage_path = make_temp_dir(os.path.join('experiments', 'tests',
                                                  'briantests', 'HDF5',
                                                  'briantest.hdf5'))
        env = Environment(trajectory=traj_name,
                          filename=storage_path,
                          file_title='test',
                          log_config=get_log_config(),
                          dynamic_imports=['pypet.brian2.parameter.BrianParameter',
                                           BrianMonitorResult, BrianResult],
                          multiproc=True,
                          ncores=2)
        traj = env.v_traj
        manager.add_parameters(traj)
        traj.f_explore({'v01': [11 * mV, 13 * mV]})
        env.f_run(manager.run_network)
        self.check_data(traj)
Example #2
0
    def test_logging_stdout(self):
        """Verify that printed stdout text is captured in the main LOG.txt."""
        filename = make_temp_dir('teststdoutlog.hdf5')
        folder = make_temp_dir('logs')  # created for its side effect only
        env = Environment(trajectory=make_trajectory_name(self),
                          filename=filename,
                          log_config=get_log_config(),
                          # Capture stdout at level 50 under the 'STDOUT' logger.
                          log_stdout=('STDOUT', 50))

        env.f_run(log_error)
        path = get_log_path(env.v_traj)

        marker = 'sTdOuTLoGGinG'
        print(marker)
        env.f_disable_logging()

        main_log = os.path.join(path, 'LOG.txt')
        with open(main_log, mode='r') as handle:
            contents = handle.read()

        # The printed marker must be there; filtered messages must not.
        self.assertTrue(marker in contents)
        self.assertTrue('4444444' not in contents)
        self.assertTrue('DEBUG' not in contents)
Example #3
0
def main():
    """Set up the firing-rate experiment, run all points, and close logging."""
    storage_file = os.path.join('hdf5', 'FiringRate.hdf5')
    env = Environment(trajectory='FiringRate',
                      comment='Experiment to measure the firing rate '
                            'of a leaky integrate and fire neuron. '
                            'Exploring different input currents, '
                            'as well as refractory periods',
                      add_time=False,  # keep the trajectory name stable
                      log_stdout=True,
                      log_config='DEFAULT',
                      multiproc=True,
                      ncores=2,  # two worker processes
                      wrap_mode='QUEUE',
                      filename=storage_file,
                      overwrite_file=True)

    traj = env.v_trajectory

    # Define parameters and the exploration scheme.
    add_parameters(traj)
    add_exploration(traj)

    # Register the post-processing hook.
    env.f_add_postprocessing(neuron_postproc)

    # Execute one run per explored parameter combination.
    env.f_run(run_neuron)

    # Close all log handlers.
    env.f_disable_logging()
Example #4
0
    def test_without_pre_run(self):
        """Run the Brian network without a pre-run phase and validate data."""
        nm = NetworkManager(network_runner=NetworkRunner(),
                            component_list=[TheNeurons(), TheConnection(),
                                            TheSimulation()],
                            analyser_list=[TheMonitors()])

        storage = make_temp_dir(
            os.path.join('experiments', 'tests', 'briantests', 'HDF5',
                         'briantest.hdf5'))
        env = Environment(
            trajectory='Test_' + repr(time.time()).replace('.', '_'),
            filename=storage,
            file_title='test',
            log_config=get_log_config(),
            dynamic_imports=['pypet.brian2.parameter.BrianParameter',
                             BrianMonitorResult, BrianResult],
            multiproc=True,
            ncores=2)

        traj = env.v_traj
        nm.add_parameters(traj)
        traj.f_explore({'v01': [11 * mV, 13 * mV]})
        env.f_run(nm.run_network)
        self.check_data(traj)
Example #5
0
    def test_logging_stdout(self):
        """Printed output must land in LOG.txt; filtered levels must not."""
        hdf5_file = make_temp_dir('teststdoutlog.hdf5')
        folder = make_temp_dir('logs')  # side effect: ensure the dir exists
        env = Environment(trajectory=make_trajectory_name(self),
                          filename=hdf5_file,
                          log_config=get_log_config(),
                          log_stdout=('STDOUT', 50))

        env.f_run(log_error)
        traj = env.v_traj
        log_dir = get_log_path(traj)

        sentinel = 'sTdOuTLoGGinG'
        print(sentinel)
        env.f_disable_logging()

        with open(os.path.join(log_dir, 'LOG.txt'), mode='r') as fh:
            full_text = fh.read()

        self.assertTrue(sentinel in full_text)
        self.assertTrue('4444444' not in full_text)
        self.assertTrue('DEBUG' not in full_text)
Example #6
0
File: main.py  Project: lsolanka/pypet
def main():
    """Configure and run the firing-rate experiment (legacy logging API)."""
    env = Environment(trajectory='FiringRate',
                      comment='Experiment to measure the firing rate '
                            'of a leaky integrate and fire neuron. '
                            'Exploring different input currents, '
                            'as well as refractory periods',
                      add_time=False,  # keep the trajectory name stable
                      log_folder='./logs/',
                      log_level=logging.INFO,
                      log_stdout=True,
                      multiproc=True,
                      ncores=2,  # two worker processes
                      wrap_mode='QUEUE',
                      # Only a folder is given, so the file name defaults
                      # to the trajectory name.
                      filename='./hdf5/')

    traj = env.v_trajectory

    # Define parameters and the exploration scheme.
    add_parameters(traj)
    add_exploration(traj)

    # Register the post-processing hook.
    env.f_add_postprocessing(neuron_postproc)

    # Execute one run per explored parameter combination.
    env.f_run(run_neuron)
Example #7
0
def main():
    """ Main *boilerplate* function to start simulation """
    logger = logging.getLogger()

    # Make sure the output folder exists before creating the HDF5 file.
    folder = os.path.join(os.getcwd(), 'experiments', 'ca_patterns_pypet')
    if not os.path.isdir(folder):
        os.makedirs(folder)
    filename = os.path.join(folder, 'all_patterns.hdf5')

    # Multiprocess environment with queue-based storage wrapping.
    env = Environment(trajectory='cellular_automata',
                      multiproc=True,
                      ncores=4,
                      wrap_mode='QUEUE',
                      filename=filename,
                      overwrite_file=True)

    traj = env.v_traj

    # Define the parameters via lazy adding (value, comment) tuples.
    traj.v_lazy_adding = True
    traj.par.ncells = 400, 'Number of cells'
    traj.par.steps = 250, 'Number of timesteps'
    traj.par.rule_number = 30, 'The ca rule'
    traj.par.initial_name = 'random', 'The type of initial state'
    traj.par.seed = 100042, 'RNG Seed'

    # Cartesian product over rules and initial states; swapping in a
    # different dict here changes the whole exploration scheme.
    exploration = {'rule_number': [10, 30, 90, 110, 184],
                   'initial_name': ['single', 'random'], }
    traj.f_explore(cartesian_product(exploration))

    logger.info('Starting Simulation')
    env.f_run(wrap_automaton)

    # Reload everything stored during the runs.
    traj.f_load(load_data=2)

    logger.info('Printing data')
    for idx, run_name in enumerate(traj.f_iter_runs()):
        # Plot the pattern of every run.
        filename = os.path.join(folder, make_filename(traj))
        plot_pattern(traj.crun.pattern, traj.rule_number, filename)
        progressbar(idx, len(traj), logger=logger)

    # Close all log handlers.
    env.f_disable_logging()
Example #8
0
def test_run():
    """Profile one pypet run and report the resulting HDF5 file size.

    Creates a single-process environment, fills the trajectory with
    parameters, explores it, executes ``simple_calculations`` for every
    point, and prints the size of the produced file.

    Side effect: rebinds the module-level ``filename`` global.
    """
    global filename

    np.random.seed()
    trajname = 'profiling'
    filename = make_temp_dir(os.path.join('hdf5', 'test%s.hdf5' % trajname))

    env = Environment(trajectory=trajname,
                      filename=filename,
                      file_title=trajname,
                      log_stdout=False,
                      results_per_run=5,
                      derived_parameters_per_run=5,
                      multiproc=False,
                      ncores=1,
                      wrap_mode='LOCK',
                      use_pool=False,
                      overwrite_file=True)

    traj = env.v_trajectory
    traj.v_standard_parameter = Parameter

    # Create and add the parameters.
    param_dict = {}
    create_param_dict(param_dict)
    add_params(traj, param_dict)

    # NOTE(review): removed the no-op self-assignments ``traj = traj`` and
    # ``env = env`` left over from a class-based version of this test.

    traj.f_add_parameter('TEST', 'test_run')
    explore(traj)

    # One test run with a positional and a keyword argument.
    simple_arg = -13
    simple_kwarg = 13.0
    env.f_run(simple_calculations, simple_arg, simple_kwarg=simple_kwarg)

    size = os.path.getsize(filename)
    size_in_mb = size / 1000000.
    print('Size is %sMB' % str(size_in_mb))
Example #9
0
def test_run():
    """Profile one pypet run and report the resulting HDF5 file size.

    Same flow as the sibling profiling test: build a single-process
    environment, add and explore parameters, run ``simple_calculations``,
    and print the file size.

    Side effect: rebinds the module-level ``filename`` global.
    """
    global filename

    np.random.seed()
    trajname = 'profiling'
    filename = make_temp_dir(os.path.join('hdf5', 'test%s.hdf5' % trajname))

    env = Environment(trajectory=trajname,
                      filename=filename,
                      file_title=trajname,
                      log_stdout=False,
                      results_per_run=5,
                      derived_parameters_per_run=5,
                      multiproc=False,
                      ncores=1,
                      wrap_mode='LOCK',
                      use_pool=False,
                      overwrite_file=True)

    traj = env.v_trajectory
    traj.v_standard_parameter = Parameter

    # Create and add the parameters.
    param_dict = {}
    create_param_dict(param_dict)
    add_params(traj, param_dict)

    # NOTE(review): removed the no-op self-assignments ``traj = traj`` and
    # ``env = env`` left over from a class-based version of this test.

    traj.f_add_parameter('TEST', 'test_run')
    explore(traj)

    # One test run with a positional and a keyword argument.
    simple_arg = -13
    simple_kwarg = 13.0
    env.f_run(simple_calculations, simple_arg, simple_kwarg=simple_kwarg)

    size = os.path.getsize(filename)
    size_in_mb = size / 1000000.
    print('Size is %sMB' % str(size_in_mb))
 def test_net(self):
     """Explore three bias values of a Brian2 network and fetch the data."""
     traj_name = 'Test_' + repr(time.time()).replace('.', '_')
     storage = make_temp_dir(os.path.join('experiments', 'tests',
                                          'briantests', 'HDF5',
                                          'briantest.hdf5'))
     env = Environment(trajectory=traj_name,
                       filename=storage,
                       file_title='test',
                       log_config=get_log_config(),
                       dynamic_imports=['pypet.brian2.parameter.Brian2Parameter',
                                        Brian2MonitorResult],
                       multiproc=False)
     traj = env.v_traj
     traj.f_add_parameter(Brian2Parameter, 'v0', 0.0 * mV,
                          comment='Input bias')
     traj.f_explore({'v0': [11 * mV, 13 * mV, 15 * mV]})
     env.f_run(run_network)
     self.get_data(traj)
Example #11
0
 def test_net(self):
     """Run the Brian2 test network for several input biases."""
     env_kwargs = dict(
         trajectory='Test_' + repr(time.time()).replace('.', '_'),
         filename=make_temp_dir(
             os.path.join('experiments', 'tests', 'briantests', 'HDF5',
                          'briantest.hdf5')),
         file_title='test',
         log_config=get_log_config(),
         dynamic_imports=['pypet.brian2.parameter.Brian2Parameter',
                          Brian2MonitorResult],
         multiproc=False)
     env = Environment(**env_kwargs)
     traj = env.v_traj
     traj.f_add_parameter(Brian2Parameter, 'v0', 0.0 * mV,
                          comment='Input bias')
     traj.f_explore({'v0': [11 * mV, 13 * mV, 15 * mV]})
     env.f_run(run_network)
     self.get_data(traj)
def main():
    """Main function to protect the *entry point* of the program.

    If you want to use multiprocessing under Windows you need to wrap your
    main code creating an environment into a function. Otherwise
    the newly started child processes will re-execute the code and throw
    errors (also see https://docs.python.org/2/library/multiprocessing.html#windows).

    """
    # Multiprocess environment: pool of 4 workers, queue-based storage.
    storage_file = os.path.join('hdf5', 'example_04.hdf5')
    env = Environment(trajectory='Example_04_MP',
                      filename=storage_file,
                      file_title='Example_04_MP',
                      log_stdout=True,
                      comment='Multiprocessing example!',
                      multiproc=True,
                      ncores=4,
                      # Runs are cheap, so a pool avoids process startup cost.
                      use_pool=True,
                      wrap_mode=pypetconstants.WRAP_MODE_QUEUE)

    traj = env.v_trajectory

    # Two exploration dimensions.
    traj.f_add_parameter('x', 1.0, comment='I am the first dimension!')
    traj.f_add_parameter('y', 1.0, comment='I am the second dimension!')

    # 20 x 12 = 240 parameter combinations.
    x_values = [float(x) for x in range(20)]
    y_values = [float(y) for y in range(12)]
    traj.f_explore(cartesian_product({'x': x_values, 'y': y_values}))

    # Execute all runs.
    env.f_run(multiply)

    # Close all log handlers.
    env.f_disable_logging()
def main():
    """Run the multiplication example sharing results via a manager list."""
    storage_file = os.path.join('hdf5', 'example_12.hdf5')
    env = Environment(trajectory='Multiplication',
                      filename=storage_file,
                      file_title='Example_12_Sharing_Data',
                      comment='The first example!',
                      # A shared multiprocessing list means the continue
                      # feature CANNOT be used.
                      continuable=False,
                      multiproc=True,
                      ncores=2)

    traj = env.v_trajectory

    # Two exploration dimensions.
    traj.f_add_parameter('x', 1, comment='I am the first dimension!')
    traj.f_add_parameter('y', 1, comment='I am the second dimension!')

    # Full cartesian product of both value lists.
    traj.f_explore(cartesian_product({'x': [1, 2, 3, 4], 'y': [6, 7, 8]}))

    # Shared list from a manager, pre-sized with one slot per run.
    result_list = mp.Manager().list()
    result_list[:] = [0 for _dummy in range(len(traj))]

    # Execute all runs; each one fills its slot in the shared list.
    env.f_run(multiply, result_list)

    # Persist the collected results as a numpy array.
    traj.f_add_result('z', np.array(result_list))

    # Show that it worked.
    print(traj.z)

    # Close all log handlers.
    env.f_disable_logging()
# NOTE(review): this fragment relies on ``env1``, ``env2`` and ``traj1``
# created earlier in the original script (outside this excerpt).
traj2 = env2.v_trajectory

# Add the same two parameters to both trajectories.
traj1.f_add_parameter("x", 1.0, comment="I am the first dimension!")
traj1.f_add_parameter("y", 1.0, comment="I am the second dimension!")
traj2.f_add_parameter("x", 1.0, comment="I am the first dimension!")
traj2.f_add_parameter("y", 1.0, comment="I am the second dimension!")

# Explore the parameters with a cartesian product for the first trajectory:
traj1.f_explore(cartesian_product({"x": [1.0, 2.0, 3.0, 4.0], "y": [6.0, 7.0, 8.0]}))
# Let's explore slightly differently for the second (the grids overlap):
traj2.f_explore(cartesian_product({"x": [3.0, 4.0, 5.0, 6.0], "y": [7.0, 8.0, 9.0]}))


# Run the simulations with all parameter combinations
env1.f_run(multiply)
env2.f_run(multiply)

# Now we merge them together into traj1.
# We want to remove duplicate entries
# like the parameter space point x=3.0, y=7.0.
# Several points have been explored by both trajectories and we need them only once.
# Therefore, we set remove_duplicates=True (Note this takes O(N1*N2)!).
# We also want to backup both trajectories, but we let the system choose the filename.
# Accordingly we choose backup_filename=True instead of providing a filename.
# We want to move the hdf5 nodes from one trajectory to the other.
# Thus we set move_nodes=True.
# Finally, we want to delete the other trajectory afterwards since we already have a backup.
traj1.f_merge(traj2, remove_duplicates=True, backup_filename=True, move_data=True, delete_other_trajectory=True)

# And that's it, now we can take a look at the new trajectory and print all x,y,z triplets.
Example #15
0
class LoggingTest(TrajectoryComparator):

    tags = 'integration', 'environment', 'logging'

    def setUp(self):
        """Reset the global logging state, then apply the test mode."""
        root = logging.getLogger()
        # Walk every known logger (plus the root) and fully detach it.
        every_logger = itools.chain(root.manager.loggerDict.values(), [root])
        for lg in every_logger:
            # Placeholder entries in loggerDict have no handlers attribute.
            if hasattr(lg, 'handlers'):
                for hd in lg.handlers:
                    if hasattr(hd, 'flush'):
                        hd.flush()
                    if hasattr(hd, 'close'):
                        hd.close()
                lg.handlers = []
            if hasattr(lg, 'setLevel'):
                lg.setLevel(logging.NOTSET)
        self.set_mode()

    def tearDown(self):
        """Delegate teardown to the parent comparator class."""
        super(LoggingTest, self).tearDown()

    def set_mode(self):
        """Populate ``self.mode`` with the default single-process settings."""
        mode = Dummy()
        mode.wrap_mode = 'LOCK'
        mode.multiproc = False
        mode.ncores = 1
        mode.use_pool = True
        mode.pandas_format = 'fixed'
        mode.pandas_append = False
        mode.complib = 'blosc'
        mode.complevel = 9
        mode.shuffle = True
        mode.fletcher32 = False
        mode.encoding = 'utf8'
        mode.log_stdout = False
        mode.log_config = get_log_config()
        self.mode = mode


    def make_env(self, **kwargs):
        """Create an Environment from ``self.mode`` overridden by kwargs."""
        self.mode.__dict__.update(kwargs)
        self.filename = make_temp_dir('log_testing.hdf5')
        self.traj_name = make_trajectory_name(self)
        self.env = Environment(trajectory=self.traj_name,
                               filename=self.filename,
                               **self.mode.__dict__)
        self.traj = self.env.v_traj


    def add_params(self, traj):
        """Add the two parameters (``p1`` and ``g1.p2``) the tests explore."""
        # Lazy adding lets parameters be set as (value, comment) tuples.
        traj.v_lazy_adding = True
        traj.par.p1 = 42, 'Hey'
        traj.f_apar('g1.p2', 145, comment='Test')


    def explore(self, traj):
        """Explore ``p1`` over seven values, giving seven runs."""
        traj.f_explore({'p1': range(7)})

    @unittest.skipIf(platform.system() == 'Windows', 'Log file creation might fail under windows.')
    def test_logfile_creation_normal(self):
        """Run without errors and check the number and contents of log files.

        Expects one LOG file per worker (or per run without a pool) plus
        the main LOG/ERROR pair and, for QUEUE wrapping, the queue files.
        """
        self.make_env()
        self.add_params(self.traj)
        self.explore(self.traj)

        self.env.f_run(log_wo_error_levels)
        self.env.f_disable_logging()

        traj = self.env.v_traj

        log_path = get_log_path(traj)

        # Compute the expected number of log files for the current mode.
        if self.mode.multiproc:
            if self.mode.use_pool:
                length = self.mode.ncores * 2
            else:
                length = 2 * len(traj)
            if self.mode.wrap_mode == 'LOCK':
                length += 2
            elif self.mode.wrap_mode == 'QUEUE':
                length += 4
            else:
                raise RuntimeError('You shall not pass!')
        else:
            length = 2


        file_list = [file for file in os.listdir(log_path)]

        self.assertEqual(len(file_list), length) # assert that there are as many
        # files as runs plus main.txt and errors and warnings
        total_error_count = 0
        total_store_count = 0
        total_info_count = 0
        total_retry_count = 0
        # Tally the marker strings across all files and check each file type.
        for file in file_list:
            with open(os.path.join(log_path, file), mode='r') as fh:
                text = fh.read()
            count = text.count('INFO_Test!')
            total_info_count += count
            error_count = text.count('ERROR_Test!')
            total_error_count += error_count
            store_count = text.count('STORE_Test!')
            total_store_count += store_count
            retry_count = text.count('Retry')
            total_retry_count += retry_count
            if 'LOG.txt' == file:
                # Main log: only filled when running single-process.
                if self.mode.multiproc:
                    self.assertEqual(count,0)
                    self.assertEqual(store_count, 0)
                else:
                    self.assertEqual(count, len(traj))
                    self.assertEqual(store_count, len(traj))
            elif 'ERROR' in file:
                # Error files must be empty unless a storage retry occurred.
                full_path = os.path.join(log_path, file)
                filesize = os.path.getsize(full_path)
                with open(full_path) as fh:
                    text = fh.read()
                if 'Retry' not in text:
                    self.assertEqual(filesize, 0)
            elif 'Queue' in file:
                # The queue process performs all stores under QUEUE wrapping.
                self.assertEqual(store_count, len(traj))
            elif 'LOG' in file:
                # Per-run / per-worker log files.
                if self.mode.multiproc and self.mode.use_pool:
                    self.assertGreaterEqual(count, 0, '%d < 1 for file %s' % (count, file))
                else:
                    self.assertEqual(count, 1)
                    if self.mode.wrap_mode == 'QUEUE':
                        self.assertEqual(store_count, 0)
                    else:
                        self.assertEqual(store_count, 1)
            else:
                self.assertTrue(False, 'There`s a file in the log folder that does not '
                                       'belong there: %s' % str(file))
        # Grand totals must match the number of runs exactly.
        self.assertEqual(total_store_count, len(traj))
        self.assertEqual(total_error_count, 0)
        self.assertEqual(total_info_count, len(traj))
        self.assertLess(total_retry_count, len(traj))

    def test_throw_error_when_specifying_config_and_old_method(self):
        """Mixing ``log_config`` with legacy logging arguments must raise."""
        with self.assertRaises(ValueError):
            self.make_env(log_config=None, logger_names='test')

    def test_disable(self):
        """With ``log_config=None`` no log folder or config may ever appear."""
        self.make_env(log_config=None)
        traj = self.env.v_traj

        log_path = get_log_path(traj)

        # Nothing should exist before running...
        self.assertFalse(os.path.isdir(log_path))
        self.assertTrue(self.env._logging_manager._sp_config is None)
        self.assertTrue(self.env._logging_manager._mp_config is None)
        self.assertTrue(self.env._logging_manager.log_config is None)

        self.add_params(self.traj)
        self.explore(self.traj)

        self.env.f_run(log_all_levels)

        # ...and nothing should exist after running either.
        self.assertFalse(os.path.isdir(log_path))
        self.assertTrue(self.env._logging_manager._sp_config is None)
        self.assertTrue(self.env._logging_manager._mp_config is None)
        self.assertTrue(self.env._logging_manager.log_config is None)

        self.env.f_disable_logging()


    @unittest.skipIf(platform.system() == 'Windows', 'Log file creation might fail under windows.')
    def test_logfile_creation_with_errors(self):
        """Run with error-level messages and check all log file contents.

        Same structure as the error-free variant, but every run also emits
        an ERROR message, so the ERROR files must contain matching counts.
        """
        self.make_env()
        self.add_params(self.traj)
        self.explore(self.traj)

        self.env.f_run(log_all_levels)
        if self.mode.multiproc:
            logging.getLogger('pypet.test').error('ttt')
        self.env.f_disable_logging()

        traj = self.env.v_traj
        log_path = get_log_path(traj)

        # Compute the expected number of log files for the current mode.
        if self.mode.multiproc:
            if self.mode.use_pool:
                length = self.mode.ncores * 2
            else:
                length = 2 * len(traj)
            if self.mode.wrap_mode == 'LOCK':
                length += 2
            elif self.mode.wrap_mode == 'QUEUE':
                # Fixed stray unary plus (was ``length += + 4``).
                length += 4
            else:
                raise RuntimeError('You shall not pass!')
        else:
            length = 2

        file_list = [file for file in os.listdir(log_path)]

        self.assertEqual(len(file_list), length) # assert that there are as many
        # files as runs plus main.txt and errors and warnings

        total_error_count = 0
        total_store_count = 0
        total_info_count = 0
        total_retry_count = 0
        # Tally the marker strings across all files and check each file type.
        for file in file_list:
            with open(os.path.join(log_path, file), mode='r') as fh:
                text = fh.read()
            count = text.count('INFO_Test!')
            total_info_count += count
            error_count = text.count('ERROR_Test!')
            total_error_count += error_count
            store_count = text.count('STORE_Test!')
            total_store_count += store_count
            retry_count = text.count('Retry')
            total_retry_count += retry_count
            if 'LOG.txt' == file:
                # Main log: only filled when running single-process.
                if self.mode.multiproc:
                    self.assertEqual(count,0)
                    self.assertEqual(store_count, 0)
                else:
                    self.assertEqual(count, len(traj))
                    self.assertEqual(store_count, len(traj))
            elif 'ERROR.txt' == file:
                # Main error file: error-level entries only.
                self.assertEqual(count, 0)
                if self.mode.multiproc:
                    self.assertEqual(error_count,0)
                    self.assertEqual(store_count, 0)
                else:
                    self.assertEqual(error_count, len(traj))
                    self.assertEqual(store_count, len(traj))

            elif 'Queue' in file and 'ERROR' in file:
                self.assertEqual(store_count, len(traj))
            elif 'Queue' in file and 'LOG' in file:
                self.assertEqual(store_count, len(traj))
            elif 'LOG' in file:
                # Per-run / per-worker log files.
                if self.mode.multiproc and self.mode.use_pool:
                    self.assertGreaterEqual(count, 0)
                    self.assertGreaterEqual(error_count, 0)
                else:
                    self.assertEqual(count, 1)
                    self.assertEqual(error_count, 1)
                    if self.mode.wrap_mode == 'QUEUE':
                        self.assertEqual(store_count, 0)
                    else:
                        self.assertEqual(store_count, 1)
            elif 'ERROR' in file:
                # Per-run / per-worker error files.
                if self.mode.multiproc and self.mode.use_pool:
                    self.assertEqual(count, 0)
                    self.assertGreaterEqual(error_count, 1)
                else:
                    self.assertEqual(count, 0)
                    self.assertEqual(error_count, 1)
                    if self.mode.wrap_mode == 'QUEUE':
                        self.assertEqual(store_count, 0)
                    else:
                        self.assertEqual(store_count, 1)
            else:
                self.assertTrue(False, 'There`s a file in the log folder that does not '
                                       'belong there: %s' % str(file))
        # Errors and stores appear in both the LOG and ERROR files, hence 2x.
        self.assertEqual(total_store_count, 2*len(traj))
        self.assertEqual(total_error_count, 2*len(traj))
        self.assertEqual(total_info_count, len(traj))
        self.assertLess(total_retry_count, len(traj))

    def test_file_renaming(self):
        """Check ``$traj``/``$set``/``$run`` wildcard expansion in log names."""
        traj = Trajectory('test', add_time=False)
        traj.f_add_parameter('x', 42)
        traj.f_explore({'x': [1,2,3]})
        pattern = '$traj_$set_$run'
        # Without an active run index the wildcards expand to ALL variants.
        self.assertEqual(rename_log_file(pattern, traj),
                         'test_run_set_ALL_run_ALL')
        # With an active run index the set and run numbers are filled in.
        traj.v_idx = 0
        self.assertEqual(rename_log_file(pattern, traj),
                         'test_run_set_00000_run_00000000')


    @unittest.skipIf(platform.system() == 'Windows', 'Log file creation might fail under windows.')
    def test_logfile_old_way_creation_with_errors(self):
        """Same error-logging check via the legacy ``logger_names`` API.

        With ``log_level=logging.ERROR`` only error-level entries may
        appear, so every INFO count must be zero.
        """
        # Remove the modern config so the legacy arguments are used instead.
        del self.mode.__dict__['log_config']
        self.make_env(logger_names = ('','pypet'), log_level=logging.ERROR,
                      log_folder=make_temp_dir('logs'))
        self.add_params(self.traj)
        self.explore(self.traj)

        self.env.f_run(log_all_levels)
        if self.mode.multiproc:
            logging.getLogger('pypet.test').error('ttt')
        self.env.f_disable_logging()

        traj = self.env.v_traj
        log_path = get_log_path(traj)

        # Compute the expected number of log files for the current mode.
        if self.mode.multiproc:
            if self.mode.use_pool:
                length = self.mode.ncores * 2
            else:
                length = 2 * len(traj)
            if self.mode.wrap_mode == 'LOCK':
                length += 2
            elif self.mode.wrap_mode == 'QUEUE':
                length += 4
            else:
                raise RuntimeError('You shall not pass!')
        else:
            length = 2

        file_list = [file for file in os.listdir(log_path)]

        self.assertEqual(len(file_list), length) # assert that there are as many
        # files as runs plus main.txt and errors and warnings

        total_error_count = 0
        total_store_count = 0
        total_info_count = 0
        total_retry_count = 0

        # Tally the marker strings across all files and check each file type.
        for file in file_list:
            with open(os.path.join(log_path, file), mode='r') as fh:
                text = fh.read()
            count = text.count('INFO_Test!')
            total_info_count += count
            error_count = text.count('ERROR_Test!')
            total_error_count += error_count
            store_count = text.count('STORE_Test!')
            total_store_count += store_count
            retry_count = text.count('Retry')
            total_retry_count += retry_count
            if 'LOG.txt' == file:
                # INFO entries are filtered everywhere under log_level=ERROR.
                if self.mode.multiproc:
                    self.assertEqual(count,0)
                    self.assertEqual(store_count, 0)
                else:
                    self.assertEqual(count, 0)
                    self.assertGreaterEqual(store_count, len(traj))
            elif 'ERROR.txt' == file:
                self.assertEqual(count, 0)
                if self.mode.multiproc:
                    self.assertEqual(error_count,0)
                    self.assertEqual(store_count, 0)
                else:
                    self.assertGreaterEqual(error_count, len(traj))
                    self.assertGreaterEqual(store_count, len(traj))

            elif 'Queue' in file and 'ERROR' in file:
                self.assertGreaterEqual(store_count, len(traj))
            elif 'Queue' in file and 'LOG' in file:
                self.assertGreaterEqual(store_count, len(traj))
            elif 'LOG' in file:
                # Per-run / per-worker log files.
                if self.mode.multiproc and self.mode.use_pool:
                    self.assertEqual(count, 0)
                    self.assertGreaterEqual(error_count, 0)
                else:
                    self.assertEqual(count, 0)
                    self.assertGreaterEqual(error_count, 1)
                    if self.mode.wrap_mode == 'QUEUE':
                        self.assertEqual(store_count, 0)
                    else:
                        self.assertGreaterEqual(store_count, 1)
            elif 'ERROR' in file:
                # Per-run / per-worker error files.
                if self.mode.multiproc and self.mode.use_pool:
                    self.assertEqual(count, 0)
                    self.assertGreaterEqual(error_count, 0)
                else:
                    self.assertEqual(count, 0)
                    self.assertGreaterEqual(error_count, 1)
                    if self.mode.wrap_mode == 'QUEUE':
                        self.assertEqual(store_count, 0)
                    else:
                        self.assertGreaterEqual(store_count, 1)
            else:
                self.assertTrue(False, 'There`s a file in the log folder that does not '
                                       'belong there: %s' % str(file))
        # Errors and stores land in both LOG and ERROR files, hence at least 2x.
        self.assertGreaterEqual(total_store_count, 2*len(traj))
        self.assertGreaterEqual(total_error_count, 2*len(traj))
        self.assertEqual(total_info_count, 0)
        self.assertLess(total_retry_count, len(traj))

    @unittest.skipIf(platform.system() == 'Windows', 'Log file creation might fail under windows.')
    def test_logfile_old_way_disabling_mp_log(self):
        """Old-style logging settings with ``log_multiproc=False``.

        When multiprocess logging is disabled, only the two main log
        files should be created, independent of the multiprocessing
        mode, i.e. no per-process or queue log files may appear.
        """
        # Remove the new-style config so the old-way keyword arguments take effect.
        del self.mode.__dict__['log_config']
        self.make_env(logger_names=('', 'pypet'), log_level=logging.ERROR,
                      log_folder=make_temp_dir('logs'), log_multiproc=False)
        self.add_params(self.traj)
        self.explore(self.traj)

        self.env.f_run(log_all_levels)
        if self.mode.multiproc:
            logging.getLogger('pypet.test').error('ttt')
        self.env.f_disable_logging()

        traj = self.env.v_traj
        log_path = get_log_path(traj)

        # Only the two main files are expected because multiprocess
        # log files are disabled.
        length = 2

        file_list = [file for file in os.listdir(log_path)]

        self.assertEqual(len(file_list), length)  # only the main log file and
        # the error file should exist

    @unittest.skipIf(platform.system() == 'Windows', 'Log file creation might fail under windows.')
    def test_logging_stdout(self):
        """Text printed to stdout must be redirected into LOG.txt."""
        hdf5_path = make_temp_dir('teststdoutlog.hdf5')
        log_dir = make_temp_dir('logs')
        env = Environment(trajectory=make_trajectory_name(self),
                          filename=hdf5_path, log_config=get_log_config(),
                          log_stdout=('STDOUT', 50))

        env.f_run(log_error)
        log_dir_path = get_log_path(env.v_traj)

        # Anything printed now must be captured by the stdout redirection.
        marker = 'sTdOuTLoGGinG'
        print(marker)
        env.f_disable_logging()

        with open(os.path.join(log_dir_path, 'LOG.txt'), mode='r') as fh:
            contents = fh.read()

        self.assertTrue(marker in contents)
        # Low-level and debug output must not have been captured.
        self.assertTrue('4444444' not in contents)
        self.assertTrue('DEBUG' not in contents)


    def test_logging_show_progress(self):
        """The progress bar must be written to the main log file."""
        self.make_env(log_config=get_log_config(),
                      report_progress=(3, 'progress', 40))
        self.add_params(self.traj)
        self.explore(self.traj)

        self.env.f_run(log_all_levels)
        self.env.f_disable_logging()

        log_file = os.path.join(get_log_path(self.env.v_traj), 'LOG.txt')
        with open(log_file, mode='r') as fh:
            contents = fh.read()

        # Both the textual progress report and the bar must appear.
        self.assertTrue('PROGRESS: Finished' in contents)
        self.assertTrue('[==' in contents)


    def test_logging_show_progress_print(self):
        """Progress reported via ``print`` must reach LOG.txt through stdout logging."""
        self.make_env(log_config=get_log_config(), log_stdout=('prostdout', 50),
                      report_progress=(3, 'print'))
        self.add_params(self.traj)
        self.explore(self.traj)

        self.env.f_run(log_all_levels)
        self.env.f_disable_logging()

        log_file = os.path.join(get_log_path(self.env.v_traj), 'LOG.txt')
        with open(log_file, mode='r') as fh:
            contents = fh.read()

        # The printed progress is logged through the 'prostdout' logger.
        self.assertTrue('prostdout CRITICAL PROGRESS: Finished' in contents)
        self.assertIn('[==', contents)
    z=traj.x*traj.y
    traj.f_add_result('z',z=z, comment='I am the product of two reals!')



# Create an environment that handles running.
# Let's enable multiprocessing with 2 workers.
env = Environment(trajectory='Example_04_MP',
                  filename='experiments/example_04/HDF5/example_04.hdf5',
                  file_title='Example_04_MP',
                  log_folder='experiments/example_04/LOGS/',
                  comment = 'Multiprocessing example!',
                  multiproc=True,
                  ncores=2,
                  use_pool=True,
                  # LOCK wrapping serializes HDF5 access across processes:
                  wrap_mode=pypetconstants.WRAP_MODE_LOCK)

# Get the trajectory from the environment
traj = env.v_trajectory

# Add both parameters
traj.f_add_parameter('x', 1.0, comment='I am the first dimension!')
traj.f_add_parameter('y', 1.0, comment='I am the second dimension!')

# Explore the parameters with a cartesian product, but we want to explore a bit more
# (15 x 15 = 225 runs in total)
traj.f_explore(cartesian_product({'x':[float(x) for x in range(15)],
                                  'y':[float(y) for y in range(15)]}))

# Run the simulation; `multiply` is executed once per parameter combination.
env.f_run(multiply)
def main():
    """Integrate the Roessler attractor with the Euler scheme and plot it.

    Demonstrates presetting a parameter (``diff_name``) to steer control
    flow, storing a function's source code as a derived parameter, and
    manually loading/emptying large results during analysis.
    """
    filename = os.path.join('hdf5', 'example_06.hdf5')
    env = Environment(trajectory='Example_06_Euler_Integration',
                      filename=filename,
                      file_title='Example_06_Euler_Integration',
                      comment = 'Go for Euler!')


    traj = env.v_trajectory

    # 1st a) phase parameter addition
    # Remember we have some control flow in the `add_parameters` function, the default parameter
    # set we choose is the `'diff_lorenz'` one, but we want to deviate from that and use the
    # `'diff_roessler'`.
    # In order to do that we can preset the corresponding name parameter to change the
    # control flow:
    traj.f_preset_parameter('diff_name', 'diff_roessler') # If you erase this line, you will get
                                                          # again the lorenz attractor
    add_parameters(traj)

    # 1st b) phase preparation
    # Let's check which function we want to use
    if traj.diff_name=='diff_lorenz':
        diff_eq = diff_lorenz
    elif traj.diff_name=='diff_roessler':
        diff_eq = diff_roessler
    else:
        raise ValueError('I don\'t know what %s is.' % traj.diff_name)
    # And add the source code of the function as a derived parameter.
    traj.f_add_derived_parameter(FunctionParameter, 'diff_eq', diff_eq,
                                     comment='Source code of our equation!')

    # We want to explore some initial conditions
    traj.f_explore({'initial_conditions' : [
        np.array([0.01,0.01,0.01]),
        np.array([2.02,0.02,0.02]),
        np.array([42.0,4.2,0.42])
    ]})
    # 3 different conditions are enough for now

    # 2nd phase let's run the experiment
    # We pass 'euler_scheme' as our top-level simulation function and
    # the Roessler function as an additional argument
    env.f_run(euler_scheme, diff_eq)

    # Again no post-processing

    # 4th phase analysis.
    # I would recommend to do the analysis completely independent from the simulation
    # but for simplicity let's do it here.
    # We won't reload the trajectory this time but simply update the skeleton
    traj.f_load_skeleton()

    #For the fun of it, let's print the source code
    print('\n ---------- The source code of your function ---------- \n %s' % traj.diff_eq)

    # Let's get the exploration array:
    initial_conditions_exploration_array = traj.f_get('initial_conditions').f_get_range()
    # Now let's plot our simulated equations for the different initial conditions.
    # We will iterate through the run names
    for idx, run_name in enumerate(traj.f_get_run_names()):

        # Get the result of run idx from the trajectory
        euler_result = traj.results.f_get(run_name).euler_evolution
        # Now we manually need to load the result. Actually the results are not so large and we
        # could load them all at once, but for demonstration we do as if they were huge:
        traj.f_load_item(euler_result)
        euler_data = euler_result.data

        # Plot fancy 3d plot
        fig = plt.figure(idx)
        # `Figure.gca(projection='3d')` was deprecated in Matplotlib 3.4 and
        # removed in 3.6; `add_subplot` is the supported, backward-compatible way.
        ax = fig.add_subplot(projection='3d')
        x = euler_data[:,0]
        y = euler_data[:,1]
        z = euler_data[:,2]
        ax.plot(x, y, z, label='Initial Conditions: %s' % str(initial_conditions_exploration_array[idx]))
        plt.legend()
        plt.show()

        # Now we free the data again (because we assume its huuuuuuge):
        del euler_data
        euler_result.f_empty()

    # Finally disable logging and close all log-files
    env.f_disable_logging()
def main():
    """Integrate the Lorenz attractor with the Euler scheme and plot it.

    Also demonstrates reloading a trajectory that contains a custom
    parameter class, which requires passing the class via
    ``dynamically_imported_classes``.
    """
    filename = os.path.join('hdf5', 'example_05.hdf5')
    env = Environment(trajectory='Example_05_Euler_Integration',
                      filename=filename,
                      file_title='Example_05_Euler_Integration',
                      comment='Go for Euler!')


    traj = env.v_trajectory
    trajectory_name = traj.v_name

    # 1st a) phase parameter addition
    add_parameters(traj)

    # 1st b) phase preparation
    # We will add the differential equation (well, its source code only) as a derived parameter
    traj.f_add_derived_parameter(FunctionParameter,'diff_eq', diff_lorenz,
                                 comment='Source code of our equation!')

    # We want to explore some initial conditions
    traj.f_explore({'initial_conditions' : [
        np.array([0.01,0.01,0.01]),
        np.array([2.02,0.02,0.02]),
        np.array([42.0,4.2,0.42])
    ]})
    # 3 different conditions are enough for an illustrative example

    # 2nd phase let's run the experiment
    # We pass `euler_scheme` as our top-level simulation function and
    # the Lorenz equation 'diff_lorenz' as an additional argument
    env.f_run(euler_scheme, diff_lorenz)

    # We don't have a 3rd phase of post-processing here

    # 4th phase analysis.
    # I would recommend to do post-processing completely independent from the simulation,
    # but for simplicity let's do it here.

    # Let's assume that we start all over again and load the entire trajectory new.
    # Yet, there is an error within this approach, do you spot it?
    del traj
    traj = Trajectory(filename=filename)

    # We will only fully load parameters and derived parameters.
    # Results will be loaded manually later on.
    try:
        # However, this will fail because our trajectory does not know how to
        # build the FunctionParameter. You have seen this coming, right?
        traj.f_load(name=trajectory_name, load_parameters=2, load_derived_parameters=2,
                    load_results=1)
    except ImportError as e:

        print('That did\'nt work, I am sorry: %s ' % str(e))

        # Ok, let's try again but this time with adding our parameter to the imports
        traj = Trajectory(filename=filename,
                           dynamically_imported_classes=FunctionParameter)

        # Now it works:
        traj.f_load(name=trajectory_name, load_parameters=2, load_derived_parameters=2,
                    load_results=1)


    #For the fun of it, let's print the source code
    print('\n ---------- The source code of your function ---------- \n %s' % traj.diff_eq)

    # Let's get the exploration array:
    initial_conditions_exploration_array = traj.f_get('initial_conditions').f_get_range()
    # Now let's plot our simulated equations for the different initial conditions:
    # We will iterate through the run names
    for idx, run_name in enumerate(traj.f_get_run_names()):

        #Get the result of run idx from the trajectory
        euler_result = traj.results.f_get(run_name).euler_evolution
        # Now we manually need to load the result. Actually the results are not so large and we
        # could load them all at once. But for demonstration we do as if they were huge:
        traj.f_load_item(euler_result)
        euler_data = euler_result.data

        #Plot fancy 3d plot
        fig = plt.figure(idx)
        # `Figure.gca(projection='3d')` was deprecated in Matplotlib 3.4 and
        # removed in 3.6; `add_subplot` is the supported, backward-compatible way.
        ax = fig.add_subplot(projection='3d')
        x = euler_data[:,0]
        y = euler_data[:,1]
        z = euler_data[:,2]
        ax.plot(x, y, z, label='Initial Conditions: %s' % str(initial_conditions_exploration_array[idx]))
        plt.legend()
        plt.show()

        # Now we free the data again (because we assume its huuuuuuge):
        del euler_data
        euler_result.f_empty()

    # You have to click through the images to stop the example_05 module!

    # Finally disable logging and close all log-files
    env.f_disable_logging()
# Example #19
    traj.f_add_result('positions', sim.positions, comment='End positions of particles')
    traj.f_add_result('t', sim.t, comment='duration of flight')

# Set up the environment for the fan simulation (single process).
env = Environment(trajectory='FanSimulation', filename='./pypet/',
                  large_overview_tables=True,
                  add_time=True,
                  multiproc=False,
                  ncores=6,
                  log_config='DEFAULT')

traj = env.v_trajectory

# Add the default parameter set with a time step of 0.01.
add_parameters(traj, dt=1e-2)

# 3 values per axis -> 3*3*3 = 27 runs via the cartesian product.
explore_dict = {'vent_radius':[0.1, 0.5, 1.0],
                'vmax':[10, 50, 100],
                'incline':[0.1, 1.0, 5.0]}

to_explore = cartesian_product(explore_dict)
traj.f_explore(to_explore)

# Run one simulation per parameter combination.
env.f_run(run_simulation)

# Close all log files and handlers.
env.f_disable_logging()


# In[ ]:



                  file_title='Example_01_First_Steps',
                  log_folder='experiments/example_01/LOGS/',
                  comment='The first example!',
                  continuable=False, # We have shared data in terms of a multiprocessing list,
                  # so we CANNOT use the continue feature.
                  multiproc=True,
                  ncores=2)

# The environment has created a trajectory container for us
traj = env.v_trajectory

# Add both parameters
traj.f_add_parameter('x', 1, comment='I am the first dimension!')
traj.f_add_parameter('y', 1, comment='I am the second dimension!')

# Explore the parameters with a cartesian product (4 x 3 = 12 runs)
traj.f_explore(cartesian_product({'x':[1,2,3,4], 'y':[6,7,8]}))

# We want a shared list where we can put all our results in. We use a manager for this:
result_list = mp.Manager().list()
# Let's make some space for potential results
result_list[:] = [0 for _dummy in range(len(traj))]

# Run the simulation
env.f_run(multiply, result_list)

# Now we want to store the final list as numpy array
traj.f_add_result('z', np.array(result_list))

# Finally let's print the result to see that it worked
# (fixed: the original used the Python 2 `print` statement, a SyntaxError on Python 3)
print(traj.z)
# Example #21
class LoggingTest(TrajectoryComparator):

    tags = 'integration', 'environment', 'logging'

    def setUp(self):
        """Reset the logging module to a pristine state before each test."""
        root = logging.getLogger()
        all_loggers = itools.chain(root.manager.loggerDict.values(), [root])
        for lg in all_loggers:
            # PlaceHolder entries in loggerDict have no handlers/setLevel.
            if hasattr(lg, 'handlers'):
                for hd in lg.handlers:
                    if hasattr(hd, 'flush'):
                        hd.flush()
                    if hasattr(hd, 'close'):
                        hd.close()
                lg.handlers = []
            if hasattr(lg, 'setLevel'):
                lg.setLevel(logging.NOTSET)
        self.set_mode()

    def tearDown(self):
        # Nothing logging-specific to clean up; defer to the comparator's teardown.
        super(LoggingTest, self).tearDown()

    def set_mode(self):
        """Create the default settings container used by ``make_env``."""
        self.mode = Dummy()
        # Default environment settings; individual tests override them
        # through the keyword arguments of `make_env`.
        defaults = {
            'wrap_mode': 'LOCK',
            'multiproc': False,
            'ncores': 1,
            'use_pool': True,
            'pandas_format': 'fixed',
            'pandas_append': False,
            'complib': 'blosc',
            'complevel': 9,
            'shuffle': True,
            'fletcher32': False,
            'encoding': 'utf8',
            'log_stdout': False,
            'log_config': get_log_config(),
        }
        self.mode.__dict__.update(defaults)


    def make_env(self, **kwargs):
        """Build an Environment from the current mode, with *kwargs* overrides."""
        self.mode.__dict__.update(kwargs)
        self.filename = make_temp_dir('log_testing.hdf5')
        self.traj_name = make_trajectory_name(self)
        self.env = Environment(trajectory=self.traj_name,
                               filename=self.filename,
                               **self.mode.__dict__)
        self.traj = self.env.v_traj


    def add_params(self, traj):
        # Add two parameters: p1 via attribute assignment and p2 via the
        # `f_apar` shortcut for `f_add_parameter`.
        traj.par.p1 = Parameter('', 42, 'Hey')
        traj.f_apar('g1.p2', 145, comment='Test')


    def explore(self, traj):
        # Explore seven runs over parameter p1.
        traj.f_explore({'p1': range(7)})

    # @unittest.skipIf(platform.system() == 'Windows', 'Log file creation might fail under windows.')
    def test_logfile_creation_normal(self):
        """Check log-file creation and message distribution for error-free runs.

        The expected number of files depends on the multiprocessing mode:
        two files per pool worker (or per run without a pool) plus the
        main LOG/ERROR files and, for QUEUE wrapping, the queue files.
        """
        # if not self.multiproc:
        #     return
        self.make_env()
        self.add_params(self.traj)
        self.explore(self.traj)

        self.env.f_run(log_wo_error_levels)
        self.env.f_disable_logging()

        traj = self.env.v_traj

        log_path = get_log_path(traj)

        # Compute the expected number of log files for the current mode.
        if self.mode.multiproc:
            if self.mode.use_pool:
                length = self.mode.ncores * 2
            else:
                length = 2 * len(traj)
            if self.mode.wrap_mode == 'LOCK':
                length += 2
            elif self.mode.wrap_mode == 'QUEUE':
                length += 4
            else:
                raise RuntimeError('You shall not pass!')
        else:
            length = 2


        file_list = [file for file in os.listdir(log_path)]

        self.assertEqual(len(file_list), length) # assert that there are as many
        # files as runs plus main.txt and errors and warnings
        total_error_count = 0
        total_store_count = 0
        total_info_count = 0
        total_retry_count = 0
        for file in file_list:
            with open(os.path.join(log_path, file), mode='r') as fh:
                text = fh.read()
            if len(text) == 0:
                continue
            # Tally the marker strings emitted by the run function.
            count = text.count('INFO_Test!')
            total_info_count += count
            error_count = text.count('ERROR_Test!')
            total_error_count += error_count
            store_count = text.count('STORE_Test!')
            total_store_count += store_count
            retry_count = text.count('Retry')
            total_retry_count += retry_count
            if 'LOG.txt' == file:
                if self.mode.multiproc:
                    self.assertEqual(count,0)
                    self.assertEqual(store_count, 0)
                else:
                    self.assertEqual(count, len(traj))
                    self.assertEqual(store_count, len(traj))
            elif 'ERROR' in file:
                full_path = os.path.join(log_path, file)
                filesize = os.path.getsize(full_path)
                with open(full_path) as fh:
                    text = fh.read()
                # Error files must be empty except for possible retry notices.
                if 'Retry' not in text:
                    self.assertEqual(filesize, 0)
            elif 'Queue' in file:
                self.assertEqual(store_count, len(traj))
            elif 'LOG' in file:
                if self.mode.multiproc and self.mode.use_pool:
                    self.assertGreaterEqual(count, 0, '%d < 1 for file %s' % (count, file))
                else:
                    self.assertEqual(count, 1)
                    if self.mode.wrap_mode == 'QUEUE':
                        self.assertEqual(store_count, 0)
                    else:
                        self.assertEqual(store_count, 1)
            else:
                self.assertTrue(False, 'There`s a file in the log folder that does not '
                                       'belong there: %s' % str(file))
        self.assertEqual(total_store_count, len(traj))
        self.assertEqual(total_error_count, 0)
        self.assertEqual(total_info_count, len(traj))
        self.assertLess(total_retry_count, len(traj))

    def test_throw_error_when_specifying_config_and_old_method(self):
        """Mixing ``log_config`` with old-style logger options must raise."""
        self.assertRaises(ValueError, self.make_env,
                          log_config=None, logger_names='test')

    def test_disable(self):
        """``log_config=None`` must disable logging completely: no log
        folder is created and the logging manager holds no configuration."""
        # if not self.multiproc:
        #     return
        self.make_env(log_config=None)
        traj = self.env.v_traj

        log_path = get_log_path(traj)

        self.assertFalse(os.path.isdir(log_path))
        self.assertTrue(self.env._logging_manager._sp_config is None)
        self.assertTrue(self.env._logging_manager._mp_config is None)
        self.assertTrue(self.env._logging_manager.log_config is None)

        self.add_params(self.traj)
        self.explore(self.traj)

        self.env.f_run(log_all_levels)

        # Even after running, nothing may have been configured or written.
        self.assertFalse(os.path.isdir(log_path))
        self.assertTrue(self.env._logging_manager._sp_config is None)
        self.assertTrue(self.env._logging_manager._mp_config is None)
        self.assertTrue(self.env._logging_manager.log_config is None)

        self.env.f_disable_logging()
        # pypet_path = os.path.abspath(os.path.dirname(pypet.pypetlogging))
        # init_path = os.path.join(pypet_path, 'logging')
        # log_config = os.path.join(init_path, 'default.ini')


    # @unittest.skipIf(platform.system() == 'Windows', 'Log file creation might fail under windows.')
    def test_logfile_creation_with_errors(self):
        """New-style logging config with runs that also log errors.

        Each run emits one INFO, one ERROR and one STORE marker; both
        the totals and the per-file distribution are verified depending
        on the multiprocessing mode.
        """
        self.make_env()
        self.add_params(self.traj)
        self.explore(self.traj)

        self.env.f_run(log_all_levels)
        if self.mode.multiproc:
            logging.getLogger('pypet.test').error('ttt')
        self.env.f_disable_logging()

        traj = self.env.v_traj
        log_path = get_log_path(traj)

        # Compute the expected number of log files for the current mode.
        if self.mode.multiproc:
            if self.mode.use_pool:
                length = self.mode.ncores * 2
            else:
                length = 2 * len(traj)
            if self.mode.wrap_mode == 'LOCK':
                length += 2
            elif self.mode.wrap_mode == 'QUEUE':
                # Fixed `length += + 4` (stray unary plus) for consistency
                # with the sibling tests.
                length += 4
            else:
                raise RuntimeError('You shall not pass!')
        else:
            length = 2

        file_list = [file for file in os.listdir(log_path)]

        self.assertEqual(len(file_list), length) # assert that there are as many
        # files as runs plus main.txt and errors and warnings

        total_error_count = 0
        total_store_count = 0
        total_info_count = 0
        total_retry_count = 0
        for file in file_list:
            with open(os.path.join(log_path, file), mode='r') as fh:
                text = fh.read()
            if len(text) == 0:
                continue
            # Tally the marker strings emitted by the run function.
            count = text.count('INFO_Test!')
            total_info_count += count
            error_count = text.count('ERROR_Test!')
            total_error_count += error_count
            store_count = text.count('STORE_Test!')
            total_store_count += store_count
            retry_count = text.count('Retry')
            total_retry_count += retry_count
            if 'LOG.txt' == file:
                if self.mode.multiproc:
                    self.assertEqual(count,0)
                    self.assertEqual(store_count, 0)
                else:
                    self.assertEqual(count, len(traj))
                    self.assertEqual(store_count, len(traj))
            elif 'ERROR.txt' == file:
                self.assertEqual(count, 0)
                if self.mode.multiproc:
                    self.assertEqual(error_count,0)
                    self.assertEqual(store_count, 0)
                else:
                    self.assertEqual(error_count, len(traj))
                    self.assertEqual(store_count, len(traj))

            elif 'Queue' in file and 'ERROR' in file:
                self.assertEqual(store_count, len(traj))
            elif 'Queue' in file and 'LOG' in file:
                self.assertEqual(store_count, len(traj))
            elif 'LOG' in file:
                if self.mode.multiproc and self.mode.use_pool:
                    self.assertGreaterEqual(count, 0)
                    self.assertGreaterEqual(error_count, 0)
                else:
                    self.assertEqual(count, 1)
                    self.assertEqual(error_count, 1)
                    if self.mode.wrap_mode == 'QUEUE':
                        self.assertEqual(store_count, 0)
                    else:
                        self.assertEqual(store_count, 1)
            elif 'ERROR' in file:
                if self.mode.multiproc and self.mode.use_pool:
                    self.assertEqual(count, 0)
                    self.assertGreaterEqual(error_count, 1)
                else:
                    self.assertEqual(count, 0)
                    self.assertEqual(error_count, 1)
                    if self.mode.wrap_mode == 'QUEUE':
                        self.assertEqual(store_count, 0)
                    else:
                        self.assertEqual(store_count, 1)
            else:
                self.assertTrue(False, 'There`s a file in the log folder that does not '
                                       'belong there: %s' % str(file))
        self.assertEqual(total_store_count, 2*len(traj))
        self.assertEqual(total_error_count, 2*len(traj))
        self.assertEqual(total_info_count, len(traj))
        self.assertLess(total_retry_count, len(traj))

    def test_file_renaming(self):
        """``rename_log_file`` must expand $traj/$set/$run wildcards."""
        traj_name = 'test'
        traj = Trajectory(traj_name, add_time=False)
        traj.f_add_parameter('x', 42)
        traj.f_explore({'x': [1,2,3]})
        template = '$traj_$set_$run'
        # Without an active run index the wildcards expand to the ALL variants.
        self.assertEqual(rename_log_file(template, traj),
                         'test_run_set_ALL_run_ALL')
        # Selecting run 0 yields the concrete set and run names.
        traj.v_idx = 0
        self.assertEqual(rename_log_file(template, traj),
                         'test_run_set_00000_run_00000000')


    # @unittest.skipIf(platform.system() == 'Windows', 'Log file creation might fail under windows.')
    def test_logfile_old_way_creation_with_errors(self):
        """Old-way logging settings (logger_names/log_level/log_folder)
        with runs that log errors.

        With ``log_level=logging.ERROR`` no INFO markers may appear in
        any file, while error and store markers must reach the
        respective files.
        """
         # if not self.multiproc:
        #     return
        del self.mode.__dict__['log_config']
        self.make_env(logger_names = ('','pypet'), log_level=logging.ERROR,
                      log_folder=make_temp_dir('logs'))
        self.add_params(self.traj)
        self.explore(self.traj)

        self.env.f_run(log_all_levels)
        if self.mode.multiproc:
            logging.getLogger('pypet.test').error('ttt')
        self.env.f_disable_logging()

        traj = self.env.v_traj
        log_path = get_log_path(traj)

        # Compute the expected number of log files for the current mode.
        if self.mode.multiproc:
            if self.mode.use_pool:
                length = self.mode.ncores * 2
            else:
                length = 2 * len(traj)
            if self.mode.wrap_mode == 'LOCK':
                length += 2
            elif self.mode.wrap_mode == 'QUEUE':
                length += 4
            else:
                raise RuntimeError('You shall not pass!')
        else:
            length = 2

        file_list = [file for file in os.listdir(log_path)]

        self.assertEqual(len(file_list), length) # assert that there are as many
        # files as runs plus main.txt and errors and warnings

        total_error_count = 0
        total_store_count = 0
        total_info_count = 0
        total_retry_count = 0

        for file in file_list:
            with open(os.path.join(log_path, file), mode='r') as fh:
                text = fh.read()
            if len(text) == 0:
                continue
            # Tally the marker strings emitted by the run function.
            count = text.count('INFO_Test!')
            total_info_count += count
            error_count = text.count('ERROR_Test!')
            total_error_count += error_count
            store_count = text.count('STORE_Test!')
            total_store_count += store_count
            retry_count = text.count('Retry')
            total_retry_count += retry_count
            if 'LOG.txt' == file:
                if self.mode.multiproc:
                    self.assertEqual(count,0)
                    self.assertEqual(store_count, 0)
                else:
                    self.assertEqual(count, 0)
                    self.assertGreaterEqual(store_count, len(traj))
            elif 'ERROR.txt' == file:
                self.assertEqual(count, 0)
                if self.mode.multiproc:
                    self.assertEqual(error_count,0)
                    self.assertEqual(store_count, 0)
                else:
                    self.assertGreaterEqual(error_count, len(traj))
                    self.assertGreaterEqual(store_count, len(traj))

            elif 'Queue' in file and 'ERROR' in file:
                self.assertGreaterEqual(store_count, len(traj))
            elif 'Queue' in file and 'LOG' in file:
                self.assertGreaterEqual(store_count, len(traj))
            elif 'LOG' in file:
                if self.mode.multiproc and self.mode.use_pool:
                    self.assertEqual(count, 0)
                    self.assertGreaterEqual(error_count, 0)
                else:
                    self.assertEqual(count, 0)
                    self.assertGreaterEqual(error_count, 1)
                    if self.mode.wrap_mode == 'QUEUE':
                        self.assertEqual(store_count, 0)
                    else:
                        self.assertGreaterEqual(store_count, 1)
            elif 'ERROR' in file:
                if self.mode.multiproc and self.mode.use_pool:
                    self.assertEqual(count, 0)
                    self.assertGreaterEqual(error_count, 0)
                else:
                    self.assertEqual(count, 0)
                    self.assertGreaterEqual(error_count, 1)
                    if self.mode.wrap_mode == 'QUEUE':
                        self.assertEqual(store_count, 0)
                    else:
                        self.assertGreaterEqual(store_count, 1)
            else:
                self.assertTrue(False, 'There`s a file in the log folder that does not '
                                       'belong there: %s' % str(file))
        self.assertGreaterEqual(total_store_count, 2*len(traj))
        self.assertGreaterEqual(total_error_count, 2*len(traj))
        self.assertEqual(total_info_count, 0)
        self.assertLess(total_retry_count, len(traj))

    # @unittest.skipIf(platform.system() == 'Windows', 'Log file creation might fail under windows.')
    def test_logfile_old_way_disabling_mp_log(self):
        """Old-style log configuration with multiprocess logging disabled.

        Removes the new-style ``log_config`` from the mode and configures the
        environment via the legacy keywords (``logger_names``, ``log_level``,
        ``log_folder``) with ``log_multiproc=False``.  With per-process
        logging disabled, the log folder must contain exactly the two main
        files (LOG.txt and ERROR.txt) and nothing else, regardless of
        whether the run is multiprocessed.
        """
        # Switch from the new `log_config` style to the old keyword style;
        # `log_config` must be absent or it would take precedence.
        del self.mode.__dict__['log_config']
        self.make_env(logger_names=('', 'pypet'), log_level=logging.ERROR,
                      log_folder=make_temp_dir('logs'), log_multiproc=False)
        self.add_params(self.traj)
        self.explore(self.traj)

        self.env.f_run(log_all_levels)
        if self.mode.multiproc:
            # Extra log record emitted from the main process; it must not
            # create an additional per-process log file.
            logging.getLogger('pypet.test').error('ttt')
        self.env.f_disable_logging()

        traj = self.env.v_traj
        log_path = get_log_path(traj)

        # Exactly the two main files (LOG.txt and ERROR.txt) should exist,
        # because per-process log files were disabled via log_multiproc=False.
        file_list = os.listdir(log_path)
        self.assertEqual(len(file_list), 2)

    # @unittest.skipIf(platform.system() == 'Windows', 'Log file creation might fail under windows.')
    def test_logging_stdout(self):
        """Redirected stdout must land in LOG.txt, filtered at level 50.

        ``log_stdout=('STDOUT', 50)`` routes everything printed to stdout
        through a logger named 'STDOUT' at CRITICAL level, so the printed
        marker string must appear in the main log file while ordinary
        DEBUG output must not.
        """
        hdf5_file = make_temp_dir('teststdoutlog.hdf5')
        make_temp_dir('logs')  # side effect: make sure the temp log dir exists
        env = Environment(trajectory=make_trajectory_name(self),
                          filename=hdf5_file, log_config=get_log_config(),
                          # log_levels=logging.CRITICAL, # needed for the test
                          log_stdout=('STDOUT', 50), #log_folder=folder
                          )

        env.f_run(log_error)
        log_path = get_log_path(env.v_traj)

        marker = 'sTdOuTLoGGinG'
        print(marker)
        env.f_disable_logging()

        with open(os.path.join(log_path, 'LOG.txt'), mode='r') as fh:
            log_text = fh.read()

        self.assertIn(marker, log_text)
        self.assertNotIn('4444444', log_text)
        self.assertNotIn('DEBUG', log_text)


    def test_logging_show_progress(self):
        """Progress reporting must write 'PROGRESS: Finished' and a bar to LOG.txt."""
        self.make_env(log_config=get_log_config(),
                      report_progress=(3, 'progress', 40))
        self.add_params(self.traj)
        self.explore(self.traj)

        self.env.f_run(log_all_levels)
        self.env.f_disable_logging()

        # Read the whole main log and look for the progress markers.
        log_file = os.path.join(get_log_path(self.env.v_traj), 'LOG.txt')
        with open(log_file, mode='r') as fh:
            log_text = fh.read()

        self.assertTrue('PROGRESS: Finished' in log_text)
        self.assertTrue('[==' in log_text)


    def test_logging_show_progress_print(self):
        """Print-style progress must pass through the stdout logger into LOG.txt.

        ``report_progress=(3, 'print')`` prints progress to stdout, which
        ``log_stdout=('prostdout', 50)`` redirects into the log at CRITICAL.
        """
        self.make_env(log_config=get_log_config(), log_stdout=('prostdout', 50),
                      report_progress=(3, 'print'))
        self.add_params(self.traj)
        self.explore(self.traj)

        self.env.f_run(log_all_levels)
        self.env.f_disable_logging()

        log_file = os.path.join(get_log_path(self.env.v_traj), 'LOG.txt')
        with open(log_file, mode='r') as fh:
            log_text = fh.read()

        self.assertTrue('prostdout CRITICAL PROGRESS: Finished' in log_text)
        self.assertIn('[==', log_text)
示例#22
0
from pypet import Environment, cartesian_product


def multiply(traj):
    """Toy numerical experiment: multiply the two exploration parameters.

    :param traj:
        Trajectory holding one particular combination of the parameters
        ``x`` and ``y``; the product is stored back into it as result ``z``.
    """
    product = traj.x * traj.y
    traj.f_add_result('z', product, comment='Result of x*y')


# Set up an environment that orchestrates the experiment and persists
# everything to an HDF5 file.
env = Environment(trajectory='Multiplication',
                  filename='multiply.hdf5',
                  comment='A simulation of multiplication')

# The environment provides the trajectory container for parameters/results.
traj = env.v_trajectory

# Register both dimensions; each defaults to 0 until explored.
traj.f_add_parameter('x', 0, comment='First dimension')
traj.f_add_parameter('y', 0, comment='Second dimension')

# Explore every combination of x in {1,2,3,4} with y in {6,7,8}.
exploration = cartesian_product({'x': [1, 2, 3, 4], 'y': [6, 7, 8]})
traj.f_explore(exploration)

# Execute `multiply` once per parameter combination.
env.f_run(multiply)