def setUp(self):
    """Build an in-memory trajectory with many parameters and one explored one."""
    logging.basicConfig(level=logging.INFO)
    traj = Trajectory('Test')
    # Lazy service: nothing is written to disk in this fixture.
    traj.v_storage_service = LazyStorageService()
    n_params = 111
    for irun in range(n_params):
        traj.f_add_parameter('There.Are.Many.Parameters.Like.Me' + str(irun), irun)
    traj.f_add_parameter('TestExplorer', 1)
    # Disable fast access so the parameter object (not its value) is returned below.
    traj.v_fast_access = False
    traj.f_explore({traj.TestExplorer.v_full_name: [1, 2, 3, 4, 5]})
    traj.v_fast_access = True
    self.traj = traj
    self.n = 1
    self.single_run = self.traj._make_single_run(self.n)
    self.assertTrue(len(self.single_run) == 1)
def setUp(self):
    """Prepare two structurally identical trajectories with different explored data."""
    name = 'Moop'
    self.traj = Trajectory(name, [ImAParameterInDisguise])
    comment = 'This is a comment'
    self.traj.v_comment = comment
    self.assertTrue(comment == self.traj.v_comment)
    self.traj.f_add_parameter('IntParam', 3)
    sparsemat = spsp.csr_matrix((1000, 1000))
    sparsemat[1, 2] = 17.777
    # self.traj.f_add_parameter('SparseParam', sparsemat, param_type=PickleParameter)
    self.traj.f_add_parameter('FloatParam')
    self.traj.f_add_derived_parameter(Parameter('FortyTwo', 42))
    self.traj.f_add_parameter('Trials', 0)
    self.traj.f_add_result(Result, 'Im.A.Simple.Result', 44444)
    self.traj.FloatParam = 4.0
    self.traj.v_storage_service = LazyStorageService()
    self.traj.f_explore({'FloatParam': [1.0, 1.1, 1.2, 1.3],
                         'Trials': [0, 1, 2, 3]})
    self.assertTrue(len(self.traj) == 4)

    # Second trajectory: same layout, different exploration values.
    name2 = 'aaaaah'
    self.traj2 = Trajectory(name2, [ImAParameterInDisguise])
    comment = 'This is a comment'
    self.traj2.v_comment = comment
    self.assertTrue(comment == self.traj2.v_comment)
    self.traj2.f_add_parameter('IntParam', 3)
    sparsemat = spsp.csr_matrix((1000, 1000))
    sparsemat[1, 2] = 17.777
    # self.traj2.f_add_parameter('SparseParam', sparsemat, param_type=PickleParameter)
    self.traj2.f_add_parameter('Trials', 0)
    self.traj2.f_add_parameter('FloatParam')
    self.traj2.f_add_derived_parameter(Parameter('FortyTwo', 42))
    self.traj2.f_add_result(Result, 'Im.A.Simple.Result', 44444)
    self.traj2.FloatParam = 4.0
    self.traj2.f_explore({'FloatParam': [42.0, 43.0, 1.2, 1.3],
                          'Trials': [0, 1, 2, 3]})
    self.traj2.v_storage_service = LazyStorageService()
    self.assertTrue(len(self.traj2) == 4)
def test_a_large_run(self):
    """Execute a large experiment, reload it, and bound the resulting file size."""
    get_root_logger().info('Testing large run')
    self.traj.f_add_parameter('TEST', 'test_run')
    # Explore and execute.
    self.explore_large(self.traj)
    self.make_run_large_data()
    self.assertTrue(self.traj.f_is_completed())
    # Check that printing and repr of the environment work.
    get_root_logger().info(str(self.env))
    get_root_logger().info(repr(self.env))
    newtraj = Trajectory()
    newtraj.f_load(name=self.traj.v_name, as_new=False, load_data=2,
                   filename=self.filename)
    self.traj.f_load_skeleton()
    self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)
    self.compare_trajectories(self.traj, newtraj)
    size_in_mb = os.path.getsize(self.filename) / 1000000.
    get_root_logger().info('Size is %sMB' % str(size_in_mb))
    self.assertTrue(size_in_mb < 30.0, 'Size is %sMB > 30MB' % str(size_in_mb))
def load_trajectory(self, trajectory_index=None, trajectory_name=None, as_new=False, how=2):
    """Load the stored trajectory from ``self.filename`` and return it.

    ``how`` is the pypet loading level (2 = load data) applied to derived
    parameters and results.
    """
    loaded = Trajectory()
    loaded.v_storage_service = HDF5StorageService(filename=self.filename)
    loaded.f_load(name=trajectory_name, index=trajectory_index, as_new=as_new,
                  load_derived_parameters=how, load_results=how)
    return loaded
def test_partially_delete_stuff(self):
    """Delete single entries of a stored result, then remove the result entirely."""
    traj = Trajectory(name='TestDelete',
                      filename=make_temp_file('testpartiallydel.hdf5'))
    res = traj.f_add_result('mytest.test', a='b', c='d')
    traj.f_store()
    self.assertTrue('a' in res)
    # Remove 'a' both from disk and from the in-memory item.
    traj.f_delete_item(res, delete_only=['a'], remove_from_item=True)
    self.assertTrue('c' in res)
    self.assertTrue('a' not in res)
    res['a'] = 'offf'
    self.assertTrue('a' in res)
    # Reloading overwrites the item; 'a' no longer exists on disk.
    traj.f_load(load_results=3)
    self.assertTrue('a' not in res)
    self.assertTrue('c' in res)
    traj.f_delete_item(res, remove_from_trajectory=True, remove_empty_groups=True)
    self.assertTrue('results' not in traj)
    self.assertTrue(res not in traj)
def load_trajectory(self, trajectory_index=None, trajectory_name=None, as_new=False):
    """Return a freshly loaded trajectory (full derived-parameter/result data)."""
    loaded = Trajectory()
    loaded.v_storage_service = HDF5StorageService(filename=self.filename)
    loaded.f_load(name=trajectory_name, load_derived_parameters=2, load_results=2,
                  index=trajectory_index, as_new=as_new)
    return loaded
def test_store_and_load_large_dictionary(self):
    """Store dicts above and below the HDF5 table limit and reload them.

    Fix: compute the temporary filename once and reuse it; the original
    called ``make_temp_file('large_dict.hdf5')`` twice, which only works if
    that helper is deterministic — otherwise the second trajectory would
    open a different file.
    """
    filename = make_temp_file('large_dict.hdf5')
    traj = Trajectory(name='Test', filename=filename)
    # 1025 entries: just above the usual 1024-column HDF5 table limit.
    large_dict = {'item_%d' % irun: irun for irun in range(1025)}
    large_dict2 = {'item_%d' % irun: irun for irun in range(33)}
    traj.f_add_result('large_dict', large_dict, comment='Huge_dict!')
    traj.f_add_result('large_dict2', large_dict2, comment='Not so large dict!')
    traj.f_store()
    traj_name = traj.v_name
    traj2 = Trajectory(filename=filename)
    traj2.f_load(name=traj_name, load_all=2)
    self.compare_trajectories(traj, traj2)
def test_root_getting(self):
    """``f_get_root`` on a child group must return the trajectory itself."""
    traj = Trajectory()
    traj.f_add_config_group('ff')
    self.assertTrue(traj.ff.f_get_root() is traj)
def test_storage_service_errors(self):
    """Exercise error paths of the storage service and of ``f_load``."""
    traj = Trajectory(filename=make_temp_file('testnoservice.hdf5'))
    # Storing items before the trajectory itself was stored once must fail:
    with self.assertRaises(ValueError):
        traj.v_storage_service.store('FAKESERVICE', self,
                                     trajectory_name=traj.v_name)
    traj.f_store()
    # Wrong trajectory name:
    with self.assertRaises(ValueError):
        traj.v_storage_service.store('FAKESERVICE', self, trajectory_name='test')
    # Unknown service keyword:
    with self.assertRaises(pex.NoSuchServiceError):
        traj.v_storage_service.store('FAKESERVICE', self,
                                     trajectory_name=traj.v_name)
    # Name and index are mutually exclusive:
    with self.assertRaises(ValueError):
        traj.f_load(name='test', index=1)
    # Malformed LIST message:
    with self.assertRaises(RuntimeError):
        traj.v_storage_service.store('LIST', [('LEAF', None, None, None, None)],
                                     trajectory_name=traj.v_name)
    # Non-existing index and name:
    with self.assertRaises(ValueError):
        traj.f_load(index=9999)
    with self.assertRaises(ValueError):
        traj.f_load(name='Non-Existising-Traj')
def test_net(self):
    """Run the Brian network and compare a reloaded copy against the original."""
    self.env.f_run(run_net)
    self.traj.f_load(load_derived_parameters=2, load_results=2)
    traj2 = Trajectory(
        name=self.traj.v_name, add_time=False,
        filename=make_temp_file('experiments/tests/briantests/HDF5/briantest.hdf5'),
        dynamically_imported_classes=['pypet.brian.parameter.BrianParameter',
                                      BrianMonitorResult])
    traj2.f_load(load_parameters=2, load_derived_parameters=2, load_results=2)
    self.compare_trajectories(self.traj, traj2)
def load_trajectory(fname):
    """Load parameters fully and result skeletons from *fname*; enable auto-loading."""
    tr = Trajectory(name='tr1', add_time=False, filename=fname,
                    dynamic_imports=[Brian2MonitorResult, Brian2Parameter])
    # Loading levels (see pypet.pypetconstants):
    #   LOAD_NOTHING  --> 0
    #   LOAD_SKELETON --> 1
    #   LOAD_DATA     --> 2
    tr.f_load(load_parameters=2, load_derived_parameters=2, load_results=1)
    # With auto-loading, result data is fetched lazily on first access.
    tr.v_auto_load = True
    return tr
def test_run(self):
    """Check that links created during runs survive storing and reloading."""
    self.env.f_run(dostuff_and_add_links)
    self.traj.f_load(load_data=2)
    traj2 = Trajectory()
    # First load resolves name/file, second one pulls the actual data.
    traj2.f_load(name=self.traj.v_name, filename=self.filename)
    traj2.f_load(load_data=2)
    for run in self.traj.f_get_run_names():
        self.assertTrue(self.traj.res.runs[run].paraBL is self.traj.paramB)
    self.compare_trajectories(self.traj, traj2)
def test_store_load_with_hdf5(self):
    """Round-trip all fixture results through an HDF5 file."""
    traj_name = 'test_%s' % self.__class__.__name__
    filename = make_temp_dir(traj_name + '.hdf5')
    traj = Trajectory(name=traj_name, dynamic_imports=self.dynamic_imports,
                      filename=filename, overwrite_file=True)
    for res in self.results.values():
        traj.f_add_result(res)
    traj.f_store()
    new_traj = Trajectory(name=traj_name, dynamic_imports=self.dynamic_imports,
                          filename=filename)
    new_traj.f_load(load_data=2)
    self.compare_trajectories(traj, new_traj)
def test_not_adding_pars_during_single_run(self):
    """Adding parameters or config while a run is active must raise TypeError."""
    traj = Trajectory()
    traj._is_run = True
    adders = (traj.f_add_parameter,
              traj.f_add_parameter_group,
              traj.f_add_config,
              traj.f_add_config_group)
    for add in adders:
        with self.assertRaises(TypeError):
            add('dd')
def test_illegal_namings(self):
    """Names that collide with the API or contain illegal tokens are rejected."""
    self.traj = Trajectory('resulttest2')
    # API method names are reserved:
    with self.assertRaises(ValueError):
        self.traj.f_add_parameter('f_get')
    # '$' wildcard may not appear twice:
    with self.assertRaises(ValueError):
        self.traj.f_add_result('test.$.k.$')
    rg = self.traj.f_add_result_group('ggg.$')
    with self.assertRaises(ValueError):
        rg.f_add_result('$.fff')
    self.traj.f_add_result_group('test.$.k')
    with self.assertRaises(ValueError):
        self.traj.res.k.f_add_result('$.jjj')
    # Over-long names and full names:
    with self.assertRaises(ValueError):
        self.traj.f_add_parameter('e' * 129)
    with self.assertRaises(ValueError):
        self.traj.f_add_parameter('e' * 120 + '.j' * 120 + '.k' * 40)
    # 'crun' is a reserved shortcut:
    with self.assertRaises(ValueError):
        self.traj.f_add_parameter('crun', 22)
def test_storage_and_loading(self):
    """Store, drop, and fully reload the trajectory; annotations must survive."""
    self.traj.f_store()
    traj_name = self.traj.v_name
    del self.traj
    self.traj = Trajectory(filename=self.filename)
    self.traj.f_load(name=traj_name,
                     load_parameters=2,
                     load_derived_parameters=2,
                     load_results=2,
                     load_other_data=2)
    self.test_annotations_insert()
def test_short_cuts(self):
    """Short-cut attributes must resolve to the very same group objects."""
    self.traj = Trajectory()
    self.traj.f_add_parameter('test', 42)
    self.traj.f_add_config('tefffst', 42)
    self.traj.f_add_derived_parameter('dtest', 42)
    self.traj.f_add_result('safd', 42)
    self.traj.f_explore({'test': [1, 2, 3, 4]})
    shortcut_pairs = ((self.traj.par, self.traj.parameters),
                      (self.traj.dpar, self.traj.derived_parameters),
                      (self.traj.conf, self.traj.config),
                      (self.traj.res, self.traj.results))
    for short, full in shortcut_pairs:
        self.assertEqual(id(short), id(full))
    srun = self.traj._make_single_run(3)
    srun.f_add_result('sdffds', 42)
    # 'crun' points at the group of the currently active run.
    self.assertEqual(id(srun.results.crun), id(srun.results.f_get(srun.v_name)))
def test_store_load_with_hdf5_no_data(self):
    """Round-trip parameters whose data was wiped (empty hulls only)."""
    traj_name = 'test_%s' % self.__class__.__name__
    filename = make_temp_dir(traj_name + 'nodata.hdf5')
    traj = Trajectory(name=traj_name, dynamic_imports=self.dynamic_imports,
                      filename=filename, overwrite_file=True)
    for param in self.param.values():
        # Wipe the data so only the skeleton gets stored.
        param._data = None
        traj.f_add_parameter(param)
    traj.f_store()
    new_traj = Trajectory(name=traj_name, dynamic_imports=self.dynamic_imports,
                          filename=filename)
    new_traj.f_load(load_data=2)
    self.compare_trajectories(traj, new_traj)
def test_loading(filenames, traj_names):
    """Time skeleton-only loading vs full-data loading for each trajectory."""
    n_trajs = len(traj_names)
    loading_times = np.zeros(n_trajs)
    loading_times_wd = np.zeros(n_trajs)
    n_groups = np.zeros(n_trajs, dtype='int')
    for idx, traj_name in enumerate(traj_names):
        filename = filenames[idx]
        # Pass 1: parameters with data, results/derived as skeletons only.
        traj = Trajectory(name=traj_name, filename=filename, add_time=False)
        start = time.time()
        traj.f_load(load_parameters=2, load_results=1, load_derived_parameters=1)
        loading_times[idx] = time.time() - start
        n_groups[idx] = sum(1 for _ in traj.f_iter_nodes(recursive=True))
        del traj
        # Pass 2: everything including data.
        traj = Trajectory(name=traj_name, filename=filename, add_time=False)
        start = time.time()
        traj.f_load(load_all=2)
        loading_times_wd[idx] = time.time() - start
    for idx, loading_time in enumerate(loading_times):
        print('Groups: %d, Loading: %.3fs, with Data: %.3fs' %
              (n_groups[idx], loading_time, loading_times_wd[idx]))
def test_net(self):
    """Run the Brian network and compare a reloaded copy against the original."""
    self.env.f_run(run_net)
    self.traj.f_load(load_derived_parameters=2, load_results=2)
    traj2 = Trajectory(
        name=self.traj.v_name, add_time=False,
        filename=make_temp_dir(os.path.join('experiments', 'tests', 'briantests',
                                            'HDF5', 'briantest.hdf5')),
        dynamic_imports=['pypet.brian.parameter.BrianParameter',
                         BrianMonitorResult])
    traj2.f_load(load_parameters=2, load_derived_parameters=2, load_results=2)
    self.compare_trajectories(self.traj, traj2)
# NOTE(review): the 'remov' typo in the method name is kept on purpose —
# it is the public test name used for discovery.
def test_remov_of_explored_stuff_if_not_saved(self):
    """Removing an explored but unsaved parameter shrinks the trajectory to length 1."""
    self.traj = Trajectory()
    self.traj.f_add_parameter('test', 42)
    self.traj.f_explore({'test': [1, 2, 3, 4]})
    self.traj.parameters.f_remove_child('test')
    self.assertTrue(len(self.traj) == 1)
def test_attribute_error_raises_when_leaf_and_group_with_same_name_are_added(self):
    """A leaf cannot become a group and a group cannot become a leaf."""
    self.traj = Trajectory()
    self.traj.f_add_parameter('test.param1')
    # 'param1' is already a leaf, so it cannot gain a child:
    with self.assertRaises(AttributeError):
        self.traj.f_add_parameter('test.param1.param2')
    # 'test' is already a group, so it cannot become a leaf:
    with self.assertRaises(AttributeError):
        self.traj.f_add_parameter('test')
def test_net(self):
    """Run the Brian network and compare a reloaded copy against the original."""
    self.env.f_run(run_net)
    self.traj.f_load(load_derived_parameters=2, load_results=2)
    traj2 = Trajectory(
        name=self.traj.v_name, add_time=False,
        filename=make_temp_dir(os.path.join('experiments', 'tests', 'briantests',
                                            'HDF5', 'briantest.hdf5')),
        dynamic_imports=['pypet.brian.parameter.BrianParameter',
                         BrianMonitorResult])
    traj2.f_load(load_parameters=2, load_derived_parameters=2, load_results=2)
    self.compare_trajectories(self.traj, traj2)
def test_remove_of_explored_stuff_if_saved(self):
    """Explored parameters of an already-stored trajectory keep its length.

    Fix: the final check was a bare expression (``len(self.traj) == 4``)
    whose boolean result was silently discarded — it is now a real assertion.
    """
    self.traj = Trajectory()
    self.traj.f_add_parameter('test', 42)
    self.traj.f_explore({'test': [1, 2, 3, 4]})
    # Pretend the trajectory was stored; removal then must not shrink it.
    self.traj._stored = True
    self.traj.parameters.f_remove_child('test')
    self.assertTrue(len(self.traj) == 4)
def test_pipeline(self):
    """Pipeline execution and plain run must yield comparable trajectories."""
    filename = 'testpostprocpipe.hdf5'
    env1, filename, _, _ = self.make_environment(filename, 'k1')
    env2 = self.make_environment(filename, 'k2', log=False)[0]
    traj1 = env1.v_trajectory
    traj2 = env2.v_trajectory
    traj2.f_add_parameter('x', 1, comment='1st')
    traj2.f_add_parameter('y', 1, comment='1st')
    exp_dict2 = {'x': [1, 2, 3, 4, 1, 2, 2, 3],
                 'y': [1, 2, 3, 4, 1, 2, 0, 1]}
    traj2.f_explore(exp_dict2)
    res1 = env1.f_pipeline(pipeline=mypipeline)
    self.are_results_in_order(res1)
    res2 = env2.f_run(Multiply(), 22)
    self.are_results_in_order(res2)
    traj_name = traj1.v_name
    traj1 = Trajectory(traj_name, add_time=False, filename=filename)
    traj1.f_load(load_data=2)
    traj2.f_load(load_data=2)
    self.compare_trajectories(traj1, traj2)
    env1.f_disable_logging()
    env2.f_disable_logging()
def test_storage_and_loading(self):
    """Store, drop, and fully reload the trajectory; annotations must survive."""
    self.traj.f_store()
    traj_name = self.traj.v_name
    del self.traj
    self.traj = Trajectory(filename=self.filename)
    self.traj.f_load(name=traj_name,
                     load_results=2,
                     load_parameters=2,
                     load_derived_parameters=2,
                     load_other_data=2)
    self.test_annotations_insert()
def setUp(self):
    """Create an annotated trajectory and check the expected node count."""
    self.filename = make_temp_dir(
        os.path.join('experiments', 'tests', 'HDF5', 'annotations.hdf5'))
    self.traj = Trajectory(name='Annotations', filename=self.filename)
    self.traj.f_add_result('testres', 42)
    self.traj.f_add_parameter('testgroup.testparam', 42)
    self.make_annotations()
    self.add_annotations(self.traj)
    # Count all non-config nodes; the fixture should produce exactly five.
    not_config = lambda node: 'config' not in node.v_full_name
    count = sum(1 for _ in self.traj.f_iter_nodes(recursive=True,
                                                  predicate=not_config))
    self.assertTrue(count == 5, '%s != %s' % (str(count), str(5)))
def test_pipeline(self):
    """Pipeline execution and plain run must yield comparable trajectories.

    Fix: call ``env1.f_pipeline`` — the ``f_``-prefixed public Environment
    API used by the sibling pipeline test in this file — instead of the
    inconsistent ``env1.pipeline``.
    """
    filename = 'testpostprocpipe.hdf5'
    env1, filename, _, _ = self.make_environment(filename, 'k1')
    env2 = self.make_environment(filename, 'k2', log=False)[0]
    traj1 = env1.v_trajectory
    traj2 = env2.v_trajectory
    traj2.f_add_parameter('x', 1, comment='1st')
    traj2.f_add_parameter('y', 1, comment='1st')
    exp_dict2 = {'x': [1, 2, 3, 4, 1, 2, 2, 3],
                 'y': [1, 2, 3, 4, 1, 2, 0, 1]}
    traj2.f_explore(exp_dict2)
    res1 = env1.f_pipeline(pipeline=mypipeline)
    self.are_results_in_order(res1)
    res2 = env2.f_run(Multiply(), 22)
    self.are_results_in_order(res2)
    traj_name = traj1.v_name
    traj1 = Trajectory(traj_name, add_time=False, filename=filename)
    traj1.f_load(load_data=2)
    traj2.f_load(load_data=2)
    self.compare_trajectories(traj1, traj2)
    env1.f_disable_logging()
    env2.f_disable_logging()
def profile_single_storing(profile_stroing=False, profile_loading=True):
    """Profile storing and/or loading of a small exploration.

    NOTE(review): ``profile_stroing`` is a typo for ``profile_storing`` but is
    kept to preserve the call signature for existing callers.
    """
    logging.basicConfig(level=logging.INFO)
    logfolder = os.path.join(tempfile.gettempdir(), TEMPDIR, 'logs')
    pathfolder = os.path.join(tempfile.gettempdir(), TEMPDIR, 'hdf5')
    res_per_run = 100
    env = Environment(log_folder=logfolder, filename=pathfolder, ncores=2,
                      multiproc=False, use_pool=True, wrap_mode='QUEUE')
    traj = env.v_trajectory
    traj.f_add_parameter('res_per_run', res_per_run)
    traj.f_add_parameter('trial', 0)
    traj.f_explore({'trial': list(range(10))})
    runexp = lambda: env.f_run(add_data)
    if profile_stroing:
        cProfile.runctx('runexp()', {'runexp': runexp}, globals(),
                        sort=1, filename='store_stats.profile')
    else:
        runexp()
    print('########################################################################')
    # Reload the freshly stored trajectory, optionally under the profiler.
    traj = Trajectory(name=traj.v_name, add_time=False,
                      filename=traj.v_storage_service.filename)
    load = lambda: traj.f_load(load_parameters=2, load_results=1)
    if profile_loading:
        cProfile.runctx('load()', {'load': load}, globals(),
                        filename='load_stats.profile', sort=1)
def setUp(self):
    """Build a richly populated trajectory and probe a few error paths."""
    name = 'Moop'
    self.traj = Trajectory(name, dynamically_imported_classes=[
        ImAParameterInDisguise,
        'pypet.tests.test_helpers.ImAResultInDisguise'])
    self.assertTrue(self.traj.f_is_empty())
    comment = 'This is a comment'
    self.traj.v_comment = comment
    self.assertTrue(comment == self.traj.v_comment)
    self.traj.f_add_parameter('IntParam', 3)
    sparsemat = spsp.csr_matrix((1000, 1000))
    sparsemat[1, 2] = 17.777
    self.traj.f_add_parameter(PickleParameter, 'SparseParam', sparsemat)
    self.traj.f_add_parameter('FloatParam')
    self.traj.f_add_derived_parameter(Parameter('FortyTwo', 42))
    self.traj.f_add_result(Result, 'Im.A.Simple.Result', 44444)
    self.traj.FloatParam = 4.0
    self.explore_dict = {'FloatParam': [1.0, 1.1, 1.2, 1.3]}
    self.traj.f_explore(self.explore_dict)
    self.assertTrue(len(self.traj) == 4)
    self.traj.f_add_parameter_group('peter.paul')
    self.traj.f_add_parameter('peter.markus.yve', 6)
    self.traj.f_add_result('Test', 42)
    self.traj.peter.f_add_parameter('paul.peter')
    self.traj.f_add_config('make.impossible.promises', 1)
    # Unknown attribute lookup and illegal naming must fail:
    with self.assertRaises(AttributeError):
        self.traj.markus.peter
    with self.assertRaises(ValueError):
        self.traj.f_add_parameter('Peter. h ._hurz')
def setUp(self):
    """Create an annotated trajectory and check the expected node count."""
    self.filename = make_temp_file('experiments/tests/HDF5/annotations.hdf5')
    self.traj = Trajectory(name='Annotations', filename=self.filename)
    self.traj.f_add_result('testres', 42)
    self.traj.f_add_parameter('testgroup.testparam', 42)
    self.make_annotations()
    self.add_annotations(self.traj)
    n_nodes = sum(1 for _ in self.traj.f_iter_nodes(recursive=True))
    self.assertTrue(n_nodes == 5)
def test_not_unique_search(self):
    """Ambiguous node names must raise NotUniqueNodeError."""
    self.traj = Trajectory()
    self.traj.f_add_parameter('ghgghg.test')
    self.traj.f_add_parameter('ghdsfdfdsfdsghg.test')
    # Two leaves named 'test' — plain attribute access is ambiguous:
    with self.assertRaises(pex.NotUniqueNodeError):
        self.traj.test
    self.traj.f_add_parameter('depth0.depth1.depth2.findme', 42)
    self.traj.f_add_parameter('depth0.depth1.findme', 43)
    # Forward search resolves to the shallower node:
    self.assertTrue(self.traj.findme == 43)
    # Backwards search from 'depth0' hits both and is ambiguous:
    with self.assertRaises(pex.NotUniqueNodeError):
        self.traj.f_get('depth0.findme', backwards_search=True)
def test_multiple_storage_and_loading(self):
    """Run two environments against the same file and compare their trajectories."""
    # The integer 0 means: reuse the filename at that index of the list.
    self.filenames = [
        make_temp_dir(os.path.join('experiments', 'tests', 'HDF5',
                                   'multiple_storage_and_loading.hdf5')),
        0]
    self.envs = []
    self.trajs = []
    for irun, filename in enumerate(self.filenames):
        if isinstance(filename, int):
            filename = self.filenames[filename]
        self.make_environment(irun, filename)
    self.param_dict = {}
    create_param_dict(self.param_dict)
    for irun in range(len(self.filenames)):
        add_params(self.trajs[irun], self.param_dict)
    self.explore(self.trajs[0])
    self.explore(self.trajs[1])
    for irun in range(len(self.filenames)):
        self.make_run(self.envs[irun])
    # Reload trajectory 0 from scratch via its original storage service.
    temp_sservice = self.trajs[0].v_storage_service
    temp_name = self.trajs[0].v_name
    self.trajs[0] = Trajectory()
    self.trajs[0].v_storage_service = temp_sservice
    self.trajs[0].f_load(name=temp_name, as_new=False,
                         load_parameters=2, load_derived_parameters=2,
                         load_results=2, load_other_data=2)
    # Fill the empty hulls of trajectory 1 and compare.
    self.trajs[1].f_load_skeleton()
    self.trajs[1].f_load_items(self.trajs[1].f_to_dict().values(),
                               only_empties=True)
    self.compare_trajectories(self.trajs[0], self.trajs[1])
def setUp(self):
    """Create an annotated trajectory and check the expected node count."""
    self.filename = make_temp_dir(
        os.path.join('experiments', 'tests', 'HDF5', 'annotations.hdf5'))
    self.traj = Trajectory(name='Annotations', filename=self.filename)
    self.traj.f_add_result('testres', 42)
    self.traj.f_add_parameter('testgroup.testparam', 42)
    self.make_annotations()
    self.add_annotations(self.traj)
    # Count all non-config nodes; the fixture should produce exactly five.
    not_config = lambda node: 'config' not in node.v_full_name
    count = sum(1 for _ in self.traj.f_iter_nodes(recursive=True,
                                                  predicate=not_config))
    self.assertTrue(count == 5, '%s != %s' % (str(count), str(5)))
def test_f_iter_runs_auto_load(self):
    """Iterate over runs while a second trajectory auto-loads its data lazily."""
    # Explore and execute.
    self.explore(self.traj)
    results = self.env.f_run(multiply)
    self.are_results_in_order(results)
    traj = self.traj
    self.assertTrue(len(traj) == len(list(self.explore_dict.values())[0]))
    self.traj.f_load_skeleton()
    self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)
    self.check_if_z_is_correct(traj)
    # Second trajectory: load nothing, rely on auto-loading.
    newtraj = Trajectory()
    newtraj.v_storage_service = HDF5StorageService(filename=self.filename)
    newtraj.f_load(name=self.traj.v_name, index=None, as_new=False, load_data=0)
    newtraj.v_auto_load = True
    newtraj.par.f_load_child('y', load_data=1)
    for idx, run_name in enumerate(self.traj.f_iter_runs()):
        newtraj.v_crun = run_name
        self.traj.v_idx = idx
        newtraj.v_idx = idx
        # Only the current run's nodes should be visible.
        nameset = set(x.v_name for x in traj.f_iter_nodes(predicate=(idx,)))
        self.assertTrue('run_%08d' % (idx + 1) not in nameset)
        self.assertTrue('run_%08d' % idx in nameset)
        self.assertTrue(traj.v_crun == run_name)
        self.assertTrue(newtraj.res.runs.crun.z == newtraj.par.x * newtraj.par.y,
                        ' z != x*y: %s != %s * %s' % (str(newtraj.crun.z),
                                                      str(newtraj.x),
                                                      str(newtraj.y)))
    # After iteration the original trajectory is reset, the copy is not.
    traj = self.traj
    self.assertTrue(traj.v_idx == -1)
    self.assertTrue(traj.v_crun is None)
    self.assertTrue(traj.v_crun_ == pypetconstants.RUN_NAME_DUMMY)
    self.assertTrue(newtraj.v_idx == idx)
def main():
    """Load a stored clustered-network run and plot Fano factor vs R_ee."""
    folder = 'experiments/example_11/HDF5/'
    filename = os.path.join(folder, 'Clustered_Network.hdf5')
    # Passing a filename makes the trajectory create an HDF5StorageService itself.
    traj = Trajectory(filename=filename,
                      dynamically_imported_classes=[BrianDurationParameter,
                                                    BrianMonitorResult,
                                                    BrianParameter])
    # A fake environment just to enable logging:
    Environment(traj, do_single_runs=False)
    # Load the trajectory, but only load the skeleton of the results.
    traj.f_load(index=0,  # Change if you do not want the very first trajectory
                load_parameters=2,
                load_derived_parameters=2,
                load_results=1)
    # Find the result instances related to the fano factor and load their data.
    fano_dict = traj.f_get_from_runs('mean_fano_factor', fast_access=False)
    ffs = fano_dict.values()
    traj.f_load_items(ffs)
    # Extract the fano-factor values and the R_ee value of each run.
    ffs_values = [x.f_get() for x in ffs]
    Rees = traj.f_get('R_ee').f_get_range()
    # Plot average fano factor as a function of R_ee.
    plt.plot(Rees, ffs_values)
    plt.xlabel('R_ee')
    plt.ylabel('Avg. Fano Factor')
    plt.show()
def test_f_iter_runs_auto_load(self):
    """Iterate over runs while a second trajectory auto-loads its data lazily."""
    # Explore and execute.
    self.explore(self.traj)
    results = self.env.f_run(multiply)
    self.are_results_in_order(results)
    traj = self.traj
    self.assertTrue(len(traj) == len(compat.listvalues(self.explore_dict)[0]))
    self.traj.f_load_skeleton()
    self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)
    self.check_if_z_is_correct(traj)
    # Second trajectory: load nothing, rely on auto-loading.
    newtraj = Trajectory()
    newtraj.v_storage_service = HDF5StorageService(filename=self.filename)
    newtraj.f_load(name=self.traj.v_name, index=None, as_new=False, load_data=0)
    newtraj.v_auto_load = True
    newtraj.par.f_load_child('y', load_data=1)
    for idx, run_name in enumerate(self.traj.f_iter_runs()):
        newtraj.v_crun = run_name
        self.traj.v_idx = idx
        newtraj.v_idx = idx
        # Only the current run's nodes should be visible.
        nameset = set(x.v_name for x in traj.f_iter_nodes(predicate=(idx,)))
        self.assertTrue('run_%08d' % (idx + 1) not in nameset)
        self.assertTrue('run_%08d' % idx in nameset)
        self.assertTrue(traj.v_crun == run_name)
        self.assertTrue(newtraj.res.runs.crun.z == newtraj.par.x * newtraj.par.y,
                        ' z != x*y: %s != %s * %s' % (str(newtraj.crun.z),
                                                      str(newtraj.x),
                                                      str(newtraj.y)))
    # After iteration the original trajectory is reset, the copy is not.
    traj = self.traj
    self.assertTrue(traj.v_idx == -1)
    self.assertTrue(traj.v_crun is None)
    self.assertTrue(traj.v_crun_ == pypetconstants.RUN_NAME_DUMMY)
    self.assertTrue(newtraj.v_idx == idx)
def prepare():
    """Build a large in-memory trajectory with many parameters for benchmarking."""
    logging.basicConfig(level=logging.INFO)
    traj = Trajectory('Test')
    traj.v_storage_service = LazyStorageService()
    n_params = 11111
    for irun in range(n_params):
        traj.f_add_parameter('Hurz.L' + str(irun), irun)
    traj.f_add_parameter('Hurz.Test', data=1)
    return traj
def test_file_translation(self):
    """Update an old-format file and verify the migrated tree layout."""
    filename = make_temp_dir('to_new_tree.hdf5')
    mytraj = Trajectory('SCRATCH', filename=filename)
    mytraj.f_add_parameter('Test.Group.Test', 42)
    mytraj.f_add_derived_parameter('trajectory.saaaa', 33)
    mytraj.f_add_derived_parameter('trajectory.intraj.dpar1', 33)
    mytraj.f_add_derived_parameter('run_00000008.inrun.dpar2', 33)
    mytraj.f_add_derived_parameter('run_00000001.inrun.dpar3', 35)
    mytraj.f_add_result('trajectory.intraj.res1', 33)
    mytraj.f_add_result('run_00000008.inrun.res1', 33)
    mytraj.f_store()
    mytraj.f_migrate(new_name=mytraj.v_name + 'PETER', in_store=True)
    mytraj.f_store()
    # Translate the file to the new tree layout.
    fu = FileUpdater(filename=filename, backup=True)
    fu.update_file()
    mytraj = Trajectory(name=mytraj.v_name, add_time=False, filename=filename)
    mytraj.f_load(load_parameters=2, load_derived_parameters=2, load_results=2)
    for node in mytraj.f_iter_nodes():
        # The legacy 'trajectory' group must be gone and run groups nested
        # under '.runs.'.
        self.assertTrue(node.v_name != 'trajectory')
        if 'run_' in node.v_full_name:
            self.assertTrue('.runs.' in node.v_full_name)
    remove_data()
class AnnotationsTest(unittest.TestCase): tags = 'unittest', 'annotations' def setUp(self): self.filename = make_temp_dir( os.path.join('experiments', 'tests', 'HDF5', 'annotations.hdf5')) self.traj = Trajectory(name='Annotations', filename=self.filename) self.traj.f_add_result('testres', 42) self.traj.f_add_parameter('testgroup.testparam', 42) self.make_annotations() self.add_annotations(self.traj) pred = lambda x: 'config' not in x.v_full_name x = len([ node for node in self.traj.f_iter_nodes(recursive=True, predicate=pred) ]) self.assertTrue(x == 5, '%s != %s' % (str(x), str(5))) def tearDown(self): remove_data() def make_annotations(self): self.annotations = {} self.annotations['dict'] = { '33': 12, 'kkk': [1, 2, 'h'], 3: { 'a': 42.0 } } self.annotations['list'] = [self.annotations['dict'], 33] self.annotations['string'] = 'string' self.annotations['integer'] = 42 self.annotations['tuple'] = (3, 4, 5) self.annotations['Numpy_Data'] = np.array(['fff', 'ddd']) self.annotations[0] = 7777 def add_annotations(self, traj): funcs = 5 for idx, node in enumerate( [traj] + [node for node in traj.f_iter_nodes(recursive=True)]): for name in self.annotations: anno = self.annotations[name] if name == 0: node.f_set_annotations(anno) node.v_annotations.f_set(anno) elif idx % funcs == 0: node.f_set_annotations(**{name: anno}) elif idx % funcs == 1: node.v_annotations.f_set(**{name: anno}) elif idx % funcs == 2: node.v_annotations.f_set_single(name, anno) elif idx % funcs == 3: setattr(node.v_annotations, name, anno) elif idx % funcs == 4: node.v_annotations[name] = anno def test_annotations_insert(self): for idx,node in \ enumerate([self.traj] + [node for node in self.traj.f_iter_nodes(recursive=True)]): for name in self.annotations: anno = self.annotations[name] node_anno = node.v_annotations[name] self.assertTrue(comp.nested_equal(anno, node_anno), '%s != %s' % (str(anno), str(node_anno))) def test_pickling(self): dump = pickle.dumps(self.traj) del self.traj self.traj = 
pickle.loads(dump) self.test_annotations_insert() def test_storage_and_loading(self): self.traj.f_store() traj_name = self.traj.v_name del self.traj self.traj = Trajectory(filename=self.filename) self.traj.f_load(name=traj_name, load_parameters=2, load_derived_parameters=2, load_results=2, load_other_data=2) self.test_annotations_insert() def test_attribute_deletion(self): for node in self.traj.f_iter_nodes(recursive=True): name_list = [name for name in node.v_annotations] for name in name_list: delattr(node.v_annotations, name) self.assertTrue(node.v_annotations.f_is_empty()) def test_item_deletion(self): for node in self.traj.f_iter_nodes(recursive=True): name_list = [name for name in node.v_annotations] for name in name_list: del node.v_annotations[name] self.assertTrue(node.v_annotations.f_is_empty()) def test_get_item(self): for node in self.traj.f_iter_nodes(recursive=True): for key, val1 in node.v_annotations.f_to_dict().items(): val2 = node.v_annotations[key] self.assertTrue(comp.nested_equal(val1, val2)) def test_get_item_no_copy(self): for node in self.traj.f_iter_nodes(recursive=True): for key, val1 in node.v_annotations.f_to_dict(copy=False).items(): val2 = node.v_annotations[key] self.assertTrue(comp.nested_equal(val1, val2)) @staticmethod def dict_to_str(dictionary): resstr = '' new_dict = {} for key, val in dictionary.items(): if key == 0: key = 'annotation' new_dict[key] = val for key in sorted(new_dict.keys()): resstr += '%s=%s; ' % (key, str(new_dict[key])) return resstr[:-2] def test_to_str(self): dict_str = self.dict_to_str(self.annotations) for node in self.traj.f_iter_nodes(recursive=True): ann_str = node.f_ann_to_str() self.assertTrue(not ann_str.endswith(' ') or not ann_str.endswith(',')) for name in self.annotations: if name == 0: name = 'annotation' self.assertTrue(name in ann_str) self.assertEqual(dict_str, ann_str, '%s!=%s' % (dict_str, ann_str)) ann_str = str(node.v_annotations) self.assertEqual(dict_str, ann_str, '%s!=%s' % (dict_str, 
ann_str)) def test_single_get_and_getattr_and_setattr(self): self.traj.f_add_parameter('test2', 42) self.traj.f_get('test2').v_annotations.test = 4 self.assertTrue(self.traj.f_get('test2').v_annotations.test, 4) self.assertTrue(self.traj.f_get('test2').v_annotations.f_get(), 4) def test_get_annotations(self): key_list = list(self.annotations.keys()) for node in self.traj.f_iter_nodes(recursive=True): for name in self.annotations: self.assertTrue( comp.nested_equal(self.annotations[name], node.f_get_annotations(name))) val_list = node.f_get_annotations(*key_list) for idx, val in enumerate(val_list): self.assertTrue( comp.nested_equal(self.annotations[key_list[idx]], val)) def test_f_get_errors(self): for node in self.traj.f_iter_nodes(recursive=True): with self.assertRaises(ValueError): node.v_annotations.f_get() with self.assertRaises(AttributeError): node.v_annotations.f_get('gdsdfd') testparam = self.traj.f_add_parameter('ggg', 343) with self.assertRaises(AttributeError): testparam.v_annotations.f_get() def test_f_set_numbering(self): int_list = list(range(10)) for node in self.traj.f_iter_nodes(recursive=True): node.v_annotations.f_set(*int_list) self.assertEqual(node.v_annotations.f_get(*int_list), tuple(int_list)) for integer in int_list: if integer == 0: name = 'annotation' else: name = 'annotation_%d' % integer self.assertTrue(name in node.v_annotations)
__author__ = 'Robert Meyer'

import numpy as np
import os  # For path names being viable under Windows and Linux

from pypet.trajectory import Trajectory
from pypet import pypetconstants


# Here I show how to store and load results in parts if they are quite large.
# I will skip using an environment and only work with a trajectory.

# We can create a trajectory and hand it a filename directly and it will create an
# HDF5StorageService for us:
filename = os.path.join('hdf5', 'example_09.hdf5')
traj = Trajectory(name='example_09_huge_data', filename=filename,
                  overwrite_file=True)

# Now we directly add a huge result. Note that we could do the exact same procedure
# during a single run, there is no syntactical difference. However, the sub branch
# now is different: the result will be found under `traj.results.trajectory` instead
# of `traj.results.run_XXXXXXXX` (where XXXXXXXX is the current run index,
# e.g. 00000007).
# We will add two large matrices, a 100 by 100 by 100 one and a 1000 by 1000 one,
# both containing random numbers. They are called `mat1` and `mat2` and are handled
# by the same result object called `huge_matrices`:
traj.f_add_result('huge_matrices',
                  mat1=np.random.rand(100, 100, 100),
                  mat2=np.random.rand(1000, 1000))

# Note that the result will not support fast access since it contains more than a
# single data item. Even if there was only `mat1`, because the name is `mat1`
# instead of `huge_matrices`
env.run(multiply)

# Now let's see how we can reload the stored data from above.
# We do not need an environment for that, just a trajectory.
from pypet.trajectory import Trajectory

# So, first let's create a new trajectory and pass it the path and name of the HDF5
# file. Yet, to be very clear let's delete all the old stuff.
del traj
# Before deleting the environment let's disable logging and close all log-files.
env.disable_logging()
del env

traj = Trajectory(filename=filename)

# Now we want to load all stored data.
traj.f_load(index=-1, load_parameters=2, load_results=2)

# Above `index` specifies that we want to load the trajectory with that particular
# index within the HDF5 file. We could instead also specify a `name`.
# Counting works also backwards, so `-1` yields the last or newest trajectory in
# the file.
#
# Next we need to specify how the data is loaded.
# Therefore, we have to set the keyword arguments `load_parameters` and
# `load_results`; here we chose both to be `2`.
# `0` would mean we do not want to load anything at all.
# `1` would mean we only want to load the empty hulls or skeletons of our
# parameters or results. Accordingly, we would add parameters or results to our
# trajectory but they would not contain any data.