def main():
    """Let a pool of processes manipulate a stored result concurrently,
    then reload it and report which process touched it last."""
    # We don't use an environment so we enable logging manually
    logging.basicConfig(level=logging.INFO)

    filename = os.path.join('hdf5','example_16.hdf5')
    traj = Trajectory(filename=filename, overwrite_file=True)

    # The result that will be manipulated
    traj.f_add_result('last_process_name', 'N/A',
                      comment='Name of the last process that manipulated the trajectory')

    with MultiprocContext(trajectory=traj, wrap_mode='LOCK') as mc:
        # The multiprocessing context manager wraps the storage service of the trajectory
        # and passes the wrapped service to the trajectory.
        # Also restores the original storage service in the end.
        # Moreover, we need the lock wrapping because the locks
        # are pickled and sent to the pool for all function executions

        # Start a pool of processes manipulating the trajectory
        iterable = (traj for x in range(20))
        pool = mp.Pool(processes=4)
        # Pass the trajectory and the function to the pool and execute it 20 times
        pool.map_async(manipulate_multiproc_safe, iterable)
        pool.close()
        # Wait for all processes to join
        pool.join()

    # Reload the data from disk and overwrite the existing result in RAM
    traj.results.f_load(load_data=3)
    # Print the name of the last process the trajectory was manipulated by
    print('The last process to manipulate the trajectory was: `%s`' % traj.last_process_name)
def make_plots(resultsPath):
    """Load the tone-in-noise trajectory and render each effect figure to its
    own PDF file under ``~/pypet-output``.

    :param resultsPath: path to the HDF5 file holding the stored trajectory
    :return: 0 on success
    """
    traj = Trajectory('tone-in-noise', add_time=False)
    # Load parameters fully and only result skeletons; remaining data is
    # fetched lazily thanks to auto loading below.
    traj.f_load(load_parameters=2, load_derived_parameters=0, load_results=1,
                load_other_data=0, filename=resultsPath)
    traj.v_auto_load = True

    output_dir = path.join(path.expanduser('~'), "pypet-output")
    # One (pdf name, plotting function) pair per figure; replaces the four
    # formerly copy-pasted `with PdfPages(...)` blocks.
    plotters = (
        ('synaptopathy.pdf', synaptopathy_effect),
        ('periphery.pdf', periphery_effect),
        ('weighting.pdf', weighting_effect),
        ('brainstem.pdf', brainstem_effect),
    )
    for pdf_name, plot in plotters:
        with PdfPages(path.join(output_dir, pdf_name)) as pdf:
            plot(traj, pdf)
    return 0
def load_trajectory(self, trajectory_index=None, trajectory_name=None, as_new=False):
    """Load a trajectory from ``self.filename`` and return it fully populated.

    Either ``trajectory_name`` or ``trajectory_index`` selects the trajectory.
    """
    loaded = Trajectory(filename=self.filename)
    # Pull in everything: parameters, derived parameters, results and other data
    loaded.f_load(name=trajectory_name,
                  index=trajectory_index,
                  as_new=as_new,
                  load_parameters=2,
                  load_derived_parameters=2,
                  load_results=2,
                  load_other_data=2)
    return loaded
def test_partially_delete_stuff(self):
    """Delete single entries of a stored result, then the whole result."""
    traj = Trajectory(name='TestDelete',
                      filename=make_temp_dir('testpartiallydel.hdf5'))
    result = traj.f_add_result('mytest.test', a='b', c='d')
    traj.f_store()

    self.assertIn('a', result)
    # Drop only entry 'a' from disk and from the in-memory item
    traj.f_delete_item(result, delete_only=['a'], remove_from_item=True)
    self.assertIn('c', result)
    self.assertNotIn('a', result)

    # Re-add 'a' in RAM only; a reload must wipe it again
    result['a'] = 'offf'
    self.assertIn('a', result)
    traj.f_load(load_results=3)
    self.assertNotIn('a', result)
    self.assertIn('c', result)

    # Finally remove the whole result from the tree
    traj.f_delete_item(result, remove_from_trajectory=True)
    self.assertIn('results', traj)
    self.assertNotIn(result, traj)
def test_store_and_load_large_dictionary(self):
    """A dict with more than 1024 entries and a small one survive a
    store/load round trip."""
    traj = Trajectory(name='Testlargedict',
                      filename=make_temp_dir('large_dict.hdf5'))

    big = {'item_%d' % i: i for i in range(1025)}
    small = {'item_%d' % i: i for i in range(33)}

    traj.f_add_result('large_dict', big, comment='Huge_dict!')
    traj.f_add_result('large_dict2', small, comment='Not so large dict!')
    traj.f_store()

    reloaded = Trajectory(filename=make_temp_dir('large_dict.hdf5'))
    reloaded.f_load(name=traj.v_name, load_data=2)
    self.compare_trajectories(traj, reloaded)
def test_storage_service_errors(self):
    """The raw storage service and ``f_load`` reject invalid requests."""
    traj = Trajectory(filename=make_temp_dir('testnoservice.hdf5'), add_time=True)
    traj_name = traj.v_name

    # you cannot store stuff before the trajectory was stored once:
    with self.assertRaises(ValueError):
        traj.v_storage_service.store('FAKESERVICE', self, trajectory_name=traj.v_name)

    traj.f_store()

    # storing under a trajectory name other than the stored one raises
    with self.assertRaises(ValueError):
        traj.v_storage_service.store('FAKESERVICE', self, trajectory_name='test')

    # 'FAKESERVICE' is not a known message type
    with self.assertRaises(pex.NoSuchServiceError):
        traj.v_storage_service.store('FAKESERVICE', self, trajectory_name=traj.v_name)

    # giving both a name and an index to f_load raises
    with self.assertRaises(ValueError):
        traj.f_load(name='test', index=1)

    # malformed LIST message
    with self.assertRaises(RuntimeError):
        traj.v_storage_service.store('LIST', [('LEAF', None, None, None, None)],
                                     trajectory_name=traj.v_name)

    # non-existing index or name
    with self.assertRaises(ValueError):
        traj.f_load(index=9999)
    with self.assertRaises(ValueError):
        traj.f_load(name='Non-Existising-Traj')
def main():
    """Let a pool of processes manipulate a stored result concurrently,
    then reload it and report which process touched it last."""
    # We don't use an environment so we enable logging manually
    logging.basicConfig(level=logging.INFO)

    filename = os.path.join('hdf5','example_16.hdf5')
    traj = Trajectory(filename=filename, overwrite_file=True)

    # The result that will be manipulated
    traj.f_add_result('last_process_name', 'N/A',
                      comment='Name of the last process that manipulated the trajectory')

    with MultiprocContext(trajectory=traj, wrap_mode='LOCK') as mc:
        # The multiprocessing context manager wraps the storage service of the trajectory
        # and passes the wrapped service to the trajectory.
        # Also restores the original storage service in the end.
        # Moreover, we need the lock wrapping because the locks
        # are pickled and sent to the pool for all function executions

        # Start a pool of processes manipulating the trajectory
        iterable = (traj for x in range(50))
        pool = mp.Pool(processes=4)
        # Pass the trajectory and the function to the pool and execute it 50 times
        pool.map_async(manipulate_multiproc_safe, iterable)
        pool.close()
        # Wait for all processes to join
        pool.join()

    # Reload the data from disk and overwrite the existing result in RAM
    traj.results.f_load(load_data=3)
    # Print the name of the last process the trajectory was manipulated by
    print('The last process to manipulate the trajectory was: `%s`' % traj.last_process_name)
def test_df(self):
    """Shared pandas frames: create, append, read back and query."""
    traj = Trajectory(name=make_trajectory_name(self),
                      filename=make_temp_dir('hdf5errors.hdf5'))
    traj.f_store()

    five_rows = {
        'hi': [1, 2, 3, 4, 5],
        'shu': ['bi', 'du', 'da', 'ha', 'hui']
    }
    one_row = {'answer': [42]}

    traj.f_add_result(
        SharedResult, 'dfs.df',
        SharedPandasFrame()).create_shared_data(data=pd.DataFrame(five_rows))
    traj.f_add_result(
        SharedResult, 'dfs.df1',
        SharedPandasFrame()).create_shared_data(data=pd.DataFrame(one_row))
    traj.f_add_result(SharedResult, 'dfs.df3', SharedPandasFrame())

    # Build df3 by appending the one-row frame ten times
    for _ in range(10):
        traj.df3.append(traj.df1.read())
    self.assertEqual(len(traj.df3.read()), 10)

    # HDF5-side select on the five-row frame
    selection = traj.df.select(where='index == 2')
    self.assertEqual(len(selection), 1)
def test_iteration_failure(self):
    """Accessing a name reachable via more than one path raises
    ``NotUniqueNodeError``."""
    traj = Trajectory()
    traj.f_add_parameter_group('test.test3')
    traj.f_add_parameter_group('test2')
    # Link `test3` under `test2` as well, making `traj.test3` ambiguous
    traj.test2.f_add_link(traj.test3)
    with self.assertRaises(pex.NotUniqueNodeError):
        traj.test3
def test_storage_service_errors(self):
    """The raw storage service and ``f_load`` reject invalid requests."""
    traj = Trajectory(filename=make_temp_dir('testnoservice.hdf5'))
    traj_name = traj.v_name

    # you cannot store stuff before the trajectory was stored once:
    with self.assertRaises(ValueError):
        traj.v_storage_service.store('FAKESERVICE', self, trajectory_name=traj.v_name)

    traj.f_store()

    # storing under a trajectory name other than the stored one raises
    with self.assertRaises(ValueError):
        traj.v_storage_service.store('FAKESERVICE', self, trajectory_name='test')

    # 'FAKESERVICE' is not a known message type
    with self.assertRaises(pex.NoSuchServiceError):
        traj.v_storage_service.store('FAKESERVICE', self, trajectory_name=traj.v_name)

    # giving both a name and an index to f_load raises
    with self.assertRaises(ValueError):
        traj.f_load(name='test', index=1)

    # malformed LIST message
    with self.assertRaises(RuntimeError):
        traj.v_storage_service.store('LIST', [('LEAF', None, None, None, None)],
                                     trajectory_name=traj.v_name)

    # non-existing index or name
    with self.assertRaises(ValueError):
        traj.f_load(index=9999)
    with self.assertRaises(ValueError):
        traj.f_load(name='Non-Existising-Traj')
def test_errors(self):
    """Misusing the shared-data API raises the documented errors."""
    filename = make_temp_dir("hdf5errors.hdf5")
    traj = Trajectory(name=make_trajectory_name(self), filename=filename)
    # `np.float` was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin `float` is the documented replacement (same dtype).
    npearray = np.ones((2, 10, 3), dtype=float)
    thevlarray = np.array([compat.tobytes("j"), 22.2, compat.tobytes("gutter")])

    # Creating shared data before the trajectory is stored must fail
    with self.assertRaises(TypeError):
        traj.f_add_result(SharedResult, "arrays.vlarray",
                          SharedVLArray()).create_shared_data(obj=thevlarray)
    traj.f_store()

    traj.arrays.vlarray.create_shared_data(obj=thevlarray)
    traj.f_add_result(SharedResult, "arrays.array",
                      SharedArray()).create_shared_data(data=npearray)
    traj.arrays.f_add_result(SharedResult, "super.carray", SharedCArray(),
                             comment="carray").create_shared_data(
                                 shape=(10, 10), atom=pt.atom.FloatAtom())
    traj.arrays.f_add_result(SharedResult, "earray",
                             SharedEArray()).create_shared_data("earray", obj=npearray)
    traj.f_store()

    # Row iteration outside a storage context must fail
    with self.assertRaises(TypeError):
        traj.arrays.array.iter_rows()

    # Nested storage contexts for the same trajectory are forbidden
    with StorageContextManager(traj) as cm:
        with self.assertRaises(RuntimeError):
            with StorageContextManager(traj) as cm2:
                pass
        self.assertTrue(traj.v_storage_service.is_open)
        # Re-opening the store while it is already open must also fail
        with self.assertRaises(RuntimeError):
            StorageContextManager(traj).f_open_store()
    self.assertFalse(traj.v_storage_service.is_open)
def test_store_overly_long_comment(self):
    """A very long comment survives storing, pruning and reloading."""
    long_comment = 7777 * '6'
    traj = Trajectory(name='traj', add_time=True,
                      filename=make_temp_dir('remove_errored.hdf5'))
    traj.f_add_result('iii', 42, 43, comment=long_comment)
    traj.f_store()
    # Drop the results subtree from memory and pull it back from disk
    traj.f_remove_child('results', recursive=True)
    traj.f_load_child('results', recursive=True)
    self.assertEqual(traj.iii.v_comment, long_comment)
def test_links_according_to_run(self):
    """Selecting a run index on a trajectory containing links must not fail."""
    traj = Trajectory()
    traj.f_add_parameter('test.hi', 44)
    traj.f_explore({'hi': [1,2,3]})
    traj.f_add_parameter_group('test.test.test2')
    traj.f_add_parameter_group('test2')
    # Link the whole `test` group under `test2`
    traj.test2.f_add_link('test', traj.test)
    # Switch to the second run; should work despite the link
    traj.v_idx = 1
def test_link_of_link(self):
    """A link pointing at a link resolves to the final target node."""
    traj = Trajectory()
    for group in ('test', 'test2'):
        traj.f_add_parameter_group(group)
    # Build a two-node cycle of links
    traj.test.f_add_link('circle1', traj.test2)
    traj.test2.f_add_link('circle2', traj.test)
    # `circle1.circle2` resolves back to `test`, so this links test -> test
    traj.test.f_add_link('circle2', traj.test.circle1.circle2)
    self.assertIs(traj.test.circle2, traj.test)
def test_max_depth_loading_and_storing(self):
    """``max_depth`` limits how deep storing and loading descend into the tree."""
    filename = make_temp_dir('newassignment.hdf5')
    traj = Trajectory(filename=filename, overwrite_file=True)

    # Store a 5-level parameter chain but only 4 levels deep
    traj.par.d1 = Parameter('d1.d2.d3.d4.d5', 55)
    traj.f_store(max_depth=4)

    traj = load_trajectory(index=-1, filename=filename)
    traj.f_load(load_data=2)
    self.assertTrue('d3' in traj)
    self.assertFalse('d4' in traj)

    # Loading with max_depth=3 truncates one level earlier
    traj = load_trajectory(index=-1, filename=filename, max_depth=3)
    self.assertTrue('d2' in traj)
    self.assertFalse('d3' in traj)

    traj.par.f_remove(recursive=True)
    traj.dpar.d1 = Parameter('d1.d2.d3.d4.d5', 123)

    # Store a child subtree only 3 levels deep, then reload it
    traj.dpar.f_store_child('d1', recursive=True, max_depth=3)
    traj.dpar.f_remove_child('d1', recursive=True)
    self.assertTrue('d1' not in traj)
    traj.dpar.f_load_child('d1', recursive=True)
    self.assertTrue('d3' in traj)
    self.assertTrue('d4' not in traj)

    # Reload the same subtree with an even smaller load depth
    traj.dpar.f_remove_child('d1', recursive=True)
    self.assertTrue('d1' not in traj)
    traj.dpar.f_load_child('d1', recursive=True, max_depth=2)
    self.assertTrue('d2' in traj)
    self.assertTrue('d3' not in traj)

    # max_depth=0 stores nothing below the group itself
    traj.dpar.l1 = Parameter('l1.l2.l3.l4.l5', 123)
    traj.dpar.f_store(recursive=True, max_depth=0)
    self.assertFalse(traj.dpar.l1._stored)

    traj.dpar.f_store(recursive=True, max_depth=4)
    traj.dpar.f_remove()
    self.assertTrue('l1' not in traj)
    traj.dpar.f_load(recursive=True)
    self.assertTrue('l4' in traj)
    self.assertTrue('l5' not in traj)

    traj.dpar.f_remove()
    self.assertTrue('l1' not in traj)
    traj.dpar.f_load(recursive=True, max_depth=3)
    self.assertTrue('l3' in traj)
    self.assertTrue('l4' not in traj)
def main(): filename = os.path.join('hdf5', 'Clustered_Network.hdf5') # If we pass a filename to the trajectory a new HDF5StorageService will # be automatically created traj = Trajectory(filename=filename, dynamically_imported_classes=[BrianMonitorResult, BrianParameter]) # Let's create and fake environment to enable logging: env = Environment(traj, do_single_runs=False) # Load the trajectory, but onyl laod the skeleton of the results traj.f_load(index=-1, load_parameters=2, load_derived_parameters=2, load_results=1) # Find the result instances related to the fano factor fano_dict = traj.f_get_from_runs('mean_fano_factor', fast_access=False) # Load the data of the fano factor results ffs = fano_dict.values() traj.f_load_items(ffs) # Extract all values and R_ee values for each run ffs_values = [x.f_get() for x in ffs] Rees = traj.f_get('R_ee').f_get_range() # Plot average fano factor as a function of R_ee plt.plot(Rees, ffs_values) plt.xlabel('R_ee') plt.ylabel('Avg. Fano Factor') plt.show() # Finally disable logging and close all log-files env.disable_logging()
def test_backwards_compatibility(self):
    """A trajectory written by pypet 0.1b.6 still loads correctly."""
    # Test only makes sense with python 2.7 or lower
    legacy = Trajectory()
    module_path, _ = os.path.split(pypet.__file__)
    filename = os.path.join(module_path, 'tests', 'testdata',
                            'pypet_v0_1b_6.hdf5')
    legacy.f_load(index=-1, load_data=2, force=True, filename=filename)

    self.assertEqual(legacy.v_version, '0.1b.6')
    self.assertEqual(legacy.par.x, 0)
    self.assertEqual(len(legacy), 9)
    self.assertEqual(legacy.res.runs.r_4.z, 12)

    # Explored parameters were registered and are part of the tree
    self.assertGreater(len(legacy._explored_parameters), 0)
    for param in legacy.f_get_explored_parameters():
        self.assertTrue(legacy.f_contains(param))
def test_no_run_information_loading(self):
    """With ``with_run_information=False`` per-run info is not populated."""
    filename = make_temp_dir('testnoruninfo.hdf5')
    traj = Trajectory(name='TestDelete', filename=filename, add_time=True)

    n_runs = 100000
    traj.par.x = Parameter('', 42)
    traj.f_explore({'x': range(n_runs)})
    traj.f_store()

    traj = load_trajectory(index=-1, filename=filename,
                           with_run_information=False)
    # Length still reflects all runs ...
    self.assertEqual(len(traj), n_runs)
    # ... but the run-information dict stays collapsed to a single entry
    self.assertEqual(len(traj._run_information), 1)
def test_storing_and_manipulating(self):
    """Create shared arrays/tables, write through a storage context,
    then reload from disk and verify the written data."""
    filename = make_temp_dir("hdf5manipulation.hdf5")
    traj = Trajectory(name=make_trajectory_name(self), filename=filename)
    trajname = traj.v_name

    thedata = np.zeros((1000, 1000))
    res = traj.f_add_result(SharedResult, "shared")
    myarray = SharedArray("array", res, trajectory=traj, add_to_parent=True)
    mytable = SharedTable("t1", res, trajectory=traj, add_to_parent=True)
    mytable2 = SharedTable("t2", res, trajectory=traj, add_to_parent=True)
    mytable3 = SharedTable("t3", res, trajectory=traj, add_to_parent=True)

    # Shared data can only be created after the skeleton is on disk
    traj.f_store(only_init=True)
    myarray.create_shared_data(data=thedata)
    mytable.create_shared_data(first_row={"hi": compat.tobytes("hi"),
                                          "huhu": np.ones(3)})
    mytable2.create_shared_data(description={"ha": pt.StringCol(2, pos=0),
                                             "haha": pt.FloatCol(pos=1)})
    mytable3.create_shared_data(description={"ha": pt.StringCol(2, pos=0),
                                             "haha": pt.FloatCol(pos=1)})
    traj.f_store()

    newrow = {"ha": "hu", "haha": 4.0}

    # Row access outside a storage context must fail
    with self.assertRaises(TypeError):
        row = traj.shared.t2.row

    with StorageContextManager(traj) as cm:
        # Append eleven identical rows to t2
        row = traj.shared.t2.row
        for irun in range(11):
            for key, val in newrow.items():
                row[key] = val
            row.append()
        traj.shared.t3.flush()
        data = myarray.read()
        arr = myarray.get_data_node()
        self.assertTrue(np.all(data == thedata))

    with StorageContextManager(traj) as cm:
        # In-place mutation of the shared array
        myarray[2, 2] = 10
        data = myarray.read()
        self.assertTrue(data[2, 2] == 10)

    self.assertTrue(data[2, 2] == 10)
    self.assertFalse(traj.v_storage_service.is_open)

    # Fresh load from disk; re-attach the trajectory to the shared items
    traj = load_trajectory(name=trajname, filename=filename)
    traj.f_load(load_data=2)
    traj.shared.t2.traj = traj
    traj.shared.t1.traj = traj
    traj.shared.array.traj = traj

    self.assertTrue(traj.shared.t2.nrows == 11,
                    "%s != 11" % str(traj.shared.t2.nrows))
    self.assertTrue(traj.shared.t2[0]["ha"] == compat.tobytes("hu"),
                    traj.shared.t2[0]["ha"])
    self.assertTrue(traj.shared.t2[1]["ha"] == compat.tobytes("hu"),
                    traj.shared.t2[1]["ha"])
    self.assertTrue("huhu" in traj.shared.t1.colnames)
    self.assertTrue(traj.shared.array[2, 2] == 10)
def setUp(self):
    """Create a stored trajectory with a ``shared_data`` result holding
    one :class:`SharedArray`."""
    self.filename = make_temp_dir('shared_table_test.hdf5')
    self.traj = Trajectory(name=make_trajectory_name(self), filename=self.filename)
    # Newly added results default to SharedResult so shared data can attach
    self.traj.v_standard_result = SharedResult
    self.traj.f_store(only_init=True)
    self.traj.f_add_result('shared_data')
    self.shared_array = SharedArray(name='array',
                                    parent=self.traj.shared_data,
                                    trajectory=self.traj,
                                    add_to_parent=True)
def test_loading_explored_parameters(self):
    """After a reload the explored parameter instance returned by ``f_get``
    is the one registered in ``_explored_parameters``."""
    filename = make_temp_dir('load_explored.hdf5')

    traj = Trajectory(filename=filename, overwrite_file=True, add_time=False)
    traj.par.x = Parameter('x', 42, comment='answer')
    traj.f_explore({'x': [1, 2, 3, 4]})
    traj.f_store()
    # (the original kept an unused `name = traj.v_name` local; removed)

    # Fresh trajectory object reading the same file
    traj = Trajectory(filename=filename, add_time=False)
    traj.f_load()
    x = traj.f_get('x')
    self.assertIs(x, traj._explored_parameters['parameters.x'])
def test_shortenings_of_names(self):
    """The ``f_a*`` shortcut methods add leaves to the matching subtrees."""
    traj = Trajectory(filename=make_temp_dir('testshortening.hdf5'))

    # config
    traj.f_aconf('g', 444)
    self.assertIsInstance(traj.f_get('g'), Parameter)
    self.assertEqual(traj.conf.g, 444)

    # parameters
    traj.f_apar('g', 444)
    self.assertIsInstance(traj.par.f_get('g'), Parameter)
    self.assertEqual(traj.par.g, 444)

    # derived parameters
    traj.f_adpar('g', 445)
    self.assertIsInstance(traj.derived_parameters.f_get('g'), Parameter)
    self.assertEqual(traj.dpar.g, 445)

    # results
    traj.f_ares('g', 454)
    self.assertIsInstance(traj.res.f_get('g'), Result)
    self.assertEqual(traj.res.g, 454)
def print_traj_parameters_explored(traj_dir):
    """Print the number of runs and the explored parameter names of the
    trajectory stored in ``traj_dir``/traj.hdf5.

    :param traj_dir: directory containing the trajectory HDF5 file
    """
    # Load the trajectory from the hdf5 file.
    # Only load parameters, results will be loaded at runtime (auto loading).
    traj_filename = 'traj.hdf5'
    traj_fullpath = os.path.join(traj_dir, traj_filename)
    traj = Trajectory()
    traj.f_load(filename=traj_fullpath,
                index=0,
                load_parameters=2,
                load_results=0,
                load_derived_parameters=0,
                force=True)
    # Turn on auto loading
    traj.v_auto_load = True

    # Count number of runs
    runs_n = len(traj.f_get_run_names())
    print('number of runs = {0}'.format(runs_n))

    # Explored parameter names without their group prefix.
    # `par.split('.')[-1]` replaces the unidiomatic `str.split(par, '.').pop()`.
    parameters_explored = [par.split('.')[-1]
                           for par in traj.f_get_explored_parameters()]
    print(parameters_explored)
def test_throw_warning_if_old_kw_is_used(self):
    """The deprecated `dynamically_imported_classes` keyword still works
    (warnings are recorded but deliberately not asserted here).

    NOTE(review): the original body started with a stray, dead ``pass``
    statement; it has been removed.
    """
    filename = make_temp_dir('hdfwarning.hdf5')

    with warnings.catch_warnings(record=True) as w:
        env = Environment(trajectory='test', filename=filename,
                          dynamically_imported_classes=[],
                          log_config=get_log_config())

    with warnings.catch_warnings(record=True) as w:
        traj = Trajectory(dynamically_imported_classes=[])

    traj = env.v_trajectory
    traj.f_store()

    with warnings.catch_warnings(record=True) as w:
        traj.f_load(dynamically_imported_classes=[])

    env.f_disable_logging()
def test_errors(self):
    """Misusing the shared-data API raises the documented errors."""
    filename = make_temp_dir('hdf5errors.hdf5')
    traj = Trajectory(name=make_trajectory_name(self), filename=filename)
    # `np.float` was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin `float` is the documented replacement (same dtype).
    npearray = np.ones((2, 10, 3), dtype=float)
    thevlarray = np.array(
        [compat.tobytes('j'), 22.2, compat.tobytes('gutter')])

    # Creating shared data before the trajectory is stored must fail
    with self.assertRaises(TypeError):
        traj.f_add_result(
            SharedResult, 'arrays.vlarray',
            SharedVLArray()).create_shared_data(obj=thevlarray)
    traj.f_store()

    traj.arrays.vlarray.create_shared_data(obj=thevlarray)
    traj.f_add_result(SharedResult, 'arrays.array',
                      SharedArray()).create_shared_data(data=npearray)
    traj.arrays.f_add_result(SharedResult, 'super.carray', SharedCArray(),
                             comment='carray').create_shared_data(
                                 shape=(10, 10), atom=pt.atom.FloatAtom())
    traj.arrays.f_add_result(SharedResult, 'earray',
                             SharedEArray()).create_shared_data(
                                 'earray', obj=npearray)
    traj.f_store()

    # Row iteration outside a storage context must fail
    with self.assertRaises(TypeError):
        traj.arrays.array.iterrows()

    # Nested storage contexts for the same trajectory are forbidden
    with StorageContextManager(traj):
        with self.assertRaises(RuntimeError):
            with StorageContextManager(traj):
                pass
        self.assertTrue(traj.v_storage_service.is_open)
        # Re-opening the store while it is already open must also fail
        with self.assertRaises(RuntimeError):
            StorageContextManager(traj).open_store()
    self.assertFalse(traj.v_storage_service.is_open)
def test_link_removal(self):
    """Links can be removed once, not twice; removing a child kills its link."""
    traj = Trajectory()
    traj.f_add_parameter_group('test')
    traj.f_add_parameter_group('test2')
    traj.test.f_add_link('circle1', traj.test2)
    traj.test2.f_add_link('circle2', traj.test)

    self.assertIn('circle1', traj)
    # Remove the link while addressing it through the link cycle itself
    traj.circle1.circle2.f_remove_link('circle1')
    self.assertNotIn('circle1', traj.circle2)

    with self.assertRaises(AttributeError):
        traj.test.circle1
    # A second removal of the same link is rejected
    with self.assertRaises(ValueError):
        traj.test.f_remove_link('circle1')

    traj.test2.f_remove_child('circle2')
    self.assertNotIn('circle2', traj)
def test_df(self):
    """Round trip and query of shared pandas DataFrames."""
    filename = make_temp_dir('hdf5errors.hdf5')
    traj = Trajectory(name=make_trajectory_name(self), filename=filename)
    traj.f_store()

    five_rows = {'hi': [1, 2, 3, 4, 5], 'shu': ['bi', 'du', 'da', 'ha', 'hui']}
    one_row = {'answer': [42]}

    df_res = traj.f_add_result(SharedResult, 'dfs.df', SharedPandasFrame())
    df_res.create_shared_data(data=pd.DataFrame(five_rows))
    df1_res = traj.f_add_result(SharedResult, 'dfs.df1', SharedPandasFrame())
    df1_res.create_shared_data(data=pd.DataFrame(one_row))
    traj.f_add_result(SharedResult, 'dfs.df3', SharedPandasFrame())

    # Grow df3 by appending the one-row frame ten times
    for _ in range(10):
        traj.df3.append(traj.df1.read())

    self.assertEqual(len(traj.df3.read()), 10)
    self.assertEqual(len(traj.df.select(where='index == 2')), 1)
def test_df(self):
    """Shared pandas frames support append, read and HDF5-side select."""
    traj = Trajectory(name=make_trajectory_name(self),
                      filename=make_temp_dir("hdf5errors.hdf5"))
    traj.f_store()

    base_data = {"hi": [1, 2, 3, 4, 5], "shu": ["bi", "du", "da", "ha", "hui"]}
    answer_only = {"answer": [42]}

    traj.f_add_result(SharedResult, "dfs.df",
                      SharedPandasFrame()).create_shared_data(
                          data=pd.DataFrame(base_data))
    traj.f_add_result(SharedResult, "dfs.df1",
                      SharedPandasFrame()).create_shared_data(
                          data=pd.DataFrame(answer_only))
    traj.f_add_result(SharedResult, "dfs.df3", SharedPandasFrame())

    append_count = 10
    for _ in range(append_count):
        traj.df3.append(traj.df1.read())
    self.assertEqual(len(traj.df3.read()), append_count)

    matches = traj.df.select(where="index == 2")
    self.assertEqual(len(matches), 1)
def test_all_arrays(self):
    """Create every shared array flavour, mutate each inside a storage
    context, and verify the mutations after a fresh reload."""
    filename = make_temp_dir("hdf5arrays.hdf5")
    traj = Trajectory(name=make_trajectory_name(self), filename=filename)
    trajname = traj.v_name
    # `np.float` was removed in NumPy 1.24; the builtin `float` is equivalent
    npearray = np.ones((2, 10, 3), dtype=float)
    thevlarray = np.array([compat.tobytes("j"), 22.2, compat.tobytes("gutter")])

    traj.f_store(only_init=True)
    res = traj.f_add_result(SharedResult, "arrays")
    res["carray"] = SharedCArray()
    res["carray"].create_shared_data(shape=(10, 10), atom=pt.atom.FloatAtom())
    res["earray"] = SharedEArray()
    res["earray"].create_shared_data(obj=npearray)
    res["vlarray"] = SharedVLArray()
    res["vlarray"].create_shared_data(obj=thevlarray)
    res["array"] = SharedArray()
    res["array"].create_shared_data(data=npearray)
    traj.f_store()

    traj = load_trajectory(name=trajname, filename=filename, load_all=2,
                           dynamic_imports=SharedResult)
    # (renamed from the original's `toappned` typo)
    to_append = [44, compat.tobytes("k")]
    with StorageContextManager(traj) as cm:
        a1 = traj.arrays.array
        a1[0, 0, 0] = 4.0
        a2 = traj.arrays.carray
        a2[0, 1] = 4
        a4 = traj.arrays.vlarray
        a4.append(to_append)
        a3 = traj.arrays.earray
        a3.append(np.zeros((1, 10, 3)))

    # Reload again and check every mutation survived
    traj = load_trajectory(name=trajname, filename=filename, load_all=2,
                           dynamic_imports=SharedResult)
    with StorageContextManager(traj) as cm:
        a1 = traj.arrays.array
        self.assertTrue(a1[0, 0, 0] == 4.0)
        a2 = traj.arrays.carray
        self.assertTrue(a2[0, 1] == 4)
        a3 = traj.arrays.earray
        self.assertTrue(a3.read().shape == (3, 10, 3))
        a4 = traj.arrays.vlarray
        # vlarray must hold exactly the original row plus the appended one
        for idx, x in enumerate(a4):
            if idx == 0:
                self.assertTrue(np.all(x == np.array(thevlarray)))
            elif idx == 1:
                self.assertTrue(np.all(x == np.array(to_append)))
            else:
                raise RuntimeError()
def test_file_renaming(self):
    """Log-file rename wildcards expand per selected run index."""
    traj = Trajectory('test', add_time=False)
    traj.f_add_parameter('x', 42)
    traj.f_explore({'x': [1, 2, 3]})

    pattern = '$traj_$set_$run'
    # Before selecting a run the wildcards expand to the ALL placeholders
    self.assertEqual(rename_log_file(pattern, traj),
                     'test_run_set_ALL_run_ALL')
    # After selecting run 0 they expand to concrete set/run indices
    traj.v_idx = 0
    self.assertEqual(rename_log_file(pattern, traj),
                     'test_run_set_00000_run_00000000')
def test_file_size_many_params(self):
    """700 individually stored parameters must stay below 10 MB on disk."""
    filename = make_temp_dir('filesize.hdf5')
    traj = Trajectory(filename=filename, overwrite_file=True, add_time=False)
    traj.f_store()

    for idx in range(700):
        param = traj.f_add_parameter('test.test%d' % idx, 42 + idx,
                                     comment='duh!')
        traj.f_store_item(param)

    size_in_mb = os.path.getsize(filename) / 1000000.
    get_root_logger().info('Size is %sMB' % str(size_in_mb))
    self.assertTrue(size_in_mb < 10.0,
                    'Size is %sMB > 10MB' % str(size_in_mb))
def test_link_of_link(self):
    """Linking to a link targets the node the chain finally resolves to."""
    traj = Trajectory()
    traj.f_add_parameter_group('test')
    traj.f_add_parameter_group('test2')
    # Two groups linking to each other form a cycle
    traj.test.f_add_link('circle1', traj.test2)
    traj.test2.f_add_link('circle2', traj.test)
    # Following circle1.circle2 lands back on `test` itself
    target = traj.test.circle1.circle2
    traj.test.f_add_link('circle2', target)
    self.assertIs(traj.test.circle2, traj.test)
def test_backwards_compatibility(self):
    """Data stored with pypet v0.1b.6 is still fully loadable."""
    # Test only makes sense with python 2.7 or lower
    old_traj = Trajectory()
    testdata_dir = os.path.join(os.path.split(pypet.__file__)[0],
                                'tests', 'testdata')
    old_traj.f_load(index=-1, load_data=2, force=True,
                    filename=os.path.join(testdata_dir, 'pypet_v0_1b_6.hdf5'))

    self.assertTrue(old_traj.v_version == '0.1b.6')
    self.assertTrue(old_traj.par.x == 0)
    self.assertTrue(len(old_traj) == 9)
    self.assertTrue(old_traj.res.runs.r_4.z == 12)

    # Explored parameters were registered and are contained in the tree
    self.assertGreater(len(old_traj._explored_parameters), 0)
    for param in old_traj.f_get_explored_parameters():
        self.assertTrue(old_traj.f_contains(param))
def test_link_removal(self):
    """Removing links and linked children behaves consistently."""
    traj = Trajectory()
    for name in ('test', 'test2'):
        traj.f_add_parameter_group(name)
    traj.test.f_add_link('circle1', traj.test2)
    traj.test2.f_add_link('circle2', traj.test)
    self.assertTrue('circle1' in traj)

    # Remove the link, navigating to it through the cycle itself
    traj.circle1.circle2.f_remove_link('circle1')
    self.assertTrue('circle1' not in traj.circle2)
    with self.assertRaises(AttributeError):
        traj.test.circle1
    # Removing an already removed link is rejected
    with self.assertRaises(ValueError):
        traj.test.f_remove_link('circle1')

    traj.test2.f_remove_child('circle2')
    self.assertTrue('circle2' not in traj)
def test_compacting(self):
    """Deleting rows/items and running ptrepack shrinks the HDF5 file."""
    filename = make_temp_dir('hdf5compacting.hdf5')
    traj = Trajectory(name=make_trajectory_name(self), filename=filename)
    trajname = traj.v_name
    traj.v_storage_service.complevel = 7

    first_row = {'ha': compat.tobytes('hi'), 'haha': np.zeros((3, 3))}
    traj.f_store(only_init=True)

    # Two throwaway results that will be deleted before compacting
    traj.f_add_result('My.Tree.Will.Be.Deleted', 42)
    traj.f_add_result('Mine.Too.HomeBoy', 42, comment='Don`t cry for me!')

    res = traj.f_add_result(SharedResult, 'myres')
    res['myres'] = SharedTable()
    res['myres'].create_shared_data(first_row=first_row)

    with StorageContextManager(traj):
        # NOTE(review): bare attribute access; presumably touches/loads the
        # shared node before row access — confirm intent
        traj.myres
        for irun in range(10000):
            row = traj.myres.row
            for key in first_row:
                row[key] = first_row[key]
            row.append()
    traj.f_store()
    del traj

    traj = load_trajectory(name=trajname, filename=filename, load_all=2)
    with StorageContextManager(traj) as cm:
        # Drop 9000 of the 10001 rows, then flush to disk
        tb = traj.myres.get_data_node()
        ptcompat.remove_rows(tb, 1000, 10000)
        cm.flush_store()
        self.assertTrue(traj.myres.nrows == 1001)

    traj.f_delete_item(traj.My, recursive=True)
    traj.f_delete_item(traj.Mine, recursive=True)

    size = os.path.getsize(filename)
    get_root_logger().info('Filesize is %s' % str(size))
    name_wo_ext, ext = os.path.splitext(filename)
    backup_file_name = name_wo_ext + '_backup' + ext
    code = compact_hdf5_file(filename, keep_backup=True)
    if code != 0:
        raise RuntimeError('ptrepack fail')
    # The backup keeps the original size; the compacted file must be smaller
    backup_size = os.path.getsize(backup_file_name)
    self.assertTrue(backup_size == size)
    new_size = os.path.getsize(filename)
    get_root_logger().info('New filesize is %s' % str(new_size))
    self.assertTrue(new_size < size, "%s > %s" % (str(new_size), str(size)))
def test_compacting(self):
    """Deleting rows/items and running ptrepack shrinks the HDF5 file."""
    filename = make_temp_dir("hdf5compacting.hdf5")
    traj = Trajectory(name=make_trajectory_name(self), filename=filename)
    trajname = traj.v_name
    traj.v_storage_service.complevel = 7

    first_row = {"ha": compat.tobytes("hi"), "haha": np.zeros((3, 3))}
    traj.f_store(only_init=True)

    # Two throwaway results that will be deleted before compacting
    res1 = traj.f_add_result("My.Tree.Will.Be.Deleted", 42)
    res2 = traj.f_add_result("Mine.Too.HomeBoy", 42, comment="Don`t cry for me!")

    res = traj.f_add_result(SharedResult, "myres")
    res["myres"] = SharedTable()
    res["myres"].create_shared_data(first_row=first_row)

    with StorageContextManager(traj):
        tab = traj.myres
        # Append 10000 identical rows
        for irun in range(10000):
            row = traj.myres.row
            for key in first_row:
                row[key] = first_row[key]
            row.append()
    traj.f_store()
    del traj

    traj = load_trajectory(name=trajname, filename=filename, load_all=2)
    with StorageContextManager(traj) as cm:
        # Drop 9000 of the 10001 rows, then flush to disk
        tb = traj.myres.get_data_node()
        ptcompat.remove_rows(tb, 1000, 10000)
        cm.f_flush_store()
        self.assertTrue(traj.myres.nrows == 1001)

    traj.f_delete_item(traj.My, recursive=True)
    traj.f_delete_item(traj.Mine, recursive=True)

    size = os.path.getsize(filename)
    get_root_logger().info("Filesize is %s" % str(size))
    name_wo_ext, ext = os.path.splitext(filename)
    backup_file_name = name_wo_ext + "_backup" + ext
    code = compact_hdf5_file(filename, keep_backup=True)
    if code != 0:
        raise RuntimeError("ptrepack fail")
    # The backup keeps the original size; the compacted file must be smaller
    backup_size = os.path.getsize(backup_file_name)
    self.assertTrue(backup_size == size)
    new_size = os.path.getsize(filename)
    get_root_logger().info("New filesize is %s" % str(new_size))
    self.assertTrue(new_size < size, "%s > %s" % (str(new_size), str(size)))
def main():
    """Run a DEAP onemax genetic algorithm where every individual
    evaluation is executed as one pypet run via a multiprocessing pool."""
    # No environment here ;-)
    filename = os.path.join('experiments', 'example_20.hdf5')
    traj = Trajectory('onemax', filename=filename, overwrite_file=True)

    # ------- Add parameters ------- #
    traj.f_add_parameter('popsize', 100)
    traj.f_add_parameter('CXPB', 0.5)
    traj.f_add_parameter('MUTPB', 0.2)
    traj.f_add_parameter('NGEN', 20)
    traj.f_add_parameter('generation', 0)
    traj.f_add_parameter('ind_idx', 0)
    traj.f_add_parameter('ind_len', 50)
    traj.f_add_parameter('indpb', 0.005)
    traj.f_add_parameter('tournsize', 3)
    traj.f_add_parameter('seed', 42)
    traj.f_store(only_init=True)

    # ------- Create and register functions with DEAP ------- #
    creator.create("FitnessMax", base.Fitness, weights=(1.0, ))
    creator.create("Individual", list, fitness=creator.FitnessMax)

    toolbox = base.Toolbox()
    # Attribute generator
    toolbox.register("attr_bool", random.randint, 0, 1)
    # Structure initializers
    toolbox.register("individual", tools.initRepeat, creator.Individual,
                     toolbox.attr_bool, traj.ind_len)
    toolbox.register("population", tools.initRepeat, list, toolbox.individual)

    # Operator registering
    toolbox.register("mate", tools.cxTwoPoint)
    toolbox.register("mutate", tools.mutFlipBit, indpb=traj.indpb)
    toolbox.register("select", tools.selTournament, tournsize=traj.tournsize)
    toolbox.register("evaluate", eval_wrapper)

    pool = multip.Pool(4)
    toolbox.register("map", pool.map)  # We use the pool's map function!

    # ------- Initialize Population -------- #
    random.seed(traj.seed)

    pop = toolbox.population(n=traj.popsize)
    CXPB, MUTPB, NGEN = traj.CXPB, traj.MUTPB, traj.NGEN

    start_idx = 0  # We need to count executed runs

    print("Start of evolution")
    for g in range(traj.NGEN):
        print("-- Generation %i --" % g)

        # Determine individuals that need to be evaluated
        eval_pop = [ind for ind in pop if not ind.fitness.valid]

        # Add as many explored runs as individuals that need to be evaluated
        traj.f_expand(cartesian_product({'generation': [g],
                                         'ind_idx': range(len(eval_pop))}))

        # We need to make the storage service multiprocessing safe
        mc = MultiprocContext(traj, wrap_mode='QUEUE')
        mc.f_start()

        # Create a single iterable to be passed to our fitness function (wrapper).
        # `yields='copy'` is important, the pool's `map` function will
        # go over the whole iterator at once and store it in memory.
        # So for every run we need a copy of the trajectory.
        # Alternatively, you could use `yields='self'` and use the pool's `imap` function.
        zip_iterable = izip(traj.f_iter_runs(start_idx, yields='copy'), eval_pop)

        fitnesses = toolbox.map(eval_wrapper, zip_iterable)
        # fitnesses is just a list of tuples [(fitness,), ...]
        for idx, fitness in enumerate(fitnesses):
            # Update fitnesses
            eval_pop[idx].fitness.values = fitness

        # Finalize the multiproc wrapper
        mc.f_finalize()

        # Update start index
        start_idx += len(eval_pop)

        print("  Evaluated %i individuals" % len(eval_pop))

        # Gather all the fitnesses in one list and print the stats
        fits = [ind.fitness.values[0] for ind in pop]

        length = len(pop)
        mean = sum(fits) / length
        sum2 = sum(x * x for x in fits)
        std = abs(sum2 / length - mean**2)**0.5

        print("  Min %s" % min(fits))
        print("  Max %s" % max(fits))
        print("  Avg %s" % mean)
        print("  Std %s" % std)

        # ------- Create the next generation by crossover and mutation -------- #
        if g < traj.NGEN - 1:  # not necessary for the last generation
            # Select the next generation individuals
            offspring = toolbox.select(pop, len(pop))
            # Clone the selected individuals
            offspring = list(map(toolbox.clone, offspring))

            # Apply crossover and mutation on the offspring
            for child1, child2 in zip(offspring[::2], offspring[1::2]):
                if random.random() < CXPB:
                    toolbox.mate(child1, child2)
                    del child1.fitness.values
                    del child2.fitness.values

            for mutant in offspring:
                if random.random() < MUTPB:
                    toolbox.mutate(mutant)
                    del mutant.fitness.values

            # The population is entirely replaced by the offspring
            pop[:] = offspring

    # Stop the multiprocessing pool
    pool.close()
    pool.join()

    print("-- End of (successful) evolution --")
    best_ind = tools.selBest(pop, 1)[0]
    print("Best individual is %s, %s" % (best_ind, best_ind.fitness.values))

    traj.f_store()  # And store all the rest of the data
__author__ = 'Robert Meyer'

from pypet import Trajectory, Result, Parameter

traj = Trajectory()

# There are more ways to add data,
# 1st the standard way:
traj.f_add_parameter('x', 1, comment='I am the first dimension!')
# 2nd by providing a new parameter/result instance, be aware that the data is added where
# you specify it. There are no such things as shortcuts for parameter creation:
traj.parameters.y = Parameter('y', 1, comment='I am the second dimension!')
# 3rd as before, but if our new leaf has NO name it will be renamed accordingly:
traj.parameters.t = Parameter('', 1, comment='Third dimension')
# See:
print('t=' + str(traj.t))

# This also works for adding groups on the fly and with the well known *dot* notation:
traj.parameters.subgroup = Parameter('subgroup.subsubgroup.w', 2)
# See
print('w='+str(traj.par.subgroup.subsubgroup.w))

# Finally, there's one more thing. Using this notation we can also add links.
# Simply use the `=` assignment with objects that already exist in your trajectory:
traj.mylink = traj.f_get('x')
# now `mylink` links to parameter `x`, also fast access works:
print('Linking to x gives: ' + str(traj.mylink))
def test_conversions(self):
    """Round-trip test converting results between shared (HDF5-backed) and
    ordinary (in-memory) representations.

    Builds shared arrays/tables/frames plus ordinary list/tuple/array/frame/
    table results, stores them, then repeatedly reloads the trajectory and
    converts items back and forth with ``make_ordinary_result`` /
    ``make_shared_result``, checking values survive each conversion.
    """
    filename = make_temp_dir('hdf5manipulation.hdf5')
    traj = Trajectory(name=make_trajectory_name(self), filename=filename)
    trajname = traj.v_name
    # All results added from here on default to the SharedResult class.
    traj.v_standard_result = SharedResult
    traj.f_store(only_init=True)

    traj.f_add_result('shared_data')
    thedata = np.zeros((1000, 1000))
    myarray = SharedArray('array', traj.shared_data, trajectory=traj)
    traj.shared_data['array'] = myarray
    mytable = SharedTable('t1', traj.shared_data, trajectory=traj)
    traj.shared_data['t1'] = mytable

    dadict = {
        'hi': [1, 2, 3, 4, 5],
        'shu': ['bi', 'du', 'da', 'ha', 'hui']
    }
    dadict2 = {'answer': [42]}

    # Two shared pandas frames under the same result node.
    res = traj.f_add_result('shared.dfs')
    res['df'] = SharedPandasFrame()
    res['df'].create_shared_data(data=pd.DataFrame(dadict), trajectory=traj)
    frame = SharedPandasFrame('df1',
                              traj.f_get('shared.dfs'),
                              trajectory=traj,
                              add_to_parent=True)
    frame.create_shared_data(data=pd.DataFrame(dadict2), )
    res['df1'] = frame

    # Ordinary (non-shared) results used later as conversion sources.
    traj.f_add_result('mylist', [1, 2, 3])
    traj.f_add_result('my.mytuple', k=(1, 2, 3), wa=42)
    traj.f_add_result('my.myarray', np.zeros((50, 50)))
    traj.f_add_result('my.myframe', data=pd.DataFrame(dadict2))
    traj.f_add_result('my.mytable', ObjectTable(data=dadict2))

    myarray.create_shared_data(data=thedata)
    mytable.create_shared_data(first_row={
        'hi': compat.tobytes('hi'),
        'huhu': np.ones(3)
    })
    traj.f_store()

    data = myarray.read()
    # Return value discarded; smoke-checks that the HDF5 node is reachable.
    myarray.get_data_node()
    self.assertTrue(np.all(data == thedata))

    with StorageContextManager(traj):
        myarray[2, 2] = 10
        data = myarray.read()
        self.assertTrue(data[2, 2] == 10)

    # Repeated outside the context manager: the value read while the file
    # was open must still hold after it is closed.
    self.assertTrue(data[2, 2] == 10)
    self.assertFalse(traj.v_storage_service.is_open)

    # Fresh load from disk; convert the shared array into a plain ndarray.
    traj = load_trajectory(name=trajname,
                           filename=filename,
                           load_all=2,
                           dynamic_imports=SharedResult)
    make_ordinary_result(traj.shared_data, 'array', trajectory=traj)
    array = traj.shared_data.array
    self.assertTrue(isinstance(array, np.ndarray))
    # Mirror the in-file edit made above so the comparison target matches.
    thedata[2, 2] = 10
    self.assertTrue(np.all(array == thedata))

    make_ordinary_result(
        traj.shared_data,
        't1',
        trajectory=traj,
    )
    t1 = traj.shared_data.t1
    self.assertTrue(isinstance(t1, ObjectTable))
    self.assertTrue(np.all(t1['huhu'][0] == np.ones(3)))

    # Shared pandas frame -> ordinary DataFrame.
    dfs = traj.shared.dfs
    make_ordinary_result(traj.shared.dfs, 'df', trajectory=traj)
    theframe = dfs.f_get('df')
    self.assertTrue(isinstance(dfs, Result))
    self.assertTrue(isinstance(theframe, pd.DataFrame))
    self.assertTrue(theframe['hi'][0] == 1)

    # Ordinary list -> shared, mutate in place, then back to ordinary.
    listres = traj.f_get('mylist')
    listres = make_shared_result(listres, 0, trajectory=traj)
    with StorageContextManager(traj):
        self.assertTrue(listres[0][2] == 3)
        listres[0][0] = 4

    self.assertTrue(listres[0][0] == 4)
    listres = make_ordinary_result(listres, 0, trajectory=traj)
    traj = load_trajectory(name=trajname,
                           filename=filename,
                           load_all=2,
                           dynamic_imports=SharedResult)
    mylist = traj.mylist
    self.assertTrue(isinstance(listres, Result))
    self.assertTrue(mylist[0] == 4)
    self.assertTrue(isinstance(mylist, list))

    # Converting a non-existing entry ('mylist' is not part of mytuple)
    # must raise AttributeError.
    mytuple = traj.mytuple
    with self.assertRaises(AttributeError):
        mytuple = make_shared_result(mytuple, 'mylist', traj,
                                     new_class=SharedArray)

    mytuple = make_shared_result(mytuple, 'k', traj, new_class=SharedArray)
    self.assertTrue(mytuple.k[1] == 2)

    mytuple = make_ordinary_result(mytuple, 'k', trajectory=traj)
    self.assertTrue(isinstance(mytuple.k, tuple))
    self.assertTrue(mytuple.k[2] == 3)

    myframe = traj.myframe
    myframe = make_shared_result(myframe, 'data', traj)
    theframe = myframe.data.read()
    self.assertTrue(theframe['answer'][0] == 42)

    myframe = make_ordinary_result(myframe, 'data', trajectory=traj)
    traj.f_load_item(myframe)
    self.assertTrue(myframe.data['answer'][0] == 42)

    mytable = traj.f_get('mytable')
    mytable = make_shared_result(mytable, 0, traj)
    self.assertTrue(isinstance(mytable[0], SharedTable))
    rows = mytable.mytable.read()
    self.assertTrue(rows[0][0] == 42)

    mytable = make_ordinary_result(mytable, 0, trajectory=traj)
    self.assertTrue(isinstance(mytable, Result))
    self.assertTrue(mytable[0]['answer'][0] == 42)
def test_conversions(self):
    """Round-trip test converting results between shared (HDF5-backed) and
    ordinary (in-memory) representations.

    Double-quoted variant of the same scenario: create shared arrays,
    tables and pandas frames plus ordinary results, store, reload, and
    convert back and forth via ``make_ordinary_result`` /
    ``make_shared_result``, asserting values survive every conversion.
    """
    filename = make_temp_dir("hdf5manipulation.hdf5")
    traj = Trajectory(name=make_trajectory_name(self), filename=filename)
    trajname = traj.v_name
    # All results added from here on default to the SharedResult class.
    traj.v_standard_result = SharedResult
    traj.f_store(only_init=True)

    traj.f_add_result("shared_data")
    thedata = np.zeros((1000, 1000))
    myarray = SharedArray("array", traj.shared_data, trajectory=traj)
    traj.shared_data["array"] = myarray
    mytable = SharedTable("t1", traj.shared_data, trajectory=traj)
    traj.shared_data["t1"] = mytable
    # mytable2 = SharedTableResult('h.t2', trajectory=traj)
    # mytable3 = SharedTableResult('jjj.t3', trajectory=traj)
    dadict = {"hi": [1, 2, 3, 4, 5], "shu": ["bi", "du", "da", "ha", "hui"]}
    dadict2 = {"answer": [42]}

    # Two shared pandas frames under the same result node.
    res = traj.f_add_result("shared.dfs")
    res["df"] = SharedPandasFrame()
    res["df"].create_shared_data(data=pd.DataFrame(dadict), trajectory=traj)
    frame = SharedPandasFrame("df1", traj.f_get("shared.dfs"),
                              trajectory=traj)
    frame.create_shared_data(data=pd.DataFrame(dadict2))
    res["df1"] = frame

    # Ordinary (non-shared) results used later as conversion sources.
    traj.f_add_result("mylist", [1, 2, 3])
    traj.f_add_result("my.mytuple", k=(1, 2, 3), wa=42)
    traj.f_add_result("my.myarray", np.zeros((50, 50)))
    traj.f_add_result("my.myframe", data=pd.DataFrame(dadict2))
    traj.f_add_result("my.mytable", ObjectTable(data=dadict2))

    myarray.create_shared_data(data=thedata)
    mytable.create_shared_data(first_row={"hi": compat.tobytes("hi"),
                                          "huhu": np.ones(3)})
    traj.f_store()

    data = myarray.read()
    # Node handle fetched but unused beyond a reachability smoke check.
    arr = myarray.get_data_node()
    self.assertTrue(np.all(data == thedata))

    with StorageContextManager(traj) as cm:
        myarray[2, 2] = 10
        data = myarray.read()
        self.assertTrue(data[2, 2] == 10)

    # Repeated outside the context manager: the value read while the file
    # was open must still hold after it is closed.
    self.assertTrue(data[2, 2] == 10)
    self.assertFalse(traj.v_storage_service.is_open)

    # Fresh load from disk; convert the shared array into a plain ndarray.
    traj = load_trajectory(name=trajname, filename=filename, load_all=2,
                           dynamic_imports=SharedResult)
    make_ordinary_result(traj.shared_data, "array", trajectory=traj)
    array = traj.shared_data.array
    self.assertTrue(isinstance(array, np.ndarray))
    # Mirror the in-file edit made above so the comparison target matches.
    thedata[2, 2] = 10
    self.assertTrue(np.all(array == thedata))

    make_ordinary_result(traj.shared_data, "t1", trajectory=traj)
    t1 = traj.shared_data.t1
    self.assertTrue(isinstance(t1, ObjectTable))
    # self.assertTrue(np.all(t1["huhu"][0] == np.ones(3)))

    # Shared pandas frame -> ordinary DataFrame.
    dfs = traj.shared.dfs
    make_ordinary_result(traj.shared.dfs, "df", trajectory=traj)
    theframe = dfs.f_get("df")
    self.assertTrue(isinstance(dfs, Result))
    self.assertTrue(isinstance(theframe, pd.DataFrame))
    self.assertTrue(theframe["hi"][0] == 1)

    # Ordinary list -> shared, mutate in place, then back to ordinary.
    listres = traj.f_get("mylist")
    listres = make_shared_result(listres, 0, trajectory=traj)
    with StorageContextManager(traj) as cm:
        self.assertTrue(listres[0][2] == 3)
        listres[0][0] = 4

    self.assertTrue(listres[0][0] == 4)
    listres = make_ordinary_result(listres, 0, trajectory=traj)
    traj = load_trajectory(name=trajname, filename=filename, load_all=2,
                           dynamic_imports=SharedResult)
    mylist = traj.mylist
    self.assertTrue(isinstance(listres, Result))
    self.assertTrue(mylist[0] == 4)
    self.assertTrue(isinstance(mylist, list))

    # Converting a non-existing entry ('mylist' is not part of mytuple)
    # must raise AttributeError.
    mytuple = traj.mytuple
    with self.assertRaises(AttributeError):
        mytuple = make_shared_result(mytuple, "mylist", traj,
                                     new_class=SharedArray)

    mytuple = make_shared_result(mytuple, "k", traj, new_class=SharedArray)
    self.assertTrue(mytuple.k[1] == 2)

    mytuple = make_ordinary_result(mytuple, "k", trajectory=traj)
    self.assertTrue(isinstance(mytuple.k, tuple))
    self.assertTrue(mytuple.k[2] == 3)

    myframe = traj.myframe
    myframe = make_shared_result(myframe, "data", traj)
    theframe = myframe.data.read()
    self.assertTrue(theframe["answer"][0] == 42)

    myframe = make_ordinary_result(myframe, "data", trajectory=traj)
    traj.f_load_item(myframe)
    self.assertTrue(myframe.data["answer"][0] == 42)

    mytable = traj.f_get("mytable")
    mytable = make_shared_result(mytable, 0, traj)
    self.assertTrue(isinstance(mytable[0], SharedTable))
    rows = mytable.mytable.read()
    self.assertTrue(rows[0][0] == 42)

    mytable = make_ordinary_result(mytable, 0, trajectory=traj)
    self.assertTrue(isinstance(mytable, Result))
    self.assertTrue(mytable[0]["answer"][0] == 42)
__author__ = 'Robert Meyer'

from pypet import Trajectory, Result, Parameter

# Several equivalent routes for filling a trajectory with data, including
# what happens when attribute and constructor names disagree.
traj = Trajectory()

# Route no. 1: the explicit API call.
traj.f_add_parameter('x', 1, comment='I am the first dimension!')

# Route no. 2: assign a pre-built Parameter instance; it is stored exactly
# where the assignment puts it — no construction shortcuts.
traj.parameters.y = Parameter('y', 1, comment='I am the second dimension!')

# Route no. 3: a leaf constructed with an EMPTY name inherits the name of
# the attribute it is assigned to:
traj.parameters.t = Parameter('', 1, comment='Third dimension')
print('t=' + str(traj.t))

# When attribute name ('subgroup') and constructor name ('v') differ,
# 'subgroup' simply becomes a fresh group node created on the fly:
traj.parameters.subgroup = Parameter('v', 2, comment='Fourth dimension')
print(traj.parameters.subgroup)

# Dot notation also works when assigning onto an existing group:
traj.parameters = Parameter('subgroup.subsubgroup.w', 2)
print('w='+str(traj.par.subgroup.subsubgroup.w))

# Lazy adding needs no constructor at all; it is switched on first:
traj.v_lazy_adding = True
def main():
    """Run the Euler-integration example end to end.

    Phases: add parameters, store the Lorenz ODE source as a derived
    parameter, explore three initial conditions, run ``euler_scheme`` via
    the environment, then reload the trajectory from disk and plot each
    run's 3D evolution.
    """
    filename = os.path.join('hdf5', 'example_05.hdf5')
    env = Environment(trajectory='Example_05_Euler_Integration',
                      filename=filename,
                      file_title='Example_05_Euler_Integration',
                      comment='Go for Euler!')

    traj = env.v_trajectory
    trajectory_name = traj.v_name

    # 1st a) phase: parameter addition
    add_parameters(traj)

    # 1st b) phase: preparation.
    # The differential equation (its source code only) becomes a derived
    # parameter so it is stored alongside the results.
    traj.f_add_derived_parameter(FunctionParameter, 'diff_eq', diff_lorenz,
                                 comment='Source code of our equation!')

    # Explore three initial conditions — enough for an illustrative example.
    traj.f_explore({'initial_conditions': [
        np.array([0.01, 0.01, 0.01]),
        np.array([2.02, 0.02, 0.02]),
        np.array([42.0, 4.2, 0.42])
    ]})

    # 2nd phase: run the experiment. `euler_scheme` is the top-level
    # simulation function; the Lorenz equation is an extra argument.
    env.f_run(euler_scheme, diff_lorenz)

    # 4th phase: analysis. Pretend we start from scratch and reload the
    # trajectory. (Deliberate demonstration mistake below: the plain
    # Trajectory cannot rebuild FunctionParameter.)
    del traj
    traj = Trajectory(filename=filename)

    try:
        # This fails: the trajectory does not know how to build the
        # FunctionParameter class.
        traj.f_load(name=trajectory_name, load_parameters=2,
                    load_derived_parameters=2, load_results=1)
    except ImportError as e:
        # Fixed typo in the user-facing message ("did'nt" -> "didn't").
        print('That didn\'t work, I am sorry: %s ' % str(e))

    # Retry with the custom class added to the dynamic imports — now the
    # load succeeds.
    traj = Trajectory(filename=filename,
                      dynamically_imported_classes=FunctionParameter)
    traj.f_load(name=trajectory_name, load_parameters=2,
                load_derived_parameters=2, load_results=1)

    # For fun, print the stored source code of the equation.
    print('\n ---------- The source code of your function ---------- \n %s' %
          traj.diff_eq)

    initial_conditions_exploration_array = \
        traj.f_get('initial_conditions').f_get_range()

    # Plot every run's trajectory; results are loaded one at a time as if
    # they were huge.
    for idx, run_name in enumerate(traj.f_get_run_names()):
        euler_result = traj.results.f_get(run_name).euler_evolution
        traj.f_load_item(euler_result)
        euler_data = euler_result.data

        fig = plt.figure(idx)
        # FIX: `fig.gca(projection='3d')` is not supported on Matplotlib
        # >= 3.6; `add_subplot` is the documented replacement.
        ax = fig.add_subplot(projection='3d')
        x = euler_data[:, 0]
        y = euler_data[:, 1]
        z = euler_data[:, 2]
        ax.plot(x, y, z, label='Initial Conditions: %s' %
                str(initial_conditions_exploration_array[idx]))
        plt.legend()
        plt.show()

        # Free the (assumed huge) data again.
        del euler_data
        euler_result.f_empty()

    # You have to click through the images to stop the example!
    # Finally disable logging and close all log-files.
    env.f_disable_logging()
def test_all_arrays(self):
    """Exercise every shared array flavour (Array, CArray, EArray,
    VLArray): create, store, reload, mutate inside a storage context,
    reload again, and verify all mutations persisted.
    """
    filename = make_temp_dir('hdf5arrays.hdf5')
    traj = Trajectory(name=make_trajectory_name(self), filename=filename)
    trajname = traj.v_name

    # FIX: `np.float` was removed in NumPy 1.24; the builtin `float`
    # yields the identical float64 dtype on all versions.
    npearray = np.ones((2, 10, 3), dtype=float)
    thevlarray = np.array(
        [compat.tobytes('j'), 22.2, compat.tobytes('gutter')])

    traj.f_store(only_init=True)
    res = traj.f_add_result(SharedResult, 'arrays')
    res['carray'] = SharedCArray()
    res['carray'].create_shared_data(shape=(10, 10),
                                     atom=pt.atom.FloatAtom())
    res['earray'] = SharedEArray()
    res['earray'].create_shared_data(obj=npearray)
    res['vlarray'] = SharedVLArray()
    res['vlarray'].create_shared_data(obj=thevlarray)
    res['array'] = SharedArray()
    res['array'].create_shared_data(data=npearray)

    traj.f_store()

    traj = load_trajectory(name=trajname,
                           filename=filename,
                           load_all=2,
                           dynamic_imports=SharedResult)

    # Mutate each array kind while the file is open.
    to_append = [44, compat.tobytes('k')]  # renamed from typo 'toappned'
    with StorageContextManager(traj):
        a1 = traj.arrays.array
        a1[0, 0, 0] = 4.0

        a2 = traj.arrays.carray
        a2[0, 1] = 4

        a4 = traj.arrays.vlarray
        a4.append(to_append)

        a3 = traj.arrays.earray
        a3.append(np.zeros((1, 10, 3)))

    # Reload once more and verify every mutation was persisted.
    traj = load_trajectory(name=trajname,
                           filename=filename,
                           load_all=2,
                           dynamic_imports=SharedResult)

    with StorageContextManager(traj):
        a1 = traj.arrays.array
        self.assertTrue(a1[0, 0, 0] == 4.0)

        a2 = traj.arrays.carray
        self.assertTrue(a2[0, 1] == 4)

        a3 = traj.arrays.earray
        # Original (2, 10, 3) plus one appended (1, 10, 3) slab.
        self.assertTrue(a3.read().shape == (3, 10, 3))

        a4 = traj.arrays.vlarray
        for idx, x in enumerate(a4):
            if idx == 0:
                self.assertTrue(np.all(x == np.array(thevlarray)))
            elif idx == 1:
                self.assertTrue(np.all(x == np.array(to_append)))
            else:
                # The VLArray must contain exactly two rows.
                raise RuntimeError()
class SharedArrayTest(TrajectoryComparator):
    """Unit tests for ``SharedArray``: reading, item get/set, iteration,
    enum access, and length — each checked on the freshly created array
    and again after reloading the trajectory from disk.
    """

    tags = 'unittest', 'trajectory', 'shared', 'hdf5', 'array', 'mehmet'

    def setUp(self):
        # Fresh trajectory backed by a temporary HDF5 file; results
        # default to SharedResult so shared data can be attached.
        self.filename = make_temp_dir('shared_table_test.hdf5')
        self.traj = Trajectory(name=make_trajectory_name(self),
                               filename=self.filename)
        self.traj.v_standard_result = SharedResult
        self.traj.f_store(only_init=True)
        self.traj.f_add_result('shared_data')
        self.shared_array = SharedArray(name='array',
                                        parent=self.traj.shared_data,
                                        trajectory=self.traj,
                                        add_to_parent=True)

    def test_array_read(self):
        # Write a constant array, store, reload in a second trajectory,
        # and compare the round-tripped data element-wise.
        the_reading_array = np.ones((100, 100)) * 4
        first_reading_array = self.traj.results.shared_data.array
        self.assertTrue(first_reading_array is self.shared_array)
        first_reading_array.create_shared_data(obj=the_reading_array)
        self.traj.f_store()
        traj2 = load_trajectory(name=self.traj.v_name,
                                filename=self.filename,
                                load_all=2,
                                dynamic_imports=SharedResult)
        second_reading_array = traj2.shared_data.array.read()
        self.assertTrue(
            np.all(the_reading_array == second_reading_array),
            '%s != %s' % (str(the_reading_array),
                          str(second_reading_array)))

    def test_array_getitem(self):
        # Element-wise __getitem__ before and after a disk round trip.
        the_getitem_array = np.array(range(100))
        first_getitem_array = self.traj.results.shared_data.array
        first_getitem_array.create_shared_data(obj=the_getitem_array)
        for k in range(len(the_getitem_array)):
            self.assertEqual(the_getitem_array[k], first_getitem_array[k])
        self.traj.f_store()
        traj2 = load_trajectory(name=self.traj.v_name,
                                filename=self.filename,
                                load_all=2,
                                dynamic_imports=SharedResult)
        for j in range(len(the_getitem_array)):
            self.assertEqual(the_getitem_array[j],
                             traj2.results.shared_data.array[j])

    def test_array_getenum(self):
        # get_enum() is expected to raise TypeError for a plain numeric
        # array, both before and after reloading.
        the_getenum_array = np.array(range(100))
        first_getenum_array = self.traj.results.shared_data.array
        first_getenum_array.create_shared_data(obj=the_getenum_array)
        with self.assertRaises(TypeError):
            first_getenum_array.get_enum()
        self.traj.f_store()
        traj2 = load_trajectory(name=self.traj.v_name,
                                filename=self.filename,
                                load_all=2,
                                dynamic_imports=SharedResult)
        second_enum_array = traj2.results.shared_data.array
        with self.assertRaises(TypeError):
            second_enum_array.get_enum()

    def test_array_iterrows(self):
        # Row iteration via iterrows() must match the source rows, both
        # before and after a disk round trip.
        the_iterrows_array = np.random.randint(0, 100, (100, 100))
        first_iterrows_array = self.traj.results.shared_data.array
        first_iterrows_array.create_shared_data(obj=the_iterrows_array)
        with StorageContextManager(self.traj):
            for idx, row in enumerate(first_iterrows_array.iterrows()):
                self.assertTrue(np.all(row == the_iterrows_array[idx, :]))
        self.traj.f_store()
        traj2 = load_trajectory(name=self.traj.v_name,
                                filename=self.filename,
                                load_all=2,
                                dynamic_imports=SharedResult)
        second_iterrows_array = traj2.results.shared_data.array
        with StorageContextManager(traj2):
            for idx, row in enumerate(second_iterrows_array.iterrows()):
                self.assertTrue(np.all(row == the_iterrows_array[idx, :]))

    def test_array_setitem(self):
        # __setitem__ writes persist across store/reload, and the
        # reloaded array is writable too.
        the_setitem_array = np.zeros((50, 50))
        first_setitem_array = self.traj.results.shared_data.array
        first_setitem_array.create_shared_data(obj=the_setitem_array)
        first_setitem_array[2, 2] = 10
        self.assertEqual(first_setitem_array[2, 2], 10)
        self.traj.f_store()
        traj2 = load_trajectory(name=self.traj.v_name,
                                filename=self.filename,
                                load_all=2,
                                dynamic_imports=SharedResult)
        second_setitem_array = traj2.shared_data.array
        self.assertEqual(second_setitem_array[2, 2], 10)
        second_setitem_array[3, 3] = 17
        self.assertEqual(second_setitem_array[3, 3], 17)

    def test_array_iter(self):
        # Direct iteration over the shared array must match row-wise
        # iteration over the source ndarray; read() returns everything.
        the_iterrows_array = np.random.randint(0, 100, (100, 100))
        first_iterrows_array = self.traj.results.shared_data.array
        first_iterrows_array.create_shared_data(obj=the_iterrows_array)
        with StorageContextManager(self.traj):
            for idx, row in enumerate(first_iterrows_array):
                self.assertTrue(np.all(row == the_iterrows_array[idx, :]))
            self.assertTrue(
                np.all(the_iterrows_array == first_iterrows_array.read()))
        for idx, row in enumerate(the_iterrows_array):
            self.assertTrue(np.all(row == the_iterrows_array[idx, :]))
        self.traj.f_store()
        traj2 = load_trajectory(name=self.traj.v_name,
                                filename=self.filename,
                                load_all=2,
                                dynamic_imports=SharedResult)
        second_iterrows_array = traj2.results.shared_data.array
        with StorageContextManager(traj2):
            for idx, row in enumerate(second_iterrows_array):
                self.assertTrue(np.all(row == the_iterrows_array[idx, :]))
            self.assertTrue(
                np.all(the_iterrows_array == second_iterrows_array.read()))
        for idx, row in enumerate(second_iterrows_array):
            self.assertTrue(np.all(row == the_iterrows_array[idx, :]))

    def test_array_len(self):
        # len() reports the first-axis length, before and after reload.
        the_len_array = np.ones((100, 100))
        first_len_array = self.traj.results.shared_data.array
        self.assertTrue(first_len_array is self.shared_array)
        first_len_array.create_shared_data(obj=the_len_array)
        self.assertEqual(len(first_len_array), 100)
        self.traj.f_store()
        traj2 = load_trajectory(name=self.traj.v_name,
                                filename=self.filename,
                                load_all=2,
                                dynamic_imports=SharedResult)
        second_len_array = traj2.shared_data.array
        self.assertEqual(len(second_len_array), 100)
class SharedTableTest(TrajectoryComparator):
    """Unit tests for ``SharedTable``: row creation, reading, appending,
    iteration, column access, item get/set, ``where`` queries and
    flushing — each verified across one or two store/reload cycles.
    The table schema comes from ``MyTable`` (defined elsewhere in the
    test module; columns: id, name, surname, weight).
    """

    tags = 'unittest', 'trajectory', 'shared', 'hdf5', 'table', 'mehmet'

    def setUp(self):
        # Fresh trajectory backed by a temporary HDF5 file; results
        # default to SharedResult so shared data can be attached.
        self.filename = make_temp_dir('shared_table_test.hdf5')
        self.traj = Trajectory(name=make_trajectory_name(self),
                               filename=self.filename)
        self.traj.v_standard_result = SharedResult
        self.traj.f_store(only_init=True)
        self.traj.f_add_result('shared_data')
        self.shared_table = SharedTable(name='table',
                                        parent=self.traj.shared_data,
                                        trajectory=self.traj,
                                        add_to_parent=True)

    def test_table_read(self):
        # Fill ten rows, store, then compare read() output across three
        # independently loaded trajectories (including after an append).
        the_reading_table = self.traj.results.shared_data.table
        self.assertTrue(the_reading_table is self.shared_table)
        the_reading_table.create_shared_data(description=MyTable)
        with StorageContextManager(self.traj):
            row = the_reading_table.row
            for i in range(10):
                row['id'] = i
                row['name'] = 'mehmet %d' % i
                row['surname'] = 'Timur'
                row['weight'] = 65.5 + i * 1.5
                row.append()
            the_reading_table.flush()
            for idx, row in enumerate(the_reading_table.iterrows()):
                self.assertEqual(row['id'], idx)
        self.traj.f_store()
        traj2 = load_trajectory(name=self.traj.v_name,
                                filename=self.filename,
                                load_all=2,
                                dynamic_imports=SharedResult)
        second_reading_table = traj2.results.shared_data.table
        self.assertTrue(
            np.all(the_reading_table.read() == second_reading_table.read()))
        second_reading_table.append([(21, 'aaa', 'bbb', 100)])
        # Both handles point at the same HDF5 node, so they must agree
        # even after the append through the second handle.
        self.assertTrue(
            np.all(the_reading_table.read() == second_reading_table.read()))
        traj3 = load_trajectory(name=self.traj.v_name,
                                filename=self.filename,
                                load_all=2,
                                dynamic_imports=SharedResult)
        third_reading_table = traj3.results.shared_data.table
        self.assertTrue(
            np.all(the_reading_table.read() == third_reading_table.read()))

    def test_table_append(self):
        # Append rows through the row iterator and through append(),
        # verifying field values after each of two reload cycles.
        the_append_table = self.traj.results.shared_data.table
        self.assertTrue(the_append_table is self.shared_table)
        the_append_table.create_shared_data(description=MyTable)
        with StorageContextManager(self.traj):
            row = the_append_table.row
            for i in range(15):
                row['id'] = i * 2
                row['name'] = 'name %d' % i
                row['surname'] = '%d surname' % i
                row['weight'] = (i * 0.5 + 50.0)
                row.append()
            the_append_table.flush()
            for idx, row in enumerate(the_append_table.iterrows()):
                self.assertEqual(row['id'], idx * 2)
                self.assertEqual(row['name'],
                                 compat.tobytes('name %d' % idx))
                self.assertEqual(row['surname'],
                                 compat.tobytes('%d surname' % idx))
                self.assertEqual(row['weight'], idx * 0.5 + 50.0)
        self.traj.f_store()
        traj2 = load_trajectory(name=self.traj.v_name,
                                filename=self.filename,
                                load_all=2,
                                dynamic_imports=SharedResult)
        second_append_table = traj2.results.shared_data.table
        with StorageContextManager(traj2):
            for idx, row in enumerate(second_append_table.iterrows()):
                self.assertEqual(row['id'], idx * 2)
                self.assertEqual(row['name'],
                                 compat.tobytes('name %d' % idx))
                self.assertEqual(row['surname'],
                                 compat.tobytes('%d surname' % idx))
                self.assertEqual(row['weight'], idx * 0.5 + 50.0)
            second_append_table.append([(30, 'mehmet', 'timur', 65.5)])
            self.assertEqual(second_append_table.read(field='id')[-1], 30)
            self.assertEqual(
                second_append_table.read(field='name')[-1],
                compat.tobytes('mehmet'))
            self.assertEqual(
                second_append_table.read(field='surname')[-1],
                compat.tobytes('timur'))
            self.assertEqual(
                second_append_table.read(field='weight')[-1], 65.5)
        traj2.f_store()
        traj3 = load_trajectory(name=self.traj.v_name,
                                filename=self.filename,
                                load_all=2,
                                dynamic_imports=SharedResult)
        third_append_table = traj3.results.shared_data.table
        self.assertEqual((third_append_table.read(field='id')[-1]), 30)
        self.assertEqual((third_append_table.read(field='name')[-1]),
                         compat.tobytes('mehmet'))
        self.assertEqual((third_append_table.read(field='surname')[-1]),
                         compat.tobytes('timur'))
        self.assertEqual((third_append_table.read(field='weight')[-1]),
                         65.5)
        third_append_table.append([(33, 'Harrison', 'Ford', 95.5)])
        self.assertEqual((third_append_table.read(field='id')[-1]), 33)
        self.assertEqual((third_append_table.read(field='name')[-1]),
                         compat.tobytes('Harrison'))
        self.assertEqual((third_append_table.read(field='surname')[-1]),
                         compat.tobytes('Ford'))
        self.assertEqual((third_append_table.read(field='weight')[-1]),
                         95.5)

    def test_table_iterrows(self):
        # iterrows() yields rows in insertion order, before and after a
        # disk round trip.
        the_iterrows_table = self.traj.results.shared_data.table
        self.assertTrue(the_iterrows_table is self.shared_table)
        the_iterrows_table.create_shared_data(description=MyTable)
        with StorageContextManager(self.traj):
            row = the_iterrows_table.row
            for i in range(10):
                row['id'] = i
                row['name'] = 'mehmet %d' % i
                row['surname'] = 'Timur'
                row['weight'] = 65.5 + i * 1.5
                row.append()
            the_iterrows_table.flush()
            for idx, row in enumerate(the_iterrows_table.iterrows()):
                self.assertEqual(row['id'], idx)
        self.traj.f_store()
        traj2 = load_trajectory(name=self.traj.v_name,
                                filename=self.filename,
                                load_all=2,
                                dynamic_imports=SharedResult)
        second_iterrows_table = traj2.results.shared_data.table
        with StorageContextManager(traj2):
            for idx, row in enumerate(second_iterrows_table.iterrows()):
                self.assertEqual(row['id'], idx)

    def test_table_col(self):
        # col(<name>) must agree with read(field=<name>) for every column.
        the_col_table = self.traj.results.shared_data.table
        self.assertTrue(the_col_table is self.shared_table)
        the_col_table.create_shared_data(description=MyTable)
        with StorageContextManager(self.traj):
            row = the_col_table.row
            for i in range(10):
                row['id'] = i
                row['name'] = 'mehmet %d' % i
                row['surname'] = 'Timur'
                row['weight'] = 65.5 + i * 1.5
                row.append()
            the_col_table.flush()
            for idx, row in enumerate(the_col_table.iterrows()):
                self.assertEqual(row['id'], idx)
        self.traj.f_store()
        traj2 = load_trajectory(name=self.traj.v_name,
                                filename=self.filename,
                                load_all=2,
                                dynamic_imports=SharedResult)
        second_col_table = traj2.results.shared_data.table
        with StorageContextManager(traj2):
            for idx, row in enumerate(second_col_table.iterrows()):
                self.assertEqual(row['id'], idx)
            self.assertTrue(
                np.all(second_col_table.read(field='id') ==
                       second_col_table.col('id')))
            self.assertTrue(
                np.all(second_col_table.read(field='name') ==
                       second_col_table.col('name')))
            self.assertTrue(
                np.all(second_col_table.read(field='surname') ==
                       second_col_table.col('surname')))
            self.assertTrue(
                np.all(second_col_table.read(field='weight') ==
                       second_col_table.col('weight')))

    # def test_table_itersequence(self):
    #     pass
    #
    # def test_table_itersorted(self):
    #     pass
    #
    # def test_table_read_coordinates(self):
    #     pass
    #
    # def test_table_read_sorted(self):
    #     pass

    def test_table_getitem(self):
        # __getitem__ must match read()[idx]; iterrows(-1) iterates the
        # last row only (used to check an appended row).
        the_getitem_table = self.traj.results.shared_data.table
        self.assertTrue(the_getitem_table is self.shared_table)
        the_getitem_table.create_shared_data(description=MyTable)
        with StorageContextManager(self.traj):
            row = the_getitem_table.row
            for i in range(10):
                row['id'] = i
                row['name'] = 'mehmet %d' % i
                row['surname'] = 'Timur'
                row['weight'] = 65.5 + i * 1.5
                row.append()
            the_getitem_table.flush()
            for idx, row in enumerate(the_getitem_table.iterrows()):
                self.assertEqual(row['id'], idx)
        self.traj.f_store()
        traj2 = load_trajectory(name=self.traj.v_name,
                                filename=self.filename,
                                load_all=2,
                                dynamic_imports=SharedResult)
        second_getitem_table = traj2.results.shared_data.table
        with StorageContextManager(traj2):
            for idx, row in enumerate(second_getitem_table.iterrows()):
                self.assertTrue(
                    np.all(second_getitem_table.read()[idx] ==
                           second_getitem_table[idx]))
            second_getitem_table.append(
                [(30, 'mehmet nevvaf', 'timur', 65.5)])
            for idx, row in enumerate(second_getitem_table.iterrows(-1)):
                self.assertEqual(row['id'], 30)
                self.assertEqual(row['name'],
                                 compat.tobytes('mehmet nevvaf'))
                self.assertEqual(row['surname'], compat.tobytes('timur'))
                self.assertEqual(row['weight'], 65.5)
        traj2.f_store()
        traj3 = load_trajectory(name=self.traj.v_name,
                                filename=self.filename,
                                load_all=2,
                                dynamic_imports=SharedResult)
        third_getitem_table = traj3.results.shared_data.table
        with StorageContextManager(traj3):
            for idx, row in enumerate(third_getitem_table.iterrows()):
                self.assertTrue(
                    np.all(third_getitem_table.read()[idx] ==
                           third_getitem_table[idx]))

    # def test_table_iter(self):
    #     pass
    #
    # def test_table_modify_column(self):
    #     pass
    #
    # def test_table_modify_columns(self):
    #     pass
    #
    # def test_table_modify_coordinates(self):
    #     pass
    #
    # def test_table_modify_rows(self):
    #     pass
    #
    # def test_table_remove_rows(self):
    #     pass
    #
    # def test_table_remove_row(self):
    #     pass

    def test_table_setitem(self):
        # Overwrite row 0 via __setitem__ and verify the change persists
        # across another store/reload cycle.
        the_setitem_table = self.traj.results.shared_data.table
        self.assertTrue(the_setitem_table is self.shared_table)
        the_setitem_table.create_shared_data(description=MyTable)
        with StorageContextManager(self.traj):
            row = the_setitem_table.row
            for i in range(10):
                row['id'] = i
                row['name'] = 'mehmet %d' % i
                row['surname'] = 'Timur'
                row['weight'] = 65.5 + i * 1.5
                row.append()
            the_setitem_table.flush()
            for idx, row in enumerate(the_setitem_table.iterrows()):
                self.assertEqual(row['id'], idx)
        self.traj.f_store()
        traj2 = load_trajectory(name=self.traj.v_name,
                                filename=self.filename,
                                load_all=2,
                                dynamic_imports=SharedResult)
        second_setitem_table = traj2.results.shared_data.table
        second_setitem_table[0] = [(100, 'Mehmet Nevvaf', 'TIMUR', 75.5)]
        self.assertEqual(second_setitem_table.read(field='id')[0], 100)
        self.assertEqual(
            second_setitem_table.read(field='name')[0],
            compat.tobytes('Mehmet Nevvaf'))
        self.assertEqual(
            second_setitem_table.read(field='surname')[0],
            compat.tobytes('TIMUR'))
        self.assertEqual(second_setitem_table.read(field='weight')[0], 75.5)
        traj2.f_store()
        traj3 = load_trajectory(name=self.traj.v_name,
                                filename=self.filename,
                                load_all=2,
                                dynamic_imports=SharedResult)
        third_setitem_table = traj3.results.shared_data.table
        self.assertEqual(third_setitem_table.read(field='id')[0], 100)
        self.assertEqual(
            third_setitem_table.read(field='name')[0],
            compat.tobytes('Mehmet Nevvaf'))
        self.assertEqual(
            third_setitem_table.read(field='surname')[0],
            compat.tobytes('TIMUR'))
        self.assertEqual(third_setitem_table.read(field='weight')[0], 75.5)

    # def test_table_get_where_list(self):
    #     pass
    #
    # def test_table_read_where(self):
    #     pass

    def test_table_where(self):
        # A compound where() condition must match exactly the one row
        # that satisfies all four field constraints.
        the_where_table = self.traj.results.shared_data.table
        self.assertTrue(the_where_table is self.shared_table)
        the_where_table.create_shared_data(description=MyTable)
        with StorageContextManager(self.traj):
            row = the_where_table.row
            for i in range(10):
                row['id'] = i
                row['name'] = 'mehmet %d' % i
                row['surname'] = 'Timur'
                row['weight'] = 65.5 + i
                row.append()
            the_where_table.flush()
            for idx, row in enumerate(the_where_table.iterrows()):
                self.assertEqual(row['id'], idx)
        self.traj.f_store()
        traj2 = load_trajectory(name=self.traj.v_name,
                                filename=self.filename,
                                load_all=2,
                                dynamic_imports=SharedResult)
        second_where_table = traj2.results.shared_data.table
        with StorageContextManager(traj2):
            result = second_where_table.where(
                '(id == 2)&(name == b"mehmet 2")&(surname ==b"Timur")&(weight == 67.5)'
            )
            there = False
            for row in result:
                there = True
            self.assertTrue(there)

    # def test_table_append_where(self):
    #     pass
    #
    # def test_table_will_query_use_indexing(self):
    #     pass
    #
    # def test_table_copy(self):
    #     pass
    #
    # def test_table_flush_rows_to_index(self):
    #     pass
    #
    # def test_table_get_enum(self):
    #     pass
    #
    # def test_table_reindex(self):
    #     pass
    #
    # def test_table_reindex_dirty(self):
    #     pass
    #
    # def test_table_remove_index(self):
    #     pass
    #
    # def test_table_create_index(self):
    #     pass
    #
    # def test_table_create_cindex(self):
    #     pass
    #
    # def test_table_colindexes(self):
    #     pass
    #
    # def test_table_cols(self):
    #     pass
    #
    # def test_table_row(self):
    #     pass

    def test_table_flush(self):
        # Rows become visible to iterrows() after flush(); an eleventh
        # row appended after reload must integrate seamlessly.
        the_flush_table = self.traj.results.shared_data.table
        self.assertTrue(the_flush_table is self.shared_table)
        the_flush_table.create_shared_data(description=MyTable)
        with StorageContextManager(self.traj):
            row = the_flush_table.row
            for i in range(10):
                row['id'] = i
                row['name'] = 'mehmet %d' % i
                row['surname'] = 'Timur'
                row['weight'] = 65.5 + i
                row.append()
            the_flush_table.flush()
            for idx, row in enumerate(the_flush_table.iterrows()):
                self.assertEqual(row['id'], idx)
                self.assertEqual(row['name'],
                                 compat.tobytes('mehmet %d' % idx))
                self.assertEqual(row['surname'], compat.tobytes('Timur'))
                self.assertEqual(row['weight'], 65.5 + idx)
        self.traj.f_store()
        traj2 = load_trajectory(name=self.traj.v_name,
                                filename=self.filename,
                                load_all=2,
                                dynamic_imports=SharedResult)
        second_flush_table = traj2.results.shared_data.table
        with StorageContextManager(traj2):
            for idx, row in enumerate(second_flush_table.iterrows()):
                self.assertEqual(row['id'], idx)
                self.assertEqual(row['name'],
                                 compat.tobytes('mehmet %d' % idx))
                self.assertEqual(row['surname'], compat.tobytes('Timur'))
                self.assertEqual(row['weight'], 65.5 + idx)
            row = second_flush_table.row
            for i in range(10, 11):
                row['id'] = i
                row['name'] = 'mehmet %d' % i
                row['surname'] = 'Timur'
                row['weight'] = 65.5 + i
                row.append()
            second_flush_table.flush()
            for idx, row in enumerate(second_flush_table.iterrows()):
                self.assertEqual(row['id'], idx)
                self.assertEqual(row['name'],
                                 compat.tobytes('mehmet %d' % idx))
                self.assertEqual(row['surname'], compat.tobytes('Timur'))
                self.assertEqual(row['weight'], 65.5 + idx)
import pandas as pd
from pypet import Trajectory
import matplotlib.pyplot as plt

plt.style.use("seaborn")

# Locations of the simulation output.
SIM_NAME = "covariate_binary"
SIM_PATH = "/home/simon/Documents/NNVI/simulations/" + SIM_NAME
RES_FILE = SIM_PATH + "/results/" + SIM_NAME + ".hdf5"

# Load the stored pypet trajectory with everything fully expanded.
traj = Trajectory(SIM_NAME, add_time=False)
traj.f_load(
    filename=RES_FILE,
    load_data=2,
    load_results=2,
    load_derived_parameters=2,
    load_other_data=2,
    load_parameters=2,
    force=True
)
traj.v_auto_load = True


def _per_run(name):
    """Collect result ``name`` (stored as an attribute of the same name)
    from every run of the trajectory, in run order."""
    return [getattr(run.f_get(name), name) for run in traj.res.runs]


# One list per recorded metric, ordered by run.
seed = _per_run("seed")
p_bin = _per_run("p_bin")
N = _per_run("N")
mse = _per_run("mse")
elbo = _per_run("elbo")
auroc = _per_run("auroc")
def test_storing_and_manipulating(self): filename = make_temp_dir('hdf5manipulation.hdf5') traj = Trajectory(name=make_trajectory_name(self), filename=filename) trajname = traj.v_name thedata = np.zeros((1000, 1000)) res = traj.f_add_result(SharedResult, 'shared') myarray = SharedArray('array', res, trajectory=traj, add_to_parent=True) mytable = SharedTable('t1', res, trajectory=traj, add_to_parent=True) mytable2 = SharedTable('t2', res, trajectory=traj, add_to_parent=True) mytable3 = SharedTable('t3', res, trajectory=traj, add_to_parent=True) traj.f_store(only_init=True) myarray.create_shared_data(data=thedata) mytable.create_shared_data(first_row={ 'hi': compat.tobytes('hi'), 'huhu': np.ones(3) }) mytable2.create_shared_data(description={ 'ha': pt.StringCol(2, pos=0), 'haha': pt.FloatCol(pos=1) }) mytable3.create_shared_data(description={ 'ha': pt.StringCol(2, pos=0), 'haha': pt.FloatCol(pos=1) }) traj.f_store() newrow = {'ha': 'hu', 'haha': 4.0} with self.assertRaises(TypeError): traj.shared.t2.row with StorageContextManager(traj) as cm: row = traj.shared.t2.row for irun in range(11): for key, val in newrow.items(): row[key] = val row.append() traj.shared.t3.flush() data = myarray.read() myarray.get_data_node() self.assertTrue(np.all(data == thedata)) with StorageContextManager(traj): myarray[2, 2] = 10 data = myarray.read() self.assertTrue(data[2, 2] == 10) self.assertTrue(data[2, 2] == 10) self.assertFalse(traj.v_storage_service.is_open) traj = load_trajectory(name=trajname, filename=filename) traj.f_load(load_data=2) traj.shared.t2.traj = traj traj.shared.t1.traj = traj traj.shared.array.traj = traj self.assertTrue(traj.shared.t2.nrows == 11, '%s != 11' % str(traj.shared.t2.nrows)) self.assertTrue(traj.shared.t2[0]['ha'] == compat.tobytes('hu'), traj.shared.t2[0]['ha']) self.assertTrue(traj.shared.t2[1]['ha'] == compat.tobytes('hu'), traj.shared.t2[1]['ha']) self.assertTrue('huhu' in traj.shared.t1.colnames) self.assertTrue(traj.shared.array[2, 2] == 10)
os.path.join(base_dir, dir_name) for dir_name in traj_dir_names ] for traj_dir in traj_dir_fullpaths: if os.path.exists(traj_dir): print('OK: Directory found: {0}'.format(traj_dir)) else: raise ValueError('ERROR: Directory not found: {0}'.format(traj_dir)) for traj_dir in traj_dir_fullpaths: # Change current directory to the one containing the trajectory files os.chdir(traj_dir) # Load the trajectory (only parameters) traj_fullpath = os.path.join(traj_dir, traj_filename) traj = Trajectory() traj.f_load(filename=traj_fullpath, index=0, load_parameters=2, load_results=0, load_derived_parameters=0, force=True) # Turn on auto loading traj.v_auto_load = True # Ensure trajectory was not already assembled if not traj.f_is_completed(): # Save a backup version of the original trajectory traj_backup_fullpath = os.path.join( traj_dir, traj_filename + '.backup' + datetime.now().strftime("%Y_%m_%d_%Hh%Mm%Ss"))
import os import numpy as np import pandas as pd from scipy.linalg import block_diag from scipy.linalg import solve_discrete_lyapunov fdr = False debug_mode = False save_results = True # Load the trajectory from the hdf5 file # Only load parameters, results will be loaded at runtime (auto loading) traj_dir = 'TE_from_couplings_WS_sweep_noise1_w0.15_100nodes_100000samples_10rep_history14' traj_filename = 'traj.hdf5' traj_fullpath = os.path.join(traj_dir, traj_filename) traj = Trajectory() traj.f_load(filename=traj_fullpath, index=0, load_parameters=2, load_results=0, load_derived_parameters=0, force=True) # Turn on auto loading traj.v_auto_load = True # Count number of runs runs_n = len(traj.f_get_run_names()) print('Number of runs = {0}'.format(runs_n)) # Get list of explored parameters parameters_explored = [
def main():
    """Load the stored FiringRate trajectory and plot the results.

    No environment is needed here because we only read data back from the
    HDF5 file: the average firing rates across all runs, plus the membrane
    potential trace of one example run.
    """
    # This time we don't need an environment since we are just going to look
    # at data in the trajectory.
    traj = Trajectory('FiringRate', add_time=False)

    # Only load the parameters; results are loaded on the fly as needed.
    filename = os.path.join('hdf5', 'FiringRate.hdf5')
    traj.f_load(load_parameters=2, load_derived_parameters=0, load_results=0,
                load_other_data=0, filename=filename)

    # Auto loading fetches missing data transparently on first access.
    traj.v_auto_load = True

    rates_frame = traj.res.summary.firing_rates.rates_frame
    # Here we load the data automatically on the fly

    plt.figure()
    plt.subplot(2, 1, 1)
    # Iterate through the columns and plot the different firing rates.
    # FIX: `DataFrame.iteritems()` was removed in pandas 2.0; `items()` is
    # the drop-in equivalent and yields identical (label, Series) pairs.
    for tau_ref, I_col in rates_frame.items():
        plt.plot(I_col.index, I_col,
                 label='Avg. Rate for tau_ref=%s' % str(tau_ref))

    # Label the plot
    plt.xlabel('I')
    plt.ylabel('f[Hz]')
    plt.title('Firing as a function of input current `I`')
    plt.legend(loc='best')

    # Also let's plot an example run, how about run 13?
    example_run = 13

    traj.v_idx = example_run  # Make the trajectory behave as a single run container.
    # This short statement has two major effects:
    # a) all explored parameters are set to the value of run 13,
    # b) if there are tree nodes with names other than the current run aka
    #    `run_00000013` they are simply ignored; if we use the `$` sign or the
    #    `crun` statement, these are translated into `run_00000013`.

    # Get the example data
    example_I = traj.I
    example_tau_ref = traj.tau_ref
    example_V = traj.results.neuron.crun.V  # Here crun stands for run_00000013

    # We need the time step...
    dt = traj.dt

    # ...to create an x-axis for the plot
    dt_array = [irun * dt for irun in range(len(example_V))]

    # Plot the development of V over time. Since the trace is rather
    # repetitive, we only show the first eighth of it.
    plt.subplot(2, 1, 2)
    plt.plot(dt_array, example_V)
    plt.xlim((0, dt * len(example_V) / 8))

    # Label the axis
    plt.xlabel('t[ms]')
    plt.ylabel('V')
    plt.title('Example of development of V for I=%s, tau_ref=%s in run %d' %
              (str(example_I), str(example_tau_ref), traj.v_idx))

    # And let's take a look at it
    plt.show()

    # Finally revoke the `traj.v_idx=13` statement and set everything back to
    # normal. Since our analysis is done here, we could skip that, but it is
    # always a good idea to do so.
    traj.f_restore_default()
for dir_name in traj_dir_names ] # Check if trajectory files exist print('Checking if trajectory files exist...') for traj_fullpath in traj_fullpath_list: if os.path.exists(traj_fullpath): print('OK: Trajectory file found: {0}'.format(traj_fullpath)) else: raise ValueError( 'ERROR: Trajectory file not found: {0}'.format(traj_fullpath)) # Load parameters and results, and check that all trajectories # to be merged are completed print('Loading parameters and results...') trajectories = [Trajectory() for traj_fullpath in traj_fullpath_list] for traj_i in range(len(traj_fullpath_list)): trajectories[traj_i].f_load(filename=traj_fullpath_list[traj_i], index=0, load_parameters=2, load_derived_parameters=2, load_results=2, load_other_data=2, force=True) # Ensure that the trajectory was explored if not trajectories[traj_i].f_is_completed(): raise ValueError( 'ERROR: Trajectory not completed or not assembled: {0}'.format( traj_fullpath_list[traj_i])) print('Creating output directory...')