def test_errors(self):
    """Check error conditions of shared-data creation and storage contexts.

    Verifies that shared data cannot be created before the trajectory is
    stored, that row iteration outside a storage context fails, and that
    nesting/opening a second storage context raises ``RuntimeError``.
    """
    filename = make_temp_dir("hdf5errors.hdf5")
    traj = Trajectory(name=make_trajectory_name(self), filename=filename)
    # np.float was removed in NumPy 1.20; builtin float is the equivalent dtype.
    npearray = np.ones((2, 10, 3), dtype=float)
    thevlarray = np.array([compat.tobytes("j"), 22.2, compat.tobytes("gutter")])
    # Creating shared data before the trajectory is stored must fail.
    with self.assertRaises(TypeError):
        traj.f_add_result(SharedResult, "arrays.vlarray",
                          SharedVLArray()).create_shared_data(obj=thevlarray)
    traj.f_store()
    traj.arrays.vlarray.create_shared_data(obj=thevlarray)
    traj.f_add_result(SharedResult, "arrays.array",
                      SharedArray()).create_shared_data(data=npearray)
    traj.arrays.f_add_result(SharedResult, "super.carray", SharedCArray(),
                             comment="carray").create_shared_data(
                                 shape=(10, 10), atom=pt.atom.FloatAtom())
    traj.arrays.f_add_result(SharedResult, "earray",
                             SharedEArray()).create_shared_data("earray", obj=npearray)
    traj.f_store()
    # Row iteration requires an open storage context.
    with self.assertRaises(TypeError):
        traj.arrays.array.iter_rows()
    with StorageContextManager(traj) as cm:
        # A nested second context manager is rejected while the store is open.
        with self.assertRaises(RuntimeError):
            with StorageContextManager(traj) as cm2:
                pass
        self.assertTrue(traj.v_storage_service.is_open)
        with self.assertRaises(RuntimeError):
            StorageContextManager(traj).f_open_store()
    self.assertFalse(traj.v_storage_service.is_open)
def test_logging_stdout(self):
    """Redirected stdout must appear in the main log file; DEBUG output must not."""
    log_file = make_temp_dir('teststdoutlog.hdf5')
    folder = make_temp_dir('logs')  # created for its side effect only
    env = Environment(trajectory=make_trajectory_name(self),
                      filename=log_file,
                      log_config=get_log_config(),
                      log_stdout=('STDOUT', 50))
    env.f_run(log_error)
    traj = env.v_traj
    log_path = get_log_path(traj)
    marker = 'sTdOuTLoGGinG'
    print(marker)
    env.f_disable_logging()
    main_log = os.path.join(log_path, 'LOG.txt')
    with open(main_log, mode='r') as fh:
        content = fh.read()
    self.assertTrue(marker in content)
    self.assertTrue('4444444' not in content)
    self.assertTrue('DEBUG' not in content)
def make_environment(self, idx, filename, continuable=True, delete_continue=False):
    """Build an environment with continue support and record it in self.envs/self.trajs."""
    self.logfolder = make_temp_dir(os.path.join('experiments', 'tests', 'Log'))
    self.cnt_folder = make_temp_dir(os.path.join('experiments', 'tests', 'cnt'))
    traj_name = 'Test%d' % idx + '_' + make_trajectory_name(self)
    env = Environment(trajectory=traj_name,
                      filename=filename,
                      file_title=traj_name,
                      log_stdout=False,
                      log_config=get_log_config(),
                      continuable=continuable,
                      continue_folder=self.cnt_folder,
                      delete_continue=delete_continue,
                      large_overview_tables=True)
    self.envs.append(env)
    self.trajs.append(env.v_trajectory)
def setUp(self):
    """Create the sort-test environment (with graceful exit) and two parameters."""
    self.set_mode()
    self.filename = make_temp_dir(
        os.path.join('experiments', 'tests', 'HDF5', 'sort_tests.hdf5'))
    self.trajname = make_trajectory_name(self)
    env = Environment(trajectory=self.trajname,
                      filename=self.filename,
                      file_title=self.trajname,
                      log_stdout=self.log_stdout,
                      log_config=get_log_config() if self.log_config else None,
                      multiproc=self.multiproc,
                      wrap_mode=self.mode,
                      ncores=self.ncores,
                      use_pool=self.use_pool,
                      use_scoop=self.use_scoop,
                      port=self.port,
                      freeze_input=self.freeze_input,
                      graceful_exit=self.graceful_exit)
    traj = env.v_trajectory
    traj.v_standard_parameter = Parameter
    traj.f_add_parameter('x', 99)
    traj.f_add_parameter('y', 99)
    self.env = env
    self.traj = traj
def setUp(self):
    """Create the sort-test environment; parameters x and y both start at 0."""
    self.set_mode()
    self.filename = make_temp_dir(
        os.path.join('experiments', 'tests', 'HDF5', 'sort_tests.hdf5'))
    self.trajname = make_trajectory_name(self)
    env = Environment(trajectory=self.trajname,
                      filename=self.filename,
                      file_title=self.trajname,
                      log_stdout=self.log_stdout,
                      log_config=get_log_config() if self.log_config else None,
                      multiproc=self.multiproc,
                      wrap_mode=self.mode,
                      ncores=self.ncores,
                      use_pool=self.use_pool,
                      use_scoop=self.use_scoop,
                      port=self.port,
                      freeze_input=self.freeze_input)
    traj = env.v_trajectory
    traj.v_standard_parameter = Parameter
    traj.f_add_parameter('x', 0)
    traj.f_add_parameter('y', 0)
    self.env = env
    self.traj = traj
def make_environment_mp(self, idx, filename):
    """Build a pool-based multiprocessing environment with continue files enabled."""
    self.logfolder = make_temp_dir(os.path.join('experiments', 'tests', 'Log'))
    self.cnt_folder = make_temp_dir(os.path.join('experiments', 'tests', 'cnt'))
    traj_name = 'Test%d' % idx + '_' + make_trajectory_name(self)
    env = Environment(trajectory=traj_name,
                      dynamic_imports=[CustomParameter],
                      filename=filename,
                      file_title=traj_name,
                      log_stdout=False,
                      purge_duplicate_comments=False,
                      log_config=get_log_config(),
                      continuable=True,
                      continue_folder=self.cnt_folder,
                      delete_continue=False,
                      multiproc=True,
                      use_pool=True,
                      ncores=4)
    self.envs.append(env)
    self.trajs.append(env.v_trajectory)
def make_environment(self, idx, filename):
    """Build a continuable environment with large overview tables and record it."""
    self.logfolder = make_temp_dir(os.path.join('experiments', 'tests', 'Log'))
    self.cnt_folder = make_temp_dir(os.path.join('experiments', 'tests', 'cnt'))
    traj_name = 'Test%d' % idx + '_' + make_trajectory_name(self)
    env = Environment(trajectory=traj_name,
                      filename=filename,
                      file_title=traj_name,
                      log_stdout=False,
                      log_config=get_log_config(),
                      continuable=True,
                      continue_folder=self.cnt_folder,
                      delete_continue=False,
                      large_overview_tables=True)
    self.envs.append(env)
    self.trajs.append(env.v_trajectory)
def test_hdf5_store_load_parameter(self):
    """Store a single Brian2Parameter to HDF5 and compare with a reloaded copy."""
    traj_name = make_trajectory_name(self)
    file_name = make_temp_dir(
        os.path.join('brian2', 'tests', 'hdf5', 'test_%s.hdf5' % traj_name))
    env = Environment(trajectory=traj_name,
                      filename=file_name,
                      log_config=get_log_config(),
                      dynamic_imports=[Brian2Parameter],
                      add_time=False,
                      storage_service=HDF5StorageService)
    traj = env.v_trajectory
    traj.v_standard_parameter = Brian2Parameter
    traj.f_add_parameter('brian2.single.millivolts', 10*mvolt,
                         comment='single value')
    # NOTE(review): further parameters and an exploration were disabled
    # (commented out) in the original test and are intentionally left out.
    traj.f_store()
    traj2 = load_trajectory(filename=file_name, name=traj_name,
                            dynamic_imports=[Brian2Parameter], load_data=2)
    self.compare_trajectories(traj, traj2)
def test_hdf5_store_load_monitorresult(self):
    """Round-trip a rich set of Brian2MonitorResult items through HDF5."""
    traj_name = make_trajectory_name(self)
    file_name = make_temp_dir(
        os.path.join('brian2', 'tests', 'hdf5', 'test_%s.hdf5' % traj_name))
    env = Environment(trajectory=traj_name,
                      filename=file_name,
                      log_config=get_log_config(),
                      dynamic_imports=[Brian2MonitorResult],
                      add_time=False,
                      storage_service=HDF5StorageService)
    traj = env.v_trajectory
    traj.v_standard_result = Brian2MonitorResult
    traj.f_add_result('brian2.single.millivolts_single_a', 10*mvolt,
                      comment='single value a')
    traj.f_add_result('brian2.single.millivolts_single_c', 11*mvolt,
                      comment='single value b')
    traj.f_add_result('brian2.array.millivolts_array_a', [11, 12]*mvolt,
                      comment='array')
    traj.f_add_result('mV1', 42.0*mV)
    # A single result may bundle several heterogeneous data items at once.
    traj.f_add_result('ampere1', 1*mA, 44,
                      test=300*mV, test2=[1, 2, 3],
                      test3=np.array([1, 2, 3])*mA,
                      comment='Result keeping track of many things')
    traj.f_add_result('integer', 16)
    traj.f_add_result('kHz05', 0.5*kHz)
    traj.f_add_result('nested_array',
                      np.array([[6., 7., 8.], [9., 10., 11.]]) * ms)
    traj.f_add_result('b2a', np.array([1., 2.]) * mV)
    traj.f_store()
    traj2 = load_trajectory(filename=file_name, name=traj_name,
                            dynamic_imports=[Brian2MonitorResult], load_data=2)
    self.compare_trajectories(traj, traj2)
def test_df(self):
    """Shared pandas frames: create, append ten times, read back, and select."""
    path = make_temp_dir('hdf5errors.hdf5')
    traj = Trajectory(name=make_trajectory_name(self), filename=path)
    traj.f_store()
    frame_data = {'hi': [1, 2, 3, 4, 5],
                  'shu': ['bi', 'du', 'da', 'ha', 'hui']}
    answer_data = {'answer': [42]}
    traj.f_add_result(SharedResult, 'dfs.df',
                      SharedPandasFrame()).create_shared_data(
                          data=pd.DataFrame(frame_data))
    traj.f_add_result(SharedResult, 'dfs.df1',
                      SharedPandasFrame()).create_shared_data(
                          data=pd.DataFrame(answer_data))
    traj.f_add_result(SharedResult, 'dfs.df3', SharedPandasFrame())
    for _ in range(10):
        traj.df3.append(traj.df1.read())
    dframe = traj.df3.read()
    self.assertTrue(len(dframe) == 10)
    selection = traj.df.select(where='index == 2')
    self.assertTrue(len(selection) == 1)
def setUp(self):
    """Prepare empty env/traj lists, the mode, a temp HDF5 file, and a name."""
    self.envs = []
    self.trajs = []
    self.set_mode()
    self.filename = make_temp_dir(
        os.path.join('experiments', 'tests', 'HDF5', 'test.hdf5'))
    self.trajname = make_trajectory_name(self)
def test_storing_and_manipulating(self):
    """Create shared arrays/tables, write table rows inside a storage context,
    mutate array cells, then reload the trajectory and verify everything."""
    filename = make_temp_dir("hdf5manipulation.hdf5")
    traj = Trajectory(name=make_trajectory_name(self), filename=filename)
    trajname = traj.v_name
    thedata = np.zeros((1000, 1000))
    res = traj.f_add_result(SharedResult, "shared")
    myarray = SharedArray("array", res, trajectory=traj, add_to_parent=True)
    mytable = SharedTable("t1", res, trajectory=traj, add_to_parent=True)
    mytable2 = SharedTable("t2", res, trajectory=traj, add_to_parent=True)
    mytable3 = SharedTable("t3", res, trajectory=traj, add_to_parent=True)
    # Shared data can only be created after the tree skeleton is on disk.
    traj.f_store(only_init=True)
    myarray.create_shared_data(data=thedata)
    mytable.create_shared_data(first_row={"hi": compat.tobytes("hi"), "huhu": np.ones(3)})
    mytable2.create_shared_data(description={"ha": pt.StringCol(2, pos=0), "haha": pt.FloatCol(pos=1)})
    mytable3.create_shared_data(description={"ha": pt.StringCol(2, pos=0), "haha": pt.FloatCol(pos=1)})
    traj.f_store()
    newrow = {"ha": "hu", "haha": 4.0}
    # Row access outside a storage context must fail.
    with self.assertRaises(TypeError):
        row = traj.shared.t2.row
    with StorageContextManager(traj) as cm:
        row = traj.shared.t2.row
        # Append 11 identical rows to t2.
        for irun in range(11):
            for key, val in newrow.items():
                row[key] = val
            row.append()
        traj.shared.t3.flush()
    data = myarray.read()
    arr = myarray.get_data_node()
    self.assertTrue(np.all(data == thedata))
    with StorageContextManager(traj) as cm:
        myarray[2, 2] = 10
        data = myarray.read()
        self.assertTrue(data[2, 2] == 10)
    # The mutation persists after the context closes.
    self.assertTrue(data[2, 2] == 10)
    self.assertFalse(traj.v_storage_service.is_open)
    # Reload from disk and re-attach the trajectory to the shared nodes.
    traj = load_trajectory(name=trajname, filename=filename)
    traj.f_load(load_data=2)
    traj.shared.t2.traj = traj
    traj.shared.t1.traj = traj
    traj.shared.array.traj = traj
    self.assertTrue(traj.shared.t2.nrows == 11, "%s != 11" % str(traj.shared.t2.nrows))
    self.assertTrue(traj.shared.t2[0]["ha"] == compat.tobytes("hu"), traj.shared.t2[0]["ha"])
    self.assertTrue(traj.shared.t2[1]["ha"] == compat.tobytes("hu"), traj.shared.t2[1]["ha"])
    self.assertTrue("huhu" in traj.shared.t1.colnames)
    self.assertTrue(traj.shared.array[2, 2] == 10)
def test_all_arrays(self):
    """Exercise every shared array flavour: create, store, reload, mutate, verify."""
    filename = make_temp_dir("hdf5arrays.hdf5")
    traj = Trajectory(name=make_trajectory_name(self), filename=filename)
    trajname = traj.v_name
    # np.float was removed in NumPy 1.20; builtin float is the equivalent dtype.
    npearray = np.ones((2, 10, 3), dtype=float)
    thevlarray = np.array([compat.tobytes("j"), 22.2, compat.tobytes("gutter")])
    traj.f_store(only_init=True)
    res = traj.f_add_result(SharedResult, "arrays")
    res["carray"] = SharedCArray()
    res["carray"].create_shared_data(shape=(10, 10), atom=pt.atom.FloatAtom())
    res["earray"] = SharedEArray()
    res["earray"].create_shared_data(obj=npearray)
    res["vlarray"] = SharedVLArray()
    res["vlarray"].create_shared_data(obj=thevlarray)
    res["array"] = SharedArray()
    res["array"].create_shared_data(data=npearray)
    traj.f_store()
    traj = load_trajectory(name=trajname, filename=filename, load_all=2,
                           dynamic_imports=SharedResult)
    to_append = [44, compat.tobytes("k")]  # fixed misspelled local 'toappned'
    with StorageContextManager(traj) as cm:
        a1 = traj.arrays.array
        a1[0, 0, 0] = 4.0
        a2 = traj.arrays.carray
        a2[0, 1] = 4
        a4 = traj.arrays.vlarray
        a4.append(to_append)
        a3 = traj.arrays.earray
        a3.append(np.zeros((1, 10, 3)))
    # Reload to prove the mutations were flushed to disk.
    traj = load_trajectory(name=trajname, filename=filename, load_all=2,
                           dynamic_imports=SharedResult)
    with StorageContextManager(traj) as cm:
        a1 = traj.arrays.array
        self.assertTrue(a1[0, 0, 0] == 4.0)
        a2 = traj.arrays.carray
        self.assertTrue(a2[0, 1] == 4)
        a3 = traj.arrays.earray
        self.assertTrue(a3.read().shape == (3, 10, 3))
        a4 = traj.arrays.vlarray
        for idx, x in enumerate(a4):
            if idx == 0:
                self.assertTrue(np.all(x == np.array(thevlarray)))
            elif idx == 1:
                self.assertTrue(np.all(x == np.array(to_append)))
            else:
                raise RuntimeError()
def make_env(self, **kwargs):
    """Merge kwargs into the mode settings and build a fresh environment."""
    self.mode.__dict__.update(kwargs)
    self.filename = make_temp_dir('log_testing.hdf5')
    self.traj_name = make_trajectory_name(self)
    self.env = Environment(trajectory=self.traj_name,
                           filename=self.filename,
                           **self.mode.__dict__)
    self.traj = self.env.v_traj
def setUp(self):
    """Set up a capped multiprocessing environment, cycling through one
    resource cap (cpu/memory/swap) per test invocation."""
    self.multiproc = True
    self.mode = 'LOCK'
    self.trajname = make_trajectory_name(self)
    self.filename = make_temp_dir(
        os.path.join('experiments', 'tests', 'HDF5', '%s.hdf5' % self.trajname))
    self.logfolder = make_temp_dir(os.path.join('experiments', 'tests', 'Log'))
    random.seed()
    # Tiny caps guarantee that each cap actually triggers during the run.
    cap_dicts = (dict(cpu_cap=0.000001),
                 dict(memory_cap=(0.000001, 150.0)),
                 dict(swap_cap=0.000001,))
    cap_dict = cap_dicts[CapTest.cap_count]
    env = Environment(trajectory=self.trajname,
                      filename=self.filename,
                      file_title=self.trajname,
                      log_folder=self.logfolder,
                      logger_names=('pypet', 'test'),
                      log_levels='ERROR',
                      log_stdout=False,
                      results_per_run=5,
                      derived_parameters_per_run=5,
                      multiproc=True,
                      ncores=4,
                      use_pool=False,
                      niceness=check_nice(11),
                      **cap_dict)
    logging.getLogger('test').error('Using Cap: %s and file: %s' %
                                    (str(cap_dict), str(self.filename)))
    # Advance to the next cap configuration for the following test.
    CapTest.cap_count += 1
    CapTest.cap_count = CapTest.cap_count % len(cap_dicts)
    traj = env.v_trajectory
    self.param_dict = {}
    create_param_dict(self.param_dict)
    add_params(traj, self.param_dict)
    self.traj = traj
    self.env = env
def setUp(self):
    """Build the fully parameterized environment used by the run tests."""
    self.set_mode()
    self.logfolder = make_temp_dir(os.path.join('experiments', 'tests', 'Log'))
    random.seed()
    self.trajname = make_trajectory_name(self)
    self.filename = make_temp_dir(
        os.path.join('experiments', 'tests', 'HDF5', 'test%s.hdf5' % self.trajname))
    env = Environment(trajectory=self.trajname,
                      filename=self.filename,
                      file_title=self.trajname,
                      log_stdout=self.log_stdout,
                      log_config=get_log_config(),
                      results_per_run=5,
                      wildcard_functions=self.wildcard_functions,
                      derived_parameters_per_run=5,
                      multiproc=self.multiproc,
                      ncores=self.ncores,
                      wrap_mode=self.mode,
                      use_pool=self.use_pool,
                      gc_interval=self.gc_interval,
                      freeze_input=self.freeze_input,
                      fletcher32=self.fletcher32,
                      complevel=self.complevel,
                      complib=self.complib,
                      shuffle=self.shuffle,
                      pandas_append=self.pandas_append,
                      pandas_format=self.pandas_format,
                      encoding=self.encoding,
                      niceness=self.niceness,
                      use_scoop=self.use_scoop,
                      port=self.port,
                      add_time=self.add_time,
                      timeout=self.timeout,
                      graceful_exit=self.graceful_exit)
    traj = env.v_trajectory
    traj.v_standard_parameter = Parameter
    # Populate the trajectory with the standard test parameters.
    self.param_dict = {}
    create_param_dict(self.param_dict)
    add_params(traj, self.param_dict)
    self.traj = traj
    self.env = env
def test_compacting(self):
    """Fill a shared table, delete most rows and two result trees, then run
    ptrepack-based compaction and check the file actually shrank."""
    filename = make_temp_dir('hdf5compacting.hdf5')
    traj = Trajectory(name=make_trajectory_name(self), filename=filename)
    trajname = traj.v_name
    traj.v_storage_service.complevel = 7
    first_row = {'ha': compat.tobytes('hi'), 'haha': np.zeros((3, 3))}
    traj.f_store(only_init=True)
    traj.f_add_result('My.Tree.Will.Be.Deleted', 42)
    traj.f_add_result('Mine.Too.HomeBoy', 42, comment='Don`t cry for me!')
    res = traj.f_add_result(SharedResult, 'myres')
    res['myres'] = SharedTable()
    res['myres'].create_shared_data(first_row=first_row)
    with StorageContextManager(traj):
        traj.myres  # touch the node inside the open store
        # Append 10000 identical rows.
        for irun in range(10000):
            row = traj.myres.row
            for key in first_row:
                row[key] = first_row[key]
            row.append()
    traj.f_store()
    del traj
    traj = load_trajectory(name=trajname, filename=filename, load_all=2)
    with StorageContextManager(traj) as cm:
        tb = traj.myres.get_data_node()
        # Drop rows 1000..9999; 1001 rows remain (the initial one plus 1000).
        ptcompat.remove_rows(tb, 1000, 10000)
        cm.flush_store()
        self.assertTrue(traj.myres.nrows == 1001)
    traj.f_delete_item(traj.My, recursive=True)
    traj.f_delete_item(traj.Mine, recursive=True)
    size = os.path.getsize(filename)
    get_root_logger().info('Filesize is %s' % str(size))
    name_wo_ext, ext = os.path.splitext(filename)
    backup_file_name = name_wo_ext + '_backup' + ext
    code = compact_hdf5_file(filename, keep_backup=True)
    if code != 0:
        raise RuntimeError('ptrepack fail')
    # The backup must be the pre-compaction file; the new file must be smaller.
    backup_size = os.path.getsize(backup_file_name)
    self.assertTrue(backup_size == size)
    new_size = os.path.getsize(filename)
    get_root_logger().info('New filesize is %s' % str(new_size))
    self.assertTrue(new_size < size, "%s > %s" % (str(new_size), str(size)))
def test_compacting(self):
    """Variant of the compaction test using the f_-prefixed flush API:
    fill a shared table, remove rows and trees, compact, verify shrinkage."""
    filename = make_temp_dir("hdf5compacting.hdf5")
    traj = Trajectory(name=make_trajectory_name(self), filename=filename)
    trajname = traj.v_name
    traj.v_storage_service.complevel = 7
    first_row = {"ha": compat.tobytes("hi"), "haha": np.zeros((3, 3))}
    traj.f_store(only_init=True)
    res1 = traj.f_add_result("My.Tree.Will.Be.Deleted", 42)
    res2 = traj.f_add_result("Mine.Too.HomeBoy", 42, comment="Don`t cry for me!")
    res = traj.f_add_result(SharedResult, "myres")
    res["myres"] = SharedTable()
    res["myres"].create_shared_data(first_row=first_row)
    with StorageContextManager(traj):
        tab = traj.myres
        # Append 10000 identical rows.
        for irun in range(10000):
            row = traj.myres.row
            for key in first_row:
                row[key] = first_row[key]
            row.append()
    traj.f_store()
    del traj
    traj = load_trajectory(name=trajname, filename=filename, load_all=2)
    with StorageContextManager(traj) as cm:
        tb = traj.myres.get_data_node()
        # Drop rows 1000..9999; 1001 rows remain.
        ptcompat.remove_rows(tb, 1000, 10000)
        cm.f_flush_store()
        self.assertTrue(traj.myres.nrows == 1001)
    traj.f_delete_item(traj.My, recursive=True)
    traj.f_delete_item(traj.Mine, recursive=True)
    size = os.path.getsize(filename)
    get_root_logger().info("Filesize is %s" % str(size))
    name_wo_ext, ext = os.path.splitext(filename)
    backup_file_name = name_wo_ext + "_backup" + ext
    code = compact_hdf5_file(filename, keep_backup=True)
    if code != 0:
        raise RuntimeError("ptrepack fail")
    # The backup keeps the pre-compaction size; the live file must shrink.
    backup_size = os.path.getsize(backup_file_name)
    self.assertTrue(backup_size == size)
    new_size = os.path.getsize(filename)
    get_root_logger().info("New filesize is %s" % str(new_size))
    self.assertTrue(new_size < size, "%s > %s" % (str(new_size), str(size)))
def setUp(self):
    """Set up a multiprocess environment with one tiny resource cap
    (cpu, memory, or swap) so the cap logic is guaranteed to trigger."""
    self.multiproc = True
    self.mode = 'LOCK'
    self.trajname = make_trajectory_name(self)
    self.filename = make_temp_dir(os.path.join('experiments', 'tests', 'HDF5',
                                               '%s.hdf5' % self.trajname))
    self.logfolder = make_temp_dir(os.path.join('experiments', 'tests', 'Log'))
    random.seed()
    cap_dicts = (dict(cpu_cap=0.000001),  # Ensure that these are triggered
                 dict(memory_cap=(0.000001, 150.0)),
                 dict(swap_cap=0.000001,))
    # Pick the cap configuration for this invocation.
    cap_dict = cap_dicts[CapTest.cap_count]
    env = Environment(trajectory=self.trajname, filename=self.filename,
                      file_title=self.trajname,
                      log_folder=self.logfolder,
                      logger_names=('pypet', 'test'),
                      log_levels='ERROR',
                      log_stdout=False,
                      results_per_run=5,
                      derived_parameters_per_run=5,
                      multiproc=True,
                      ncores=4,
                      use_pool=False,
                      niceness = check_nice(11),
                      **cap_dict)
    logging.getLogger('test').error('Using Cap: %s and file: %s' %
                                    (str(cap_dict), str(self.filename)))
    # Loop through all possible cap configurations
    # and test one at a time
    CapTest.cap_count += 1
    CapTest.cap_count = CapTest.cap_count % len(cap_dicts)
    traj = env.v_trajectory
    ## Create some parameters
    self.param_dict={}
    create_param_dict(self.param_dict)
    ### Add some parameter:
    add_params(traj,self.param_dict)
    #remember the trajectory and the environment
    self.traj = traj
    self.env = env
def make_environment(self, idx, filename, **kwargs):
    """Build an environment with large overview tables and record it."""
    log_dir = make_temp_dir(os.path.join('experiments', 'tests', 'Log'))
    traj_name = make_trajectory_name(self) + '__' + str(idx) + '_'
    env = Environment(trajectory=traj_name,
                      filename=filename,
                      file_title=traj_name,
                      log_stdout=False,
                      large_overview_tables=True,
                      log_config=get_log_config(),
                      **kwargs)
    self.envs.append(env)
    self.trajs.append(env.v_trajectory)
def setUp(self):
    """Create a stored trajectory with a 'shared_data' result and a SharedArray."""
    self.filename = make_temp_dir('shared_table_test.hdf5')
    self.traj = Trajectory(name=make_trajectory_name(self),
                           filename=self.filename)
    self.traj.v_standard_result = SharedResult
    self.traj.f_store(only_init=True)
    self.traj.f_add_result('shared_data')
    self.shared_array = SharedArray(name='array',
                                    parent=self.traj.shared_data,
                                    trajectory=self.traj,
                                    add_to_parent=True)
def setUp(self):
    """Environment with link parameters for the link-exploration tests."""
    self.set_mode()
    logging.basicConfig(level=logging.ERROR)
    self.logfolder = make_temp_dir(os.path.join('experiments', 'tests', 'Log'))
    random.seed()
    self.trajname = make_trajectory_name(self)
    self.filename = make_temp_dir(
        os.path.join('experiments', 'tests', 'HDF5', 'test%s.hdf5' % self.trajname))
    env = Environment(trajectory=self.trajname,
                      filename=self.filename,
                      file_title=self.trajname,
                      log_stdout=self.log_stdout,
                      log_config=get_log_config(),
                      results_per_run=5,
                      derived_parameters_per_run=5,
                      multiproc=self.multiproc,
                      ncores=self.ncores,
                      wrap_mode=self.mode,
                      use_pool=self.use_pool,
                      fletcher32=self.fletcher32,
                      complevel=self.complevel,
                      complib=self.complib,
                      shuffle=self.shuffle,
                      pandas_append=self.pandas_append,
                      pandas_format=self.pandas_format,
                      encoding=self.encoding)
    traj = env.v_trajectory
    traj.v_standard_parameter = Parameter
    create_link_params(traj)
    explore_params(traj)
    self.traj = traj
    self.env = env
def test_hdf5_store_load_result(self):
    """Store a variety of Brian2Result items and verify a reload round-trip."""
    traj_name = make_trajectory_name(self)
    file_name = make_temp_dir(
        os.path.join('brian2', 'tests', 'hdf5', 'test_%s.hdf5' % traj_name))
    env = Environment(trajectory=traj_name,
                      filename=file_name,
                      log_config=get_log_config(),
                      dynamic_imports=[Brian2Result],
                      add_time=False,
                      storage_service=HDF5StorageService)
    traj = env.v_trajectory
    traj.v_standard_result = Brian2Result
    traj.f_add_result('brian2.single.millivolts_single_a', 10 * mvolt,
                      comment='single value a')
    traj.f_add_result('brian2.single.millivolts_single_c', 11 * mvolt,
                      comment='single value b')
    traj.f_add_result('brian2.array.millivolts_array_a', [11, 12] * mvolt,
                      comment='array')
    traj.f_add_result('mV1', 42.0 * mV)
    # A single result can bundle many heterogeneous data items.
    traj.f_add_result('ampere1', 1 * mA, 44,
                      test=300 * mV, test2=[1, 2, 3],
                      test3=np.array([1, 2, 3]) * mA,
                      comment='Result keeping track of many things')
    traj.f_add_result('integer', 16)
    traj.f_add_result('kHz05', 0.5 * kHz)
    traj.f_add_result('nested_array',
                      np.array([[6., 7., 8.], [9., 10., 11.]]) * ms)
    traj.f_add_result('b2a', np.array([1., 2.]) * mV)
    traj.f_add_result('nounit',
                      Quantity(np.array([[6., 7., 8.], [9., 10., 11.]])))
    traj.f_store()
    traj2 = load_trajectory(filename=file_name, name=traj_name,
                            dynamic_imports=[Brian2Result], load_data=2)
    self.compare_trajectories(traj, traj2)
def setUp(self):
    """Set up an environment whose trajectory contains link parameters."""
    self.set_mode()
    logging.basicConfig(level=logging.ERROR)
    self.logfolder = make_temp_dir(
        os.path.join('experiments', 'tests', 'Log'))
    random.seed()
    self.trajname = make_trajectory_name(self)
    self.filename = make_temp_dir(
        os.path.join('experiments', 'tests', 'HDF5',
                     'test%s.hdf5' % self.trajname))
    env = Environment(trajectory=self.trajname,
                      filename=self.filename,
                      file_title=self.trajname,
                      log_stdout=self.log_stdout,
                      log_config=get_log_config(),
                      results_per_run=5,
                      derived_parameters_per_run=5,
                      multiproc=self.multiproc,
                      ncores=self.ncores,
                      wrap_mode=self.mode,
                      use_pool=self.use_pool,
                      fletcher32=self.fletcher32,
                      complevel=self.complevel,
                      complib=self.complib,
                      shuffle=self.shuffle,
                      pandas_append=self.pandas_append,
                      pandas_format=self.pandas_format,
                      encoding=self.encoding)
    traj = env.v_trajectory
    traj.v_standard_parameter = Parameter
    # Add linked parameters and explore them.
    create_link_params(traj)
    explore_params(traj)
    self.traj = traj
    self.env = env
def test_df(self):
    """Append a one-row frame to a shared DataFrame ten times, then select."""
    filename = make_temp_dir('hdf5errors.hdf5')
    traj = Trajectory(name=make_trajectory_name(self), filename=filename)
    traj.f_store()
    big = {'hi': [1, 2, 3, 4, 5], 'shu': ['bi', 'du', 'da', 'ha', 'hui']}
    small = {'answer': [42]}
    df_res = traj.f_add_result(SharedResult, 'dfs.df', SharedPandasFrame())
    df_res.create_shared_data(data=pd.DataFrame(big))
    df1_res = traj.f_add_result(SharedResult, 'dfs.df1', SharedPandasFrame())
    df1_res.create_shared_data(data=pd.DataFrame(small))
    traj.f_add_result(SharedResult, 'dfs.df3', SharedPandasFrame())
    for _ in range(10):
        traj.df3.append(traj.df1.read())
    dframe = traj.df3.read()
    self.assertTrue(len(dframe) == 10)
    what = traj.df.select(where='index == 2')
    self.assertTrue(len(what) == 1)
def test_df(self):
    """Append to a shared DataFrame and verify row count plus a select query."""
    store_path = make_temp_dir("hdf5errors.hdf5")
    traj = Trajectory(name=make_trajectory_name(self), filename=store_path)
    traj.f_store()
    data_a = {"hi": [1, 2, 3, 4, 5], "shu": ["bi", "du", "da", "ha", "hui"]}
    data_b = {"answer": [42]}
    traj.f_add_result(SharedResult, "dfs.df",
                      SharedPandasFrame()).create_shared_data(
                          data=pd.DataFrame(data_a))
    traj.f_add_result(SharedResult, "dfs.df1",
                      SharedPandasFrame()).create_shared_data(
                          data=pd.DataFrame(data_b))
    traj.f_add_result(SharedResult, "dfs.df3", SharedPandasFrame())
    for _ in range(10):
        traj.df3.append(traj.df1.read())
    self.assertTrue(len(traj.df3.read()) == 10)
    self.assertTrue(len(traj.df.select(where="index == 2")) == 1)
def setUp(self):
    """Build the standard parameterized environment (stdout logging disabled)."""
    self.set_mode()
    self.logfolder = make_temp_dir(os.path.join('experiments', 'tests', 'Log'))
    random.seed()
    self.trajname = make_trajectory_name(self)
    self.filename = make_temp_dir(
        os.path.join('experiments', 'tests', 'HDF5', 'test%s.hdf5' % self.trajname))
    env = Environment(trajectory=self.trajname,
                      filename=self.filename,
                      file_title=self.trajname,
                      log_stdout=False,
                      log_config=get_log_config(),
                      results_per_run=5,
                      derived_parameters_per_run=5,
                      multiproc=self.multiproc,
                      ncores=self.ncores,
                      wrap_mode=self.mode,
                      use_pool=self.use_pool,
                      fletcher32=self.fletcher32,
                      complevel=self.complevel,
                      complib=self.complib,
                      shuffle=self.shuffle,
                      pandas_append=self.pandas_append,
                      pandas_format=self.pandas_format,
                      encoding=self.encoding)
    traj = env.v_trajectory
    self.param_dict = {}
    create_param_dict(self.param_dict)
    add_params(traj, self.param_dict)
    self.traj = traj
    self.env = env
def setUp(self):
    """Build the standard parameterized environment, wiring the port from self.url."""
    self.set_mode()
    self.logfolder = make_temp_dir(os.path.join('experiments', 'tests', 'Log'))
    random.seed()
    self.trajname = make_trajectory_name(self)
    self.filename = make_temp_dir(
        os.path.join('experiments', 'tests', 'HDF5', 'test%s.hdf5' % self.trajname))
    env = Environment(trajectory=self.trajname,
                      filename=self.filename,
                      file_title=self.trajname,
                      log_stdout=False,
                      port=self.url,
                      log_config=get_log_config(),
                      results_per_run=5,
                      derived_parameters_per_run=5,
                      multiproc=self.multiproc,
                      ncores=self.ncores,
                      wrap_mode=self.mode,
                      use_pool=self.use_pool,
                      fletcher32=self.fletcher32,
                      complevel=self.complevel,
                      complib=self.complib,
                      shuffle=self.shuffle,
                      pandas_append=self.pandas_append,
                      pandas_format=self.pandas_format,
                      encoding=self.encoding)
    traj = env.v_trajectory
    self.param_dict = {}
    create_param_dict(self.param_dict)
    add_params(traj, self.param_dict)
    self.traj = traj
    self.env = env
def test_errors(self):
    """Verify error behaviour of shared-data creation and nested storage contexts.

    Variant using the non-prefixed API names (``iterrows``/``open_store``).
    """
    filename = make_temp_dir('hdf5errors.hdf5')
    traj = Trajectory(name=make_trajectory_name(self), filename=filename)
    # np.float was removed in NumPy 1.20; builtin float is the equivalent dtype.
    npearray = np.ones((2, 10, 3), dtype=float)
    thevlarray = np.array(
        [compat.tobytes('j'), 22.2, compat.tobytes('gutter')])
    # Shared data cannot be created before the trajectory is stored.
    with self.assertRaises(TypeError):
        traj.f_add_result(
            SharedResult, 'arrays.vlarray',
            SharedVLArray()).create_shared_data(obj=thevlarray)
    traj.f_store()
    traj.arrays.vlarray.create_shared_data(obj=thevlarray)
    traj.f_add_result(SharedResult, 'arrays.array',
                      SharedArray()).create_shared_data(data=npearray)
    traj.arrays.f_add_result(SharedResult, 'super.carray', SharedCArray(),
                             comment='carray').create_shared_data(
                                 shape=(10, 10), atom=pt.atom.FloatAtom())
    traj.arrays.f_add_result(SharedResult, 'earray',
                             SharedEArray()).create_shared_data(
                                 'earray', obj=npearray)
    traj.f_store()
    # Row iteration requires an open storage context.
    with self.assertRaises(TypeError):
        traj.arrays.array.iterrows()
    with StorageContextManager(traj):
        # A nested second context manager is rejected while the store is open.
        with self.assertRaises(RuntimeError):
            with StorageContextManager(traj):
                pass
        self.assertTrue(traj.v_storage_service.is_open)
        with self.assertRaises(RuntimeError):
            StorageContextManager(traj).open_store()
    self.assertFalse(traj.v_storage_service.is_open)
def test_hdf5_store_load_parameter(self):
    """Store a single Brian2Parameter and compare against a reloaded copy."""
    traj_name = make_trajectory_name(self)
    file_name = make_temp_dir(
        os.path.join('brian2', 'tests', 'hdf5', 'test_%s.hdf5' % traj_name))
    env = Environment(trajectory=traj_name,
                      filename=file_name,
                      log_config=get_log_config(),
                      dynamic_imports=[Brian2Parameter],
                      add_time=False,
                      storage_service=HDF5StorageService)
    traj = env.v_trajectory
    traj.v_standard_parameter = Brian2Parameter
    traj.f_add_parameter('brian2.single.millivolts', 10 * mvolt,
                         comment='single value')
    # NOTE(review): additional parameters plus an exploration were commented
    # out in the original test and remain intentionally disabled.
    traj.f_store()
    reloaded = load_trajectory(filename=file_name, name=traj_name,
                               dynamic_imports=[Brian2Parameter], load_data=2)
    self.compare_trajectories(traj, reloaded)
def test_conversions(self):
    """Convert results back and forth between shared and ordinary
    representations and verify the data survives each conversion and reload."""
    filename = make_temp_dir('hdf5manipulation.hdf5')
    traj = Trajectory(name=make_trajectory_name(self), filename=filename)
    trajname = traj.v_name
    traj.v_standard_result = SharedResult
    traj.f_store(only_init=True)
    traj.f_add_result('shared_data')
    thedata = np.zeros((1000, 1000))
    myarray = SharedArray('array', traj.shared_data, trajectory=traj)
    traj.shared_data['array'] = myarray
    mytable = SharedTable('t1', traj.shared_data, trajectory=traj)
    traj.shared_data['t1'] = mytable
    dadict = {
        'hi': [1, 2, 3, 4, 5],
        'shu': ['bi', 'du', 'da', 'ha', 'hui']
    }
    dadict2 = {'answer': [42]}
    res = traj.f_add_result('shared.dfs')
    res['df'] = SharedPandasFrame()
    res['df'].create_shared_data(data=pd.DataFrame(dadict), trajectory=traj)
    frame = SharedPandasFrame('df1', traj.f_get('shared.dfs'),
                              trajectory=traj, add_to_parent=True)
    frame.create_shared_data(data=pd.DataFrame(dadict2), )
    res['df1'] = frame
    # A mix of ordinary results to be converted later.
    traj.f_add_result('mylist', [1, 2, 3])
    traj.f_add_result('my.mytuple', k=(1, 2, 3), wa=42)
    traj.f_add_result('my.myarray', np.zeros((50, 50)))
    traj.f_add_result('my.myframe', data=pd.DataFrame(dadict2))
    traj.f_add_result('my.mytable', ObjectTable(data=dadict2))
    myarray.create_shared_data(data=thedata)
    mytable.create_shared_data(first_row={
        'hi': compat.tobytes('hi'),
        'huhu': np.ones(3)
    })
    traj.f_store()
    data = myarray.read()
    myarray.get_data_node()
    self.assertTrue(np.all(data == thedata))
    with StorageContextManager(traj):
        myarray[2, 2] = 10
        data = myarray.read()
        self.assertTrue(data[2, 2] == 10)
    # The mutation persists after the context closes.
    self.assertTrue(data[2, 2] == 10)
    self.assertFalse(traj.v_storage_service.is_open)
    traj = load_trajectory(name=trajname, filename=filename, load_all=2,
                           dynamic_imports=SharedResult)
    # Shared -> ordinary conversion of the big array.
    make_ordinary_result(traj.shared_data, 'array', trajectory=traj)
    array = traj.shared_data.array
    self.assertTrue(isinstance(array, np.ndarray))
    thedata[2, 2] = 10
    self.assertTrue(np.all(array == thedata))
    make_ordinary_result(traj.shared_data, 't1', trajectory=traj, )
    t1 = traj.shared_data.t1
    self.assertTrue(isinstance(t1, ObjectTable))
    self.assertTrue(np.all(t1['huhu'][0] == np.ones(3)))
    dfs = traj.shared.dfs
    make_ordinary_result(traj.shared.dfs, 'df', trajectory=traj)
    theframe = dfs.f_get('df')
    self.assertTrue(isinstance(dfs, Result))
    self.assertTrue(isinstance(theframe, pd.DataFrame))
    self.assertTrue(theframe['hi'][0] == 1)
    # Ordinary -> shared -> ordinary round trip of the list result.
    listres = traj.f_get('mylist')
    listres = make_shared_result(listres, 0, trajectory=traj)
    with StorageContextManager(traj):
        self.assertTrue(listres[0][2] == 3)
        listres[0][0] = 4
        self.assertTrue(listres[0][0] == 4)
    listres = make_ordinary_result(listres, 0, trajectory=traj)
    traj = load_trajectory(name=trajname, filename=filename, load_all=2,
                           dynamic_imports=SharedResult)
    mylist = traj.mylist
    self.assertTrue(isinstance(listres, Result))
    self.assertTrue(mylist[0] == 4)
    self.assertTrue(isinstance(mylist, list))
    mytuple = traj.mytuple
    # Converting a non-existent entry name must raise.
    with self.assertRaises(AttributeError):
        mytuple = make_shared_result(mytuple, 'mylist', traj,
                                     new_class=SharedArray)
    mytuple = make_shared_result(mytuple, 'k', traj, new_class=SharedArray)
    self.assertTrue(mytuple.k[1] == 2)
    mytuple = make_ordinary_result(mytuple, 'k', trajectory=traj)
    self.assertTrue(isinstance(mytuple.k, tuple))
    self.assertTrue(mytuple.k[2] == 3)
    myframe = traj.myframe
    myframe = make_shared_result(myframe, 'data', traj)
    theframe = myframe.data.read()
    self.assertTrue(theframe['answer'][0] == 42)
    myframe = make_ordinary_result(myframe, 'data', trajectory=traj)
    traj.f_load_item(myframe)
    self.assertTrue(myframe.data['answer'][0] == 42)
    mytable = traj.f_get('mytable')
    mytable = make_shared_result(mytable, 0, traj)
    self.assertTrue(isinstance(mytable[0], SharedTable))
    rows = mytable.mytable.read()
    self.assertTrue(rows[0][0] == 42)
    mytable = make_ordinary_result(mytable, 0, trajectory=traj)
    self.assertTrue(isinstance(mytable, Result))
    self.assertTrue(mytable[0]['answer'][0] == 42)
def test_conversions(self):
    """Round-trip shared (HDF5-backed) results and ordinary in-memory results.

    Duplicate of the single-quote variant above; stores arrays, tables,
    pandas frames, lists and tuples as shared data, then converts them
    back and forth with ``make_ordinary_result`` / ``make_shared_result``
    and checks the contents survive each conversion.
    """
    filename = make_temp_dir("hdf5manipulation.hdf5")
    traj = Trajectory(name=make_trajectory_name(self), filename=filename)
    trajname = traj.v_name
    # Results added below default to SharedResult.
    traj.v_standard_result = SharedResult
    traj.f_store(only_init=True)
    traj.f_add_result("shared_data")
    thedata = np.zeros((1000, 1000))
    myarray = SharedArray("array", traj.shared_data, trajectory=traj)
    traj.shared_data["array"] = myarray
    mytable = SharedTable("t1", traj.shared_data, trajectory=traj)
    traj.shared_data["t1"] = mytable
    # mytable2 = SharedTableResult('h.t2', trajectory=traj)
    # mytable3 = SharedTableResult('jjj.t3', trajectory=traj)
    dadict = {"hi": [1, 2, 3, 4, 5], "shu": ["bi", "du", "da", "ha", "hui"]}
    dadict2 = {"answer": [42]}
    res = traj.f_add_result("shared.dfs")
    res["df"] = SharedPandasFrame()
    res["df"].create_shared_data(data=pd.DataFrame(dadict), trajectory=traj)
    frame = SharedPandasFrame("df1", traj.f_get("shared.dfs"), trajectory=traj)
    frame.create_shared_data(data=pd.DataFrame(dadict2))
    res["df1"] = frame
    # A few ordinary (non-shared) results to convert later on.
    traj.f_add_result("mylist", [1, 2, 3])
    traj.f_add_result("my.mytuple", k=(1, 2, 3), wa=42)
    traj.f_add_result("my.myarray", np.zeros((50, 50)))
    traj.f_add_result("my.myframe", data=pd.DataFrame(dadict2))
    traj.f_add_result("my.mytable", ObjectTable(data=dadict2))
    myarray.create_shared_data(data=thedata)
    mytable.create_shared_data(first_row={"hi": compat.tobytes("hi"), "huhu": np.ones(3)})
    traj.f_store()
    data = myarray.read()
    arr = myarray.get_data_node()
    self.assertTrue(np.all(data == thedata))
    # Writing to shared data happens inside an open storage context.
    with StorageContextManager(traj) as cm:
        myarray[2, 2] = 10
        data = myarray.read()
        self.assertTrue(data[2, 2] == 10)
    self.assertTrue(data[2, 2] == 10)
    # Leaving the context must close the storage service again.
    self.assertFalse(traj.v_storage_service.is_open)
    traj = load_trajectory(name=trajname, filename=filename, load_all=2, dynamic_imports=SharedResult)
    # shared -> ordinary: array becomes a plain numpy array.
    make_ordinary_result(traj.shared_data, "array", trajectory=traj)
    array = traj.shared_data.array
    self.assertTrue(isinstance(array, np.ndarray))
    thedata[2, 2] = 10
    self.assertTrue(np.all(array == thedata))
    # shared -> ordinary: table becomes an ObjectTable.
    make_ordinary_result(traj.shared_data, "t1", trajectory=traj)
    t1 = traj.shared_data.t1
    self.assertTrue(isinstance(t1, ObjectTable))
    # self.assertTrue(np.all(t1["huhu"][0] == np.ones(3)))
    dfs = traj.shared.dfs
    # shared -> ordinary: pandas frame.
    make_ordinary_result(traj.shared.dfs, "df", trajectory=traj)
    theframe = dfs.f_get("df")
    self.assertTrue(isinstance(dfs, Result))
    self.assertTrue(isinstance(theframe, pd.DataFrame))
    self.assertTrue(theframe["hi"][0] == 1)
    # ordinary -> shared and back for the plain list result.
    listres = traj.f_get("mylist")
    listres = make_shared_result(listres, 0, trajectory=traj)
    with StorageContextManager(traj) as cm:
        self.assertTrue(listres[0][2] == 3)
        listres[0][0] = 4
        self.assertTrue(listres[0][0] == 4)
    listres = make_ordinary_result(listres, 0, trajectory=traj)
    traj = load_trajectory(name=trajname, filename=filename, load_all=2, dynamic_imports=SharedResult)
    mylist = traj.mylist
    self.assertTrue(isinstance(listres, Result))
    self.assertTrue(mylist[0] == 4)
    self.assertTrue(isinstance(mylist, list))
    mytuple = traj.mytuple
    # Converting a non-existing item name must fail.
    with self.assertRaises(AttributeError):
        mytuple = make_shared_result(mytuple, "mylist", traj, new_class=SharedArray)
    mytuple = make_shared_result(mytuple, "k", traj, new_class=SharedArray)
    self.assertTrue(mytuple.k[1] == 2)
    mytuple = make_ordinary_result(mytuple, "k", trajectory=traj)
    self.assertTrue(isinstance(mytuple.k, tuple))
    self.assertTrue(mytuple.k[2] == 3)
    myframe = traj.myframe
    myframe = make_shared_result(myframe, "data", traj)
    theframe = myframe.data.read()
    self.assertTrue(theframe["answer"][0] == 42)
    myframe = make_ordinary_result(myframe, "data", trajectory=traj)
    traj.f_load_item(myframe)
    self.assertTrue(myframe.data["answer"][0] == 42)
    mytable = traj.f_get("mytable")
    mytable = make_shared_result(mytable, 0, traj)
    self.assertTrue(isinstance(mytable[0], SharedTable))
    rows = mytable.mytable.read()
    self.assertTrue(rows[0][0] == 42)
    mytable = make_ordinary_result(mytable, 0, trajectory=traj)
    self.assertTrue(isinstance(mytable, Result))
    self.assertTrue(mytable[0]["answer"][0] == 42)
def test_merge_with_linked_derived_parameter(self, disable_logging = True): logging.basicConfig(level = logging.ERROR) self.logfolder = make_temp_dir(os.path.join('experiments', 'tests', 'Log')) random.seed() self.trajname1 = 'T1'+ make_trajectory_name(self) self.trajname2 = 'T2'+make_trajectory_name(self) self.filename = make_temp_dir(os.path.join('experiments', 'tests', 'HDF5', 'test%s.hdf5' % self.trajname1)) self.env1 = Environment(trajectory=self.trajname1, filename=self.filename, file_title=self.trajname1, log_stdout=False, log_config=get_log_config()) self.env2 = Environment(trajectory=self.trajname2, filename=self.filename, file_title=self.trajname2, log_stdout=False, log_config=get_log_config()) self.traj1 = self.env1.v_trajectory self.traj2 = self.env2.v_trajectory create_link_params(self.traj1) create_link_params(self.traj2) explore_params(self.traj1) explore_params2(self.traj2) self.traj1.f_add_derived_parameter('test.$.gg', 42) self.traj2.f_add_derived_parameter('test.$.gg', 44) self.traj1.f_add_derived_parameter('test.hh.$', 111) self.traj2.f_add_derived_parameter('test.hh.$', 53) self.env1.f_run(dostuff_and_add_links) self.env2.f_run(dostuff_and_add_links) old_length = len(self.traj1) self.traj1.f_merge(self.traj2, remove_duplicates=True) self.traj1.f_load(load_data=2) for run in self.traj1.f_get_run_names(): self.traj1.v_crun = run idx = self.traj1.v_idx param = self.traj1['test.crun.gg'] if idx < old_length: self.assertTrue(param == 42) else: self.assertTrue(param == 44) param = self.traj1['test.hh.crun'] if idx < old_length: self.assertTrue(param == 111) else: self.assertTrue(param == 53) self.assertTrue(len(self.traj1) > old_length) for irun in range(len(self.traj1.f_get_run_names())): self.assertTrue(self.traj1.res['r_%d' % irun] == self.traj1.paramB) self.assertTrue(self.traj1.res.runs['r_%d' % irun].paraBL == self.traj1.paramB) if disable_logging: self.env1.f_disable_logging() self.env2.f_disable_logging() return old_length
def test_storing_and_manipulating(self): filename = make_temp_dir('hdf5manipulation.hdf5') traj = Trajectory(name=make_trajectory_name(self), filename=filename) trajname = traj.v_name thedata = np.zeros((1000, 1000)) res = traj.f_add_result(SharedResult, 'shared') myarray = SharedArray('array', res, trajectory=traj, add_to_parent=True) mytable = SharedTable('t1', res, trajectory=traj, add_to_parent=True) mytable2 = SharedTable('t2', res, trajectory=traj, add_to_parent=True) mytable3 = SharedTable('t3', res, trajectory=traj, add_to_parent=True) traj.f_store(only_init=True) myarray.create_shared_data(data=thedata) mytable.create_shared_data(first_row={ 'hi': compat.tobytes('hi'), 'huhu': np.ones(3) }) mytable2.create_shared_data(description={ 'ha': pt.StringCol(2, pos=0), 'haha': pt.FloatCol(pos=1) }) mytable3.create_shared_data(description={ 'ha': pt.StringCol(2, pos=0), 'haha': pt.FloatCol(pos=1) }) traj.f_store() newrow = {'ha': 'hu', 'haha': 4.0} with self.assertRaises(TypeError): traj.shared.t2.row with StorageContextManager(traj) as cm: row = traj.shared.t2.row for irun in range(11): for key, val in newrow.items(): row[key] = val row.append() traj.shared.t3.flush() data = myarray.read() myarray.get_data_node() self.assertTrue(np.all(data == thedata)) with StorageContextManager(traj): myarray[2, 2] = 10 data = myarray.read() self.assertTrue(data[2, 2] == 10) self.assertTrue(data[2, 2] == 10) self.assertFalse(traj.v_storage_service.is_open) traj = load_trajectory(name=trajname, filename=filename) traj.f_load(load_data=2) traj.shared.t2.traj = traj traj.shared.t1.traj = traj traj.shared.array.traj = traj self.assertTrue(traj.shared.t2.nrows == 11, '%s != 11' % str(traj.shared.t2.nrows)) self.assertTrue(traj.shared.t2[0]['ha'] == compat.tobytes('hu'), traj.shared.t2[0]['ha']) self.assertTrue(traj.shared.t2[1]['ha'] == compat.tobytes('hu'), traj.shared.t2[1]['ha']) self.assertTrue('huhu' in traj.shared.t1.colnames) self.assertTrue(traj.shared.array[2, 2] == 10)
def test_merge_with_linked_derived_parameter(self, disable_logging=True): logging.basicConfig(level=logging.ERROR) self.logfolder = make_temp_dir( os.path.join('experiments', 'tests', 'Log')) random.seed() self.trajname1 = 'T1' + make_trajectory_name(self) self.trajname2 = 'T2' + make_trajectory_name(self) self.filename = make_temp_dir( os.path.join('experiments', 'tests', 'HDF5', 'test%s.hdf5' % self.trajname1)) self.env1 = Environment(trajectory=self.trajname1, filename=self.filename, file_title=self.trajname1, log_stdout=False, log_config=get_log_config()) self.env2 = Environment(trajectory=self.trajname2, filename=self.filename, file_title=self.trajname2, log_stdout=False, log_config=get_log_config()) self.traj1 = self.env1.v_trajectory self.traj2 = self.env2.v_trajectory create_link_params(self.traj1) create_link_params(self.traj2) explore_params(self.traj1) explore_params2(self.traj2) self.traj1.f_add_derived_parameter('test.$.gg', 42) self.traj2.f_add_derived_parameter('test.$.gg', 44) self.traj1.f_add_derived_parameter('test.hh.$', 111) self.traj2.f_add_derived_parameter('test.hh.$', 53) self.env1.f_run(dostuff_and_add_links) self.env2.f_run(dostuff_and_add_links) old_length = len(self.traj1) self.traj1.f_merge(self.traj2, remove_duplicates=True) self.traj1.f_load(load_data=2) for run in self.traj1.f_get_run_names(): self.traj1.v_crun = run idx = self.traj1.v_idx param = self.traj1['test.crun.gg'] if idx < old_length: self.assertTrue(param == 42) else: self.assertTrue(param == 44) param = self.traj1['test.hh.crun'] if idx < old_length: self.assertTrue(param == 111) else: self.assertTrue(param == 53) self.assertTrue(len(self.traj1) > old_length) for irun in range(len(self.traj1.f_get_run_names())): self.assertTrue(self.traj1.res['r_%d' % irun] == self.traj1.paramB) self.assertTrue( self.traj1.res.runs['r_%d' % irun].paraBL == self.traj1.paramB) if disable_logging: self.env1.f_disable_logging() self.env2.f_disable_logging() return old_length
def test_all_arrays(self): filename = make_temp_dir('hdf5arrays.hdf5') traj = Trajectory(name=make_trajectory_name(self), filename=filename) trajname = traj.v_name npearray = np.ones((2, 10, 3), dtype=np.float) thevlarray = np.array( [compat.tobytes('j'), 22.2, compat.tobytes('gutter')]) traj.f_store(only_init=True) res = traj.f_add_result(SharedResult, 'arrays') res['carray'] = SharedCArray() res['carray'].create_shared_data(shape=(10, 10), atom=pt.atom.FloatAtom()) res['earray'] = SharedEArray() res['earray'].create_shared_data(obj=npearray) res['vlarray'] = SharedVLArray() res['vlarray'].create_shared_data(obj=thevlarray) res['array'] = SharedArray() res['array'].create_shared_data(data=npearray) traj.f_store() traj = load_trajectory(name=trajname, filename=filename, load_all=2, dynamic_imports=SharedResult) toappned = [44, compat.tobytes('k')] with StorageContextManager(traj): a1 = traj.arrays.array a1[0, 0, 0] = 4.0 a2 = traj.arrays.carray a2[0, 1] = 4 a4 = traj.arrays.vlarray a4.append(toappned) a3 = traj.arrays.earray a3.append(np.zeros((1, 10, 3))) traj = load_trajectory(name=trajname, filename=filename, load_all=2, dynamic_imports=SharedResult) with StorageContextManager(traj): a1 = traj.arrays.array self.assertTrue(a1[0, 0, 0] == 4.0) a2 = traj.arrays.carray self.assertTrue(a2[0, 1] == 4) a3 = traj.arrays.earray self.assertTrue(a3.read().shape == (3, 10, 3)) a4 = traj.arrays.vlarray for idx, x in enumerate(a4): if idx == 0: self.assertTrue(np.all(x == np.array(thevlarray))) elif idx == 1: self.assertTrue(np.all(x == np.array(toappned))) else: raise RuntimeError()
def test_conversions(self):
    """Round-trip shared (HDF5-backed) results and ordinary in-memory results.

    Third copy of this test in the file; stores arrays, tables, pandas
    frames, lists and tuples as shared data, then converts them back and
    forth with ``make_ordinary_result`` / ``make_shared_result`` and
    checks the contents survive each conversion.
    """
    filename = make_temp_dir('hdf5manipulation.hdf5')
    traj = Trajectory(name=make_trajectory_name(self), filename=filename)
    trajname = traj.v_name
    # Results added below default to SharedResult.
    traj.v_standard_result = SharedResult
    traj.f_store(only_init=True)
    traj.f_add_result('shared_data')
    thedata = np.zeros((1000, 1000))
    myarray = SharedArray('array', traj.shared_data, trajectory=traj)
    traj.shared_data['array'] = myarray
    mytable = SharedTable('t1', traj.shared_data, trajectory=traj)
    traj.shared_data['t1'] = mytable
    dadict = {'hi': [1, 2, 3, 4, 5], 'shu': ['bi', 'du', 'da', 'ha', 'hui']}
    dadict2 = {'answer': [42]}
    res = traj.f_add_result('shared.dfs')
    res['df'] = SharedPandasFrame()
    res['df'].create_shared_data(data=pd.DataFrame(dadict), trajectory=traj)
    frame = SharedPandasFrame('df1', traj.f_get('shared.dfs'),
                              trajectory=traj, add_to_parent=True)
    frame.create_shared_data(data=pd.DataFrame(dadict2),)
    res['df1'] = frame
    # A few ordinary (non-shared) results to convert later on.
    traj.f_add_result('mylist', [1, 2, 3])
    traj.f_add_result('my.mytuple', k=(1, 2, 3), wa=42)
    traj.f_add_result('my.myarray', np.zeros((50, 50)))
    traj.f_add_result('my.myframe', data=pd.DataFrame(dadict2))
    traj.f_add_result('my.mytable', ObjectTable(data=dadict2))
    myarray.create_shared_data(data=thedata)
    mytable.create_shared_data(first_row={'hi': compat.tobytes('hi'),
                                          'huhu': np.ones(3)})
    traj.f_store()
    data = myarray.read()
    myarray.get_data_node()
    self.assertTrue(np.all(data == thedata))
    # Writing to shared data happens inside an open storage context.
    with StorageContextManager(traj):
        myarray[2, 2] = 10
        data = myarray.read()
        self.assertTrue(data[2, 2] == 10)
    self.assertTrue(data[2, 2] == 10)
    # Leaving the context must close the storage service again.
    self.assertFalse(traj.v_storage_service.is_open)
    traj = load_trajectory(name=trajname, filename=filename, load_all=2,
                           dynamic_imports=SharedResult)
    # shared -> ordinary: array becomes a plain numpy array.
    make_ordinary_result(traj.shared_data, 'array', trajectory=traj)
    array = traj.shared_data.array
    self.assertTrue(isinstance(array, np.ndarray))
    thedata[2, 2] = 10
    self.assertTrue(np.all(array == thedata))
    # shared -> ordinary: table becomes an ObjectTable.
    make_ordinary_result(traj.shared_data, 't1', trajectory=traj,)
    t1 = traj.shared_data.t1
    self.assertTrue(isinstance(t1, ObjectTable))
    self.assertTrue(np.all(t1['huhu'][0] == np.ones(3)))
    dfs = traj.shared.dfs
    # shared -> ordinary: pandas frame.
    make_ordinary_result(traj.shared.dfs, 'df', trajectory=traj)
    theframe = dfs.f_get('df')
    self.assertTrue(isinstance(dfs, Result))
    self.assertTrue(isinstance(theframe, pd.DataFrame))
    self.assertTrue(theframe['hi'][0] == 1)
    # ordinary -> shared and back for the plain list result.
    listres = traj.f_get('mylist')
    listres = make_shared_result(listres, 0, trajectory=traj)
    with StorageContextManager(traj):
        self.assertTrue(listres[0][2] == 3)
        listres[0][0] = 4
        self.assertTrue(listres[0][0] == 4)
    listres = make_ordinary_result(listres, 0, trajectory=traj)
    traj = load_trajectory(name=trajname, filename=filename, load_all=2,
                           dynamic_imports=SharedResult)
    mylist = traj.mylist
    self.assertTrue(isinstance(listres, Result))
    self.assertTrue(mylist[0] == 4)
    self.assertTrue(isinstance(mylist, list))
    mytuple = traj.mytuple
    # Converting a non-existing item name must fail.
    with self.assertRaises(AttributeError):
        mytuple = make_shared_result(mytuple, 'mylist', traj,
                                     new_class=SharedArray)
    mytuple = make_shared_result(mytuple, 'k', traj, new_class=SharedArray)
    self.assertTrue(mytuple.k[1] == 2)
    mytuple = make_ordinary_result(mytuple, 'k', trajectory=traj)
    self.assertTrue(isinstance(mytuple.k, tuple))
    self.assertTrue(mytuple.k[2] == 3)
    myframe = traj.myframe
    myframe = make_shared_result(myframe, 'data', traj)
    theframe = myframe.data.read()
    self.assertTrue(theframe['answer'][0] == 42)
    myframe = make_ordinary_result(myframe, 'data', trajectory=traj)
    traj.f_load_item(myframe)
    self.assertTrue(myframe.data['answer'][0] == 42)
    mytable = traj.f_get('mytable')
    mytable = make_shared_result(mytable, 0, traj)
    self.assertTrue(isinstance(mytable[0], SharedTable))
    rows = mytable.mytable.read()
    self.assertTrue(rows[0][0] == 42)
    mytable = make_ordinary_result(mytable, 0, trajectory=traj)
    self.assertTrue(isinstance(mytable, Result))
    self.assertTrue(mytable[0]['answer'][0] == 42)