class EnvironmentTest(TrajectoryComparator):
    """End-to-end integration tests for a pypet :class:`Environment`.

    ``setUp`` creates a fresh HDF5-backed environment and trajectory, the
    ``explore*`` helpers span a parameter space, ``make_run`` executes the
    test job, and each test finally reloads the trajectory from disk and
    compares it against the in-memory one.
    """

    def set_mode(self):
        # Storage / multiprocessing configuration consumed by `setUp`.
        # Subclasses override single attributes to test other modes.
        self.mode = 'LOCK'
        self.multiproc = False
        self.ncores = 1
        self.use_pool = True
        self.pandas_format = 'fixed'
        self.pandas_append = False
        self.complib = 'blosc'
        self.complevel = 9
        self.shuffle = True
        self.fletcher32 = False

    def explore_complex_params(self, traj):
        """Explore `traj` with csr/csc/bsr/dia sparse matrices, arrays and lists.

        First attempts an exploration with ambiguous short names, which must
        raise ``NotUniqueNodeError``, then explores with fully qualified names.
        """
        matrices_csr = []
        for irun in range(3):
            spsparse_csr = spsp.csr_matrix((111, 111))
            spsparse_csr[3, 2 + irun] = 44.5 * irun
            matrices_csr.append(spsparse_csr)

        matrices_csc = []
        for irun in range(3):
            spsparse_csc = spsp.csc_matrix((111, 111))
            spsparse_csc[3, 2 + irun] = 44.5 * irun
            matrices_csc.append(spsparse_csc)

        matrices_bsr = []
        for irun in range(3):
            spsparse_bsr = spsp.csr_matrix((111, 111))
            spsparse_bsr[3, 2 + irun] = 44.5 * irun
            matrices_bsr.append(spsparse_bsr.tobsr())

        matrices_dia = []
        for irun in range(3):
            spsparse_dia = spsp.csr_matrix((111, 111))
            spsparse_dia[3, 2 + irun] = 44.5 * irun
            matrices_dia.append(spsparse_dia.todia())

        self.explore_dict = {'string': [np.array(['Uno', 'Dos', 'Tres']),
                                        np.array(['Cinco', 'Seis', 'Siette']),
                                        np.array(['Ocho', 'Nueve', 'Diez'])],
                             'int': [1, 2, 3],
                             'csr_mat': matrices_csr,
                             'csc_mat': matrices_csc,
                             'bsr_mat': matrices_bsr,
                             'dia_mat': matrices_dia,
                             'list': [['fff'], [444444, 444, 44, 4, 4, 4],
                                      [1, 2, 3, 42]]}

        # 'string' and 'int' are ambiguous short names here, so exploring
        # must fail with a NotUniqueNodeError:
        with self.assertRaises(pex.NotUniqueNodeError):
            traj.f_explore(self.explore_dict)

        self.explore_dict = {'Numpy.string': [np.array(['Uno', 'Dos', 'Tres']),
                                              np.array(['Cinco', 'Seis', 'Siette']),
                                              np.array(['Ocho', 'Nueve', 'Diez'])],
                             'Normal.int': [1, 2, 3],
                             'csr_mat': matrices_csr,
                             'csc_mat': matrices_csc,
                             'bsr_mat': matrices_bsr,
                             'dia_mat': matrices_dia,
                             'list': [['fff'], [444444, 444, 44, 4, 4, 4],
                                      [1, 2, 3, 42]]}

        traj.f_explore(self.explore_dict)

    def explore(self, traj):
        """Standard exploration: trial index, double arrays, two csr matrices."""
        self.explored = {'Normal.trial': [0],
                         'Numpy.double': [np.array([1.0, 2.0, 3.0, 4.0]),
                                          np.array([-1.0, 3.0, 5.0, 7.0])],
                         'csr_mat': [spsp.csr_matrix((2222, 22)),
                                     spsp.csr_matrix((2222, 22))]}
        self.explored['csr_mat'][0][1, 2] = 44.0
        self.explored['csr_mat'][1][2, 2] = 33
        traj.f_explore(cartesian_product(self.explored))

    def explore_large(self, traj):
        """Minimal two-run exploration used by the large-data test."""
        self.explored = {'Normal.trial': [0, 1]}
        traj.f_explore(cartesian_product(self.explored))

    def setUp(self):
        """Create a fresh environment + trajectory populated with parameters."""
        self.set_mode()

        logging.basicConfig(level=logging.INFO)

        self.logfolder = make_temp_file('experiments/tests/Log')

        random.seed()
        self.trajname = make_trajectory_name(self)
        self.filename = make_temp_file('experiments/tests/HDF5/test%s.hdf5'
                                       % self.trajname)

        env = Environment(trajectory=self.trajname, filename=self.filename,
                          file_title=self.trajname,
                          log_folder=self.logfolder,
                          log_stdout=False,
                          results_per_run=5,
                          derived_parameters_per_run=5,
                          multiproc=self.multiproc,
                          ncores=self.ncores,
                          wrap_mode=self.mode,
                          use_pool=self.use_pool,
                          fletcher32=self.fletcher32,
                          complevel=self.complevel,
                          complib=self.complib,
                          shuffle=self.shuffle,
                          pandas_append=self.pandas_append,
                          pandas_format=self.pandas_format)

        traj = env.v_trajectory
        traj.v_standard_parameter = Parameter

        ## Create some parameters
        self.param_dict = {}
        create_param_dict(self.param_dict)
        ### Add some parameter:
        add_params(traj, self.param_dict)

        # remember the trajectory and the environment
        self.traj = traj
        self.env = env

    def make_run_large_data(self):
        """Execute the large-data job over all runs."""
        self.env.f_run(add_large_data)

    def make_run(self):
        """Execute the standard job with one positional and one keyword arg."""
        ### Make a test run
        simple_arg = -13
        simple_kwarg = 13.0
        self.env.f_run(simple_calculations, simple_arg, simple_kwarg=simple_kwarg)

    def test_a_large_run(self):
        self.traj.f_add_parameter('TEST', 'test_run')
        ###Explore
        self.explore_large(self.traj)
        self.make_run_large_data()

        newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,
                                       as_new=False)
        self.traj.f_update_skeleton()
        self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)
        self.compare_trajectories(self.traj, newtraj)

    def test_run(self):
        self.traj.f_add_parameter('TEST', 'test_run')
        ###Explore
        self.explore(self.traj)
        self.make_run()

        newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,
                                       as_new=False)
        self.traj.f_update_skeleton()
        self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)
        self.compare_trajectories(self.traj, newtraj)

    def test_run_complex(self):
        self.traj.f_add_parameter('TEST', 'test_run_complex')
        ###Explore
        self.explore_complex_params(self.traj)
        self.make_run()

        newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,
                                       as_new=False)
        self.traj.f_update_skeleton()
        self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)
        self.compare_trajectories(self.traj, newtraj)

    def load_trajectory(self, trajectory_index=None, trajectory_name=None,
                        as_new=False):
        """Load the trajectory from disk and return it fully populated."""
        ### Load The Trajectory and check if the values are still the same
        newtraj = Trajectory()
        newtraj.v_storage_service = HDF5StorageService(filename=self.filename)
        newtraj.f_load(name=trajectory_name, load_parameters=2,
                       load_derived_parameters=2, load_results=2,
                       load_other_data=2, index=trajectory_index, as_new=as_new)
        return newtraj

    def test_expand(self):
        ###Explore
        self.traj.f_add_parameter('TEST', 'test_expand')
        self.explore(self.traj)
        self.make_run()

        self.expand()

        print('\n $$$$$$$$$$$$$$$$$ Second Run $$$$$$$$$$$$$$$$$$$$$$$$')
        self.make_run()

        newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,
                                       as_new=False)
        self.traj.f_update_skeleton()
        self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)
        self.compare_trajectories(self.traj, newtraj)

    def test_expand_after_reload(self):
        self.traj.f_add_parameter('TEST', 'test_expand_after_reload')
        ###Explore
        self.explore(self.traj)
        self.make_run()

        # BUGFIX: the name was assigned twice; once is enough.
        traj_name = self.traj.v_name

        self.env = Environment(trajectory=self.traj, filename=self.filename,
                               file_title=self.trajname,
                               log_folder=self.logfolder,
                               log_stdout=False)
        self.traj = self.env.v_trajectory
        self.traj.f_load(name=traj_name)

        self.expand()

        print('\n $$$$$$$$$$$$ Second Run $$$$$$$$$$ \n')
        self.make_run()

        newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,
                                       as_new=False)
        self.traj.f_update_skeleton()
        self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)
        self.compare_trajectories(self.traj, newtraj)

    def expand(self):
        """Expand the trajectory by a second trial over the same value space."""
        self.expanded = {'Normal.trial': [1],
                         'Numpy.double': [np.array([1.0, 2.0, 3.0, 4.0]),
                                          np.array([-1.0, 3.0, 5.0, 7.0])],
                         'csr_mat': [spsp.csr_matrix((2222, 22)),
                                     spsp.csr_matrix((2222, 22))]}
        self.expanded['csr_mat'][0][1, 2] = 44.0
        self.expanded['csr_mat'][1][2, 2] = 33
        self.traj.f_expand(cartesian_product(self.expanded))

    ################## Overview TESTS #############################

    def test_switch_ON_large_tables(self):
        # BUGFIX: the TEST label said 'off' although large tables are
        # switched ON in this test.
        self.traj.f_add_parameter('TEST', 'test_switch_ON_LARGE_tables')
        ###Explore
        self.explore(self.traj)
        self.env.f_set_large_overview(True)
        self.make_run()

        hdf5file = pt.openFile(self.filename)
        overview_group = hdf5file.getNode(where='/' + self.traj.v_name,
                                          name='overview')
        # BUGFIX: large overviews are ON, so the tables MUST be present;
        # the list was misnamed `should_not` and the message was inverted.
        should = ['derived_parameters_runs', 'results_runs']
        for name in should:
            self.assertTrue(name in overview_group,
                            '%s not in overviews but it should!' % name)
        hdf5file.close()

        self.traj.f_load(load_parameters=2, load_derived_parameters=2,
                         load_results=2)
        newtraj = self.load_trajectory(trajectory_name=self.traj.v_name)
        self.compare_trajectories(newtraj, self.traj)

    def test_switch_off_all_tables(self):
        ###Explore
        self.traj.f_add_parameter('TEST', 'test_switch_off_ALL_tables')
        self.explore(self.traj)
        self.env.f_switch_off_all_overview()
        self.make_run()

        hdf5file = pt.openFile(self.filename)
        overview_group = hdf5file.getNode(where='/' + self.traj.v_name,
                                          name='overview')
        should_not = HDF5StorageService.NAME_TABLE_MAPPING.keys()
        for name in should_not:
            # Get only the name of the table, not the full name
            name = name.split('.')[-1]
            self.assertTrue(not name in overview_group,
                            '%s in overviews but should not!' % name)
        hdf5file.close()

    def test_store_form_tuple(self):
        self.traj.f_store()
        self.traj.f_add_result('TestResItem', 42, 43)

        # A 5-entry item tuple is invalid and must raise:
        with self.assertRaises(ValueError):
            self.traj.f_store_item((pypetconstants.LEAF,
                                    self.traj.TestResItem, (), {}, 5))

        self.traj.f_store_item((pypetconstants.LEAF, self.traj.TestResItem))

        self.traj.results.f_remove_child('TestResItem')
        self.assertTrue('TestResItem' not in self.traj)

        self.traj.results.f_load_child('TestResItem',
                                       load_data=pypetconstants.LOAD_SKELETON)
        self.traj.f_load_item((pypetconstants.LEAF, self.traj.TestResItem, (),
                               {'load_only': 'TestResItem'}))
        self.assertTrue(self.traj.TestResItem, 42)

    def test_store_single_group(self):
        self.traj.f_store()
        self.traj.f_add_parameter_group('new.test.group').v_annotations.f_set(42)
        self.traj.f_store_item('new.group')

        # group is below test not new, so ValueError thrown:
        with self.assertRaises(ValueError):
            self.traj.parameters.new.f_remove_child('group')

        # group is below test not new, so ValueError thrown:
        with self.assertRaises(ValueError):
            self.traj.parameters.new.f_store_child('group')

        # group has children and recursive is false
        with self.assertRaises(TypeError):
            self.traj.parameters.new.f_remove_child('test')

        self.traj.new.f_remove_child('test', recursive=True)
        self.assertTrue('new.group' not in self.traj)

        self.traj.new.f_load_child('test', recursive=True,
                                   load_data=pypetconstants.LOAD_SKELETON)
        self.assertTrue(self.traj.new.group.v_annotations.annotation, 42)

        self.traj.f_delete_item('new.test.group', remove_empty_groups=True)

        with self.assertRaises(pex.DataNotInStorageError):
            self.traj.parameters.f_load_child('new', recursive=True,
                                              load_data=pypetconstants.LOAD_SKELETON)

    def test_switch_on_all_comments(self):
        self.explore(self.traj)
        self.traj.purge_duplicate_comments = 0

        self.make_run()

        hdf5file = pt.openFile(self.filename)
        traj_group = hdf5file.getNode(where='/', name=self.traj.v_name)
        # With purging disabled every leaf node must carry its own comment.
        for node in traj_group._f_walkGroups():
            if 'SRVC_LEAF' in node._v_attrs:
                self.assertTrue('SRVC_INIT_COMMENT' in node._v_attrs,
                                'There is no comment in node %s!' % node._v_name)
        hdf5file.close()

    def test_purge_duplicate_comments_but_check_moving_comments_up_the_hierarchy(self):
        self.explore(self.traj)

        # Purging without the summary table enabled must fail:
        with self.assertRaises(RuntimeError):
            self.traj.purge_duplicate_comments = 1
            self.traj.overview.results_runs_summary = 0
            self.make_run()

        self.traj.f_get('purge_duplicate_comments').f_unlock()
        self.traj.purge_duplicate_comments = 1
        self.traj.f_get('results_runs_summary').f_unlock()
        self.traj.overview.results_runs_summary = 1

        # We fake that the trajectory starts with run_00000001
        self.traj._run_information['run_00000000']['completed'] = 1
        self.make_run()

        # Now we make the first run
        self.traj._run_information['run_00000000']['completed'] = 0
        self.make_run()

        hdf5file = pt.openFile(self.filename, mode='a')
        try:
            traj_group = hdf5file.getNode(where='/', name=self.traj.v_name)
            for node in traj_group._f_walkGroups():
                if 'SRVC_LEAF' in node._v_attrs:
                    if ('run_' in node._v_pathname and
                            not pypetconstants.RUN_NAME_DUMMY in node._v_pathname):
                        # Comments should have been moved to the first run,
                        # i.e. run_00000000, only:
                        comment_run_name = 'run_00000000'
                        if comment_run_name in node._v_pathname:
                            self.assertTrue('SRVC_INIT_COMMENT' in node._v_attrs,
                                            'There is no comment in node %s!'
                                            % node._v_name)
                        else:
                            self.assertTrue(
                                not ('SRVC_INIT_COMMENT' in node._v_attrs),
                                'There is a comment in node %s!' % node._v_name)
                    else:
                        self.assertTrue('SRVC_INIT_COMMENT' in node._v_attrs,
                                        'There is no comment in node %s!'
                                        % node._v_name)
        finally:
            hdf5file.close()

    def test_purge_duplicate_comments(self):
        self.explore(self.traj)

        # Purging without the summary table enabled must fail:
        with self.assertRaises(RuntimeError):
            self.traj.purge_duplicate_comments = 1
            self.traj.overview.results_runs_summary = 0
            self.make_run()

        self.traj.f_get('purge_duplicate_comments').f_unlock()
        self.traj.purge_duplicate_comments = 1
        self.traj.f_get('results_runs_summary').f_unlock()
        self.traj.overview.results_runs_summary = 1

        self.make_run()

        hdf5file = pt.openFile(self.filename, mode='a')
        try:
            traj_group = hdf5file.getNode(where='/', name=self.traj.v_name)
            for node in traj_group._f_walkGroups():
                if 'SRVC_LEAF' in node._v_attrs:
                    if ('run_' in node._v_pathname and
                            not pypetconstants.RUN_NAME_DUMMY in node._v_pathname):
                        # Only the first run keeps the comment:
                        comment_run_name = 'run_00000000'
                        if comment_run_name in node._v_pathname:
                            self.assertTrue('SRVC_INIT_COMMENT' in node._v_attrs,
                                            'There is no comment in node %s!'
                                            % node._v_name)
                        else:
                            self.assertTrue(
                                not ('SRVC_INIT_COMMENT' in node._v_attrs),
                                'There is a comment in node %s!' % node._v_name)
                    else:
                        self.assertTrue('SRVC_INIT_COMMENT' in node._v_attrs,
                                        'There is no comment in node %s!'
                                        % node._v_name)
        finally:
            hdf5file.close()
class EnvironmentTest(TrajectoryComparator):
    """End-to-end integration tests for a pypet :class:`Environment`.

    NOTE(review): this class has the same name as the one defined earlier in
    this file and therefore shadows it — confirm this is intended.
    """

    tags = 'integration', 'hdf5', 'environment'

    def set_mode(self):
        # Environment configuration consumed by `setUp`.
        # Subclasses override single attributes to test other modes.
        self.mode = 'LOCK'
        self.multiproc = False
        self.gc_interval = None
        self.ncores = 1
        self.use_pool = True
        self.use_scoop = False
        self.freeze_input = False
        self.pandas_format = 'fixed'
        self.pandas_append = False
        self.complib = 'zlib'
        self.complevel = 9
        self.shuffle = True
        self.fletcher32 = False
        self.encoding = 'utf8'
        self.log_stdout = False
        self.wildcard_functions = None
        self.niceness = None
        self.port = None
        self.timeout = None
        self.add_time = True

    def explore_complex_params(self, traj):
        """Explore `traj` with csr/csc/bsr/dia sparse matrices, arrays and lists.

        First attempts an exploration with ambiguous short names, which must
        raise ``NotUniqueNodeError``, then explores with fully qualified names.
        Matrices are built as lil (efficient item assignment) and converted.
        """
        matrices_csr = []
        for irun in range(3):
            spsparse_csr = spsp.lil_matrix((111, 111))
            spsparse_csr[3, 2 + irun] = 44.5 * irun
            matrices_csr.append(spsparse_csr.tocsr())

        matrices_csc = []
        for irun in range(3):
            spsparse_csc = spsp.lil_matrix((111, 111))
            spsparse_csc[3, 2 + irun] = 44.5 * irun
            matrices_csc.append(spsparse_csc.tocsc())

        matrices_bsr = []
        for irun in range(3):
            spsparse_bsr = spsp.lil_matrix((111, 111))
            spsparse_bsr[3, 2 + irun] = 44.5 * irun
            matrices_bsr.append(spsparse_bsr.tocsr().tobsr())

        matrices_dia = []
        for irun in range(3):
            spsparse_dia = spsp.lil_matrix((111, 111))
            spsparse_dia[3, 2 + irun] = 44.5 * irun
            matrices_dia.append(spsparse_dia.tocsc().todia())

        self.explore_dict = {'string': [np.array(['Uno', 'Dos', 'Tres']),
                                        np.array(['Cinco', 'Seis', 'Siette']),
                                        np.array(['Ocho', 'Nueve', 'Diez'])],
                             'int': [1, 2, 3],
                             'csr_mat': matrices_csr,
                             'csc_mat': matrices_csc,
                             'bsr_mat': matrices_bsr,
                             'dia_mat': matrices_dia,
                             'list': [['fff'], [444444, 444, 44, 4, 4, 4],
                                      [1, 2, 3, 42]]}

        # 'string' and 'int' are ambiguous short names here, so exploring
        # must fail with a NotUniqueNodeError:
        with self.assertRaises(pex.NotUniqueNodeError):
            traj.f_explore(self.explore_dict)

        # Undo the partially applied exploration before retrying:
        traj.f_shrink(force=True)

        par_dict = traj.parameters.f_to_dict()
        for param_name in par_dict:
            param = par_dict[param_name]
            if param.v_name in self.explore_dict:
                param.f_unlock()
                if param.v_explored:
                    param._shrink()

        self.explore_dict = {'Numpy.string': [np.array(['Uno', 'Dos', 'Tres']),
                                              np.array(['Cinco', 'Seis', 'Siette']),
                                              np.array(['Ocho', 'Nueve', 'Diez'])],
                             'Normal.int': [1, 2, 3],
                             'csr_mat': matrices_csr,
                             'csc_mat': matrices_csc,
                             'bsr_mat': matrices_bsr,
                             'dia_mat': matrices_dia,
                             'list': [['fff'], [444444, 444, 44, 4, 4, 4],
                                      [1, 2, 3, 42]]}

        traj.f_explore(self.explore_dict)

    def explore(self, traj):
        """Standard exploration; lil matrices are converted to csr before use."""
        self.explored = {'Normal.trial': [0],
                         'Numpy.double': [np.array([1.0, 2.0, 3.0, 4.0]),
                                          np.array([-1.0, 3.0, 5.0, 7.0])],
                         'csr_mat': [spsp.lil_matrix((2222, 22)),
                                     spsp.lil_matrix((2222, 22))]}
        self.explored['csr_mat'][0][1, 2] = 44.0
        self.explored['csr_mat'][1][2, 2] = 33
        self.explored['csr_mat'][0] = self.explored['csr_mat'][0].tocsr()
        # BUGFIX: this previously converted index [0] again, which silently
        # replaced the second matrix (and its [2, 2] = 33 entry) with a copy
        # of the first one.
        self.explored['csr_mat'][1] = self.explored['csr_mat'][1].tocsr()
        traj.f_explore(cartesian_product(self.explored))

    def explore_large(self, traj):
        """Minimal two-run exploration used by the large-data test."""
        self.explored = {'Normal.trial': [0, 1]}
        traj.f_explore(cartesian_product(self.explored))

    def tearDown(self):
        self.env.f_disable_logging()
        super(EnvironmentTest, self).tearDown()

    def setUp(self):
        """Create a fresh environment + trajectory populated with parameters."""
        self.set_mode()

        self.logfolder = make_temp_dir(os.path.join('experiments', 'tests', 'Log'))

        random.seed()
        self.trajname = make_trajectory_name(self)
        self.filename = make_temp_dir(os.path.join('experiments',
                                                   'tests',
                                                   'HDF5',
                                                   'test%s.hdf5' % self.trajname))

        env = Environment(trajectory=self.trajname, filename=self.filename,
                          file_title=self.trajname,
                          log_stdout=self.log_stdout,
                          log_config=get_log_config(),
                          results_per_run=5,
                          wildcard_functions=self.wildcard_functions,
                          derived_parameters_per_run=5,
                          multiproc=self.multiproc,
                          ncores=self.ncores,
                          wrap_mode=self.mode,
                          use_pool=self.use_pool,
                          gc_interval=self.gc_interval,
                          freeze_input=self.freeze_input,
                          fletcher32=self.fletcher32,
                          complevel=self.complevel,
                          complib=self.complib,
                          shuffle=self.shuffle,
                          pandas_append=self.pandas_append,
                          pandas_format=self.pandas_format,
                          encoding=self.encoding,
                          niceness=self.niceness,
                          use_scoop=self.use_scoop,
                          port=self.port,
                          add_time=self.add_time,
                          timeout=self.timeout)

        traj = env.v_trajectory
        traj.v_standard_parameter = Parameter

        ## Create some parameters
        self.param_dict = {}
        create_param_dict(self.param_dict)
        ### Add some parameter:
        add_params(traj, self.param_dict)

        # remember the trajectory and the environment
        self.traj = traj
        self.env = env

    @unittest.skipIf(not hasattr(os, 'nice') and psutil is None,
                     'Niceness not supported under non Unix.')
    def test_niceness(self):
        ###Explore
        self.explore(self.traj)

        self.env.f_run(with_niceness)

        self.assertTrue(self.traj.f_is_completed())

    def test_file_overwriting(self):
        self.traj.f_store()

        # Renamed `file` -> `fh`: do not shadow the builtin.
        with ptcompat.open_file(self.filename, mode='r') as fh:
            nchildren = len(fh.root._v_children)
            self.assertTrue(nchildren > 0)

        env2 = Environment(filename=self.filename,
                           log_config=get_log_config())
        traj2 = env2.v_trajectory
        traj2.f_store()

        self.assertTrue(os.path.exists(self.filename))

        with ptcompat.open_file(self.filename, mode='r') as fh:
            nchildren = len(fh.root._v_children)
            self.assertTrue(nchildren > 1)

        # `overwrite_file=True` must wipe the file on environment creation:
        env3 = Environment(filename=self.filename, overwrite_file=True,
                           log_config=get_log_config())

        self.assertFalse(os.path.exists(self.filename))

        env2.f_disable_logging()
        env3.f_disable_logging()

    def test_time_display_of_loading(self):
        filename = make_temp_dir('sloooow.hdf5')
        env = Environment(trajectory='traj', add_time=True, filename=filename,
                          log_stdout=False,
                          log_config=get_log_config(),
                          dynamic_imports=SlowResult,
                          display_time=0.1)
        traj = env.v_traj
        res = traj.f_add_result(SlowResult, 'iii', 42, 43, comment='llk')
        traj.f_store()

        # Temporarily raise log levels so the progress message gets emitted:
        service_logger = traj.v_storage_service._logger
        root = logging.getLogger('pypet')
        old_level = root.level
        service_logger.setLevel(logging.INFO)
        root.setLevel(logging.INFO)

        traj.f_load(load_data=3)
        service_logger.setLevel(old_level)
        root.setLevel(old_level)

        path = get_log_path(traj)
        mainfilename = os.path.join(path, 'LOG.txt')
        with open(mainfilename, mode='r') as mainf:
            full_text = mainf.read()
            self.assertTrue('nodes/s)' in full_text)

        env.f_disable_logging()

    def make_run_large_data(self):
        """Execute the large-data job over all runs."""
        self.env.f_run(add_large_data)

    def make_run(self):
        """Execute the standard job and verify run ordering of the results."""
        ### Make a test run
        simple_arg = -13
        simple_kwarg = 13.0
        results = self.env.f_run(simple_calculations, simple_arg,
                                 simple_kwarg=simple_kwarg)
        self.are_results_in_order(results)

    def test_a_large_run(self):
        get_root_logger().info('Testing large run')
        self.traj.f_add_parameter('TEST', 'test_run')
        ###Explore
        self.explore_large(self.traj)
        self.make_run_large_data()
        self.assertTrue(self.traj.f_is_completed())

        # Check if printing and repr work
        get_root_logger().info(str(self.env))
        get_root_logger().info(repr(self.env))

        newtraj = Trajectory()
        newtraj.f_load(name=self.traj.v_name, as_new=False, load_data=2,
                       filename=self.filename)

        self.traj.f_load_skeleton()
        self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)

        self.compare_trajectories(self.traj, newtraj)

        size = os.path.getsize(self.filename)
        size_in_mb = size / 1000000.
        get_root_logger().info('Size is %sMB' % str(size_in_mb))
        self.assertTrue(size_in_mb < 30.0, 'Size is %sMB > 30MB' % str(size_in_mb))

    def test_two_runs(self):
        self.traj.f_add_parameter('TEST', 'test_run')
        self.traj.hdf5.purge_duplicate_comments = False
        ###Explore
        self.explore(self.traj)

        self.make_run()

        newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,
                                       as_new=False)
        self.traj.f_load_skeleton()
        self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)
        self.compare_trajectories(self.traj, newtraj)

        size = os.path.getsize(self.filename)
        size_in_mb = size / 1000000.
        get_root_logger().info('Size is %sMB' % str(size_in_mb))
        self.assertTrue(size_in_mb < 6.0, 'Size is %sMB > 6MB' % str(size_in_mb))

        mp_traj = self.traj

        old_multiproc = self.multiproc
        self.multiproc = False

        ### Make a new single core run
        self.setUp()

        self.traj.f_add_parameter('TEST', 'test_run')
        self.traj.hdf5.purge_duplicate_comments = False
        ###Explore
        self.explore(self.traj)

        self.make_run()

        # NOTE(review): `newtraj` from the first run is intentionally NOT
        # reloaded here, so the second (single-core) trajectory is compared
        # against the data of the first run.
        self.traj.f_load_skeleton()
        self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)
        self.compare_trajectories(self.traj, newtraj)

        size = os.path.getsize(self.filename)
        size_in_mb = size / 1000000.
        get_root_logger().info('Size is %sMB' % str(size_in_mb))
        self.assertTrue(size_in_mb < 6.0, 'Size is %sMB > 6MB' % str(size_in_mb))

        self.compare_trajectories(mp_traj, self.traj)
        self.multiproc = old_multiproc

    def test_errors(self):
        """Invalid environment configurations must raise ValueError."""
        tmp = make_temp_dir('cont')

        if dill is not None:
            env1 = Environment(continuable=True, continue_folder=tmp,
                               log_config=None, filename=self.filename)
            # f_run_map is not allowed for continuable environments:
            with self.assertRaises(ValueError):
                env1.f_run_map(multiply_args, [1], [2], [3])

        with self.assertRaises(ValueError):
            Environment(multiproc=True, use_pool=False, freeze_input=True,
                        filename=self.filename, log_config=None)

        env3 = Environment(log_config=None, filename=self.filename)
        # f_run_map requires at least one iterable of arguments:
        with self.assertRaises(ValueError):
            env3.f_run_map(multiply_args)

        with self.assertRaises(ValueError):
            Environment(use_scoop=True, immediate_postproc=True)

        with self.assertRaises(ValueError):
            Environment(use_pool=True, immediate_postproc=True)

        with self.assertRaises(ValueError):
            Environment(continuable=True, wrap_mode='QUEUE', continue_folder=tmp)

        with self.assertRaises(ValueError):
            Environment(use_scoop=True, wrap_mode='QUEUE')

        with self.assertRaises(ValueError):
            Environment(automatic_storing=False, continuable=True,
                        continue_folder=tmp)

        with self.assertRaises(ValueError):
            Environment(port='www.nosi.de', wrap_mode='LOCK')

    def test_run(self):
        self.traj.f_add_parameter('TEST', 'test_run')
        ###Explore
        self.explore(self.traj)

        self.make_run()
        self.assertTrue(self.traj.f_is_completed())

        newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,
                                       as_new=False)
        self.traj.f_load_skeleton()
        self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)
        self.compare_trajectories(self.traj, newtraj)

        size = os.path.getsize(self.filename)
        size_in_mb = size / 1000000.
        get_root_logger().info('Size is %sMB' % str(size_in_mb))
        self.assertTrue(size_in_mb < 6.0, 'Size is %sMB > 6MB' % str(size_in_mb))

    def test_just_one_run(self):
        self.make_run()
        self.assertTrue(self.traj.f_is_completed())

        newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,
                                       as_new=False)
        self.traj.f_load_skeleton()
        self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)
        self.compare_trajectories(self.traj, newtraj)

        self.assertTrue(len(newtraj) == 1)

        size = os.path.getsize(self.filename)
        size_in_mb = size / 1000000.
        get_root_logger().info('Size is %sMB' % str(size_in_mb))
        # BUGFIX: the failure message claimed '> 6MB' although the asserted
        # limit is 2MB.
        self.assertTrue(size_in_mb < 2.0, 'Size is %sMB > 2MB' % str(size_in_mb))

        # A second exploration attempt must raise TypeError here:
        with self.assertRaises(TypeError):
            self.explore(self.traj)

    def test_run_complex(self):
        self.traj.f_add_parameter('TEST', 'test_run_complex')
        ###Explore
        self.explore_complex_params(self.traj)

        self.make_run()
        self.assertTrue(self.traj.f_is_completed())

        newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,
                                       as_new=False)
        # Consistency: use f_load_skeleton like every other test in this
        # class (f_update_skeleton was the only remaining legacy call).
        self.traj.f_load_skeleton()
        self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)
        self.compare_trajectories(self.traj, newtraj)

    def load_trajectory(self, trajectory_index=None, trajectory_name=None,
                        as_new=False):
        """Load the trajectory from disk and return it fully populated."""
        ### Load The Trajectory and check if the values are still the same
        newtraj = Trajectory()
        newtraj.v_storage_service = HDF5StorageService(filename=self.filename)
        newtraj.f_load(name=trajectory_name, index=trajectory_index,
                       as_new=as_new, load_parameters=2,
                       load_derived_parameters=2, load_results=2,
                       load_other_data=2)
        return newtraj

    def test_expand(self):
        ###Explore
        self.traj.f_add_parameter('TEST', 'test_expand')
        self.explore(self.traj)

        self.make_run()

        self.expand()

        get_root_logger().info('\n $$$$$$$$$$$$$$$$$ Second Run $$$$$$$$$$$$$$$$$$$$$$$$')
        self.make_run()

        newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,
                                       as_new=False)
        self.traj.f_load_skeleton()
        self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)
        self.compare_trajectories(self.traj, newtraj)

    def test_expand_after_reload(self):
        self.traj.f_add_parameter('TEST', 'test_expand_after_reload')
        ###Explore
        self.explore(self.traj)

        self.make_run()

        traj_name = self.traj.v_name

        self.env = Environment(trajectory=self.traj,
                               log_stdout=False,
                               log_config=get_log_config())

        self.traj = self.env.v_trajectory

        self.traj.f_load(name=traj_name)

        self.traj.res.f_remove()
        self.traj.dpar.f_remove()

        self.expand()

        get_root_logger().info('\n $$$$$$$$$$$$ Second Run $$$$$$$$$$ \n')
        self.make_run()

        newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,
                                       as_new=False)
        self.traj.f_load_skeleton()
        self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)
        self.compare_trajectories(self.traj, newtraj)

    def expand(self):
        """Expand the trajectory by a second trial and store immediately."""
        self.expanded = {'Normal.trial': [1],
                         'Numpy.double': [np.array([1.0, 2.0, 3.0, 4.0]),
                                          np.array([-1.0, 3.0, 5.0, 7.0])],
                         'csr_mat': [spsp.lil_matrix((2222, 22)),
                                     spsp.lil_matrix((2222, 22))]}
        self.expanded['csr_mat'][0][1, 2] = 44.0
        self.expanded['csr_mat'][1][2, 2] = 33
        self.expanded['csr_mat'][0] = self.expanded['csr_mat'][0].tocsr()
        self.expanded['csr_mat'][1] = self.expanded['csr_mat'][1].tocsr()
        self.traj.f_expand(cartesian_product(self.expanded))
        self.traj.f_store()

    ################## Overview TESTS #############################

    def test_switch_ON_large_tables(self):
        self.traj.f_add_parameter('TEST', 'test_switch_ON_LARGE_tables')
        ###Explore
        self.explore(self.traj)
        self.env.f_set_large_overview(True)
        self.make_run()

        hdf5file = pt.openFile(self.filename)
        overview_group = hdf5file.getNode(where='/' + self.traj.v_name,
                                          name='overview')
        should = ['derived_parameters_overview', 'results_overview']
        for name in should:
            self.assertTrue(name in overview_group,
                            '%s not in overviews but it should!' % name)
        hdf5file.close()

        self.traj.f_load(load_parameters=2, load_derived_parameters=2,
                         load_results=2)
        newtraj = self.load_trajectory(trajectory_name=self.traj.v_name)
        self.compare_trajectories(newtraj, self.traj)

    def test_switch_off_all_tables(self):
        ###Explore
        self.traj.f_add_parameter('TEST', 'test_switch_off_ALL_tables')
        self.explore(self.traj)
        self.env.f_switch_off_all_overview()
        self.make_run()

        hdf5file = pt.openFile(self.filename)
        overview_group = hdf5file.getNode(where='/' + self.traj.v_name,
                                          name='overview')
        should_not = HDF5StorageService.NAME_TABLE_MAPPING.keys()
        for name in should_not:
            # Get only the name of the table, not the full name
            name = name.split('.')[-1]
            self.assertTrue(not name in overview_group,
                            '%s in overviews but should not!' % name)
        hdf5file.close()

    def test_store_form_tuple(self):
        self.traj.f_store()
        self.traj.f_add_result('TestResItem', 42, 43)

        # A 5-entry item tuple is invalid and must raise:
        with self.assertRaises(ValueError):
            self.traj.f_store_item((pypetconstants.LEAF,
                                    self.traj.TestResItem, (), {}, 5))

        self.traj.f_store_item((pypetconstants.LEAF, self.traj.TestResItem))

        self.traj.results.f_remove_child('TestResItem')
        self.assertTrue('TestResItem' not in self.traj)

        self.traj.results.f_load_child('TestResItem',
                                       load_data=pypetconstants.LOAD_SKELETON)
        self.traj.f_load_item((pypetconstants.LEAF, self.traj.TestResItem, (),
                               {'load_only': 'TestResItem'}))
        self.assertTrue(self.traj.TestResItem, 42)

    def test_store_single_group(self):
        self.traj.f_store()
        self.traj.f_add_parameter_group('new.test.group').v_annotations.f_set(42)
        self.traj.f_store_item('new.group')

        # group is below test not new, so ValueError thrown:
        with self.assertRaises(ValueError):
            self.traj.parameters.new.f_remove_child('group')

        # group is below test not new, so ValueError thrown:
        with self.assertRaises(ValueError):
            self.traj.parameters.new.f_store_child('group')

        # group has children and recursive is false
        with self.assertRaises(TypeError):
            self.traj.parameters.new.f_remove_child('test')

        self.traj.new.f_remove_child('test', recursive=True)
        self.assertTrue('new.group' not in self.traj)

        self.traj.new.f_load_child('test', recursive=True,
                                   load_data=pypetconstants.LOAD_SKELETON)
        self.assertTrue(self.traj.new.group.v_annotations.annotation, 42)

        self.traj.f_delete_item('new.test.group')

        with self.assertRaises(pex.DataNotInStorageError):
            self.traj.parameters.f_load_child('new.test.group',
                                              load_data=pypetconstants.LOAD_SKELETON)

    def test_switch_on_all_comments(self):
        self.explore(self.traj)
        self.traj.hdf5.purge_duplicate_comments = 0

        self.make_run()

        hdf5file = pt.openFile(self.filename)
        traj_group = hdf5file.getNode(where='/', name=self.traj.v_name)
        # With purging disabled every leaf node must carry its own comment.
        for node in traj_group._f_walkGroups():
            if 'SRVC_LEAF' in node._v_attrs:
                self.assertTrue('SRVC_INIT_COMMENT' in node._v_attrs,
                                'There is no comment in node %s!' % node._v_name)
        hdf5file.close()

    def test_purge_duplicate_comments(self):
        self.explore(self.traj)

        # Purging without the summary table enabled must fail:
        with self.assertRaises(RuntimeError):
            self.traj.hdf5.purge_duplicate_comments = 1
            self.traj.overview.results_summary = 0
            self.make_run()

        self.traj.f_get('purge_duplicate_comments').f_unlock()
        self.traj.hdf5.purge_duplicate_comments = 1
        self.traj.f_get('results_summary').f_unlock()
        self.traj.overview.results_summary = 1

        self.make_run()

        hdf5file = pt.openFile(self.filename, mode='a')
        ncomments = {}
        try:
            traj_group = hdf5file.getNode(where='/', name=self.traj.v_name)
            for node in traj_group._f_walkGroups():
                if ('/derived_parameters/' in node._v_pathname or
                        '/results/' in node._v_pathname):
                    if 'SRVC_LEAF' in node._v_attrs:
                        if 'SRVC_INIT_COMMENT' in node._v_attrs:
                            comment = node._v_attrs['SRVC_INIT_COMMENT']
                            if comment not in ncomments:
                                ncomments[comment] = 0
                            ncomments[comment] += 1
        finally:
            hdf5file.close()

        self.assertGreaterEqual(len(ncomments), 1)
        # With purging enabled every distinct comment is stored exactly once:
        self.assertTrue(all(x == 1 for x in ncomments.values()))

    def test_NOT_purge_duplicate_comments(self):
        self.explore(self.traj)

        self.traj.f_get('purge_duplicate_comments').f_unlock()
        self.traj.hdf5.purge_duplicate_comments = 0
        self.traj.f_get('results_summary').f_unlock()
        self.traj.overview.results_summary = 0

        self.make_run()

        hdf5file = pt.openFile(self.filename, mode='a')
        ncomments = {}
        try:
            traj_group = hdf5file.getNode(where='/', name=self.traj.v_name)
            for node in traj_group._f_walkGroups():
                if ('/derived_parameters/' in node._v_pathname or
                        '/results/' in node._v_pathname):
                    if 'SRVC_LEAF' in node._v_attrs:
                        if 'SRVC_INIT_COMMENT' in node._v_attrs:
                            comment = node._v_attrs['SRVC_INIT_COMMENT']
                            if comment not in ncomments:
                                ncomments[comment] = 0
                            ncomments[comment] += 1
        finally:
            hdf5file.close()

        self.assertGreaterEqual(len(ncomments), 1)
        # Without purging at least one comment must appear more than once:
        self.assertTrue(any(x > 1 for x in ncomments.values()))