def run_experiments():
    """Run experiments with exponentially increasing numbers of runs.

    Creates one pypet ``Environment`` per experiment (2**0 ... 2**7 runs),
    executes ``add_data`` for every trial, and collects bookkeeping info.

    Returns
    -------
    tuple
        ``(filenames, traj_names, pathfolder)`` -- the HDF5 file of each
        experiment, the trajectory names, and the HDF5 folder.
    """
    logging.basicConfig(level=logging.INFO)

    tmp_root = tempfile.gettempdir()
    logfolder = os.path.join(tmp_root, TEMPDIR, 'logs')
    pathfolder = os.path.join(tmp_root, TEMPDIR, 'hdf5')

    res_per_run = 100
    traj_names = []
    filenames = []

    # 1, 2, 4, ..., 128 runs -- one experiment per entry
    powers = np.arange(0, 8, 1)
    run_counts = (np.ones(len(powers)) * 2) ** powers

    for count in run_counts:
        env = Environment(log_folder=logfolder, filename=pathfolder,
                          ncores=2, multiproc=True,
                          use_pool=True, wrap_mode='QUEUE')

        traj = env.v_trajectory
        traj.f_add_parameter('res_per_run', res_per_run)
        traj.f_add_parameter('trial', 0)
        traj.f_explore({'trial': list(range(int(count)))})

        env.f_run(add_data)

        traj_names.append(traj.v_name)
        filenames.append(traj.v_storage_service.filename)

    return filenames, traj_names, pathfolder
def main():
    """Run the BRIAN example 07 simulation, exploring N against tauw."""
    # Be very verbose for the whole session.
    logging.basicConfig(level=logging.INFO)

    # Multiprocessing with queue-wrapped storage (lock is the default mode).
    storage_file = os.path.join('hdf5', 'example_07.hdf5')
    env = Environment(trajectory='Example_07_BRIAN',
                      filename=storage_file,
                      file_title='Example_07_Brian',
                      comment='Go Brian!',
                      dynamically_imported_classes=[BrianMonitorResult, BrianParameter],
                      multiproc=True,
                      wrap_mode='QUEUE',
                      ncores=2)

    traj = env.v_trajectory

    # 1st a) add the model parameters
    add_params(traj)

    # 1st b) explore different network sizes and tauw time scales
    exploration = cartesian_product(
        {traj.f_get('N').v_full_name: [50, 60],
         traj.f_get('tauw').v_full_name: [30*ms, 40*ms]})
    traj.f_explore(exploration)

    # 2nd run the experiment; results land in the HDF5 file.
    env.f_run(run_net)

    # Finally disable logging and close all log files.
    env.f_disable_logging()
def main():
    """Run example 01 with git and sumatra integration.

    Prints a success message on completion; on any failure the exception
    is printed and the process exits with status 1.
    """
    try:
        # Environment handling the runs, with git / sumatra bookkeeping
        # and blosc compression for the HDF5 file.
        env = Environment(trajectory='Example1_Quick_And_Not_So_Dirty',
                          filename='experiments/example_01/HDF5/',
                          file_title='Example1_Quick_And_Not_So_Dirty',
                          log_folder='experiments/example_01/LOGS/',
                          comment='The first example!',
                          complib='blosc',
                          small_overview_tables=False,
                          git_repository='./',
                          git_message='Im a message!',
                          sumatra_project='./',
                          sumatra_reason='Testing!')

        traj = env.v_trajectory

        # The two explored dimensions.
        traj.f_add_parameter('x', 1, comment='Im the first dimension!')
        traj.f_add_parameter('y', 1, comment='Im the second dimension!')

        # Cartesian-product exploration: 3 x 3 = 9 runs.
        traj.f_explore(cartesian_product({'x': [1, 2, 3], 'y': [6, 7, 8]}))

        # Execute the simulation for every parameter combination.
        env.f_run(multiply)

        print("Python git test successful")
    except Exception as e:
        print(repr(e))
        sys.exit(1)
def main():
    """Run BRIAN example 07 (Euler integration) with multiprocessing."""
    # Be very verbose.
    logging.basicConfig(level=logging.INFO)

    # Multiprocessing with queue-wrapped storage (lock is the default mode).
    env = Environment(trajectory='Example_07_BRIAN',
                      filename='experiments/example_07/HDF5/example_07.hdf5',
                      file_title='Example_07_Euler_Integration',
                      log_folder='experiments/example_07/LOGS/',
                      comment='Go Brian!',
                      dynamically_imported_classes=[BrianMonitorResult, BrianParameter],
                      multiproc=True,
                      wrap_mode='QUEUE',
                      ncores=2)

    traj = env.v_trajectory

    # 1st a) add the model parameters
    add_params(traj)

    # 1st b) explore network size N against adaptation time constant tauw
    traj.f_explore(cartesian_product(
        {traj.f_get('N').v_full_name: [50, 60],
         traj.f_get('tauw').v_full_name: [30*ms, 40*ms]}))

    # 2nd run the experiment for every parameter combination
    env.f_run(run_net)
def main(scale=0.5):
    """Set up and run the clustered-network simulation.

    Parameters
    ----------
    scale : float
        Meta parameter that rescales the default network settings
        (number of neurons and connection strengths, but not the
        cluster size).  Use ``scale=1.0`` to reproduce the results
        from the paper -- be aware your machine will need a lot of
        memory then.  Defaults to 0.5, the previously hard-coded value,
        so existing callers are unaffected.
    """
    filename = os.path.join('hdf5', 'Clustered_Network.hdf5')
    # We cannot use a pool because our network cannot be pickled,
    # hence use_pool=False with queue-wrapped storage.
    env = Environment(trajectory='Clustered_Network', add_time=False,
                      filename=filename,
                      continuable=False,
                      lazy_debug=False,
                      multiproc=True,
                      ncores=2,
                      use_pool=False,
                      wrap_mode='QUEUE',
                      overwrite_file=True)

    # Get the trajectory container
    traj = env.v_trajectory

    # A `meta` parameter that can easily rescale the network
    traj.f_add_parameter('simulation.scale', scale,
                         comment='Meta parameter that can scale default settings. '
                                 'Rescales number of neurons and connections strenghts, but '
                                 'not the clustersize.')

    # Pass all components to the manager.  Order matters: CNNeuronGroups
    # are scheduled before CNConnections, and the Fano factor computation
    # depends on the CNMonitorAnalysis.
    clustered_network_manager = NetworkManager(
        network_runner=CNNetworkRunner(),
        component_list=(CNNeuronGroup(), CNConnections()),
        analyser_list=(CNMonitorAnalysis(), CNFanoFactorComputer()))

    # Add the original parameters (scaled according to `scale`)
    clustered_network_manager.add_parameters(traj)

    # `tolist` because the parameter is a python float, not a numpy float.
    explore_list = np.arange(1.0, 2.6, 0.2).tolist()

    # Explore different values of `R_ee`
    traj.f_explore({'R_ee': explore_list})

    # Pre-build network components
    clustered_network_manager.pre_build(traj)

    # Store the parameters before running, then simulate the network.
    traj.f_store()
    env.f_run(clustered_network_manager.run_network)

    # Finally disable logging and close all log files
    env.f_disable_logging()
def main(scale=1.0):
    """Set up and run the clustered-network simulation at paper scale.

    Parameters
    ----------
    scale : float
        Meta parameter that rescales the default network settings
        (number of neurons and connection strengths, but not the
        cluster size).  Defaults to 1.0 -- the value used for the paper
        results; be aware your machine will need a lot of memory then.
        Pass a smaller value (e.g. 0.5) for a lighter test run.
    """
    filename = os.path.join('hdf5', 'Clustered_Network.hdf5')
    # We cannot use a pool because our network cannot be pickled,
    # hence use_pool=False with queue-wrapped storage.
    env = Environment(trajectory='Clustered_Network', add_time=False,
                      filename=filename,
                      continuable=False,
                      lazy_debug=False,
                      multiproc=True,
                      ncores=2,
                      use_pool=False,
                      wrap_mode='QUEUE',
                      overwrite_file=True)

    # Get the trajectory container
    traj = env.v_trajectory

    # A `meta` parameter that can easily rescale the network
    traj.f_add_parameter('simulation.scale', scale,
                         comment='Meta parameter that can scale default settings. '
                                 'Rescales number of neurons and connections strenghts, but '
                                 'not the clustersize.')

    # Pass all components to the manager.  Order matters: CNNeuronGroups
    # are scheduled before CNConnections, and the Fano factor computation
    # depends on the CNMonitorAnalysis.
    clustered_network_manager = NetworkManager(
        network_runner=CNNetworkRunner(),
        component_list=(CNNeuronGroup(), CNConnections()),
        analyser_list=(CNMonitorAnalysis(), CNFanoFactorComputer()))

    # Add the original parameters (scaled according to `scale`)
    clustered_network_manager.add_parameters(traj)

    # `tolist` because the parameter is a python float, not a numpy float.
    explore_list = np.arange(1.0, 2.6, 0.2).tolist()

    # Explore different values of `R_ee`
    traj.f_explore({'R_ee': explore_list})

    # Pre-build network components
    clustered_network_manager.pre_build(traj)

    # Store the parameters before running, then simulate the network.
    traj.f_store()
    env.f_run(clustered_network_manager.run_network)

    # Finally disable logging and close all log files
    env.f_disable_logging()
def profile_single_storing(profile_stroing=False, profile_loading=True):
    """Profile storing and/or loading of a single small trajectory.

    Runs ten trials of ``add_data`` and, depending on the flags, profiles
    the run/store phase into ``store_stats.profile`` and the load phase
    into ``load_stats.profile`` via :mod:`cProfile`.

    Parameters
    ----------
    profile_stroing : bool
        If True, profile the run/store phase.  NOTE: the parameter keeps
        its historical (misspelled) name for backward compatibility with
        existing keyword callers.
    profile_loading : bool
        If True, profile loading the trajectory back from disk.
    """
    logging.basicConfig(level=logging.INFO)
    logfolder = os.path.join(tempfile.gettempdir(), TEMPDIR, 'logs')
    pathfolder = os.path.join(tempfile.gettempdir(), TEMPDIR, 'hdf5')

    res_per_run = 100

    env = Environment(log_folder=logfolder, filename=pathfolder,
                      ncores=2, multiproc=False,
                      use_pool=True, wrap_mode='QUEUE')

    traj = env.v_trajectory
    traj.f_add_parameter('res_per_run', res_per_run)
    traj.f_add_parameter('trial', 0)
    traj.f_explore({'trial': list(range(10))})

    # Plain defs instead of lambda assignments (PEP 8): the names are only
    # needed so cProfile.runctx can reference them by string.
    def runexp():
        env.f_run(add_data)

    if profile_stroing:
        cProfile.runctx('runexp()', {'runexp': runexp}, globals(),
                        sort=1, filename='store_stats.profile')
    else:
        runexp()

    print('########################################################################')

    # Reload the freshly stored trajectory from disk for the load phase.
    traj = Trajectory(name=traj.v_name, add_time=False,
                      filename=traj.v_storage_service.filename)

    def load():
        traj.f_load(load_parameters=2, load_results=1)

    if profile_loading:
        cProfile.runctx('load()', {'load': load}, globals(),
                        filename='load_stats.profile', sort=1)
class ResultSortTest(TrajectoryComparator):
    """Integration tests: results of single runs must be sorted correctly
    and survive a store/reload round trip.

    NOTE(review): this version predates Python 3 -- it uses a ``print``
    statement and indexes ``dict.values()`` directly; confirm the target
    interpreter before running.
    """

    def set_mode(self):
        """Configure the default (single-process, LOCK wrap mode) setup."""
        self.mode = 'LOCK'
        self.multiproc = False
        self.ncores = 1
        self.use_pool=True

    def setUp(self):
        """Create a fresh environment and trajectory with parameters x, y."""
        self.set_mode()
        logging.basicConfig(level = logging.INFO)

        self.filename = make_temp_file('experiments/tests/HDF5/test.hdf5')
        self.logfolder = make_temp_file('experiments/tests/Log')
        self.trajname = make_trajectory_name(self)

        env = Environment(trajectory=self.trajname, filename=self.filename,
                          file_title=self.trajname, log_folder=self.logfolder,
                          log_stdout=False, multiproc=self.multiproc,
                          wrap_mode=self.mode, ncores=self.ncores,
                          use_pool=self.use_pool)

        traj = env.v_trajectory
        traj.v_standard_parameter=Parameter
        traj.f_add_parameter('x',0)
        traj.f_add_parameter('y',0)

        self.env=env
        self.traj=traj

    def load_trajectory(self,trajectory_index=None,trajectory_name=None,as_new=False):
        """Load the trajectory back from disk to check the stored values."""
        newtraj = Trajectory()
        newtraj.v_storage_service=HDF5StorageService(filename=self.filename)
        newtraj.f_load(name=trajectory_name, load_derived_parameters=2, load_results=2,
                       index=trajectory_index, as_new=as_new)
        return newtraj

    def explore(self,traj):
        """Explore five (x, y) points; remembered in self.explore_dict."""
        self.explore_dict={'x':[0,1,2,3,4],'y':[1,1,2,2,3]}
        traj.f_explore(self.explore_dict)

    def expand(self,traj):
        """Expand the trajectory by four more (x, y) points."""
        self.expand_dict={'x':[10,11,12,13],'y':[11,11,12,12]}
        traj.f_expand(self.expand_dict)

    def test_if_results_are_sorted_correctly(self):
        # Explore, run, and verify z == x*y per run; then reload and compare.
        self.explore(self.traj)
        self.env.f_run(multiply)

        traj = self.traj
        # NOTE(review): ``dict.values()[0]`` works only on Python 2; on
        # Python 3 values() is a non-indexable view.
        self.assertTrue(len(traj) == len(self.explore_dict.values()[0]))

        self.traj.f_update_skeleton()
        self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)
        self.check_if_z_is_correct(traj)

        newtraj = self.load_trajectory(trajectory_name=self.traj.v_name, as_new=False)
        self.traj.f_update_skeleton()
        self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)
        self.compare_trajectories(self.traj, newtraj)

    def test_f_iter_runs(self):
        # Iterating runs must expose exactly one run at a time and reset after.
        self.explore(self.traj)
        self.env.f_run(multiply)

        traj = self.traj
        self.assertTrue(len(traj) == len(self.explore_dict.values()[0]))

        self.traj.f_update_skeleton()
        self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)
        self.check_if_z_is_correct(traj)

        newtraj = self.load_trajectory(trajectory_name=self.traj.v_name, as_new=False)
        self.traj.f_update_skeleton()
        self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)

        for idx, run_name in enumerate(self.traj.f_iter_runs()):
            newtraj.v_as_run=run_name
            # NOTE(review): comparison result is discarded -- possibly meant
            # as an assignment or an assertion; confirm intent.
            self.traj.v_as_run == run_name
            self.traj.v_idx = idx
            newtraj.v_idx = idx
            # The next run must not be visible while the current one is set.
            self.assertTrue('run_%08d' % (idx+1) not in traj)
            self.assertTrue(newtraj.z==traj.x*traj.y, ' z != x*y: %s != %s * %s' %
                            (str(newtraj.z), str(traj.x), str(traj.y)))

        # After iteration the trajectory must be reset to "no active run".
        self.assertTrue(traj.v_idx == -1)
        self.assertTrue(traj.v_as_run is None)
        self.assertTrue(newtraj.v_idx == idx)

    def test_expand(self):
        # Run, expand with a fresh environment, run again, reload, compare.
        self.explore(self.traj)

        # NOTE(review): Python 2 print statement.
        print self.env.f_run(multiply)

        traj = self.traj
        self.assertTrue(len(traj) == len(self.explore_dict.values()[0]))

        self.traj.f_update_skeleton()
        self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)
        self.check_if_z_is_correct(traj)

        # Rebuild the environment around the existing trajectory.
        traj_name = self.env.v_trajectory.v_name
        del self.env
        self.env = Environment(trajectory=self.traj, filename=self.filename,
                               file_title=self.trajname, log_folder=self.logfolder,
                               log_stdout=False)

        self.traj = self.env.v_trajectory
        self.traj.f_load(name=traj_name)

        self.expand(self.traj)
        self.env.f_run(multiply)

        traj = self.traj
        self.assertTrue(len(traj) == len(self.expand_dict.values()[0])+
                        len(self.explore_dict.values()[0]))

        self.traj.f_update_skeleton()
        self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)
        self.check_if_z_is_correct(traj)

        newtraj = self.load_trajectory(trajectory_name=self.traj.v_name, as_new=False)
        self.traj.f_update_skeleton()
        self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)
        self.compare_trajectories(self.traj, newtraj)

    def test_expand_after_reload(self):
        # Same as test_expand but reusing the original environment.
        self.explore(self.traj)
        self.env.f_run(multiply)

        traj = self.traj
        self.assertTrue(len(traj) == len(self.explore_dict.values()[0]))

        self.traj.f_update_skeleton()
        self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)
        self.check_if_z_is_correct(traj)

        self.expand(self.traj)
        self.env.f_run(multiply)

        traj = self.traj
        self.assertTrue(len(traj) == len(self.expand_dict.values()[0])+
                        len(self.explore_dict.values()[0]))

        self.traj.f_update_skeleton()
        self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)
        self.check_if_z_is_correct(traj)

        newtraj = self.load_trajectory(trajectory_name=self.traj.v_name, as_new=False)
        self.traj.f_update_skeleton()
        self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)
        self.compare_trajectories(self.traj, newtraj)

    def check_if_z_is_correct(self,traj):
        """Assert z == x*y for every single run of the trajectory."""
        for x in range(len(traj)):
            traj.v_idx=x
            self.assertTrue(traj.z==traj.x*traj.y, ' z != x*y: %s != %s * %s' %
                            (str(traj.z), str(traj.x), str(traj.y)))
        # Reset to the "no active run" state.
        traj.v_idx=-1
class ResultSortTest(TrajectoryComparator):
    """Integration tests: results of single runs must be sorted correctly,
    survive store/reload round trips, and respect run iteration semantics.
    """

    tags = 'integration', 'hdf5', 'environment'

    def set_mode(self):
        """Configure the default (single-process, LOCK wrap mode) setup."""
        self.mode = 'LOCK'
        self.multiproc = False
        self.ncores = 1
        self.use_pool=True
        self.log_stdout=False
        self.freeze_input=False
        self.use_scoop = False
        self.log_config = True
        self.port = None
        self.graceful_exit = True

    def tearDown(self):
        """Close the environment's log files before the base tear-down."""
        self.env.f_disable_logging()
        super(ResultSortTest, self).tearDown()

    def setUp(self):
        """Create a fresh environment and trajectory with parameters x, y."""
        self.set_mode()
        self.filename = make_temp_dir(os.path.join('experiments','tests','HDF5','sort_tests.hdf5'))

        self.trajname = make_trajectory_name(self)

        env = Environment(trajectory=self.trajname, filename=self.filename,
                          file_title=self.trajname,
                          log_stdout=self.log_stdout,
                          log_config=get_log_config() if self.log_config else None,
                          multiproc=self.multiproc,
                          wrap_mode=self.mode,
                          ncores=self.ncores,
                          use_pool=self.use_pool,
                          use_scoop=self.use_scoop,
                          port=self.port,
                          freeze_input=self.freeze_input,
                          graceful_exit=self.graceful_exit)

        traj = env.v_trajectory
        traj.v_standard_parameter=Parameter
        traj.f_add_parameter('x',99)
        traj.f_add_parameter('y',99)

        self.env=env
        self.traj=traj

    def load_trajectory(self,trajectory_index=None,trajectory_name=None,as_new=False, how=2):
        """Load the trajectory back from disk to check the stored values."""
        newtraj = Trajectory()
        newtraj.v_storage_service=HDF5StorageService(filename=self.filename)
        newtraj.f_load(name=trajectory_name, index=trajectory_index, as_new=as_new,
                       load_derived_parameters=how, load_results=how)
        return newtraj

    def explore(self,traj):
        """Explore five (x, y) points; remembered in self.explore_dict."""
        self.explore_dict={'x':[-1,1,2,3,4],'y':[1,1,2,2,3]}
        traj.f_explore(self.explore_dict)

    def explore_cartesian(self,traj):
        """Explore the full cartesian product of seven x and seven y values."""
        self.explore_dict=cartesian_product({'x':[-1,1,2,3,4, 5, 6],'y':[1,1,2,2,3,4,4]})
        traj.f_explore(self.explore_dict)

    def expand(self,traj):
        """Expand the trajectory; a mismatched-length dict must raise first."""
        self.expand_dict={'x':[10,11,12,13],'y':[11,11,12,12,13]}
        with self.assertRaises(ValueError):
            traj.f_expand(self.expand_dict)
        self.expand_dict={'x':[10,11,12,13],'y':[11,11,12,12]}
        traj.f_expand(self.expand_dict)

    def test_if_results_are_sorted_correctly_manual_runs(self):
        # Execute each run manually via the manual_run decorator.
        self.explore(self.traj)
        self.traj.f_store(only_init=True)
        man_multiply = manual_run()(multiply_with_storing)
        for idx in self.traj.f_iter_runs(yields='idx'):
            self.assertTrue(isinstance(idx, int))
            man_multiply(self.traj)
        traj = self.traj
        traj.f_store()
        # NOTE(review): assertTrue with two args treats 5 as the failure
        # message -- likely assertEqual was intended.
        self.assertTrue(len(traj), 5)
        self.assertTrue(len(traj) == len(list(self.explore_dict.values())[0]))
        self.traj.f_load_skeleton()
        self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)

        self.check_if_z_is_correct(traj)

        newtraj = self.load_trajectory(trajectory_name=self.traj.v_name, as_new=False)
        self.traj.f_load_skeleton()
        self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)
        self.compare_trajectories(self.traj, newtraj)

    def test_if_results_are_sorted_correctly_using_map(self):
        # f_run_map passes per-run positional and keyword argument lists.
        self.explore(self.traj)
        args1=[10*x for x in range(len(self.traj))]
        args2=[100*x for x in range(len(self.traj))]
        args3=list(range(len(self.traj)))

        results = self.env.f_run_map(multiply_args, args1, arg2=args2, arg3=args3)
        self.assertEqual(len(results), len(self.traj))

        traj = self.traj
        self.assertTrue(len(traj) == len(list(self.explore_dict.values())[0]))

        self.traj.f_load_skeleton()
        self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)
        self.check_if_z_is_correct_map(traj, args1, args2, args3)

        # Each result is a (run index, z value) pair.
        for res in results:
            self.assertEqual(len(res), 2)
            self.assertTrue(isinstance(res[0], int))
            self.assertTrue(isinstance(res[1], int))
            idx = res[0]
            self.assertEqual(self.traj.res.runs[idx].z, res[1])

        newtraj = self.load_trajectory(trajectory_name=self.traj.v_name, as_new=False)
        self.traj.f_load_skeleton()
        self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)
        self.assertEqual(len(traj), 5)
        self.compare_trajectories(self.traj, newtraj)

    def test_if_results_are_sorted_correctly(self):
        # Standard f_run: results come back ordered and match storage.
        self.explore(self.traj)
        results = self.env.f_run(multiply)
        self.are_results_in_order(results)
        self.assertEqual(len(results), len(self.traj))

        traj = self.traj
        self.assertTrue(len(traj) == len(list(self.explore_dict.values())[0]))

        self.traj.f_load_skeleton()
        self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)
        self.check_if_z_is_correct(traj)

        # Each result is a (run index, z value) pair.
        for res in results:
            self.assertEqual(len(res), 2)
            self.assertTrue(isinstance(res[0], int))
            self.assertTrue(isinstance(res[1], int))
            idx = res[0]
            self.assertEqual(self.traj.res.runs[idx].z, res[1])

        newtraj = self.load_trajectory(trajectory_name=self.traj.v_name, as_new=False)
        self.traj.f_load_skeleton()
        self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)
        self.compare_trajectories(self.traj, newtraj)

    def test_graceful_exit(self):
        # A run function triggering graceful exit leaves the trajectory
        # incomplete, but the results returned so far stay ordered.
        self.explore_cartesian(self.traj)
        results = self.env.f_run(multiply_with_graceful_exit)
        self.are_results_in_order(results)
        self.assertFalse(self.traj.f_is_completed())

    def test_f_iter_runs(self):
        # Exercise all three yields modes: run names, 'self', and 'copy'.
        self.explore(self.traj)
        results = self.env.f_run(multiply)
        self.are_results_in_order(results)

        traj = self.traj
        self.assertTrue(len(traj) == len(list(self.explore_dict.values())[0]))

        self.traj.f_load_skeleton()
        self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)
        self.check_if_z_is_correct(traj)

        newtraj = self.load_trajectory(trajectory_name=self.traj.v_name, as_new=False)
        self.traj.f_load_skeleton()
        self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)

        for idx, run_name in enumerate(self.traj.f_iter_runs()):
            newtraj.v_crun=run_name
            self.traj.v_idx = idx
            newtraj.v_idx = idx
            # Only the current run may be visible while iterating.
            nameset = set((x.v_name for x in traj.f_iter_nodes(predicate=(idx,))))
            self.assertTrue('run_%08d' % (idx+1) not in nameset)
            self.assertTrue('run_%08d' % idx in nameset)
            self.assertTrue(traj.v_crun == run_name)
            self.assertTrue(newtraj.crun.z==traj.x*traj.y, ' z != x*y: %s != %s * %s' %
                            (str(newtraj.crun.z), str(traj.x), str(traj.y)))

        for idx, traj in enumerate(self.traj.f_iter_runs(yields='self')):
            run_name = traj.f_idx_to_run(idx)
            # yields='self' must hand back the identical trajectory object.
            self.assertTrue(traj is self.traj)
            newtraj.v_crun=run_name
            self.traj.v_idx = idx
            newtraj.v_idx = idx
            nameset = set((x.v_name for x in traj.f_iter_nodes(predicate=(idx,))))
            self.assertTrue('run_%08d' % (idx+1) not in nameset)
            self.assertTrue('run_%08d' % idx in nameset)
            self.assertTrue(traj.v_crun == run_name)
            self.assertTrue(newtraj.crun.z==traj.x*traj.y, ' z != x*y: %s != %s * %s' %
                            (str(newtraj.crun.z), str(traj.x), str(traj.y)))

        for idx, traj in enumerate(self.traj.f_iter_runs(yields='copy')):
            run_name = traj.f_idx_to_run(idx)
            # yields='copy' must hand back a distinct trajectory object.
            self.assertTrue(traj is not self.traj)
            newtraj.v_crun=run_name
            self.traj.v_idx = idx
            newtraj.v_idx = idx
            nameset = set((x.v_name for x in traj.f_iter_nodes(predicate=(idx,))))
            self.assertTrue('run_%08d' % (idx+1) not in nameset)
            self.assertTrue('run_%08d' % idx in nameset)
            self.assertTrue(traj.v_crun == run_name)
            self.assertTrue(newtraj.crun.z==traj.x*traj.y, ' z != x*y: %s != %s * %s' %
                            (str(newtraj.crun.z), str(traj.x), str(traj.y)))

        # After iteration the trajectory must be reset to "no active run".
        traj = self.traj
        self.assertTrue(traj.v_idx == -1)
        self.assertTrue(traj.v_crun is None)
        self.assertTrue(traj.v_crun_ == pypetconstants.RUN_NAME_DUMMY)
        self.assertTrue(newtraj.v_idx == idx)

    def test_f_iter_runs_auto_load(self):
        # Like test_f_iter_runs but data is pulled in lazily via auto-load.
        self.explore(self.traj)
        results = self.env.f_run(multiply)
        self.are_results_in_order(results)

        traj = self.traj
        self.assertTrue(len(traj) == len(list(self.explore_dict.values())[0]))

        self.traj.f_load_skeleton()
        self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)
        self.check_if_z_is_correct(traj)

        # Fresh trajectory with nothing loaded; rely on v_auto_load.
        newtraj = Trajectory()
        newtraj.v_storage_service=HDF5StorageService(filename=self.filename)
        newtraj.f_load(name=self.traj.v_name, index=None, as_new=False, load_data=0)
        newtraj.v_auto_load = True

        newtraj.par.f_load_child('y', load_data=1)

        for idx, run_name in enumerate(self.traj.f_iter_runs()):
            newtraj.v_crun=run_name
            self.traj.v_idx = idx
            newtraj.v_idx = idx
            nameset = set((x.v_name for x in traj.f_iter_nodes(predicate=(idx,))))
            self.assertTrue('run_%08d' % (idx+1) not in nameset)
            self.assertTrue('run_%08d' % idx in nameset)
            self.assertTrue(traj.v_crun == run_name)
            self.assertTrue(newtraj.res.runs.crun.z==newtraj.par.x*newtraj.par.y, ' z != x*y: %s != %s * %s' %
                            (str(newtraj.crun.z), str(newtraj.x), str(newtraj.y)))

        # After iteration the trajectory must be reset to "no active run".
        traj = self.traj
        self.assertTrue(traj.v_idx == -1)
        self.assertTrue(traj.v_crun is None)
        self.assertTrue(traj.v_crun_ == pypetconstants.RUN_NAME_DUMMY)
        self.assertTrue(newtraj.v_idx == idx)

    def test_expand(self):
        # Run, rebuild the environment, expand, run again, reload, compare.
        self.explore(self.traj)
        results = self.env.f_run(multiply)
        self.are_results_in_order(results)

        get_root_logger().info(results)

        traj = self.traj
        self.assertEqual(len(traj), len(list(list(self.explore_dict.values())[0])))

        self.traj.f_load_skeleton()
        self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)
        self.check_if_z_is_correct(traj)

        # Rebuild the environment around the existing trajectory.
        traj_name = self.env.v_trajectory.v_name
        del self.env
        self.env = Environment(trajectory=self.traj,
                               log_stdout=False,
                               log_config=get_log_config())

        self.traj = self.env.v_trajectory
        self.traj.f_load(name=traj_name)

        self.expand(self.traj)

        results = self.env.f_run(multiply)
        self.are_results_in_order(results)

        traj = self.traj
        self.assertTrue(len(traj) == len(list(self.expand_dict.values())[0])+
                        len(list(self.explore_dict.values())[0]))

        self.traj.f_load_skeleton()
        self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)
        self.check_if_z_is_correct(traj)

        newtraj = self.load_trajectory(trajectory_name=self.traj.v_name, as_new=False)
        self.traj.f_load_skeleton()
        self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)
        self.compare_trajectories(self.traj, newtraj)

    def test_expand_after_reload(self):
        # Same as test_expand but reusing the original environment.
        self.explore(self.traj)
        results = self.env.f_run(multiply)
        self.are_results_in_order(results)

        traj = self.traj
        self.assertTrue(len(traj) == len(list(self.explore_dict.values())[0]))

        self.traj.f_load_skeleton()
        self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)
        self.check_if_z_is_correct(traj)

        self.expand(self.traj)
        self.env.f_run(multiply)

        traj = self.traj
        self.assertTrue(len(traj) == len(list(self.expand_dict.values())[0])+\
                        len(list(self.explore_dict.values())[0]))

        self.traj.f_load_skeleton()
        self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)
        self.check_if_z_is_correct(traj)

        newtraj = self.load_trajectory(trajectory_name=self.traj.v_name, as_new=False)
        self.traj.f_load_skeleton()
        self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)
        self.compare_trajectories(self.traj, newtraj)

    def check_if_z_is_correct_map(self,traj, args1, args2, args3):
        """Assert z == x*y + arg1 + arg2 + arg3 for every run (map variant)."""
        for x, arg1, arg2, arg3 in zip(range(len(traj)), args1, args2, args3):
            traj.v_idx=x
            self.assertTrue(traj.crun.z==traj.x*traj.y+arg1+arg2+arg3, ' z != x*y: %s != %s * %s' %
                            (str(traj.crun.z), str(traj.x), str(traj.y)))
        # Reset to the "no active run" state.
        traj.v_idx=-1

    def check_if_z_is_correct(self,traj):
        """Assert z == x*y for every run using fully qualified access."""
        # Disable shortcuts so the explicit res/par paths are exercised.
        traj.v_shortcuts=False
        for x in range(len(traj)):
            traj.v_idx=x
            z = traj.res.runs.crun.z
            x = traj.par.x
            y = traj.par.y
            self.assertTrue(z==x*y, ' z != x*y: %s != %s * %s' %
                            (str(z), str(x), str(y)))
        # Reset index and restore shortcuts.
        traj.v_idx=-1
        traj.v_shortcuts=True
class ResultSortTest(TrajectoryComparator):
    """Integration tests: results of single runs must be sorted correctly
    and survive store/reload round trips (py2/py3 ``compat`` variant).
    """

    tags = 'integration', 'hdf5', 'environment'

    def set_mode(self):
        """Configure the default (single-process, LOCK wrap mode) setup."""
        self.mode = 'LOCK'
        self.multiproc = False
        self.ncores = 1
        self.use_pool=True
        self.log_stdout=False
        self.freeze_input=False
        self.use_scoop = False
        self.log_config = True
        self.port = None

    def tearDown(self):
        """Close the environment's log files before the base tear-down."""
        self.env.f_disable_logging()
        super(ResultSortTest, self).tearDown()

    def setUp(self):
        """Create a fresh environment and trajectory with parameters x, y."""
        self.set_mode()
        self.filename = make_temp_dir(os.path.join('experiments','tests','HDF5','sort_tests.hdf5'))

        self.trajname = make_trajectory_name(self)

        env = Environment(trajectory=self.trajname, filename=self.filename,
                          file_title=self.trajname,
                          log_stdout=self.log_stdout,
                          log_config=get_log_config() if self.log_config else None,
                          multiproc=self.multiproc,
                          wrap_mode=self.mode,
                          ncores=self.ncores,
                          use_pool=self.use_pool,
                          use_scoop=self.use_scoop,
                          port=self.port,
                          freeze_input=self.freeze_input,)

        traj = env.v_trajectory
        traj.v_standard_parameter=Parameter
        traj.f_add_parameter('x',0)
        traj.f_add_parameter('y',0)

        self.env=env
        self.traj=traj

    def load_trajectory(self,trajectory_index=None,trajectory_name=None,as_new=False):
        """Load the trajectory back from disk to check the stored values."""
        newtraj = Trajectory()
        newtraj.v_storage_service=HDF5StorageService(filename=self.filename)
        newtraj.f_load(name=trajectory_name, index=trajectory_index, as_new=as_new,
                       load_derived_parameters=2, load_results=2)
        return newtraj

    def explore(self,traj):
        """Explore five (x, y) points; remembered in self.explore_dict."""
        self.explore_dict={'x':[0,1,2,3,4],'y':[1,1,2,2,3]}
        traj.f_explore(self.explore_dict)

    def expand(self,traj):
        """Expand the trajectory; a mismatched-length dict must raise first."""
        self.expand_dict={'x':[10,11,12,13],'y':[11,11,12,12,13]}
        with self.assertRaises(ValueError):
            traj.f_expand(self.expand_dict)
        self.expand_dict={'x':[10,11,12,13],'y':[11,11,12,12]}
        traj.f_expand(self.expand_dict)

    def test_if_results_are_sorted_correctly_manual_runs(self):
        # Execute each run manually via the manual_run decorator.
        self.explore(self.traj)
        self.traj.f_store(only_init=True)
        man_multiply = manual_run()(multiply_with_storing)
        for idx in self.traj.f_iter_runs(yields='idx'):
            self.assertTrue(isinstance(idx, int))
            man_multiply(self.traj)
        traj = self.traj
        traj.f_store()
        # NOTE(review): assertTrue with two args treats 5 as the failure
        # message -- likely assertEqual was intended.
        self.assertTrue(len(traj), 5)
        self.assertTrue(len(traj) == len(compat.listvalues(self.explore_dict)[0]))
        self.traj.f_load_skeleton()
        self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)

        self.check_if_z_is_correct(traj)

        newtraj = self.load_trajectory(trajectory_name=self.traj.v_name, as_new=False)
        self.traj.f_load_skeleton()
        self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)
        self.compare_trajectories(self.traj, newtraj)

    def test_if_results_are_sorted_correctly_using_map(self):
        # f_run_map passes per-run positional and keyword argument lists.
        self.explore(self.traj)
        args1=[10*x for x in range(len(self.traj))]
        args2=[100*x for x in range(len(self.traj))]
        args3=list(range(len(self.traj)))

        results = self.env.f_run_map(multiply_args, args1, arg2=args2, arg3=args3)
        self.assertEqual(len(results), len(self.traj))

        traj = self.traj
        self.assertTrue(len(traj) == len(compat.listvalues(self.explore_dict)[0]))

        self.traj.f_load_skeleton()
        self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)
        self.check_if_z_is_correct_map(traj, args1, args2, args3)

        # Each result is a (run index, z value) pair.
        for res in results:
            self.assertEqual(len(res), 2)
            self.assertTrue(isinstance(res[0], int))
            self.assertTrue(isinstance(res[1], int))
            idx = res[0]
            self.assertEqual(self.traj.res.runs[idx].z, res[1])

        newtraj = self.load_trajectory(trajectory_name=self.traj.v_name, as_new=False)
        self.traj.f_load_skeleton()
        self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)
        self.assertEqual(len(traj), 5)
        self.compare_trajectories(self.traj, newtraj)

    def test_if_results_are_sorted_correctly(self):
        # Standard f_run: results come back ordered and match storage.
        self.explore(self.traj)
        results = self.env.f_run(multiply)
        self.are_results_in_order(results)
        self.assertEqual(len(results), len(self.traj))

        traj = self.traj
        self.assertTrue(len(traj) == len(compat.listvalues(self.explore_dict)[0]))

        self.traj.f_load_skeleton()
        self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)
        self.check_if_z_is_correct(traj)

        # Each result is a (run index, z value) pair.
        for res in results:
            self.assertEqual(len(res), 2)
            self.assertTrue(isinstance(res[0], int))
            self.assertTrue(isinstance(res[1], int))
            idx = res[0]
            self.assertEqual(self.traj.res.runs[idx].z, res[1])

        newtraj = self.load_trajectory(trajectory_name=self.traj.v_name, as_new=False)
        self.traj.f_load_skeleton()
        self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)
        self.compare_trajectories(self.traj, newtraj)

    def test_f_iter_runs(self):
        # Exercise all three yields modes: run names, 'self', and 'copy'.
        self.explore(self.traj)
        results = self.env.f_run(multiply)
        self.are_results_in_order(results)

        traj = self.traj
        self.assertTrue(len(traj) == len(compat.listvalues(self.explore_dict)[0]))

        self.traj.f_load_skeleton()
        self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)
        self.check_if_z_is_correct(traj)

        newtraj = self.load_trajectory(trajectory_name=self.traj.v_name, as_new=False)
        self.traj.f_load_skeleton()
        self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)

        for idx, run_name in enumerate(self.traj.f_iter_runs()):
            newtraj.v_as_run=run_name
            # NOTE(review): comparison result is discarded -- possibly meant
            # as an assignment or an assertion; confirm intent.
            self.traj.v_as_run == run_name
            self.traj.v_idx = idx
            newtraj.v_idx = idx
            # Only the current run may be visible while iterating.
            nameset = set((x.v_name for x in traj.f_iter_nodes(predicate=(idx,))))
            self.assertTrue('run_%08d' % (idx+1) not in nameset)
            self.assertTrue('run_%08d' % idx in nameset)
            self.assertTrue(traj.v_crun == run_name)
            self.assertTrue(newtraj.crun.z==traj.x*traj.y, ' z != x*y: %s != %s * %s' %
                            (str(newtraj.crun.z), str(traj.x), str(traj.y)))

        for idx, traj in enumerate(self.traj.f_iter_runs(yields='self')):
            run_name = traj.f_idx_to_run(idx)
            # yields='self' must hand back the identical trajectory object.
            self.assertTrue(traj is self.traj)
            newtraj.v_as_run=run_name
            self.traj.v_as_run == run_name
            self.traj.v_idx = idx
            newtraj.v_idx = idx
            nameset = set((x.v_name for x in traj.f_iter_nodes(predicate=(idx,))))
            self.assertTrue('run_%08d' % (idx+1) not in nameset)
            self.assertTrue('run_%08d' % idx in nameset)
            self.assertTrue(traj.v_crun == run_name)
            self.assertTrue(newtraj.crun.z==traj.x*traj.y, ' z != x*y: %s != %s * %s' %
                            (str(newtraj.crun.z), str(traj.x), str(traj.y)))

        for idx, traj in enumerate(self.traj.f_iter_runs(yields='copy')):
            run_name = traj.f_idx_to_run(idx)
            # yields='copy' must hand back a distinct trajectory object.
            self.assertTrue(traj is not self.traj)
            newtraj.v_as_run=run_name
            self.traj.v_as_run == run_name
            self.traj.v_idx = idx
            newtraj.v_idx = idx
            nameset = set((x.v_name for x in traj.f_iter_nodes(predicate=(idx,))))
            self.assertTrue('run_%08d' % (idx+1) not in nameset)
            self.assertTrue('run_%08d' % idx in nameset)
            self.assertTrue(traj.v_crun == run_name)
            self.assertTrue(newtraj.crun.z==traj.x*traj.y, ' z != x*y: %s != %s * %s' %
                            (str(newtraj.crun.z), str(traj.x), str(traj.y)))

        # After iteration the trajectory must be reset to "no active run".
        traj = self.traj
        self.assertTrue(traj.v_idx == -1)
        self.assertTrue(traj.v_crun is None)
        self.assertTrue(traj.v_crun_ == pypetconstants.RUN_NAME_DUMMY)
        self.assertTrue(newtraj.v_idx == idx)

    def test_expand(self):
        # Run, rebuild the environment, expand, run again, reload, compare.
        self.explore(self.traj)
        results = self.env.f_run(multiply)
        self.are_results_in_order(results)

        get_root_logger().info(results)

        traj = self.traj
        self.assertEqual(len(traj), len(list(compat.listvalues(self.explore_dict)[0])))

        self.traj.f_load_skeleton()
        self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)
        self.check_if_z_is_correct(traj)

        # Rebuild the environment around the existing trajectory.
        traj_name = self.env.v_trajectory.v_name
        del self.env
        self.env = Environment(trajectory=self.traj,
                               log_stdout=False,
                               log_config=get_log_config())

        self.traj = self.env.v_trajectory
        self.traj.f_load(name=traj_name)

        self.expand(self.traj)

        results = self.env.f_run(multiply)
        self.are_results_in_order(results)

        traj = self.traj
        self.assertTrue(len(traj) == len(compat.listvalues(self.expand_dict)[0])+
                        len(compat.listvalues(self.explore_dict)[0]))

        self.traj.f_load_skeleton()
        self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)
        self.check_if_z_is_correct(traj)

        newtraj = self.load_trajectory(trajectory_name=self.traj.v_name, as_new=False)
        self.traj.f_load_skeleton()
        self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)
        self.compare_trajectories(self.traj, newtraj)

    def test_expand_after_reload(self):
        # Same as test_expand but reusing the original environment.
        self.explore(self.traj)
        results = self.env.f_run(multiply)
        self.are_results_in_order(results)

        traj = self.traj
        self.assertTrue(len(traj) == len(compat.listvalues(self.explore_dict)[0]))

        self.traj.f_load_skeleton()
        self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)
        self.check_if_z_is_correct(traj)

        self.expand(self.traj)
        self.env.f_run(multiply)

        traj = self.traj
        self.assertTrue(len(traj) == len(compat.listvalues(self.expand_dict)[0])+\
                        len(compat.listvalues(self.explore_dict)[0]))

        self.traj.f_load_skeleton()
        self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)
        self.check_if_z_is_correct(traj)

        newtraj = self.load_trajectory(trajectory_name=self.traj.v_name, as_new=False)
        self.traj.f_load_skeleton()
        self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)
        self.compare_trajectories(self.traj, newtraj)

    def check_if_z_is_correct_map(self,traj, args1, args2, args3):
        """Assert z == x*y + arg1 + arg2 + arg3 for every run (map variant)."""
        for x, arg1, arg2, arg3 in zip(range(len(traj)), args1, args2, args3):
            traj.v_idx=x
            self.assertTrue(traj.crun.z==traj.x*traj.y+arg1+arg2+arg3, ' z != x*y: %s != %s * %s' %
                            (str(traj.crun.z), str(traj.x), str(traj.y)))
        # Reset to the "no active run" state.
        traj.v_idx=-1

    def check_if_z_is_correct(self,traj):
        """Assert z == x*y for every run using fully qualified access."""
        # Disable shortcuts so the explicit res/par paths are exercised.
        traj.v_shortcuts=False
        for x in range(len(traj)):
            traj.v_idx=x
            z = traj.res.runs.crun.z
            x = traj.par.x
            y = traj.par.y
            self.assertTrue(z==x*y, ' z != x*y: %s != %s * %s' %
                            (str(z), str(x), str(y)))
        # Reset index and restore shortcuts.
        traj.v_idx=-1
        traj.v_shortcuts=True
traj2 = env2.v_trajectory # Add both parameters traj1.f_add_parameter('x', 1.0, comment='I am the first dimension!') traj1.f_add_parameter('y', 1.0, comment='I am the second dimension!') traj2.f_add_parameter('x', 1.0, comment='I am the first dimension!') traj2.f_add_parameter('y', 1.0, comment='I am the second dimension!') # Explore the parameters with a cartesian product for the first trajectory: traj1.f_explore(cartesian_product({'x':[1.0,2.0,3.0,4.0], 'y':[6.0,7.0,8.0]})) # Let's explore slightly differently for the second: traj2.f_explore(cartesian_product({'x':[3.0,4.0,5.0,6.0], 'y':[7.0,8.0,9.0]})) # Run the simulations with all parameter combinations env1.f_run(multiply) env2.f_run(multiply) # Now we merge them together into traj1 # We want to remove duplicate entries # like the parameter space point x=3.0, y=7.0. # Several points have been explored by both trajectories and we need them only once. # Therefore, we set remove_duplicates=True (Note this takes O(N1*N2)!). # We also want to backup both trajectories, but we let the system choose the filename. # Accordingly we choose backup_filename=True instead of providing a filename. # We want to move the hdf5 nodes from one trajectory to the other. # Thus we set move_nodes=True. # Finally,we want to delete the other trajectory afterwards since we already have a backup. traj1.f_merge(traj2,remove_duplicates=True,backup_filename=True, move_nodes=True, delete_other_trajectory=True)
env = Environment(trajectory='Example08',filename='experiments/example_08/HDF5/example_08.hdf5', file_title='Example08', log_folder='experiments/example_08/LOGS/', comment='Another example!') # Get the trajectory from the environment traj = env.v_trajectory # Add both parameters traj.f_add_parameter('x', 1, comment='I am the first dimension!') traj.f_add_parameter('y', 1, comment='I am the second dimension!') # Explore the parameters with a cartesian product: traj.f_explore(cartesian_product({'x':[1,2,3,4], 'y':[6,7,8]})) # Run the simulation env.f_run(multiply) # We load all results traj.f_load(load_results=pypetconstants.LOAD_DATA) # And now we want to find som particular results, the ones where x was 2 or y was 8. # Therefore, we use a lambda function my_filter_predicate= lambda x,y: x==2 or y==8 # We can now use this lambda function to search for the run indexes associated with x==2 OR y==8. # We need a list specifying the names of the parameters and the predicate to do this. # Note that names need to be in the order as listed in the lambda function, here 'x' and 'y': idx_iterator = traj.f_find_idx(['x','y'], my_filter_predicate) # Now we can print the corresponding results: print 'The run names and results for parameter combinations with x==2 or y==8:'
class LinkMergeTest(TrajectoryComparator):
    """Integration tests for merging trajectories that contain linked
    derived parameters (pypet '$' wildcard names) and run-created links."""

    tags = 'integration', 'hdf5', 'environment', 'links', 'merge'

    def test_merge_with_linked_derived_parameter(self, disable_logging = True):
        """Merge two explored trajectories sharing one HDF5 file and verify
        that per-run derived parameters keep their origin trajectory's value.

        Returns the pre-merge length of traj1 so test_remerging can reuse it.
        """
        logging.basicConfig(level = logging.ERROR)
        self.logfolder = make_temp_dir(os.path.join('experiments', 'tests', 'Log'))
        random.seed()
        self.trajname1 = 'T1'+ make_trajectory_name(self)
        self.trajname2 = 'T2'+make_trajectory_name(self)
        # Both trajectories are stored in the same HDF5 file
        self.filename = make_temp_dir(os.path.join('experiments', 'tests', 'HDF5',
                                                   'test%s.hdf5' % self.trajname1))
        self.env1 = Environment(trajectory=self.trajname1, filename=self.filename,
                                file_title=self.trajname1,
                                log_stdout=False,
                                log_config=get_log_config())
        self.env2 = Environment(trajectory=self.trajname2, filename=self.filename,
                                file_title=self.trajname2,
                                log_stdout=False,
                                log_config=get_log_config())
        self.traj1 = self.env1.v_trajectory
        self.traj2 = self.env2.v_trajectory
        create_link_params(self.traj1)
        create_link_params(self.traj2)
        # Distinct explorations so the merge really appends runs
        explore_params(self.traj1)
        explore_params2(self.traj2)
        # '$' expands to the run name; values differ per trajectory so we can
        # later tell which trajectory a merged run came from
        self.traj1.f_add_derived_parameter('test.$.gg', 42)
        self.traj2.f_add_derived_parameter('test.$.gg', 44)
        self.traj1.f_add_derived_parameter('test.hh.$', 111)
        self.traj2.f_add_derived_parameter('test.hh.$', 53)
        self.env1.f_run(dostuff_and_add_links)
        self.env2.f_run(dostuff_and_add_links)
        old_length = len(self.traj1)
        self.traj1.f_merge(self.traj2, remove_duplicates=True)
        self.traj1.f_load(load_data=2)
        for run in self.traj1.f_get_run_names():
            self.traj1.v_crun = run
            idx = self.traj1.v_idx
            param = self.traj1['test.crun.gg']
            # Runs below the old length originate from traj1 (42), the rest
            # were merged in from traj2 (44)
            if idx < old_length:
                self.assertTrue(param == 42)
            else:
                self.assertTrue(param == 44)
            param = self.traj1['test.hh.crun']
            if idx < old_length:
                self.assertTrue(param == 111)
            else:
                self.assertTrue(param == 53)
        self.assertTrue(len(self.traj1) > old_length)
        # Links created during the runs must resolve to paramB after the merge
        for irun in range(len(self.traj1.f_get_run_names())):
            self.assertTrue(self.traj1.res['r_%d' % irun] == self.traj1.paramB)
            self.assertTrue(self.traj1.res.runs['r_%d' % irun].paraBL == self.traj1.paramB)
        if disable_logging:
            self.env1.f_disable_logging()
            self.env2.f_disable_logging()
        return old_length

    def test_remerging(self):
        """Backup a merged trajectory, reload the backup, and merge it back
        into the original (duplicates kept) to check link handling survives
        a second merge."""
        prev_old_length = self.test_merge_with_linked_derived_parameter(disable_logging=False)
        # NOTE(review): `name` is assigned but never used (and holds the
        # trajectory object, not its name) — likely leftover; confirm.
        name = self.traj1
        self.bfilename = make_temp_dir(os.path.join('experiments', 'tests', 'HDF5',
                                                    'backup_test%s.hdf5' % self.trajname1))
        self.traj1.f_load(load_data=2)
        self.traj1.f_backup(backup_filename=self.bfilename)
        self.traj3 = load_trajectory(index=-1, filename=self.bfilename, load_all=2)
        old_length = len(self.traj1)
        # Duplicates are intentionally kept so the length doubles
        self.traj1.f_merge(self.traj3, backup=False, remove_duplicates=False)
        self.assertTrue(len(self.traj1) > old_length)
        self.traj1.f_load(load_data=2)
        for run in self.traj1.f_get_run_names():
            self.traj1.v_crun = run
            idx = self.traj1.v_idx
            param = self.traj1['test.crun.gg']
            # Original traj1 runs and their re-merged copies carry 42/111,
            # traj2-origin runs carry 44/53
            if idx < prev_old_length or old_length <= idx < prev_old_length + old_length:
                self.assertTrue(param == 42, '%s != 42' % str(param))
            else:
                self.assertTrue(param == 44, '%s != 44' % str(param))
            param = self.traj1['test.hh.crun']
            if idx < prev_old_length or old_length <= idx < prev_old_length + old_length:
                self.assertTrue(param == 111, '%s != 111' % str(param))
            else:
                self.assertTrue(param == 53, '%s != 53' % str(param))
        self.assertTrue(len(self.traj1)>old_length)
        for irun in range(len(self.traj1.f_get_run_names())):
            self.assertTrue(self.traj1.res.runs['r_%d' % irun].paraBL == self.traj1.paramB)
            self.assertTrue(self.traj1.res['r_%d' % irun] == self.traj1.paramB)
        self.env1.f_disable_logging()
        self.env2.f_disable_logging()
class ResultSortTest(TrajectoryComparator):
    """Integration tests checking that per-run results end up sorted into the
    correct run groups, including after expansion of the explored space."""

    tags = 'integration', 'hdf5', 'environment'

    def set_mode(self):
        # Default single-process configuration; subclasses presumably override
        # these attributes to exercise multiprocessing modes — confirm.
        self.mode = 'LOCK'
        self.multiproc = False
        self.ncores = 1
        self.use_pool=True
        self.log_stdout=False
        self.freeze_pool_input=False

    def tearDown(self):
        self.env.f_disable_logging()
        super(ResultSortTest, self).tearDown()

    def setUp(self):
        """Create an Environment per the mode flags and a trajectory with two
        integer parameters x and y."""
        self.set_mode()
        self.filename = make_temp_dir(os.path.join('experiments','tests','HDF5','test.hdf5'))
        self.trajname = make_trajectory_name(self)
        env = Environment(trajectory=self.trajname,filename=self.filename,
                          file_title=self.trajname,
                          log_stdout=self.log_stdout,
                          log_config=get_log_config(),
                          multiproc=self.multiproc,
                          wrap_mode=self.mode,
                          ncores=self.ncores,
                          use_pool=self.use_pool,
                          freeze_pool_input=self.freeze_pool_input,)
        traj = env.v_trajectory
        traj.v_standard_parameter=Parameter
        traj.f_add_parameter('x',0)
        traj.f_add_parameter('y',0)
        self.env=env
        self.traj=traj

    def load_trajectory(self,trajectory_index=None,trajectory_name=None,as_new=False):
        ### Load The Trajectory and check if the values are still the same
        newtraj = Trajectory()
        newtraj.v_storage_service=HDF5StorageService(filename=self.filename)
        newtraj.f_load(name=trajectory_name, index=trajectory_index, as_new=as_new,
                       load_derived_parameters=2, load_results=2)
        return newtraj

    def explore(self,traj):
        # Five explored points; x*y gives distinct z values per run
        self.explore_dict={'x':[0,1,2,3,4],'y':[1,1,2,2,3]}
        traj.f_explore(self.explore_dict)

    def expand(self,traj):
        # First attempt has mismatching lengths (4 vs 5) and must raise
        self.expand_dict={'x':[10,11,12,13],'y':[11,11,12,12,13]}
        with self.assertRaises(ValueError):
            traj.f_expand(self.expand_dict)
        # Corrected expansion with equal lengths succeeds
        self.expand_dict={'x':[10,11,12,13],'y':[11,11,12,12]}
        traj.f_expand(self.expand_dict)

    def test_if_results_are_sorted_correctly(self):
        """Run the exploration and verify every run's z equals x*y, both in
        memory and after a fresh load from storage."""
        ###Explore
        self.explore(self.traj)
        self.env.f_run(multiply)
        traj = self.traj
        self.assertTrue(len(traj) == len(compat.listvalues(self.explore_dict)[0]))
        self.traj.f_load_skeleton()
        self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)
        self.check_if_z_is_correct(traj)
        newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False)
        self.traj.f_load_skeleton()
        self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)
        self.compare_trajectories(self.traj,newtraj)

    def test_f_iter_runs(self):
        """Iterate the runs via f_iter_runs and verify run-local views expose
        only the current run's data."""
        ###Explore
        self.explore(self.traj)
        self.env.f_run(multiply)
        traj = self.traj
        self.assertTrue(len(traj) == len(compat.listvalues(self.explore_dict)[0]))
        self.traj.f_load_skeleton()
        self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)
        self.check_if_z_is_correct(traj)
        newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False)
        self.traj.f_load_skeleton()
        self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)
        for idx, run_name in enumerate(self.traj.f_iter_runs()):
            newtraj.v_as_run=run_name
            # NOTE(review): comparison used as a statement (no effect);
            # presumably `self.traj.v_as_run = run_name` was intended — confirm.
            self.traj.v_as_run == run_name
            self.traj.v_idx = idx
            newtraj.v_idx = idx
            nameset = set((x.v_name for x in traj.f_iter_nodes(predicate=(idx,))))
            # Only the current run group may be visible under this predicate
            self.assertTrue('run_%08d' % (idx+1) not in nameset)
            self.assertTrue('run_%08d' % idx in nameset)
            self.assertTrue(traj.v_crun == run_name)
            self.assertTrue(newtraj.crun.z==traj.x*traj.y,' z != x*y: %s != %s * %s' % (str(newtraj.crun.z),str(traj.x),str(traj.y)))
        # f_iter_runs must restore the "no active run" state on traj
        self.assertTrue(traj.v_idx == -1)
        self.assertTrue(traj.v_crun is None)
        self.assertTrue(traj.v_crun_ == pypetconstants.RUN_NAME_DUMMY)
        # newtraj keeps the last manually assigned index
        self.assertTrue(newtraj.v_idx == idx)

    def test_expand(self):
        """Explore, reload into a new Environment, expand, rerun, and compare
        against a freshly loaded copy."""
        ###Explore
        self.explore(self.traj)
        get_root_logger().info(self.env.f_run(multiply))
        traj = self.traj
        self.assertEqual(len(traj), len(list(compat.listvalues(self.explore_dict)[0])))
        self.traj.f_load_skeleton()
        self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)
        self.check_if_z_is_correct(traj)
        traj_name = self.env.v_trajectory.v_name
        # Attach the trajectory to a brand-new Environment
        del self.env
        self.env = Environment(trajectory=self.traj,
                               log_stdout=False,
                               log_config=get_log_config())
        self.traj = self.env.v_trajectory
        self.traj.f_load(name=traj_name)
        self.expand(self.traj)
        self.env.f_run(multiply)
        traj = self.traj
        # Total length = original exploration + expansion
        self.assertTrue(len(traj) == len(compat.listvalues(self.expand_dict)[0])+
                        len(compat.listvalues(self.explore_dict)[0]))
        self.traj.f_load_skeleton()
        self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)
        self.check_if_z_is_correct(traj)
        newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False)
        self.traj.f_load_skeleton()
        self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)
        self.compare_trajectories(self.traj,newtraj)

    def test_expand_after_reload(self):
        """Same as test_expand but keeps the original Environment."""
        ###Explore
        self.explore(self.traj)
        self.env.f_run(multiply)
        traj = self.traj
        self.assertTrue(len(traj) == len(compat.listvalues(self.explore_dict)[0]))
        self.traj.f_load_skeleton()
        self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)
        self.check_if_z_is_correct(traj)
        self.expand(self.traj)
        self.env.f_run(multiply)
        traj = self.traj
        self.assertTrue(len(traj) == len(compat.listvalues(self.expand_dict)[0])+\
                        len(compat.listvalues(self.explore_dict)[0]))
        self.traj.f_load_skeleton()
        self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)
        self.check_if_z_is_correct(traj)
        newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False)
        self.traj.f_load_skeleton()
        self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)
        self.compare_trajectories(self.traj,newtraj)

    def check_if_z_is_correct(self,traj):
        """Assert z == x*y for every run index."""
        for x in range(len(traj)):
            traj.v_idx=x
            self.assertTrue(traj.crun.z==traj.x*traj.y,' z != x*y: %s != %s * %s' % (str(traj.crun.z),str(traj.x),str(traj.y)))
        # Reset the trajectory to the "no active run" state
        traj.v_idx=-1
# --- Fragment of legacy scratch/demo code; the enclosing function(s) start
# before this chunk. Uses an old pypet API (get_trajectory, param_type,
# gfn) and Python 2 `print` statements — will not parse under Python 3.
y.val = x**2
smurf = Result('','','','')
z = traj.f_add_result('Nada.Moo',smurf)
z.val = y()+1
print 'Dat wars'
#multip.log_to_stderr().setLevel(logging.INFO)
config['multiproc']=False
config['ncores']=2
# NOTE(review): keyword is spelled `dynamicly_imported_classes` here while
# other blocks in this file use `dynamically_imported_classes` — confirm
# which spelling this (old) Environment version accepts.
env = Environment(trajectory='MyExperiment',
                  filename='../experiments/env.hdf5',dynamicly_imported_classes=[BrianParameter])
traj = env.get_trajectory()
assert isinstance(traj, Trajectory)
par=traj.f_add_parameter('x',param_type=BrianParameter, value=3, unit = 'mV')
par.hui='buh'
print par()
print par.val
traj.f_explore(identity, {traj.x.gfn('value'):[1,2,3,4]})
env.f_run(test_run,to_print='test')
class LinkMergeTest(TrajectoryComparator):
    """Integration tests for merging trajectories with linked derived
    parameters. NOTE(review): this class duplicates an earlier LinkMergeTest
    in this file (same name, same tests) — confirm whether both copies are
    intended to coexist, since the later definition shadows the earlier one
    if they share a module."""

    tags = 'integration', 'hdf5', 'environment', 'links', 'merge'

    def test_merge_with_linked_derived_parameter(self, disable_logging=True):
        """Merge two explored trajectories sharing one HDF5 file and verify
        per-run derived parameters keep their origin trajectory's value.

        Returns the pre-merge length of traj1 for reuse by test_remerging.
        """
        logging.basicConfig(level=logging.ERROR)
        self.logfolder = make_temp_dir(
            os.path.join('experiments', 'tests', 'Log'))
        random.seed()
        self.trajname1 = 'T1' + make_trajectory_name(self)
        self.trajname2 = 'T2' + make_trajectory_name(self)
        # Both trajectories share one HDF5 file
        self.filename = make_temp_dir(
            os.path.join('experiments', 'tests', 'HDF5', 'test%s.hdf5' % self.trajname1))
        self.env1 = Environment(trajectory=self.trajname1, filename=self.filename,
                                file_title=self.trajname1,
                                log_stdout=False,
                                log_config=get_log_config())
        self.env2 = Environment(trajectory=self.trajname2, filename=self.filename,
                                file_title=self.trajname2,
                                log_stdout=False,
                                log_config=get_log_config())
        self.traj1 = self.env1.v_trajectory
        self.traj2 = self.env2.v_trajectory
        create_link_params(self.traj1)
        create_link_params(self.traj2)
        explore_params(self.traj1)
        explore_params2(self.traj2)
        # '$' expands to the run name; differing values mark trajectory origin
        self.traj1.f_add_derived_parameter('test.$.gg', 42)
        self.traj2.f_add_derived_parameter('test.$.gg', 44)
        self.traj1.f_add_derived_parameter('test.hh.$', 111)
        self.traj2.f_add_derived_parameter('test.hh.$', 53)
        self.env1.f_run(dostuff_and_add_links)
        self.env2.f_run(dostuff_and_add_links)
        old_length = len(self.traj1)
        self.traj1.f_merge(self.traj2, remove_duplicates=True)
        self.traj1.f_load(load_data=2)
        for run in self.traj1.f_get_run_names():
            self.traj1.v_crun = run
            idx = self.traj1.v_idx
            param = self.traj1['test.crun.gg']
            # Runs below old_length come from traj1 (42), rest from traj2 (44)
            if idx < old_length:
                self.assertTrue(param == 42)
            else:
                self.assertTrue(param == 44)
            param = self.traj1['test.hh.crun']
            if idx < old_length:
                self.assertTrue(param == 111)
            else:
                self.assertTrue(param == 53)
        self.assertTrue(len(self.traj1) > old_length)
        # Links created during the runs must still resolve to paramB
        for irun in range(len(self.traj1.f_get_run_names())):
            self.assertTrue(self.traj1.res['r_%d' % irun] == self.traj1.paramB)
            self.assertTrue(
                self.traj1.res.runs['r_%d' % irun].paraBL == self.traj1.paramB)
        if disable_logging:
            self.env1.f_disable_logging()
            self.env2.f_disable_logging()
        return old_length

    def test_remerging(self):
        """Backup a merged trajectory, reload the backup, and merge it back
        into the original with duplicates kept."""
        prev_old_length = self.test_merge_with_linked_derived_parameter(
            disable_logging=False)
        # NOTE(review): `name` is assigned but never used (holds the trajectory
        # object, not its name) — likely leftover; confirm.
        name = self.traj1
        self.bfilename = make_temp_dir(
            os.path.join('experiments', 'tests', 'HDF5',
                         'backup_test%s.hdf5' % self.trajname1))
        self.traj1.f_load(load_data=2)
        self.traj1.f_backup(backup_filename=self.bfilename)
        self.traj3 = load_trajectory(index=-1, filename=self.bfilename, load_all=2)
        old_length = len(self.traj1)
        # Duplicates intentionally kept so the length doubles
        self.traj1.f_merge(self.traj3, backup=False, remove_duplicates=False)
        self.assertTrue(len(self.traj1) > old_length)
        self.traj1.f_load(load_data=2)
        for run in self.traj1.f_get_run_names():
            self.traj1.v_crun = run
            idx = self.traj1.v_idx
            param = self.traj1['test.crun.gg']
            # traj1-origin runs and their re-merged copies carry 42/111
            if idx < prev_old_length or old_length <= idx < prev_old_length + old_length:
                self.assertTrue(param == 42, '%s != 42' % str(param))
            else:
                self.assertTrue(param == 44, '%s != 44' % str(param))
            param = self.traj1['test.hh.crun']
            if idx < prev_old_length or old_length <= idx < prev_old_length + old_length:
                self.assertTrue(param == 111, '%s != 111' % str(param))
            else:
                self.assertTrue(param == 53, '%s != 53' % str(param))
        self.assertTrue(len(self.traj1) > old_length)
        for irun in range(len(self.traj1.f_get_run_names())):
            self.assertTrue(
                self.traj1.res.runs['r_%d' % irun].paraBL == self.traj1.paramB)
            self.assertTrue(self.traj1.res['r_%d' % irun] == self.traj1.paramB)
        self.env1.f_disable_logging()
        self.env2.f_disable_logging()
def main():
    """Example 05: Euler integration of the Lorenz attractor.

    Runs an exploration over three initial conditions, then reloads the
    trajectory from disk — first deliberately without registering
    FunctionParameter (which raises ImportError), then correctly — and
    plots each run's 3D evolution.

    Fix vs. original: Python 2 ``print`` statements replaced with the
    ``print()`` function (the rest of this file, e.g. example_01, already
    uses it), and ``e.message`` (removed in Python 3) replaced by the
    exception itself.
    """
    env = Environment(trajectory='Example_05_Euler_Integration',
                      filename='experiments/example_05/HDF5/example_05.hdf5',
                      file_title='Example_05_Euler_Integration',
                      log_folder='experiments/example_05/LOGS/',
                      comment = 'Go for Euler!')

    traj = env.v_trajectory
    trajectory_name = traj.v_name

    # 1st a) phase parameter addition
    add_parameters(traj)

    # 1st b) phase preparation
    # We will add the differential equation (well, its source code only) as a derived parameter
    traj.f_add_derived_parameter(FunctionParameter,'diff_eq', diff_lorenz,
                                 comment='Source code of our equation!')

    # We want to explore some initial conditions
    traj.f_explore({'initial_conditions' : [
        np.array([0.01,0.01,0.01]),
        np.array([2.02,0.02,0.02]),
        np.array([42.0,4.2,0.42])
    ]})
    # 3 different conditions are enough for an illustrative example

    # 2nd phase let's run the experiment
    # We pass `euler_scheme` as our top-level simulation function and
    # the Lorenz equation 'diff_lorenz' as an additional argument
    env.f_run(euler_scheme, diff_lorenz)

    # We don't have a 3rd phase of post-processing here

    # 4th phase analysis.
    # I would recommend to do post-processing completely independent from the simulation,
    # but for simplicity let's do it here.

    # Let's assume that we start all over again and load the entire trajectory new.
    # Yet, there is an error within this approach, do you spot it?
    del traj
    traj = Trajectory(filename='experiments/example_05/HDF5/example_05.hdf5')

    # We will only fully load parameters and derived parameters.
    # Results will be loaded manually later on.
    try:
        # However, this will fail because our trajectory does not know how to
        # build the FunctionParameter. You have seen this coming, right?
        traj.f_load(name=trajectory_name,load_parameters=2,
                    load_derived_parameters=2,load_results=1)
    except ImportError as e:
        # `%s` on the exception prints its message; `e.message` no longer
        # exists in Python 3.
        print('That did\'nt work, I am sorry. %s ' % e)

    # Ok, let's try again but this time with adding our parameter to the imports
    traj = Trajectory(filename='experiments/example_05/HDF5/example_05.hdf5',
                      dynamically_imported_classes=FunctionParameter)

    # Now it works:
    traj.f_load(name=trajectory_name,load_parameters=2,
                load_derived_parameters=2,load_results=1)

    #For the fun of it, let's print the source code
    print('\n ---------- The source code of your function ---------- \n %s' % traj.diff_eq)

    # Let's get the exploration array:
    initial_conditions_exploration_array = traj.f_get('initial_conditions').f_get_range()

    # Now let's plot our simulated equations for the different initial conditions:
    # We will iterate through the run names
    for idx, run_name in enumerate(traj.f_get_run_names()):

        # Get the result of run idx from the trajectory
        euler_result = traj.results.f_get(run_name).euler_evolution
        # Now we manually need to load the result. Actually the results are not so large and we
        # could load them all at once. But for demonstration we do as if they were huge:
        traj.f_load_item(euler_result)
        euler_data = euler_result.data

        # Plot fancy 3d plot
        fig = plt.figure(idx)
        ax = fig.gca(projection='3d')
        x = euler_data[:,0]
        y = euler_data[:,1]
        z = euler_data[:,2]
        ax.plot(x, y, z, label='Initial Conditions: %s' %
                               str(initial_conditions_exploration_array[idx]))
        plt.legend()
        plt.show()

        # Now we free the data again (because we assume its huuuuuuge):
        del euler_data
        euler_result.f_empty()
def main():
    """Example 06: Euler integration with a presettable equation choice.

    Presets 'diff_name' to select the Roessler system before the parameter
    phase, runs the exploration over three initial conditions, then updates
    the skeleton and plots each run's 3D evolution.

    Fix vs. original: Python 2 ``print`` statements replaced with the
    ``print()`` function (the rest of this file, e.g. example_01, already
    uses it).
    """
    env = Environment(trajectory='Example_06_Euler_Integration',
                      filename='experiments/example_06/HDF5/example_06.hdf5',
                      file_title='Example_06_Euler_Integration',
                      log_folder='experiments/example_06/LOGS/',
                      comment = 'Go for Euler!')

    traj = env.v_trajectory

    # 1st a) phase parameter addition
    # Remember we have some control flow in the `add_parameters` function, the default parameter
    # set we choose is the `'diff_lorenz'` one, but we want to deviate from that and use the
    # `'diff_roessler'`.
    # In order to do that we can preset the corresponding name parameter to change the
    # control flow:
    traj.f_preset_parameter('diff_name', 'diff_roessler') # If you erase this line, you will get
                                                          # again the lorenz attractor
    add_parameters(traj)

    # 1st b) phase preparation
    # Let's check which function we want to use
    if traj.diff_name=='diff_lorenz':
        diff_eq = diff_lorenz
    elif traj.diff_name=='diff_roessler':
        diff_eq = diff_roessler
    else:
        raise ValueError('I don\'t know what %s is.' % traj.diff_name)

    # And add the source code of the function as a derived parameter.
    traj.f_add_derived_parameter(FunctionParameter, 'diff_eq', diff_eq,
                                 comment='Source code of our equation!')

    # We want to explore some initial conditions
    traj.f_explore({'initial_conditions' : [
        np.array([0.01,0.01,0.01]),
        np.array([2.02,0.02,0.02]),
        np.array([42.0,4.2,0.42])
    ]})
    # 3 different conditions are enough for now

    # 2nd phase let's run the experiment
    # We pass 'euler_scheme' as our top-level simulation function and
    # the Roessler function as an additional argument
    env.f_run(euler_scheme, diff_eq)

    # Again no post-processing

    # 4th phase analysis.
    # I would recommend to do the analysis completely independent from the simulation
    # but for simplicity let's do it here.
    # We won't reload the trajectory this time but simply update the skeleton
    traj.f_update_skeleton()

    #For the fun of it, let's print the source code
    print('\n ---------- The source code of your function ---------- \n %s' % traj.diff_eq)

    # Let's get the exploration array:
    initial_conditions_exploration_array = traj.f_get('initial_conditions').f_get_range()

    # Now let's plot our simulated equations for the different initial conditions.
    # We will iterate through the run names
    for idx, run_name in enumerate(traj.f_get_run_names()):

        # Get the result of run idx from the trajectory
        euler_result = traj.results.f_get(run_name).euler_evolution
        # Now we manually need to load the result. Actually the results are not so large and we
        # could load them all at once, but for demonstration we do as if they were huge:
        traj.f_load_item(euler_result)
        euler_data = euler_result.data

        # Plot fancy 3d plot
        fig = plt.figure(idx)
        ax = fig.gca(projection='3d')
        x = euler_data[:,0]
        y = euler_data[:,1]
        z = euler_data[:,2]
        ax.plot(x, y, z, label='Initial Conditions: %s' %
                               str(initial_conditions_exploration_array[idx]))
        plt.legend()
        plt.show()

        # Now we free the data again (because we assume its huuuuuuge):
        del euler_data
        euler_result.f_empty()