def print_traj_parameters_explored(traj_dir):
    # Load the trajectory from the hdf5 file
    # Only load parameters, results will be loaded at runtime (auto loading)
    #traj_dir = os.path.join('trajectories', '2019_03_21_22h48m29s_HCP_test')
    #if not os.path.isdir(traj_dir):
    #    traj_dir = os.path.join('..', traj_dir)
    traj_filename = 'traj.hdf5'
    traj_fullpath = os.path.join(traj_dir, traj_filename)
    traj = Trajectory()
    traj.f_load(
        filename=traj_fullpath,
        index=0,
        load_parameters=2,
        load_results=0,
        load_derived_parameters=0,
        force=True)
    # Turn on auto loading
    traj.v_auto_load = True
    # Count number of runs
    runs_n = len(traj.f_get_run_names())
    print('number of runs = {0}'.format(runs_n))
    # Get list of explored parameters
    parameters_explored = [
        str.split(par, '.').pop()
        for par in traj.f_get_explored_parameters()]
    print(parameters_explored)
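# Hypothetical usage sketch (assumption, not in the original sources): call the
# helper above on a directory containing a pypet 'traj.hdf5' file; the
# directory name below is a placeholder.
if __name__ == '__main__':
    print_traj_parameters_explored(os.path.join('trajectories', 'my_traj_dir'))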
# Only load parameters, results will be loaded at runtime (auto loading)
traj_dir = 'TE_from_couplings_WS_sweep_noise1_w0.15_100nodes_100000samples_10rep_history14'
traj_filename = 'traj.hdf5'
traj_fullpath = os.path.join(traj_dir, traj_filename)
traj = Trajectory()
traj.f_load(
    filename=traj_fullpath,
    index=0,
    load_parameters=2,
    load_results=0,
    load_derived_parameters=0,
    force=True)
# Turn on auto loading
traj.v_auto_load = True
# Count number of runs
runs_n = len(traj.f_get_run_names())
print('Number of runs = {0}'.format(runs_n))
# Get list of explored parameters
parameters_explored = [
    str.split(par, '.').pop()
    for par in traj.f_get_explored_parameters()]
# Initialise analysis summary table
# (it is important that the columns with the explored parameters
# precede the ones with the results)
df = pd.DataFrame(
    index=traj.f_get_run_names(),
    columns=parameters_explored + [
        'bTE_empirical_causal_vars',
        'bTE_approx2_causal_vars',
        'bTE_approx4_causal_vars',
def main():
    filename = os.path.join('hdf5', 'example_05.hdf5')
    env = Environment(trajectory='Example_05_Euler_Integration',
                      filename=filename,
                      file_title='Example_05_Euler_Integration',
                      comment='Go for Euler!')

    traj = env.v_trajectory
    trajectory_name = traj.v_name

    # 1st a) phase parameter addition
    add_parameters(traj)

    # 1st b) phase preparation
    # We will add the differential equation (well, its source code only) as a derived parameter
    traj.f_add_derived_parameter(FunctionParameter, 'diff_eq', diff_lorenz,
                                 comment='Source code of our equation!')

    # We want to explore some initial conditions
    traj.f_explore({'initial_conditions': [
        np.array([0.01, 0.01, 0.01]),
        np.array([2.02, 0.02, 0.02]),
        np.array([42.0, 4.2, 0.42])]})
    # 3 different conditions are enough for an illustrative example

    # 2nd phase let's run the experiment
    # We pass `euler_scheme` as our top-level simulation function and
    # the Lorenz equation 'diff_lorenz' as an additional argument
    env.f_run(euler_scheme, diff_lorenz)

    # We don't have a 3rd phase of post-processing here

    # 4th phase analysis.
    # I would recommend doing post-processing completely independently of the
    # simulation, but for simplicity let's do it here.

    # Let's assume that we start all over again and load the entire trajectory anew.
    # Yet, there is an error within this approach, do you spot it?
    del traj
    traj = Trajectory(filename=filename)

    # We will only fully load parameters and derived parameters.
    # Results will be loaded manually later on.
    try:
        # However, this will fail because our trajectory does not know how to
        # build the FunctionParameter. You have seen this coming, right?
        traj.f_load(name=trajectory_name, load_parameters=2,
                    load_derived_parameters=2, load_results=1)
    except ImportError as e:
        print('That didn\'t work, I am sorry: %s ' % str(e))

    # Ok, let's try again but this time with adding our parameter to the imports
    traj = Trajectory(filename=filename,
                      dynamically_imported_classes=FunctionParameter)

    # Now it works:
    traj.f_load(name=trajectory_name, load_parameters=2,
                load_derived_parameters=2, load_results=1)

    # For the fun of it, let's print the source code
    print('\n ---------- The source code of your function ---------- \n %s' % traj.diff_eq)

    # Let's get the exploration array:
    initial_conditions_exploration_array = traj.f_get('initial_conditions').f_get_range()
    # Now let's plot our simulated equations for the different initial conditions:
    # We will iterate through the run names
    for idx, run_name in enumerate(traj.f_get_run_names()):

        # Get the result of run idx from the trajectory
        euler_result = traj.results.f_get(run_name).euler_evolution
        # Now we manually need to load the result. Actually the results are not so large and we
        # could load them all at once. But for demonstration we do as if they were huge:
        traj.f_load_item(euler_result)
        euler_data = euler_result.data

        # Plot fancy 3d plot
        fig = plt.figure(idx)
        ax = fig.gca(projection='3d')
        x = euler_data[:, 0]
        y = euler_data[:, 1]
        z = euler_data[:, 2]
        ax.plot(x, y, z, label='Initial Conditions: %s' %
                str(initial_conditions_exploration_array[idx]))
        plt.legend()
        plt.show()

        # Now we free the data again (because we assume it's huuuuuuge):
        del euler_data
        euler_result.f_empty()

    # You have to click through the images to stop the example_05 module!

    # Finally disable logging and close all log-files
    env.f_disable_logging()
traj_dir = 'TE_from_couplings_BA_noise1_w0.1_m1_100nodes_100000samples_history14_10000rep'
traj_filename = 'traj.hdf5'
traj_fullpath = os.path.join(traj_dir, traj_filename)
traj = Trajectory()
traj.f_load(
    filename=traj_fullpath,
    index=0,
    load_parameters=2,
    load_results=0,
    load_derived_parameters=0,
    force=True)
# Turn on auto loading
traj.v_auto_load = True
# Count number of runs
runs_n = len(traj.f_get_run_names())
print('Number of runs = {0}'.format(runs_n))
# Get list of explored parameters
parameters_explored = [str.split(par, '.').pop() for par in (
    traj.f_get_explored_parameters())]
# Initialise analysis summary table
# (it is important that the columns with the explored parameters
# precede the ones with the results)
df = pd.DataFrame(
    index=traj.f_get_run_names(),
    columns=parameters_explored + [
        'bTE_empirical_causal_vars',
        'bTE_approx2_causal_vars',
        'bTE_approx4_causal_vars',
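# Hedged sketch (assumption, not part of the original script; the column list
# above is truncated in the source): once the summary table is initialised,
# the explored parameter values for each run can be copied into it from the
# parameter ranges. The result columns would be filled analogously from the
# stored results.
for run_idx, run_name in enumerate(traj.f_get_run_names()):
    for par_full_name, par in traj.f_get_explored_parameters().items():
        par_name = par_full_name.split('.')[-1]
        # f_get_range() returns one value per run, in run order
        df.loc[run_name, par_name] = par.f_get_range()[run_idx]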
# In[ ]:

traj.f_load(index=-1, load_parameters=2, load_results=2)


# In[ ]:

traj.f_get_parameters()


# In[ ]:

traj.f_get_explored_parameters()


# In[ ]:

traj.f_get_run_names()


# In[ ]:

def my_filter_function(location, dt):
    result = location == 'mars' and dt == 1e-2
    return result


# In[ ]:

set(traj.f_get('incline').f_get_range())


# In[ ]:
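# In[ ]:

# Hedged sketch (assumption, not part of the original notebook): a filter
# function like my_filter_function is typically passed to pypet's
# Trajectory.f_find_idx together with the full names of the parameters it
# filters on. The names 'parameters.location' and 'parameters.dt' below are
# placeholders and may differ in this trajectory.
matching_run_idx = traj.f_find_idx(
    ['parameters.location', 'parameters.dt'], my_filter_function)
for idx in matching_run_idx:
    # Make the trajectory behave as if it only contained the matching run
    traj.v_idx = idx
    print('Run {0} matches the filter'.format(traj.v_crun))
# Reset the trajectory view to all runs
traj.f_restore_default()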
def main():
    filename = os.path.join('hdf5', 'example_05.hdf5')
    env = Environment(trajectory='Example_05_Euler_Integration',
                      filename=filename,
                      file_title='Example_05_Euler_Integration',
                      overwrite_file=True,
                      comment='Go for Euler!')

    traj = env.trajectory
    trajectory_name = traj.v_name

    # 1st a) phase parameter addition
    add_parameters(traj)

    # 1st b) phase preparation
    # We will add the differential equation (well, its source code only) as a derived parameter
    traj.f_add_derived_parameter(FunctionParameter, 'diff_eq', diff_lorenz,
                                 comment='Source code of our equation!')

    # We want to explore some initial conditions
    traj.f_explore({'initial_conditions': [
        np.array([0.01, 0.01, 0.01]),
        np.array([2.02, 0.02, 0.02]),
        np.array([42.0, 4.2, 0.42])]})
    # 3 different conditions are enough for an illustrative example

    # 2nd phase let's run the experiment
    # We pass `euler_scheme` as our top-level simulation function and
    # the Lorenz equation 'diff_lorenz' as an additional argument
    env.run(euler_scheme, diff_lorenz)

    # We don't have a 3rd phase of post-processing here

    # 4th phase analysis.
    # I would recommend doing post-processing completely independently of the
    # simulation, but for simplicity let's do it here.

    # Let's assume that we start all over again and load the entire trajectory anew.
    # Yet, there is an error within this approach, do you spot it?
    del traj
    traj = Trajectory(filename=filename)

    # We will only fully load parameters and derived parameters.
    # Results will be loaded manually later on.
    try:
        # However, this will fail because our trajectory does not know how to
        # build the FunctionParameter. You have seen this coming, right?
        traj.f_load(name=trajectory_name, load_parameters=2,
                    load_derived_parameters=2, load_results=1)
    except ImportError as e:
        print('That didn\'t work, I am sorry: %s ' % str(e))

    # Ok, let's try again but this time with adding our parameter to the imports
    traj = Trajectory(filename=filename,
                      dynamically_imported_classes=FunctionParameter)

    # Now it works:
    traj.f_load(name=trajectory_name, load_parameters=2,
                load_derived_parameters=2, load_results=1)

    # For the fun of it, let's print the source code
    print('\n ---------- The source code of your function ---------- \n %s' % traj.diff_eq)

    # Let's get the exploration array:
    initial_conditions_exploration_array = traj.f_get('initial_conditions').f_get_range()
    # Now let's plot our simulated equations for the different initial conditions:
    # We will iterate through the run names
    for idx, run_name in enumerate(traj.f_get_run_names()):

        # Get the result of run idx from the trajectory
        euler_result = traj.results.f_get(run_name).euler_evolution
        # Now we manually need to load the result. Actually the results are not so large and we
        # could load them all at once. But for demonstration we do as if they were huge:
        traj.f_load_item(euler_result)
        euler_data = euler_result.data

        # Plot fancy 3d plot
        fig = plt.figure(idx)
        ax = fig.gca(projection='3d')
        x = euler_data[:, 0]
        y = euler_data[:, 1]
        z = euler_data[:, 2]
        ax.plot(x, y, z, label='Initial Conditions: %s' %
                str(initial_conditions_exploration_array[idx]))
        plt.legend()
        plt.show()

        # Now we free the data again (because we assume it's huuuuuuge):
        del euler_data
        euler_result.f_empty()

    # You have to click through the images to stop the example_05 module!

    # Finally disable logging and close all log-files
    env.disable_logging()
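# As in pypet's example modules, the entry point is typically protected so the
# script can be run directly (assumption: the imports and the helpers
# add_parameters, euler_scheme and diff_lorenz live in the same module).
if __name__ == '__main__':
    main()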
def main():
    """Main function to protect the *entry point* of the program."""

    # Load settings from file
    settings_file = 'pypet_settings.pkl'
    settings = load_obj(settings_file)
    # Print settings dictionary
    print('\nSettings dictionary:')
    for key, value in settings.items():
        print(key, ' : ', value)
    print('\nParameters to explore:')
    for key, value in settings.items():
        if isinstance(value, list):
            print(key, ' : ', value)

    # Create new folder to store results
    traj_dir = os.getcwd()
    # Read output path (if provided)
    if len(sys.argv) > 1:
        # Add trailing slash if missing
        dir_provided = os.path.join(sys.argv[1], '')
        # Check if provided directory exists
        if os.path.isdir(dir_provided):
            # Convert to full path
            traj_dir = os.path.abspath(dir_provided)
        else:
            print('WARNING: Output path not found, current directory will be used instead')
    else:
        print('WARNING: Output path not provided, current directory will be used instead')
    # Add time stamp (the final '' is to make sure there is a trailing slash)
    traj_dir = os.path.join(
        traj_dir,
        datetime.now().strftime("%Y_%m_%d_%Hh%Mm%Ss"),
        '')
    # Create directory with time stamp
    os.makedirs(traj_dir)
    # Change current directory to the one containing the trajectory files
    os.chdir(traj_dir)
    print('Trajectory and results will be stored in: {0}'.format(traj_dir))

    # Create new pypet Trajectory object
    traj_filename = 'traj.hdf5'
    traj_fullpath = os.path.join(traj_dir, traj_filename)
    traj = Trajectory(filename=traj_fullpath)

    # -------------------------------------------------------------------
    # Add config parameters (those that DO NOT influence the final result of the experiment)
    traj.f_add_config('debug', False, comment='Activate debug mode')
    #traj.f_add_config('max_mem_frac', 0.7, comment='Fraction of global GPU memory to use')

    # Set up trajectory parameters
    param_to_explore = {}
    for key, val in settings.items():
        if isinstance(val, list):
            param_to_explore[key] = val
            traj.f_add_parameter(key, val[0])
        else:
            traj.f_add_parameter(key, val)

    # Define parameter combinations to explore (a trajectory in
    # the parameter space). The second argument, the tuple, specifies the order
    # of the cartesian product.
    # The variable on the rightmost side changes fastest and defines the
    # 'inner for-loop' of the cartesian product.
    explore_dict = cartesian_product(
        param_to_explore,
        tuple(param_to_explore.keys()))
    print(explore_dict)
    traj.f_explore(explore_dict)

    # Store trajectory parameters to disk
    pypet_utils.print_traj_leaves(
        traj,
        'parameters',
        file=os.path.join(traj_dir, 'traj_parameters.txt'))

    # Store trajectory
    traj.f_store()

    # Define PBS script
    bash_lines = '\n'.join([
        '#! /bin/bash',
        '#PBS -P InfoDynFuncStruct',
        '#PBS -l select=1:ncpus=1:mem=1GB',
        #'#PBS -l select=1:ncpus=1:ngpus=1:mem=1GB',
        '#PBS -M [email protected]',
        '#PBS -m abe',
        'module load java',
        'module load python/3.5.1',
        'module load cuda/8.0.44',
        'source /project/RDS-FEI-InfoDynFuncStruct-RW/Leo/idtxl_env/bin/activate',
        'cd ${traj_dir}',
        'python ${python_script_path} ${traj_dir} ${traj_filename} ${file_prefix} $PBS_ARRAY_INDEX'])

    # Save PBS script file (automatically generated)
    bash_script_name = 'run_python_script.pbs'
    job_script_path = os.path.join(traj_dir, bash_script_name)
    with open(job_script_path, 'w', newline='\n') as bash_file:
        bash_file.writelines(bash_lines)

    # Run job array
    job_walltime_hours = 0
    job_walltime_minutes = 5
    #after_job_array_ends = 1573895
    job_settings = {
        'N': 'run_traj',
        'l': 'walltime={0}:{1}:00'.format(job_walltime_hours, job_walltime_minutes),
        #'W': 'depend=afteranyarray:{0}[]'.format(after_job_array_ends),
        'q': 'defaultQ'}
    if len(traj.f_get_run_names()) > 1:
        job_settings['J'] = '{0}-{1}'.format(0, len(traj.f_get_run_names()) - 1)
    job_args = {
        'python_script_path': '/project/RDS-FEI-InfoDynFuncStruct-RW/Leo/inference/hpc_pypet_single_run.py',
        'traj_dir': traj_dir,
        'traj_filename': traj_filename,
        'file_prefix': 'none'}
    run_job_array(job_script_path, job_settings, job_args)
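# Hedged sketch (assumption, NOT the project's actual run_job_array
# implementation, which is defined elsewhere in the repository): a helper with
# this signature would typically translate the settings dict into qsub flags
# (-N, -l, -q, -J, ...) and the args dict into '-v' environment variables
# before submitting the PBS job script.
import subprocess

def run_job_array_sketch(job_script_path, job_settings, job_args):
    cmd = ['qsub']
    for flag, value in job_settings.items():
        cmd += ['-{0}'.format(flag), value]
    # Pass script arguments as environment variables (used via ${...} in the PBS script)
    cmd += ['-v', ','.join('{0}={1}'.format(k, v) for k, v in job_args.items())]
    cmd.append(job_script_path)
    subprocess.run(cmd, check=True)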