batch_string = get_batch_string(test_list_n_node, job_time_min, module_Cname[architecture], n_node) # Loop on tests for count, current_run in enumerate(test_list_n_node): current_run.scale_n_cell(n_node) runtime_param_string = ' amr.n_cell=' + ' '.join(str(i) for i in current_run.n_cell) runtime_param_string += ' amr.max_grid_size=' + str(current_run.max_grid_size) runtime_param_string += ' amr.blocking_factor=' + str(current_run.blocking_factor) runtime_param_string += ' max_step=' + str( current_run.n_step ) # runtime_param_list.append( runtime_param_string ) run_string = get_run_string(current_run, architecture, n_node, count, bin_name, runtime_param_string) batch_string += run_string batch_string += 'rm -rf plotfiles lab_frame_data diags\n' submit_job_command = get_submit_job_command() # Run the simulations. run_batch_nnode(test_list_n_node, res_dir, cwd, bin_name, config_command, batch_string, submit_job_command) os.chdir(cwd) # submit batch for analysis if os.path.exists( 'read_error.txt' ): os.remove( 'read_error.txt' ) if os.path.exists( 'read_output.txt' ): os.remove( 'read_output.txt' ) process_analysis(args.automated, cwd, compiler, architecture, args.n_node_list, start_date, source_dir_base, res_dir_base) # read the output file from each test and store timers in # hdf5 file with pandas format # ------------------------------------------------------- for n_node in n_node_list: print(n_node) if browse_output_files:
[run_name, args.compiler, args.architecture, str(n_node)]) + '/'
# NOTE(review): the line above is the tail of a statement opened before this
# chunk — presumably `res_dir += '_'.join(` as at the bottom of this chunk;
# TODO confirm against the full file.
runtime_param_list = []
# Deep copy as we change the attribute n_cell of
# each instance of class test_element
test_list_n_node = copy.deepcopy(test_list)
# Loop on tests
for current_run in test_list_n_node:
    # Scale the problem size to the current node count, then record the
    # per-test runtime-parameter overrides for the batch runner.
    current_run.scale_n_cell(n_node)
    runtime_param_string = ' amr.n_cell=' + ' '.join( str(i) for i in current_run.n_cell)
    runtime_param_string += ' max_step=' + str(current_run.n_step)
    runtime_param_list.append(runtime_param_string)
# Run the simulations.
run_batch_nnode(test_list_n_node, res_dir, bin_name, config_command,
                architecture=args.architecture, Cname=module_Cname[args.architecture],
                n_node=n_node, runtime_param_list=runtime_param_list)
os.chdir(cwd)
# submit batch for analysis
process_analysis()
# read the output file from each test and store timers in
# hdf5 file with pandas format
# -------------------------------------------------------
for n_node in n_node_list:
    print(n_node)
    if browse_output_files:
        for count, current_run in enumerate(test_list):
            # Per-test results directory:
            # <res_dir_base><run_name>_<compiler>_<architecture>_<n_node>/
            res_dir = res_dir_base
            res_dir += '_'.join([run_name, args.compiler,
                                 args.architecture, str(n_node)]) + '/'