def mk_job_list():
    """Build two consecutive 2-processor Jobs covering 2011-08-26 00Z to +24h.

    The segment boundaries (+6h, then +18h more) match the restart
    frequencies configured earlier in this script.
    """
    start = datetime.datetime(2011, 8, 26, 0, 0)
    # Segment boundaries chosen to line up with the restart frequencies set above.
    boundaries = [
        start,
        start + datetime.timedelta(hours=6),
        start + datetime.timedelta(hours=24),
    ]
    return [
        wrfhydropy.Job(nproc=2, model_start_time=t0, model_end_time=t1)
        for t0, t1 in zip(boundaries[:-1], boundaries[1:])
    ]
def test_run_candidate(candidate_sim, output_dir, ncores):
    """Compose, run, and collect the candidate simulation; all jobs must exit 0."""
    print("\nQuestion: The candidate runs successfully?\n", end='')
    print('\n')

    # Prepare a dedicated run directory and make it the working directory.
    run_dir = output_dir / 'run_candidate'
    run_dir.mkdir(parents=True)
    os.chdir(str(run_dir))

    # Attach a single MPI job to the simulation.
    mpi_cmd = 'mpirun -np {0} ./wrf_hydro.exe'.format(str(ncores))
    candidate_sim.add(wrfhydropy.Job(job_id='run_candidate', exe_cmd=mpi_cmd))

    # compose() warns about missing start/end job times; those are expected here.
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        candidate_sim.compose()

    print('\nwaiting for job to complete...', end='')
    candidate_sim.run()

    # Collection must wait until the run's single job (job_id='test_job')
    # has actually finished.
    wait_job(candidate_sim)
    candidate_sim.collect()
    candidate_sim.pickle(run_dir.joinpath('WrfHydroSim_collected.pkl'))

    # Every job must have exited cleanly.
    for jb in candidate_sim.jobs:
        assert jb.exit_status == 0, \
            "Candidate code run exited with non-zero status"
def setup_wrf_hydro_ens_job(config, wrf_hydro_ens_sim):
    """Attach a restart Job to the ensemble simulation and return it.

    The job window starts at the (common) restart time encoded in the member
    namelists and advances by the configured number of hours. Raises
    ValueError when member restart times disagree.
    """
    job_cfg = config['run_experiment']['job_execution']

    # The exe name appears to be hard-coded in wrfhydropy to 'wrf_hydro.exe'.
    # If it were tracked, it could be brought in from there.
    hostname = subprocess.run(
        'hostname', stdout=subprocess.PIPE).stdout.rstrip().decode('utf-8')
    exe_cmd = job_cfg['wrf_hydro']['exe_cmd'].format(
        cmd='wrf_hydro.exe',
        hostname=hostname,
        nproc=job_cfg['wrf_hydro']['nproc'])

    # This is ugly, but there are no files on disk to query, which is the
    # other way of doing this.
    def get_nlist_date(member):
        # Restart time from the hydro namelist restart file name.
        hydro_rst = member.base_hydro_namelist['hydro_nlist']['restart_file']
        date_hydro = datetime.datetime.strptime(
            pathlib.Path(hydro_rst).name,
            'HYDRO_RST.%Y-%m-%d_%H:%M_DOMAIN1')
        # For forcing types other than 9/10, the HRLDAS restart must agree.
        forc_typ = member.base_hrldas_namelist['wrf_hydro_offline']['forc_typ']
        if forc_typ not in [9, 10]:
            hrldas_rst = member.base_hrldas_namelist[
                'noahlsm_offline']['restart_filename_requested']
            date_hrldas = datetime.datetime.strptime(
                pathlib.Path(hrldas_rst).name, 'RESTART.%Y%m%d%H_DOMAIN1')
            if date_hrldas != date_hydro:
                raise ValueError("Namelist restart times do not match.")
        return date_hydro

    member_dates = [get_nlist_date(mm) for mm in wrf_hydro_ens_sim.members]
    if not all(dd == member_dates[0] for dd in member_dates):
        raise ValueError("Ensemble members are not at the same times")

    start_time = member_dates[0]
    advance_hours = config['run_experiment']['time']['advance_model_hours']
    end_time = start_time + datetime.timedelta(hours=advance_hours)

    job = wrfhydropy.Job(
        exe_cmd=exe_cmd,
        job_id=job_cfg['scheduler']['job_name'],
        model_start_time=start_time,
        model_end_time=end_time,
        restart=True,
        entry_cmd=config['run_experiment']['perturb_forcing']['noise_cmd'],
        exit_cmd=None)
    wrf_hydro_ens_sim.add(job)
    return wrf_hydro_ens_sim
def test_run_candidate_nwm_output_sim(candidate_sim, candidate_nwm_output_sim,
                                      output_dir, ncores):
    """Run the candidate NWM-output configuration and require a clean exit.

    Skipped unless the model_config is an nwm config, or when the run
    directory already exists from a previous invocation.
    """
    if candidate_nwm_output_sim.model.model_config.lower().find('nwm') < 0:
        pytest.skip('NWM Output test only applicable to nwm configs')
    print(
        "\nQuestion: The candidate nwm ouput configuration runs successfully?\n",
        end='')
    print('\n')

    # Set run directory and change working directory to run dir for simulation
    run_dir = output_dir / 'nwm_output_candidate'
    if run_dir.exists():
        pytest.skip(
            'Candidate nwm output run exists, skipping nwm candidate output run.'
        )
    run_dir.mkdir(parents=True)
    os.chdir(str(run_dir))

    candidate_nwm_output_sim_copy = copy.deepcopy(candidate_nwm_output_sim)
    # Dont recompile the model, just use the candidate's model.
    candidate_nwm_output_sim_copy.model = copy.deepcopy(candidate_sim.model)

    # Job: hourly restarts and hourly output.
    exe_command = 'mpirun -np {0} ./wrf_hydro.exe'.format(str(ncores))
    job = wrfhydropy.Job(
        job_id='run_candidate',
        exe_cmd=exe_command,
        restart_freq_hr=1,
        output_freq_hr=1)
    candidate_nwm_output_sim_copy.add(job)

    # Run, catch warnings related to missing start and end job times
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        candidate_nwm_output_sim_copy.compose()

    print('\nwaiting for job to complete...', end='')
    candidate_nwm_output_sim_copy.run()

    # Wait to collect until job has finished. All test runs are performed on
    # a single job with job_id='test_job'
    wait_job(candidate_nwm_output_sim_copy)
    candidate_nwm_output_sim_copy.collect()
    candidate_nwm_output_sim_copy.pickle(
        run_dir.joinpath('WrfHydroSim_collected.pkl'))

    # Check job run statuses.
    # BUGFIX: previously iterated candidate_nwm_output_sim (the untouched
    # fixture) whose jobs never ran; inspect the copy that was actually
    # composed and run.
    for job in candidate_nwm_output_sim_copy.jobs:
        assert job.exit_status == 0, \
            "Candidate run exited with non-zero status"
def test_run_candidate_channel_only(candidate_sim, candidate_channel_only_sim,
                                    output_dir, ncores, capsys):
    """Run the candidate in channel-only mode, forced by the earlier
    run_candidate output; all jobs must exit 0."""
    if candidate_sim.model.model_config.lower().find('nwm') < 0:
        pytest.skip('Channel-only test only applicable to nwm_ana config')
    with capsys.disabled():
        print("\nQuestion: The candidate channel-only mode runs successfully?",
              end='')

    # Reuse the already-compiled candidate model rather than recompiling.
    candidate_channel_only_sim.model = candidate_sim.model

    # Fresh run directory, made the working directory for execution.
    run_dir = output_dir / 'run_candidate_channel_only'
    run_dir.mkdir(parents=True)
    os.chdir(str(run_dir))

    # Attach a single MPI job.
    mpi_cmd = 'mpirun -np {0} ./wrf_hydro.exe'.format(str(ncores))
    candidate_channel_only_sim.add(
        wrfhydropy.Job(job_id='run_candidate_channel_only', exe_cmd=mpi_cmd))

    # Force with the output of the previous run_candidate run.
    candidate_channel_only_sim.jobs[0]._hrldas_namelist['noahlsm_offline']['indir'] = \
        str(output_dir / 'run_candidate')

    # compose() warns about missing start/end job times; ignore those.
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        candidate_channel_only_sim.compose()

    with capsys.disabled():
        print('\nwaiting for job to complete...', end='')
    candidate_channel_only_sim.run()

    # Collection must wait for the run's single job (job_id='test_job')
    # to finish.
    wait_job(candidate_channel_only_sim)
    candidate_channel_only_sim.collect()
    candidate_channel_only_sim.pickle(
        run_dir.joinpath('WrfHydroSim_collected.pkl'))

    for jb in candidate_channel_only_sim.jobs:
        assert jb.exit_status == 0, \
            "Candidate channel-only code run exited with non-zero status"
def test_run_candidate(candidate_sim, output_dir, ncores, exe_cmd):
    """Compose and run a deep copy of the candidate sim; all jobs must exit 0.

    Skipped when the run directory already exists from an earlier invocation.
    """
    print("\nQuestion: The candidate runs successfully?\n", end='')
    print('\n')

    sim = copy.deepcopy(candidate_sim)

    # Fresh run directory; an existing one means this test already ran.
    run_dir = output_dir / 'run_candidate'
    if run_dir.exists():
        pytest.skip('Candidate run dir exists, skipping candidate run test')
    run_dir.mkdir(parents=True)
    os.chdir(str(run_dir))

    # Channel configs get hourly output; everything else daily.
    if 'channel' in candidate_sim.model.model_config:
        output_hours = 1
    else:
        output_hours = 24

    sim.add(wrfhydropy.Job(
        job_id='run_candidate',
        exe_cmd=exe_cmd.format(str(ncores)),
        restart_freq_hr=24,
        output_freq_hr=output_hours))

    # compose() warns about missing start/end job times; those are expected.
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        sim.compose()

    print('\nwaiting for job to complete...', end='')
    sim.run()

    # Collection must wait until the run's single job (job_id='test_job')
    # has finished.
    wait_job(sim)
    sim.collect()
    sim.pickle(run_dir.joinpath('WrfHydroSim_collected.pkl'))

    for jb in sim.jobs:
        assert jb.exit_status == 0, \
            "Candidate code run exited with non-zero status"
# NOTE(review): this chunk begins mid-statement — the lines below close a
# model-construction call (presumably wrfhydropy.Model) whose opening is
# above this view; confirm against the full file.
model_config=configuration, compiler='ifort')
model_pkl = compile_dir / 'WrfHydroModel.pkl'
# Compile only when no previously compiled model pickle exists; otherwise
# reuse the pickled model.
if not model_pkl.exists():
    model.compile(compile_dir)
else:
    model = pickle.load(model_pkl.open('rb'))

# ### Job
# Two-hour run starting 2018-08-01 00Z with hourly output and hourly restarts.
model_start_time = datetime.datetime(2018, 8, 1, 0)
model_end_time = model_start_time + datetime.timedelta(hours=2)
job = wrfhydropy.Job(job_id='flo_sim',
                     model_start_time=model_start_time,
                     model_end_time=model_end_time,
                     output_freq_hr=1,
                     restart_freq_hr=1,
                     exe_cmd='mpirun -np 1 ./wrf_hydro.exe')

# ### Simulation
# Assemble domain + model + job into a single simulation.
sim = wrfhydropy.Simulation()
sim.add(domain)
sim.add(model)
sim.add(job)

# ### Ensemble
# Wrap the simulation in an ensemble; the job is also added at the
# ensemble level.
ens = wrfhydropy.EnsembleSimulation()
ens.add(sim)
ens.add(job)
# Choose the scheduler: docker runs get none; otherwise a wrfhydropy
# Scheduler is built from the candidate/user/machine specs unless no queue
# is requested.
if machine_name == 'docker':
    default_scheduler = None
else:
    # The queue may be absent or the literal string 'None' in the spec file.
    if candidate_spec['queue'] is None or candidate_spec['queue'] == 'None':
        default_scheduler = None
    else:
        default_scheduler = wrfhydropy.Scheduler(
            job_name='default',
            account=user_spec['PBS']['account'],
            walltime=candidate_spec['wall_time'],
            queue=candidate_spec['queue'],
            nproc=candidate_spec['n_cores']['default'],
            ppn=machine_spec[machine_name]['cores_per_node']).__dict__
        # Serialized to a JSON string — presumably to pass through the
        # pytest command line below; confirm against the consumer.
        default_scheduler = json.dumps(default_scheduler)

# Two job templates serialized as JSON: the default core count and the
# (test) core count used by the n_cores comparison test.
job_default = wrfhydropy.Job(nproc=candidate_spec['n_cores']['default'])
job_ncores = copy.deepcopy(job_default)
job_ncores.nproc = candidate_spec['n_cores']['test']
job_default = json.dumps(job_default.__dict__)
job_ncores = json.dumps(job_ncores.__dict__)

log.debug('')

# ###################################
log.info(horiz_bar)
log.info("Calling pytest:")
# NOTE(review): this chunk ends mid-list — the pytest argument list
# continues below this view.
pytest_cmd = [
    #'--pdb', # for debugging the tests. May make this an option in the future...
    '-v',
    '--color', 'yes',
def test_ncores_candidate(output_dir):
    """Re-run the candidate on one fewer processor and diff the outputs
    against the collected ncores run; they must be identical."""
    print(
        "\nQuestion: The candidate outputs from a ncores run match outputs from"
        " ncores-1 run?\n",
        end='')
    print('\n')

    candidate_sim_file = output_dir / 'run_candidate' / 'WrfHydroSim.pkl'
    candidate_collected_file = \
        output_dir / 'run_candidate' / 'WrfHydroSim_collected.pkl'
    if candidate_collected_file.is_file() is False:
        pytest.skip('Candidate run object not found, skipping test.')

    # Load initial sim object, collected sim object, and copy for the new run.
    candidate_sim = pickle.load(candidate_sim_file.open(mode="rb"))
    candidate_sim_expected = pickle.load(
        candidate_collected_file.open(mode="rb"))
    candidate_sim_ncores = copy.deepcopy(candidate_sim)

    # Set run directory
    run_dir = output_dir.joinpath('ncores_candidate')
    run_dir.mkdir(parents=True)
    os.chdir(str(run_dir))

    # Make a new job based on the old job but with a new job ID
    old_job = candidate_sim.jobs[0]
    new_job = wrfhydropy.Job(
        job_id='ncores_candidate',
        exe_cmd=old_job._exe_cmd,
        restart_freq_hr=24,
        output_freq_hr=24)

    # Remove old job and add new job
    candidate_sim_ncores.jobs.pop(0)
    candidate_sim_ncores.add(new_job)

    # Edit the sim object number of cores
    if candidate_sim_ncores.scheduler is not None:
        candidate_sim_ncores.scheduler.nproc = \
            candidate_sim_ncores.scheduler.nproc - 1
    else:
        # BUGFIX: the modified command was previously assigned only to a
        # local variable and never written back, so the "ncores-1" run
        # silently still used ncores.
        candidate_sim_ncores.jobs[0]._exe_cmd = \
            candidate_sim_ncores.jobs[0]._exe_cmd.replace('-np 2', '-np 1')

    # Recompose into new directory and run;
    # catch warnings related to missing start and end job times
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        candidate_sim_ncores.compose(force=True)

    print('\nwaiting for job to complete...', end='')
    candidate_sim_ncores.run()

    # Wait to collect until job has finished. All test runs are performed on
    # a single job with job_id='test_job'
    wait_job(candidate_sim_ncores)
    candidate_sim_ncores.collect()
    candidate_sim_ncores.pickle(run_dir.joinpath('WrfHydroSim_collected.pkl'))

    # Check outputs
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        diffs = wrfhydropy.outputdiffs.OutputDataDiffs(
            candidate_sim_ncores.output,
            candidate_sim_expected.output,
            exclude_vars=EXCLUDE_VARS)

    # Assert all diff values are 0 and print diff stats if not
    has_diffs = any(value != 0 for value in diffs.diff_counts.values())
    if has_diffs:
        print_diffs(diffs)
    assert has_diffs is False, \
        'Outputs for candidate run with ncores do not match outputs with ncores-1'
def test_run_candidate_channel_only(candidate_sim, candidate_channel_only_sim,
                                    output_dir, ncores):
    """Run the candidate with hourly output, then run channel-only mode
    forced by that output; both runs must exit cleanly."""
    if candidate_sim.model.model_config.lower().find('nwm') < 0:
        pytest.skip('Channel-only test only applicable to nwm_ana config')
    print("\nQuestion: The candidate channel-only mode runs successfully?\n",
          end='')
    print('\n')

    candidate_sim_copy = copy.deepcopy(candidate_sim)
    candidate_sim_copy.base_hydro_namelist['hydro_nlist'][
        'output_channelbucket_influx'] = 2
    candidate_channel_only_sim_copy = copy.deepcopy(candidate_channel_only_sim)
    candidate_channel_only_sim_copy. \
        base_hydro_namelist['hydro_nlist']['output_channelbucket_influx'] = 2

    ##################
    # re-run candidate at shorter duration since requires hourly outputs

    # Set run directory and change working directory to run dir for simulation
    run_dir = output_dir / 'candidate_run_output_for_channel_only'
    run_dir.mkdir(parents=True)
    os.chdir(str(run_dir))

    # Job
    exe_command = 'mpirun -np {0} ./wrf_hydro.exe'.format(str(ncores))
    job = wrfhydropy.Job(job_id='run_candidate',
                         exe_cmd=exe_command,
                         restart_freq_hr=24,
                         output_freq_hr=1)
    candidate_sim_copy.add(job)

    # Restrict the window to 24 hours with 6-hourly restarts.
    start_time, end_time = candidate_sim_copy.jobs[
        0]._solve_model_start_end_times()
    candidate_sim_copy.jobs[0].model_start_time = start_time
    candidate_sim_copy.jobs[0].model_end_time = start_time + dt.timedelta(
        hours=24)
    candidate_sim_copy.jobs[0].restart_freq_hr = 6

    # Run, catch warnings related to missing start and end job times
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        candidate_sim_copy.compose()

    print('\nwaiting for job to complete...', end='')
    candidate_sim_copy.run()

    # Wait to collect until job has finished. All test runs are performed on
    # a single job with job_id='test_job'
    wait_job(candidate_sim_copy)
    candidate_sim_copy.collect()
    candidate_sim_copy.pickle(run_dir.joinpath('WrfHydroSim_collected.pkl'))

    # Check job run statuses.
    # BUGFIX: previously iterated candidate_sim (the untouched fixture) whose
    # jobs never ran; check the copy that was actually composed and run.
    for job in candidate_sim_copy.jobs:
        assert job.exit_status == 0, \
            "Candidate code run exited with non-zero status"

    #########################
    # Run channel only

    # Dont recompile the model, just use the candidate's model.
    candidate_channel_only_sim_copy.model = copy.deepcopy(candidate_sim.model)

    # Set run directory and go for execution.
    run_dir = output_dir / 'run_candidate_channel_only'
    run_dir.mkdir(parents=True)
    os.chdir(str(run_dir))

    # Set the forcing directory to the hourly output generated above.
    candidate_channel_only_sim_copy.base_hrldas_namelist['noahlsm_offline']['indir'] = \
        str(output_dir / 'candidate_run_output_for_channel_only')

    # Job
    exe_command = 'mpirun -np {0} ./wrf_hydro.exe'.format(str(ncores))
    job = wrfhydropy.Job(job_id='run_candidate_channel_only',
                         exe_cmd=exe_command,
                         restart_freq_hr=24,
                         output_freq_hr=1)
    candidate_channel_only_sim_copy.add(job)

    # Same 24-hour window with 6-hourly restarts.
    start_time, end_time = candidate_channel_only_sim_copy.jobs[
        0]._solve_model_start_end_times()
    candidate_channel_only_sim_copy.jobs[0].model_start_time = start_time
    candidate_channel_only_sim_copy.jobs[
        0].model_end_time = start_time + dt.timedelta(hours=24)
    candidate_channel_only_sim_copy.jobs[0].restart_freq_hr = 6

    # Run
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        candidate_channel_only_sim_copy.compose()

    print('\nwaiting for job to complete...', end='')
    candidate_channel_only_sim_copy.run()

    # Wait to collect until job has finished. All test runs are performed on
    # a single job with job_id='test_job'
    wait_job(candidate_channel_only_sim_copy)
    candidate_channel_only_sim_copy.collect()
    candidate_channel_only_sim_copy.pickle(
        run_dir.joinpath('WrfHydroSim_collected.pkl'))

    # Check job run statuses
    for job in candidate_channel_only_sim_copy.jobs:
        assert job.exit_status == 0, \
            "Candidate channel-only code run exited with non-zero status"
def test_ncores_candidate_channel_only(output_dir, ncores, exe_cmd,
                                       xrcmp_n_cores):
    """Channel-only candidate: an ncores run and an ncores-1 run must
    produce identical outputs (excluding EXCLUDE_VARS)."""
    sim_pkl = \
        output_dir / 'channel_only_candidate_run' / 'WrfHydroSim.pkl'
    collected_pkl = \
        output_dir / 'channel_only_candidate_run' / 'WrfHydroSim_collected.pkl'
    if collected_pkl.is_file() is False:
        pytest.skip(
            'candidate_channel_only collected run object not found, skipping test.'
        )
    print(
        "\nQuestion: The candidate_channel-only output files from an ncores runmatch those "
        "from an ncores-1 run?\n",
        end='')
    print('\n')

    expected_sim = pickle.load(collected_pkl.open("rb"))

    run_dir = output_dir / 'channel_only_candidate_ncores'
    if not run_dir.exists():
        run_dir.mkdir(parents=True)
        os.chdir(str(run_dir))

        base_sim = pickle.load(sim_pkl.open("rb"))
        ncores_sim = copy.deepcopy(base_sim)
        ncores_sim.base_hydro_namelist['hydro_nlist'][
            'output_channelbucket_influx'] = 2

        # Clone the original job under a new id, keeping its time window.
        prev_job = base_sim.jobs[0]
        replacement_job = wrfhydropy.Job(
            job_id='ncores_candidate',
            model_start_time=prev_job._model_start_time,
            model_end_time=prev_job._model_end_time,
            exe_cmd=prev_job._exe_cmd,
            restart_freq_hr=6,
            output_freq_hr=1)
        ncores_sim.jobs.pop(0)
        ncores_sim.add(replacement_job)

        # Drop the processor count by one: on the scheduler if present,
        # otherwise directly in the job's exe command.
        if ncores_sim.scheduler is not None:
            ncores_sim.scheduler.nproc = ncores_sim.scheduler.nproc - 1
        else:
            ncores_sim.jobs[0]._exe_cmd = exe_cmd.format(str(int(ncores) - 1))

        # Recompose into the new directory; compose() warns about missing
        # start/end job times — ignore those.
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            ncores_sim.compose(force=True)

        print('\nwaiting for job to complete...', end='')
        ncores_sim.run()
        wait_job(ncores_sim)
        ncores_sim.collect()
        ncores_sim.pickle(run_dir.joinpath('WrfHydroSim_collected.pkl'))
    else:
        print('Candidate channel-only n_cores run dir exists, '
              'skipping n_cores candidate channel-only run...')
        ncores_sim = pickle.load(
            open(run_dir.joinpath('WrfHydroSim_collected.pkl'), 'rb'))

    for job in ncores_sim.jobs:
        assert job.exit_status == 0, \
            "Candidate channel-only ncores run exited with non-zero status"

    # Compare outputs variable-by-variable.
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        diffs = wrfhydropy.outputdiffs.OutputDataDiffs(
            ncores_sim.output,
            expected_sim.output,
            exclude_vars=EXCLUDE_VARS,
            xrcmp_n_cores=xrcmp_n_cores)

    # All diff counts must be zero; print details when they are not.
    has_diffs = any(count != 0 for count in diffs.diff_counts.values())
    if has_diffs:
        print_diffs(diffs)
    assert has_diffs is False, \
        'Outputs for candidate_channel_only run with ncores do not match outputs with ncores-1'
def setup_wrf_hydro(config):
    """Build and return a wrfhydropy.Simulation (domain + model) from `config`.

    When config['wrf_hydro']['use_existing_build'] is set, an existing
    compiled-model pickle is reused (ValueError if it is missing);
    otherwise the model is compiled from source with any configured
    compile-option patches applied.
    """
    print('wrfhydropy.Domain object.')
    wrf_hydro_domain = wrfhydropy.Domain(
        domain_top_dir=config['wrf_hydro']['domain_src'],
        domain_config=config['wrf_hydro']['model_config']
        #model_version=config['wrf_hydro']['domain_version']
    )

    print('wrfhydropy.Model object.')
    wrf_hydro_build_dir = config['experiment']['experiment_dir'] / config[
        'wrf_hydro']['build_dir']
    wrf_hydro_model_pickle = wrf_hydro_build_dir / 'WrfHydroModel.pkl'
    if config['wrf_hydro']['use_existing_build']:
        if wrf_hydro_model_pickle.exists():
            print('WrfHydroModel object: using existing pickle.')
            wrf_hydro_model = pickle.load(open(wrf_hydro_model_pickle, 'rb'))
        else:
            raise ValueError(
                'Existing WRF_HYDRO model requested but no pickle file found')
    else:
        # TODO(JLM): If fork specified, pull fork. Check out desired commit.
        # TODO(JLM): clone to build_dir? or make a clone_dir in this case?
        # If not fork, only use local repo state.
        wrf_hydro_model = wrfhydropy.Model(
            source_dir=config['wrf_hydro']['wrf_hydro_src'] / 'trunk/NDHMS/',
            model_config=config['wrf_hydro']['model_config'],
            compiler=config['wrf_hydro']['compiler'],
            hydro_namelist_config_file=config['wrf_hydro']
            ['hydro_namelist_config_file'],
            hrldas_namelist_config_file=config['wrf_hydro']
            ['hrldas_namelist_config_file'],
            compile_options_config_file=config['wrf_hydro']
            ['compile_options_config_file'])

        # Apply compile option patches.
        if config['wrf_hydro']['compile_options'] is not None:
            wrf_hydro_model.compile_options.update(
                config['wrf_hydro']['compile_options'])

        wrf_hydro_model.compile(
            compile_dir=config['experiment']['experiment_dir'] /
            config['wrf_hydro']['build_dir'])

    print('wrfhydropy.Job object')
    # NOTE(review): this Job is constructed but never added to the Simulation
    # below — it appears unused here; confirm whether the caller attaches a
    # job later or whether `wrf_hydro_sim.add(job)` is missing.
    job = wrfhydropy.Job(exe_cmd='mpirun -np 4 ./wrf_hydro.exe',
                         job_id='nwm_spinup',
                         restart=False)

    print('wrfhydropy.Simulation object.')
    wrf_hydro_sim = wrfhydropy.Simulation()
    wrf_hydro_sim.add(wrf_hydro_model)
    wrf_hydro_sim.add(wrf_hydro_domain)

    # These were deep copied (by Simulation.add), delete the originals.
    del wrf_hydro_model, wrf_hydro_domain
    return wrf_hydro_sim
# NOTE(review): this chunk begins mid-call — the lines below close a
# constructor (presumably the forecast/cycle object `fcst`) begun above
# this view.
ncores=n_cores
)

# Add ens
fcst.add(ens)

# Add Job: an 18-hour forecast window starting from the first init time.
model_start_time = init_times[0]
model_end_time = init_times[0] + datetime.timedelta(hours=18)
#exe_cmd = 'mpirun -np {0} ./wrf_hydro.exe' # for a scheduler
exe_cmd = 'mpirun -np 1 ./wrf_hydro.exe'
job_name = 'ensfcst'
job = wrfhydropy.Job(
    exe_cmd=exe_cmd,
    job_id=job_name,
    restart=True,
    model_start_time=model_start_time,
    model_end_time=model_end_time
)
fcst.add(job)

# Write to disk: compose the forecast in a scratch directory and run it
# with n_cores concurrent members.
fcst_dir = pathlib.Path('/glade/scratch/jamesmcc/ens_fcst_example/')
if not fcst_dir.exists():
    fcst_dir.mkdir()
os.chdir(str(fcst_dir))
fcst.compose()
#fcst.pickle('WrfHydroCycle.pkl')
fcst.run(n_concurrent=n_cores)
def test_ncores_candidate_channel_only(output_dir):
    """Re-run the channel-only candidate on one fewer core and diff outputs
    against the collected run; they must be identical."""
    candidate_channel_only_sim_file = \
        output_dir / 'run_candidate_channel_only' / 'WrfHydroSim.pkl'
    candidate_channel_only_collected_file = \
        output_dir / 'run_candidate_channel_only' / 'WrfHydroSim_collected.pkl'
    if candidate_channel_only_collected_file.is_file() is False:
        pytest.skip(
            'candidate_channel_only collected run object not found, skipping test.'
        )
    print(
        "\nQuestion: The candidate_channel-only output files from an ncores runmatch those "
        "from an ncores-1 run?\n",
        end='')
    print('\n')

    candidate_channel_only_sim = \
        pickle.load(candidate_channel_only_sim_file.open("rb"))
    candidate_channel_only_sim_expected = \
        pickle.load(candidate_channel_only_collected_file.open("rb"))
    candidate_channel_only_sim_ncores = copy.deepcopy(
        candidate_channel_only_sim)

    run_dir = output_dir / 'ncores_candidate_channel_only'
    run_dir.mkdir(parents=True)
    os.chdir(str(run_dir))

    # New job with the same command but a new id.
    old_job = candidate_channel_only_sim.jobs[0]
    new_job = wrfhydropy.Job(job_id='ncores_candidate',
                             exe_cmd=old_job._exe_cmd)

    # Remove old job and add new job
    candidate_channel_only_sim_ncores.jobs.pop(0)
    candidate_channel_only_sim_ncores.add(new_job)

    # Edit the sim object number of cores
    if candidate_channel_only_sim_ncores.scheduler is not None:
        candidate_channel_only_sim_ncores.scheduler.nproc = \
            candidate_channel_only_sim_ncores.scheduler.nproc - 1
    else:
        # BUGFIX: the modified command was previously assigned only to a
        # local variable and never written back, so the "ncores-1" run
        # silently still used the original core count.
        candidate_channel_only_sim_ncores.jobs[0]._exe_cmd = \
            candidate_channel_only_sim_ncores.jobs[0]._exe_cmd.replace(
                '-np 2', '-np 1')

    # Recompose into new directory and run;
    # catch warnings related to missing start and end job times
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        candidate_channel_only_sim_ncores.compose(force=True)

    print('\nwaiting for job to complete...', end='')
    candidate_channel_only_sim_ncores.run()

    # Wait to collect until job has finished. All test runs are performed on
    # a single job with job_id='test_job'
    wait_job(candidate_channel_only_sim_ncores)
    candidate_channel_only_sim_ncores.collect()
    candidate_channel_only_sim_ncores.pickle(
        run_dir.joinpath('WrfHydroSim_collected.pkl'))

    # Check outputs
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        diffs = wrfhydropy.outputdiffs.OutputDataDiffs(
            candidate_channel_only_sim_ncores.output,
            candidate_channel_only_sim_expected.output)

    # Assert all diff values are 0 and print diff stats if not
    has_diffs = any(value != 0 for value in diffs.diff_counts.values())
    if has_diffs:
        eprint(diffs.diff_counts)
        for key, value in diffs.diff_counts.items():
            if value != 0:
                # BUGFIX: do not rebind `diffs` while reporting — doing so
                # made a second nonzero category raise AttributeError.
                category_diffs = getattr(diffs, key)
                eprint('\n' + key + '\n')
                for diff in category_diffs:
                    eprint(diff)
    assert has_diffs is False, \
        'Outputs for candidate_channel_only run with ncores do not match outputs with ncores-1'