def test_run_reference(reference_setup, output_dir, job_default, scheduler, capsys):
    with capsys.disabled():
        print("\nQuestion: Does the reference run complete successfully?", end='')

    # Set run directory
    run_dir = output_dir / 'run_reference'

    reference_job = job_default
    reference_job.scheduler = scheduler

    # Run
    reference_run = wrfhydropy.WrfHydroRun(
        wrf_hydro_setup=reference_setup,
        run_dir=run_dir,
        jobs=reference_job)
    check_run_dir = reference_run.run_jobs()

    if scheduler is not None:
        # This function waits for the scheduled run to complete.
        reference_run = wrfhydropy.job_tools.restore_completed_scheduled_job(
            check_run_dir)

    # Check subprocess and model run status
    assert reference_run.jobs_completed[0].exit_status == 0, \
        "Reference code run exited with non-zero status"
    assert reference_run.jobs_completed[0].job_status == 'completed success', \
        "Reference code run did not complete"
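
# NOTE: The fixtures consumed above and below (reference_setup, candidate_setup,
# output_dir, job_default, job_ncores, scheduler) are defined outside this
# section, presumably in a conftest.py. A minimal, purely illustrative sketch of
# how an output_dir fixture could be built from standard pytest APIs; this is an
# assumption about the suite's setup, not its actual definition, so it is left
# commented out:
#
# @pytest.fixture(scope='session')
# def output_dir(tmp_path_factory):
#     # A session-scoped directory lets later tests load the pickled
#     # WrfHydroRun objects written by earlier tests.
#     return tmp_path_factory.mktemp('test_output')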
def test_ncores_candidate(candidate_setup, output_dir, job_ncores, scheduler, capsys):
    with capsys.disabled():
        print(
            "\nQuestion: Do candidate restarts from a 1-core run match restarts from the standard run?",
            end='')

    candidate_run_file = output_dir / 'run_candidate' / 'WrfHydroRun.pkl'
    if not candidate_run_file.is_file():
        pytest.skip('Candidate run object not found, skipping test.')

    # Load initial run model object
    candidate_run_expected = pickle.load(open(candidate_run_file, "rb"))

    # Set run directory
    run_dir = output_dir / 'ncores_candidate'

    candidate_ncores_job = job_ncores
    candidate_ncores_job.scheduler = scheduler

    # Run
    candidate_ncores_run = wrfhydropy.WrfHydroRun(
        wrf_hydro_setup=candidate_setup,
        run_dir=run_dir,
        jobs=candidate_ncores_job)
    check_run_dir = candidate_ncores_run.run_jobs()

    if scheduler is not None:
        candidate_ncores_run = wrfhydropy.job_tools.restore_completed_scheduled_job(
            check_run_dir)

    # Check against initial run
    ncores_restart_diffs = wrfhydropy.RestartDiffs(
        candidate_ncores_run,
        candidate_run_expected)

    ## Check hydro restarts
    for diff in ncores_restart_diffs.hydro:
        if diff is not None:
            with capsys.disabled():
                print(diff)
        assert diff is None, \
            "Candidate hydro restart files do not match when run with a different number of cores"

    ## Check lsm restarts
    for diff in ncores_restart_diffs.lsm:
        if diff is not None:
            with capsys.disabled():
                print(diff)
        assert diff is None, \
            "Candidate lsm restart files do not match when run with a different number of cores"

    ## Check nudging restarts
    for diff in ncores_restart_diffs.nudging:
        if diff is not None:
            with capsys.disabled():
                print(diff)
        assert diff is None, \
            "Candidate nudging restart files do not match when run with a different number of cores"
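
# The three restart-diff loops above (and their twins in test_perfrestart_candidate
# below) repeat one pattern. A hypothetical helper, not part of wrfhydropy, that
# could factor it out; it assumes each RestartDiffs attribute is a list whose
# entries are None when the corresponding restart files match:
def _assert_no_restart_diffs(diffs, message, capsys):
    """Print any non-None diff with output capture disabled, then fail with message."""
    for diff in diffs:
        if diff is not None:
            with capsys.disabled():
                print(diff)
        assert diff is None, message

# Example usage, equivalent to the hydro loop above:
#     _assert_no_restart_diffs(
#         ncores_restart_diffs.hydro,
#         "Candidate hydro restart files do not match when run with a different number of cores",
#         capsys)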
def test_run_candidate_channel_only(candidate_setup,
                                    candidate_channel_only_setup,
                                    output_dir,
                                    job_default,
                                    scheduler,
                                    capsys):
    if candidate_channel_only_setup is None:
        pytest.skip("unsupported configuration")

    with capsys.disabled():
        print("\nQuestion: Does the candidate channel-only mode run successfully?", end='')

    # Don't recompile the model, just use the candidate's compiled model.
    candidate_channel_only_setup.model = candidate_setup.model

    # Set the forcing directory to the candidate run's output.
    candidate_channel_only_setup.namelist_hrldas['noahlsm_offline']['indir'] = \
        str(output_dir / 'run_candidate')

    # Set run directory
    run_dir = output_dir / 'run_candidate_channel_only'

    candidate_channel_only_job = job_default
    candidate_channel_only_job.scheduler = scheduler

    # Run
    candidate_channel_only_run = wrfhydropy.WrfHydroRun(
        wrf_hydro_setup=candidate_channel_only_setup,
        run_dir=run_dir,
        jobs=candidate_channel_only_job)
    check_run_dir = candidate_channel_only_run.run_jobs()

    if scheduler is not None:
        # This function waits for the completed run.
        candidate_channel_only_run = \
            wrfhydropy.job_tools.restore_completed_scheduled_job(check_run_dir)

    # Check subprocess and model run status
    assert candidate_channel_only_run.jobs_completed[0].exit_status == 0, \
        "Candidate code run exited with non-zero status"
    assert candidate_channel_only_run.jobs_completed[0].job_status == 'completed success', \
        "Candidate code run did not complete"
def test_perfrestart_candidate(candidate_setup, output_dir, job_default, scheduler, capsys):
    with capsys.disabled():
        print(
            "\nQuestion: Do candidate restarts from a restart run match the restarts from the standard run?",
            end='')

    candidate_run_file = output_dir / 'run_candidate' / 'WrfHydroRun.pkl'
    if not candidate_run_file.is_file():
        pytest.skip('Candidate run object not found, skipping test.')

    # Load initial run model object
    candidate_run_expected = pickle.load(open(candidate_run_file, "rb"))

    # Make a deep copy since namelist options are changed below.
    perfrestart_setup = copy.deepcopy(candidate_setup)

    # Set run directory
    run_dir = output_dir / 'restart_candidate'

    # Establish the job (run after setting external files).
    candidate_perfrestart_job = job_default
    # TODO(JLM): edit scheduler names
    candidate_perfrestart_job.scheduler = scheduler

    # The jobs are added after determining the restart time.
    candidate_perfrestart_run = wrfhydropy.WrfHydroRun(
        wrf_hydro_setup=perfrestart_setup,
        run_dir=run_dir,
        mode='r')

    # Symlink restart files into the new run directory and update the namelists.
    # Hydro
    hydro_rst = candidate_run_expected.restart_hydro[0]
    new_hydro_rst_path = run_dir.joinpath(hydro_rst.name)
    new_hydro_rst_path.unlink()
    new_hydro_rst_path.symlink_to(hydro_rst)
    perfrestart_setup.hydro_namelist['hydro_nlist'].update(
        {'restart_file': str(new_hydro_rst_path)})

    # LSM
    lsm_rst = candidate_run_expected.restart_lsm[0]
    new_lsm_rst_path = run_dir.joinpath(lsm_rst.name)
    new_lsm_rst_path.unlink()
    new_lsm_rst_path.symlink_to(lsm_rst)
    perfrestart_setup.namelist_hrldas['noahlsm_offline'].update(
        {'restart_filename_requested': str(new_lsm_rst_path)})

    # Nudging
    if candidate_run_expected.restart_nudging is not None and \
            len(candidate_run_expected.restart_nudging) > 0:
        nudging_rst = candidate_run_expected.restart_nudging[0]
        new_nudging_rst_path = run_dir.joinpath(nudging_rst.name)
        new_nudging_rst_path.unlink()
        new_nudging_rst_path.symlink_to(nudging_rst)
        perfrestart_setup.hydro_namelist['nudging_nlist'].update(
            {'nudginglastobsfile': str(new_nudging_rst_path)})

    # Set up the restart times in the job.
    orig_start_time, orig_end_time = wrfhydropy.job_tools.solve_model_start_end_times(
        None, None, candidate_perfrestart_run.setup)

    restart_dt = hydro_rst.open()
    restart_time = dt.datetime.strptime(restart_dt.Restart_Time, '%Y-%m-%d_%H:%M:%S')

    candidate_perfrestart_job.model_start_time = restart_time
    candidate_perfrestart_job.model_end_time = orig_end_time

    # Run
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        candidate_perfrestart_run.add_jobs(candidate_perfrestart_job)
        check_run_dir = candidate_perfrestart_run.run_jobs()
        if scheduler is not None:
            candidate_perfrestart_run = \
                wrfhydropy.job_tools.restore_completed_scheduled_job(check_run_dir)

    # Check against initial run
    perfrestart_restart_diffs = wrfhydropy.RestartDiffs(
        candidate_perfrestart_run,
        candidate_run_expected)

    ## Check hydro restarts
    for diff in perfrestart_restart_diffs.hydro:
        if diff is not None:
            with capsys.disabled():
                print(diff)
        assert diff is None, \
            "Candidate hydro restart files do not match when starting from a restart"

    ## Check lsm restarts
    for diff in perfrestart_restart_diffs.lsm:
        if diff is not None:
            with capsys.disabled():
                print(diff)
        assert diff is None, \
            "Candidate lsm restart files do not match when starting from a restart"

    ## Check nudging restarts
    for diff in perfrestart_restart_diffs.nudging:
        if diff is not None:
            with capsys.disabled():
                print(diff)
        assert diff is None, \
            "Candidate nudging restart files do not match when starting from a restart"
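
# The perfect-restart test above assumes the hydro restart file's Restart_Time
# global attribute uses a 'YYYY-MM-DD_HH:MM:SS' layout. A small illustrative
# helper making that parse explicit; the helper is hypothetical (not used above)
# and the example timestamp in the docstring is made up:
def _parse_restart_time(restart_time_str):
    """Parse a WRF-Hydro Restart_Time string such as '2011-08-26_06:00:00'."""
    return dt.datetime.strptime(restart_time_str, '%Y-%m-%d_%H:%M:%S')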
def test_run_nwm(tmp_data_dir, capsys):
    with capsys.disabled():
        print(
            "\nQuestion: Is the WrfHydroSetup object able to run the NWM public code? ",
            end=''
        )

    # Set directory paths
    expected_dir = tmp_data_dir / 'data' / 'expected'
    compile_dir = tmp_data_dir / 'data' / 'wrf_hydro_nwm_public' / 'compiled'
    domain_top_dir = tmp_data_dir / 'data' / 'domain'
    run_dir = tmp_data_dir / 'data' / 'wrf_hydro_nwm_public' / 'run'

    # Load expected objects
    setup_object_expected = pickle.load(open(expected_dir / 'test_setup_nwm.pkl', "rb"))
    run_objects_expected = pickle.load(open(expected_dir / 'test_run_nwm.pkl', "rb"))
    job_list_expected = pickle.load(open(expected_dir / 'test_job_list_nwm.pkl', "rb"))

    # Load previous test artifacts
    model_object_postcompile = pickle.load(open(compile_dir / 'WrfHydroModel.pkl', 'rb'))

    # Set up a setup object
    domain_object = wrfhydropy.WrfHydroDomain(
        domain_top_dir=domain_top_dir,
        domain_config='NWM',
        model_version='v1.2.1'
    )
    # compare the domain object?
    setup_object = wrfhydropy.WrfHydroSetup(model_object_postcompile, domain_object)
    setup_object.namelist_hrldas['noahlsm_offline']['restart_frequency_hours'] = 6
    setup_object.hydro_namelist['hydro_nlist']['rst_dt'] = 360
    # assert setup_object == setup_object_expected  # TODO(JLM): why does this not check?

    job_list = mk_job_list()
    # assert job_list == job_list_expected  # TODO(JLM): why does this not check?

    # Run the setup
    run_object = wrfhydropy.WrfHydroRun(
        setup_object,
        run_dir=run_dir,
        rm_existing_run_dir=True,
        jobs=job_list
    )

    prerun_diffs = deepdiff.DeepDiff(
        run_objects_expected['run_object_prerun'],
        run_object
    )
    # with capsys.disabled():
    #     # How to check that the diffs are actually allowable
    #     # pprint.pprint(prerun_diffs['values_changed'])
    #     pprint.pprint(prerun_diffs)
    #     # How to get the allowable diffs
    #     # pprint.pprint(prerun_diffs['values_changed'].keys())
    #     pprint.pprint(prerun_diffs.keys())

    allowable_prerun_diffs = set(
        ['root.jobs_pending[0].job_date_id',
         'root.jobs_pending[1].job_submission_time',
         'root.setup.model.compile_log.args[1]',
         'root.setup.model.object_id',
         'root.jobs_pending[1].job_date_id',
         'root.jobs_pending[0].job_submission_time',
         'root.setup.model.compile_log.stdout']
    )
    assert set(prerun_diffs.keys()) == set(['values_changed'])
    assert set(prerun_diffs['values_changed'].keys()) == allowable_prerun_diffs

    run_object.run_jobs()

    postrun_diffs = deepdiff.DeepDiff(
        run_objects_expected['run_object_postrun'],
        run_object
    )
    # with capsys.disabled():
    #     # How to check that the diffs are actually allowable
    #     pprint.pprint(postrun_diffs['values_changed'])
    #     # How to get the allowable diffs
    #     pprint.pprint(postrun_diffs['values_changed'].keys())

    allowable_postrun_diffs = set(
        ['root.jobs_completed[0].exe_cmd',
         'root.jobs_completed[0].job_submission_time',
         'root.jobs_completed[0].run_log.args[2]',
         'root.jobs_completed[1].job_submission_time',
         'root.jobs_completed[0].job_end_time',
         'root.object_id',
         'root.jobs_completed[0].job_date_id',
         'root.jobs_completed[0].job_start_time',
         'root.jobs_completed[1].job_date_id',
         'root.jobs_completed[1].job_end_time',
         'root.jobs_completed[1].run_log.args[2]',
         'root.jobs_completed[1].exe_cmd',
         'root.setup.model.compile_log.args[1]',
         'root.setup.model.compile_log.stdout',
         'root.jobs_completed[1].job_start_time',
         'root.setup.model.object_id']
    )
    assert set(postrun_diffs.keys()) == set(['values_changed'])
    assert set(postrun_diffs['values_changed'].keys()) == allowable_postrun_diffs
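
# mk_job_list() above is defined elsewhere in the test suite and is not shown in
# this section. The allowable diffs reference jobs_pending[0] and jobs_pending[1],
# which suggests it returns a two-job list. A rough sketch of what it might build,
# left commented out because the wrfhydropy.Job constructor signature used here is
# an assumption, not confirmed by this file:
#
# def mk_job_list():
#     job = wrfhydropy.Job(nproc=2)
#     return [job, copy.deepcopy(job)]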