def _run_metgrid(self, j_id=None):
    '''
    run metgrid.exe (locally or using slurm script defined in config.json)
    '''
    if len(self.config['options_slurm']['slurm_metgrid.exe']):
        if j_id:
            mid = "--dependency=afterok:%d" % j_id
            metgrid_command = ['sbatch', mid,
                               self.config['options_slurm']['slurm_metgrid.exe']]
        else:
            metgrid_command = ['sbatch',
                               self.config['options_slurm']['slurm_metgrid.exe']]
        utils.check_file_exists(metgrid_command[-1])
        utils.silentremove(os.path.join(self.wps_workdir, 'metgrid',
                                        'metgrid.exe'))
        os.symlink(os.path.join(self.config['filesystem']['wps_dir'],
                                'metgrid', 'metgrid.exe'),
                   os.path.join(self.wps_workdir, 'metgrid', 'metgrid.exe'))
        try:
            res = subprocess.check_output(metgrid_command,
                                          cwd=self.wps_workdir,
                                          stderr=utils.devnull())
            j_id = int(res.split()[-1])  # slurm job-id
        except subprocess.CalledProcessError:
            logger.error('Metgrid failed %s:' % metgrid_command)
            raise  # re-raise exception
        return j_id  # return slurm job-id
    else:
        metgrid_command = os.path.join(self.config['filesystem']['wps_dir'],
                                       'metgrid', 'metgrid.exe')
        utils.check_file_exists(metgrid_command)
        try:
            subprocess.check_call(metgrid_command, cwd=self.wps_workdir,
                                  stdout=utils.devnull(),
                                  stderr=utils.devnull())
        except subprocess.CalledProcessError:
            logger.error('Metgrid failed %s:' % metgrid_command)
            raise  # re-raise exception
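# Illustrative sketch (not part of the original module): the sbatch submission
# pattern shared by the _run_* methods. slurm prints "Submitted batch job <id>";
# the job id is the last token of that output and can be chained into the next
# submission via --dependency=afterok. The helper name and arguments below are
# hypothetical.
def _submit_slurm_job(script, workdir, previous_job_id=None):
    command = ['sbatch']
    if previous_job_id is not None:
        # only start this job once the previous job finished successfully
        command.append('--dependency=afterok:%d' % previous_job_id)
    command.append(script)
    output = subprocess.check_output(command, cwd=workdir)
    return int(output.split()[-1])  # slurm job-id of the submitted job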
def run_wrf(self):
    '''
    run wrf
    '''
    j_id = None
    if len(self.config['options_slurm']['slurm_wrf.exe']):
        # run using slurm
        if j_id:
            mid = "--dependency=afterok:%d" % j_id
            wrf_command = ['sbatch', mid,
                           self.config['options_slurm']['slurm_wrf.exe']]
        else:
            wrf_command = ['sbatch',
                           self.config['options_slurm']['slurm_wrf.exe']]
        utils.check_file_exists(wrf_command[-1])
        try:
            res = subprocess.check_output(wrf_command, cwd=self.wrf_run_dir,
                                          stderr=utils.devnull())
            j_id = int(res.split()[-1])  # slurm job-id
        except subprocess.CalledProcessError:
            logger.error('WRF failed %s:' % wrf_command)
            raise  # re-raise exception
        utils.waitJobToFinish(j_id)
    else:
        # run locally
        subprocess.check_call(os.path.join(self.wrf_run_dir, 'wrf.exe'),
                              cwd=self.wrf_run_dir,
                              stdout=utils.devnull(),
                              stderr=utils.devnull())
def __init__(self, wrfpy_config=False):
    global logger
    wrfpy_dir = os.environ['HOME']
    logger = utils.start_logging(os.path.join(wrfpy_dir, 'wrfpy.log'))
    if not wrfpy_config:
        try:
            # get CYLC_SUITE_DEF_PATH environment variable
            wrfpy_dir = os.environ['CYLC_SUITE_DEF_PATH']
        except KeyError:
            # default back to user home dir in case CYLC is not used
            wrfpy_dir = os.environ['HOME']
        # config.json needs to be in base of wrfpy_dir
        self.configfile = os.path.join(wrfpy_dir, 'config.json')
    else:
        self.configfile = wrfpy_config
    try:
        logger.debug('Checking if configuration file exists: %s' %
                     self.configfile)
        utils.check_file_exists(self.configfile)
    except IOError:
        # create config file
        self._create_empty_config()
        # TODO: exit and notify user to manually edit config file
    # read json config file
    self._read_json()
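# Illustrative only (an assumption, not shipped with the module): a minimal
# config.json layout containing the keys read by the methods in this section.
# Paths and slurm scripts are placeholders; leaving an 'options_slurm' entry
# empty makes the corresponding executable run locally instead of via sbatch.
_EXAMPLE_CONFIG = {
    "filesystem": {
        "wps_dir": "/opt/WPS",
        "wrf_dir": "/opt/WRF",
        "wrf_run_dir": "/opt/WRF/run",
        "wrfda_dir": "/opt/WRFDA",
        "upp_dir": "/opt/UPP",
        "upp_archive_dir": "/data/archive"
    },
    "options_wps": {"namelist.wps": "/path/to/namelist.wps", "run_hours": 24},
    "options_wrf": {"namelist.input": "/path/to/namelist.input"},
    "options_wrfda": {"cv_type": 5, "be.dat": "/path/to/be.dat",
                      "namelist.wrfda": ""},
    "options_slurm": {"slurm_geogrid.exe": "", "slurm_ungrib.exe": "",
                      "slurm_metgrid.exe": "", "slurm_wrf.exe": "",
                      "slurm_wrfvar.exe": "", "slurm_obsproc.exe": ""}
}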
def _run_geogrid(self, j_id=None):
    '''
    run geogrid.exe (locally or using slurm script defined in config.json)
    '''
    # get number of domains from wps namelist
    wps_nml = f90nml.read(self.config['options_wps']['namelist.wps'])
    ndoms = wps_nml['share']['max_dom']
    # check if geo_em files already exist for all domains
    try:
        for dom in range(1, ndoms + 1):
            fname = "geo_em.d{}.nc".format(str(dom).zfill(2))
            ncfile = Dataset(os.path.join(self.wps_workdir, fname))
            ncfile.close()
    except IOError:
        # create geo_em nc files
        if len(self.config['options_slurm']['slurm_geogrid.exe']):
            # run using slurm
            if j_id:
                mid = "--dependency=afterok:%d" % j_id
                geogrid_command = ['sbatch', mid,
                                   self.config['options_slurm']['slurm_geogrid.exe']]
            else:
                geogrid_command = ['sbatch',
                                   self.config['options_slurm']['slurm_geogrid.exe']]
            # the slurm script is always the last element of the command
            utils.check_file_exists(geogrid_command[-1])
            utils.silentremove(os.path.join(self.wps_workdir, 'geogrid',
                                            'geogrid.exe'))
            os.symlink(os.path.join(self.config['filesystem']['wps_dir'],
                                    'geogrid', 'geogrid.exe'),
                       os.path.join(self.wps_workdir, 'geogrid', 'geogrid.exe'))
            try:
                res = subprocess.check_output(geogrid_command,
                                              cwd=self.wps_workdir,
                                              stderr=utils.devnull())
                j_id = int(res.split()[-1])  # slurm job-id
            except subprocess.CalledProcessError:
                logger.error('Geogrid failed %s:' % geogrid_command)
                raise  # re-raise exception
            utils.waitJobToFinish(j_id)
        else:
            geogrid_command = os.path.join(self.config['filesystem']['wps_dir'],
                                           'geogrid', 'geogrid.exe')
            utils.check_file_exists(geogrid_command)
            try:
                subprocess.check_call(geogrid_command, cwd=self.wps_workdir,
                                      stdout=utils.devnull(),
                                      stderr=utils.devnull())
            except subprocess.CalledProcessError:
                logger.error('Geogrid failed %s:' % geogrid_command)
                raise  # re-raise exception
def _check_wrf(self):
    '''
    check wrf options in json config file
    '''
    # verify that the config option is specified by the user
    assert (len(self.config['options_wrf']['namelist.input']) > 0), (
        'No WRF namelist.input specified in config file')
    # check if the specified namelist.input exists and is readable
    utils.check_file_exists(self.config['options_wrf']['namelist.input'])
    # check if namelist.input is in the required format and has all keys needed
    self._check_namelist_wrf()
def _check_namelist_wps(self):
    '''
    check if namelist.wps is in the required format and has all keys needed
    '''
    # verify that example namelist.wps exists and is not removed by user
    basepath = utils.get_script_path()
    self.example_file = os.path.join(basepath, 'examples', 'namelist.wps')
    utils.check_file_exists(self.example_file)
    # load specified namelist
    self.user_nml = f90nml.read(self.config['options_wps']['namelist.wps'])
    # verify that all keys in self.user_nml are also in example namelist
    self._verify_namelist_wps_keys()
    # validate the key information specified
    self._validate_namelist_wps_keys()
def _check_wps(self):
    '''
    check wps options in json config file
    '''
    # verify that the config option is specified by the user
    assert (len(self.config['options_wps']['namelist.wps']) > 0), (
        'No WPS namelist.wps specified in config file')
    # check if the specified namelist.wps exists and is readable
    utils.check_file_exists(self.config['options_wps']['namelist.wps'])
    # check if run_hours is specified
    run_hours = self.config['options_wps']['run_hours']
    assert run_hours, "No WPS run_hours specified in config file"
    # check if namelist.wps is in the required format and has all keys needed
    self._check_namelist_wps()
def check_cv5_cv7(self):
    '''
    return True if cv_type=5 or cv_type=7 is set and be.dat is defined
    (and exists on the filesystem) for the outer domain in config.json
    '''
    if (int(self.config['options_wrfda']['cv_type']) in [5, 7]):
        # check if be.dat is a filepath or an array of filepaths
        if isinstance(self.config['options_wrfda']['be.dat'], str):
            # option is a filepath
            self.wrfda_be_dat = self.config['options_wrfda']['be.dat']
        elif isinstance(self.config['options_wrfda']['be.dat'], list):
            if len(self.config['options_wrfda']['be.dat']) == 1:
                # length == 1, so treat the first element as the str case
                month_idx = 0
            elif len(self.config['options_wrfda']['be.dat']) == 12:
                # there is one be.dat matrix for each month
                # find month number from self.datestart
                month_idx = self.datestart.month - 1
            else:
                # list but not of length 1 or 12
                raise IOError(
                    "config['options_wrfda']['be.dat'] should be a string "
                    "or a list of length 1 or 12. Found a list of length " +
                    str(len(self.config['options_wrfda']['be.dat'])))
            self.wrfda_be_dat = self.config['options_wrfda']['be.dat'][month_idx]
        else:
            # not a list or str
            raise TypeError("unknown type for be.dat configuration: " +
                            str(type(self.config['options_wrfda']['be.dat'])))
        return utils.check_file_exists(self.wrfda_be_dat, boolean=True)
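# Illustrative only (paths are placeholders): the three accepted forms of
# config['options_wrfda']['be.dat'] handled by check_cv5_cv7 above.
_BE_DAT_EXAMPLES = [
    "/path/to/be.dat",                                    # single filepath
    ["/path/to/be.dat"],                                  # list of length 1
    ["/path/to/be.dat_%02d" % m for m in range(1, 13)],   # one file per month
]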
def _run_ungrib(self, j_id=None):
    '''
    run ungrib.exe (locally or using slurm script defined in config.json)
    '''
    if len(self.config['options_slurm']['slurm_ungrib.exe']):
        # run using slurm
        if j_id:
            mid = "--dependency=afterok:%d" % j_id
            ungrib_command = ['sbatch', mid,
                              self.config['options_slurm']['slurm_ungrib.exe']]
        else:
            ungrib_command = ['sbatch',
                              self.config['options_slurm']['slurm_ungrib.exe']]
        utils.check_file_exists(ungrib_command[-1])
        utils.silentremove(os.path.join(self.wps_workdir, 'ungrib',
                                        'ungrib.exe'))
        if not os.path.isdir(os.path.join(self.wps_workdir, 'ungrib')):
            utils._create_directory(os.path.join(self.wps_workdir, 'ungrib'))
        os.symlink(os.path.join(self.config['filesystem']['wps_dir'],
                                'ungrib', 'ungrib.exe'),
                   os.path.join(self.wps_workdir, 'ungrib', 'ungrib.exe'))
        try:
            res = subprocess.check_output(ungrib_command,
                                          cwd=self.wps_workdir,
                                          stderr=utils.devnull())
            j_id = int(res.split()[-1])  # slurm job-id
        except subprocess.CalledProcessError:
            logger.error('Ungrib failed %s:' % ungrib_command)
            raise  # re-raise exception
        utils.waitJobToFinish(j_id)
    else:
        ungrib_command = os.path.join(self.config['filesystem']['wps_dir'],
                                      'ungrib', 'ungrib.exe')
        utils.check_file_exists(ungrib_command)
        try:
            subprocess.check_call(ungrib_command, cwd=self.wps_workdir,
                                  stdout=utils.devnull(),
                                  stderr=utils.devnull())
        except subprocess.CalledProcessError:
            logger.error('Ungrib failed %s:' % ungrib_command)
            raise  # re-raise exception
def check_cv5(self):
    '''
    return True if cv_type=5 is set and be.dat is defined (and exists on the
    filesystem) for the outer domain in config.json
    '''
    if int(self.config['options_wrfda']['cv_type']) == 5:
        return utils.check_file_exists(
            self.config['options_wrfda']['be.dat'], boolean=True)
def run_wrf(self, j_id=None):
    '''
    run wrf.exe
    '''
    # check if slurm_wrf.exe is defined
    if len(self.config['options_slurm']['slurm_wrf.exe']):
        if j_id:
            mid = "--dependency=afterok:%d" % j_id
            wrf_command = ['sbatch', mid,
                           self.config['options_slurm']['slurm_wrf.exe']]
        else:
            wrf_command = ['sbatch',
                           self.config['options_slurm']['slurm_wrf.exe']]
        utils.check_file_exists(wrf_command[-1])
        utils.silentremove(os.path.join(self.wrf_rundir, 'wrf.exe'))
        os.symlink(os.path.join(self.config['filesystem']['wrf_dir'],
                                'main', 'wrf.exe'),
                   os.path.join(self.wrf_rundir, 'wrf.exe'))
        try:
            res = subprocess.check_output(wrf_command, cwd=self.wrf_rundir,
                                          stderr=utils.devnull())
            j_id = int(res.split()[-1])  # slurm job-id
        except subprocess.CalledProcessError:
            logger.error('Wrf failed %s:' % wrf_command)
            raise  # re-raise exception
        utils.waitJobToFinish(j_id)
    else:
        # run locally
        wrf_command = os.path.join(self.config['filesystem']['wrf_dir'],
                                   'main', 'wrf.exe')
        utils.check_file_exists(wrf_command)
        try:
            subprocess.check_call(wrf_command, cwd=self.wrf_rundir,
                                  stdout=utils.devnull(),
                                  stderr=utils.devnull())
        except subprocess.CalledProcessError:
            logger.error('wrf.exe failed %s:' % wrf_command)
            raise  # re-raise exception
def _check_upp_dir(self):
    assert os.path.isdir(self.config['filesystem']['upp_dir']), (
        'upp directory %s not found' % self.config['filesystem']['upp_dir'])
    # create list of files to check
    files_to_check = [
        os.path.join(self.config['filesystem']['upp_dir'], filename)
        for filename in ['bin/unipost.exe', 'parm/wrf_cntrl.parm']
    ]
    # check if all files in the list exist and are readable
    for filename in files_to_check:
        utils.check_file_exists(filename)
def _archive_output(self, current_time, thours, domain):
    '''
    rename unipost.exe output to wrfpost_d0${domain}_time.grb and archive
    '''
    import shutil
    # verify that domain is an int
    if not isinstance(domain, int):
        message = 'domain id should be an integer'
        logger.error(message)
        raise IOError(message)
    # define original and destination filename
    origname = 'WRFPRS%02d.tm00' % thours
    outname = 'wrfpost_d%02d_%s.grb' % (domain, current_time)
    # rename file and move to archive dir
    shutil.move(os.path.join(self.post_dir, origname),
                os.path.join(self.config['filesystem']['upp_archive_dir'],
                             outname))
    # check if the file is indeed archived
    utils.check_file_exists(
        os.path.join(self.config['filesystem']['upp_archive_dir'], outname))
def wrfvar_run(self, domain):
    '''
    run da_wrfvar.exe
    '''
    # set domain specific workdir
    wrfda_workdir = os.path.join(self.wrfda_workdir, "d0" + str(domain))
    logfile = os.path.join(wrfda_workdir, 'log.wrfda_d' + str(domain))
    j_id = None
    if len(self.config['options_slurm']['slurm_wrfvar.exe']):
        if j_id:
            mid = "--dependency=afterok:%d" % j_id
            wrfvar_command = ['sbatch', mid,
                              self.config['options_slurm']['slurm_wrfvar.exe']]
        else:
            wrfvar_command = ['sbatch',
                              self.config['options_slurm']['slurm_wrfvar.exe']]
        utils.check_file_exists(wrfvar_command[-1])
        try:
            res = subprocess.check_output(wrfvar_command, cwd=wrfda_workdir,
                                          stderr=utils.devnull())
            j_id = int(res.split()[-1])  # slurm job-id
        except subprocess.CalledProcessError:
            logger.error('Wrfvar failed %s:' % wrfvar_command)
            raise  # re-raise exception
        # poll the queue until the job has finished
        while True:
            time.sleep(1)
            if not utils.testjob(j_id):
                break
    else:
        # run locally, writing stdout and stderr to the domain logfile
        with open(logfile, 'w') as output:
            subprocess.check_call(os.path.join(wrfda_workdir, 'da_wrfvar.exe'),
                                  cwd=wrfda_workdir, stdout=output,
                                  stderr=subprocess.STDOUT)
def obsproc_run(self):
    '''
    run obsproc.exe
    '''
    obslist = list(set(self.obs.values()))
    obsproc_dir = obslist[0][0]
    # TODO: check if the output file is created and no errors have occurred
    j_id = None
    if len(self.config['options_slurm']['slurm_obsproc.exe']):
        # run using slurm
        if j_id:
            mid = "--dependency=afterok:%d" % j_id
            obsproc_command = ['sbatch', mid,
                               self.config['options_slurm']['slurm_obsproc.exe']]
        else:
            obsproc_command = ['sbatch',
                               self.config['options_slurm']['slurm_obsproc.exe']]
        utils.check_file_exists(obsproc_command[-1])
        try:
            res = subprocess.check_output(obsproc_command, cwd=obsproc_dir,
                                          stderr=utils.devnull())
            j_id = int(res.split()[-1])  # slurm job-id
        except subprocess.CalledProcessError:
            logger.error('Obsproc failed %s:' % obsproc_command)
            raise  # re-raise exception
        utils.waitJobToFinish(j_id)
    else:
        # run locally
        subprocess.check_call(os.path.join(obsproc_dir, 'obsproc.exe'),
                              cwd=obsproc_dir, stdout=utils.devnull(),
                              stderr=utils.devnull())
    return None
def _check_wrfda_dir(self):
    '''
    check if the wrfda directory exists
    check if the obsproc.exe and da_wrfvar.exe executables exist in the
    wrfda directory
    '''
    # TODO: find out if we can verify that WRFDA dir is 3dvar or 4dvar compiled
    assert os.path.isdir(self.config['filesystem']['wrfda_dir']), (
        'wrfda directory %s not found' %
        self.config['filesystem']['wrfda_dir'])
    # create list of files to check
    files_to_check = [
        os.path.join(self.config['filesystem']['wrfda_dir'], filename)
        for filename in ['var/obsproc/obsproc.exe', 'var/da/da_wrfvar.exe']
    ]
    # check if all files in the list exist and are readable
    for filename in files_to_check:
        utils.check_file_exists(filename)
def prepare_wrfda_namelist(self, domain):
    # set domain specific workdir
    wrfda_workdir = os.path.join(self.wrfda_workdir, "d0" + str(domain))
    # read WRFDA namelist, use namelist.wrfda as supplied in config.json
    # if not supplied, fall back to default from WRFDA
    if utils.check_file_exists(self.config['options_wrfda']['namelist.wrfda'],
                               boolean=True):
        wrfda_namelist = self.config['options_wrfda']['namelist.wrfda']
    else:
        wrfda_namelist = os.path.join(self.config['filesystem']['wrfda_dir'],
                                      'var/test/tutorial/namelist.input')
    wrfda_nml = f90nml.read(wrfda_namelist)
    # read WRF namelist in WRF work_dir
    wrf_nml = f90nml.read(os.path.join(self.config['filesystem']['wrf_run_dir'],
                                       'namelist.input'))
    # set domain specific information in namelist
    for var in ['e_we', 'e_sn', 'e_vert', 'dx', 'dy']:
        # get variable from ${RUNDIR}/namelist.input
        var_value = wrf_nml['domains'][var]
        # set domain specific variable in WRFDA_WORKDIR/namelist.input
        wrfda_nml['domains'][var] = var_value[domain - 1]
    for var in ['mp_physics', 'ra_lw_physics', 'ra_sw_physics', 'radt',
                'sf_sfclay_physics', 'sf_surface_physics', 'bl_pbl_physics',
                'cu_physics', 'cudt', 'num_soil_layers']:
        # get variable from ${RUNDIR}/namelist.input
        var_value = wrf_nml['physics'][var]
        # set domain specific variable in WRFDA_WORKDIR/namelist.input
        try:
            wrfda_nml['physics'][var] = var_value[domain - 1]
        except TypeError:
            wrfda_nml['physics'][var] = var_value
    obsproc_nml = f90nml.read(os.path.join(self.obs[domain][0],
                                           'namelist.obsproc'))
    # sync wrfda namelist with obsproc namelist
    wrfda_nml['wrfvar18']['analysis_date'] = (
        obsproc_nml['record2']['time_analysis'])
    wrfda_nml['wrfvar21']['time_window_min'] = (
        obsproc_nml['record2']['time_window_min'])
    wrfda_nml['wrfvar22']['time_window_max'] = (
        obsproc_nml['record2']['time_window_max'])
    if self.check_cv5_cv7():
        wrfda_nml['wrfvar7']['cv_options'] = int(
            self.config['options_wrfda']['cv_type'])
        wrfda_nml['wrfvar6']['max_ext_its'] = 2
        wrfda_nml['wrfvar5']['check_max_iv'] = True
    else:
        wrfda_nml['wrfvar7']['cv_options'] = 3
    tana = utils.return_validate(obsproc_nml['record2']['time_analysis'][:-6])
    wrfda_nml['time_control']['start_year'] = tana.year
    wrfda_nml['time_control']['start_month'] = tana.month
    wrfda_nml['time_control']['start_day'] = tana.day
    wrfda_nml['time_control']['start_hour'] = tana.hour
    wrfda_nml['time_control']['end_year'] = tana.year
    wrfda_nml['time_control']['end_month'] = tana.month
    wrfda_nml['time_control']['end_day'] = tana.day
    wrfda_nml['time_control']['end_hour'] = tana.hour
    # save changes to wrfda_nml
    utils.silentremove(os.path.join(wrfda_workdir, 'namelist.input'))
    wrfda_nml.write(os.path.join(wrfda_workdir, 'namelist.input'))
def _prepare_post_dir(self):
    '''
    Create and prepare post_dir
    '''
    #logger.debug('Preparing postprd directory: %s' %config['post_dir'])
    # create self.post_dir if it does not exist yet
    utils._create_directory(self.post_dir)
    # Link all the relevant files needed to compute various diagnostics
    relpath_to_link = [
        'EmisCoeff/Big_Endian/EmisCoeff.bin',
        'AerosolCoeff/Big_Endian/AerosolCoeff.bin',
        'CloudCoeff/Big_Endian/CloudCoeff.bin',
        'SpcCoeff/Big_Endian/imgr_g11.SpcCoeff.bin',
        'TauCoeff/ODPS/Big_Endian/imgr_g11.TauCoeff.bin',
        'SpcCoeff/Big_Endian/imgr_g12.SpcCoeff.bin',
        'TauCoeff/ODPS/Big_Endian/imgr_g12.TauCoeff.bin',
        'SpcCoeff/Big_Endian/imgr_g13.SpcCoeff.bin',
        'TauCoeff/ODPS/Big_Endian/imgr_g13.TauCoeff.bin',
        'SpcCoeff/Big_Endian/imgr_g15.SpcCoeff.bin',
        'TauCoeff/ODPS/Big_Endian/imgr_g15.TauCoeff.bin',
        'SpcCoeff/Big_Endian/imgr_mt1r.SpcCoeff.bin',
        'TauCoeff/ODPS/Big_Endian/imgr_mt1r.TauCoeff.bin',
        'SpcCoeff/Big_Endian/imgr_mt2.SpcCoeff.bin',
        'TauCoeff/ODPS/Big_Endian/imgr_mt2.TauCoeff.bin',
        'SpcCoeff/Big_Endian/imgr_insat3d.SpcCoeff.bin',
        'TauCoeff/ODPS/Big_Endian/imgr_insat3d.TauCoeff.bin',
        'SpcCoeff/Big_Endian/amsre_aqua.SpcCoeff.bin',
        'TauCoeff/ODPS/Big_Endian/amsre_aqua.TauCoeff.bin',
        'SpcCoeff/Big_Endian/tmi_trmm.SpcCoeff.bin',
        'TauCoeff/ODPS/Big_Endian/tmi_trmm.TauCoeff.bin',
        'SpcCoeff/Big_Endian/ssmi_f13.SpcCoeff.bin',
        'TauCoeff/ODPS/Big_Endian/ssmi_f13.TauCoeff.bin',
        'SpcCoeff/Big_Endian/ssmi_f14.SpcCoeff.bin',
        'TauCoeff/ODPS/Big_Endian/ssmi_f14.TauCoeff.bin',
        'SpcCoeff/Big_Endian/ssmi_f15.SpcCoeff.bin',
        'TauCoeff/ODPS/Big_Endian/ssmi_f15.TauCoeff.bin',
        'SpcCoeff/Big_Endian/ssmis_f16.SpcCoeff.bin',
        'TauCoeff/ODPS/Big_Endian/ssmis_f16.TauCoeff.bin',
        'SpcCoeff/Big_Endian/ssmis_f17.SpcCoeff.bin',
        'TauCoeff/ODPS/Big_Endian/ssmis_f17.TauCoeff.bin',
        'SpcCoeff/Big_Endian/ssmis_f18.SpcCoeff.bin',
        'TauCoeff/ODPS/Big_Endian/ssmis_f18.TauCoeff.bin',
        'SpcCoeff/Big_Endian/ssmis_f19.SpcCoeff.bin',
        'TauCoeff/ODPS/Big_Endian/ssmis_f19.TauCoeff.bin',
        'SpcCoeff/Big_Endian/ssmis_f20.SpcCoeff.bin',
        'TauCoeff/ODPS/Big_Endian/ssmis_f20.TauCoeff.bin',
        'SpcCoeff/Big_Endian/seviri_m10.SpcCoeff.bin',
        'TauCoeff/ODPS/Big_Endian/seviri_m10.TauCoeff.bin',
        'SpcCoeff/Big_Endian/v.seviri_m10.SpcCoeff.bin'
    ]
    # abspath coefficients for crtm2 (simulated synthetic satellites)
    abspath_coeff = [os.path.join(self.crtm_dir, relpath)
                     for relpath in relpath_to_link]
    # abspath wrf_cntrl param file
    abspath_pf = os.path.join(self.config['filesystem']['upp_dir'], 'parm',
                              'wrf_cntrl.parm')
    # concatenate lists of paths
    abspath_to_link = abspath_coeff + [abspath_pf]
    # create a symlink for every file in abspath_to_link
    for fl in abspath_to_link:
        utils.check_file_exists(fl)  # check if file exists and is readable
        os.symlink(fl, os.path.join(self.post_dir, os.path.basename(fl)))
    # symlink wrf_cntrl.parm to config['post_dir']/fort.14
    os.symlink(abspath_pf, os.path.join(self.post_dir, 'fort.14'))
    # symlink microphysics tables - the code used is based on the mp_physics
    # option used in the wrfout file
    os.symlink(os.path.join(self.config['filesystem']['wrf_run_dir'],
                            'ETAMPNEW_DATA'),
               os.path.join(self.post_dir, 'nam_micro_lookup.dat'))
    os.symlink(os.path.join(self.config['filesystem']['wrf_run_dir'],
                            'ETAMPNEW_DATA.expanded_rain'),
               os.path.join(self.post_dir, 'hires_micro_lookup.dat'))