def __init__(self):
    '''
    Refresh met_em files in the WRF run directory.

    Removes stale met_em files from the WRF run dir, copies fresh ones
    from the WPS work directory, and cleans up intermediate WPS output
    (met_em*, FILE*, PFILE*, GRIBFILE*).
    '''
    config.__init__(self)
    rundir = self.config['filesystem']['wrf_run_dir']
    wpsdir = os.path.join(self.config['filesystem']['work_dir'], 'wps')

    def _matching_files(directory, patterns):
        # all files in directory matching any of the glob patterns
        return [fname for pattern in patterns
                for fname in glob.glob(os.path.join(directory, pattern))]

    ## wrf run dir
    # remove old met_em files silently
    for filename in _matching_files(rundir, ['met_em*']):
        utils.silentremove(filename)
    # copy new met_em files from the wps workdir
    for filename in _matching_files(wpsdir, ['met_em*']):
        shutil.copyfile(filename,
                        os.path.join(rundir, os.path.basename(filename)))
    ## wps workdir
    # remove intermediate WPS files silently
    for filename in _matching_files(wpsdir,
                                    ['met_em*', 'FILE*', 'PFILE*',
                                     'GRIBFILE*']):
        utils.silentremove(filename)
def write_tbl(self):
    '''
    Write URBPARM.TBL to wrf run directory.

    Each key in self.options (except 'STREET PARAMETERS' and
    'BUILDING HEIGHTS') is written as "KEY : v1, v2, ...". Keys listed
    in space_sep use spaces instead of commas between values; scalar
    (non-iterable) values are written as-is.
    '''
    outfile = os.path.join(self.config['filesystem']['wrf_run_dir'],
                           'URBPARM.TBL')
    # remove outfile if exists
    utils.silentremove(outfile)
    # keys whose values are space-separated rather than comma-separated
    space_sep = ['HSEQUIP', 'AHDIUPRF', 'ALHDIUPRF']
    # use a context manager so the file is closed even if a write fails;
    # also avoids shadowing the builtin name `file`
    with open(outfile, 'w') as fh:
        for key in self.options.keys():
            if key in ['STREET PARAMETERS', 'BUILDING HEIGHTS']:
                continue
            try:
                sep = " " if key in space_sep else ", "
                fh.write("{0} : {1}\n".format(
                    key, sep.join(str(x) for x in self.options.get(key))))
            except TypeError:
                # value is a scalar, not an iterable
                fh.write("{0} : {1}\n".format(key, self.options.get(key)))
def wrfda_post(self):
    '''
    Move files into WRF run dir after all data assimilation steps have
    completed
    '''
    for dom in range(1, self.max_dom + 1):
        dom_str = "d0" + str(dom)
        workdir = os.path.join(self.wrfda_workdir, dom_str)
        if dom == 1:
            # updated lateral boundary conditions go to RUNDIR,
            # outer domain only
            bdy_target = os.path.join(self.rundir, 'wrfbdy_d01')
            utils.silentremove(bdy_target)
            shutil.copyfile(os.path.join(workdir, 'wrfbdy_d01'), bdy_target)
        # replace wrfinput for this domain with the assimilation result:
        # 'fg' when only the lower boundary was updated, otherwise the
        # WRFDA analysis 'wrfvar_output'
        wrfinput = os.path.join(self.rundir, 'wrfinput_' + dom_str)
        utils.silentremove(wrfinput)
        source = 'fg' if self.low_only else 'wrfvar_output'
        shutil.copyfile(os.path.join(workdir, source), wrfinput)
def _run_metgrid(self, j_id=None):
    '''
    run metgrid.exe (locally or using slurm script defined in config.json)

    Returns the slurm job-id when submitted via slurm, otherwise None.
    '''
    slurm_script = self.config['options_slurm']['slurm_metgrid.exe']
    if len(slurm_script):
        # submit via slurm, optionally chained after job j_id
        if j_id:
            metgrid_command = ['sbatch',
                               "--dependency=afterok:%d" % j_id,
                               slurm_script]
        else:
            metgrid_command = ['sbatch', slurm_script]
        utils.check_file_exists(metgrid_command[-1])
        # refresh the metgrid.exe symlink in the wps workdir
        exe_link = os.path.join(self.wps_workdir, 'metgrid', 'metgrid.exe')
        utils.silentremove(exe_link)
        os.symlink(os.path.join(self.config['filesystem']['wps_dir'],
                                'metgrid', 'metgrid.exe'), exe_link)
        try:
            res = subprocess.check_output(metgrid_command,
                                          cwd=self.wps_workdir,
                                          stderr=utils.devnull())
            j_id = int(res.split()[-1])  # slurm job-id
        except subprocess.CalledProcessError:
            raise  # re-raise exception
        return j_id  # return slurm job-id
    else:
        # run locally, blocking until metgrid.exe finishes
        metgrid_command = os.path.join(self.config['filesystem']['wps_dir'],
                                       'metgrid', 'metgrid.exe')
        utils.check_file_exists(metgrid_command)
        try:
            subprocess.check_call(metgrid_command, cwd=self.wps_workdir,
                                  stdout=utils.devnull(),
                                  stderr=utils.devnull())
        except subprocess.CalledProcessError:
            raise  # re-raise exception
def _prepare_namelist(self, datestart, dateend):
    '''
    Prepare the WPS namelist: set start/end dates for every domain and
    write the result into the wps work directory.

    Args:
        datestart (datetime): start of the simulation window
        dateend (datetime): end of the simulation window

    Raises:
        ValueError: if share/max_dom in the WPS namelist is not a
            positive integer
        TypeError: if datestart or dateend is not a datetime instance
    '''
    # read WPS namelist in WPS work_dir
    wps_nml = f90nml.read(self.config['options_wps']['namelist.wps'])
    # get number of domains
    ndoms = wps_nml['share']['max_dom']
    # check if ndoms is an integer and >0
    # (error message fixed: the variable read is share/max_dom)
    if not (isinstance(ndoms, int) and ndoms > 0):
        raise ValueError("'share/max_dom' namelist variable should be an "
                         "integer>0")
    # check if both datestart and dateend are a datetime instance
    if not all(isinstance(dt, datetime) for dt in [datestart, dateend]):
        raise TypeError("datestart and dateend must be an instance of datetime")
    # set new datestart and dateend, one entry per domain
    wps_nml['share']['start_date'] = [
        datetime.strftime(datestart, '%Y-%m-%d_%H:%M:%S')] * ndoms
    wps_nml['share']['end_date'] = [
        datetime.strftime(dateend, '%Y-%m-%d_%H:%M:%S')] * ndoms
    # write namelist in wps work_dir, removing any stale copy first
    target = os.path.join(self.config['filesystem']['work_dir'], 'wps',
                          'namelist.wps')
    utils.silentremove(target)
    wps_nml.write(target)
def wrfda_post(self, datestart):
    '''
    Move files into WRF run dir after all data assimilation steps have
    completed
    '''
    datestr = datetime.strftime(datestart, '%Y-%m-%d_%H:%M:%S')
    for dom in range(1, self.max_dom + 1):
        dom_str = "d0" + str(dom)
        wrfda_workdir = os.path.join(self.wrfda_workdir, dom_str)
        if dom == 1:
            # updated lateral boundary conditions, outer domain only
            bdy_target = os.path.join(self.rundir, 'wrfbdy_d01')
            utils.silentremove(bdy_target)
            shutil.copyfile(os.path.join(wrfda_workdir, 'wrfbdy_d01'),
                            bdy_target)
        # replace wrfinput for this domain with the assimilation result
        wrfinput = os.path.join(self.rundir, 'wrfinput_' + dom_str)
        utils.silentremove(wrfinput)
        source = 'fg' if self.low_only else 'wrfvar_output'
        shutil.copyfile(os.path.join(wrfda_workdir, source), wrfinput)
    # archive WRFDA diagnostics of the last processed domain into the
    # run dir, tagged with the analysis date
    shutil.copyfile(os.path.join(wrfda_workdir, 'rsl.out.0000'),
                    os.path.join(self.rundir, 'wrfda_rsl_out_' + datestr))
    shutil.copyfile(os.path.join(wrfda_workdir, 'statistics'),
                    os.path.join(self.rundir,
                                 'wrfda_statistics_' + datestr))
def _write_itag(self, wrfout, current_time):
    '''
    Create input file for unipost
    --------content itag file ---------------------------------------
    First line is location of wrfout data
    Second line is required format
    Third line is the modeltime to process
    Fourth line is the model identifier (WRF, NMM)
    -----------------------------------------------------------------
    '''
    # set itag filename and cleanup
    filename = os.path.join(self.post_dir, 'itag')
    utils.silentremove(filename)
    # template of itag file
    template = """{wrfout}
netcdf
{current_time}:00:00
NCAR
"""
    # context variables in template
    context = {"wrfout": wrfout, "current_time": current_time}
    # create the itag file and write content to it based on the template
    try:
        with open(filename, 'w') as itag:
            itag.write(template.format(**context))
    except IOError:
        # report which file failed before re-raising
        # (the exception binding was unused, so it is dropped)
        print('Unable to write itag file: %s' % filename)
        raise  # re-raise exception
def _run_geogrid(self, j_id=None):
    '''
    run geogrid.exe (locally or using slurm script defined in config.json)

    Geogrid is skipped entirely when geo_em files can already be opened
    for all domains.
    '''
    # get number of domains from wps namelist
    wps_nml = f90nml.read(self.config['options_wps']['namelist.wps'])
    ndoms = wps_nml['share']['max_dom']
    # check if geo_em files already exist for all domains
    try:
        for dom in range(1, ndoms + 1):
            fname = "geo_em.d{}.nc".format(str(dom).zfill(2))
            ncfile = Dataset(os.path.join(self.wps_workdir, fname))
            ncfile.close()
    except IOError:
        # create geo_em nc files
        if len(self.config['options_slurm']['slurm_geogrid.exe']):
            # run using slurm
            if j_id:
                mid = "--dependency=afterok:%d" % j_id
                geogrid_command = [
                    'sbatch', mid,
                    self.config['options_slurm']['slurm_geogrid.exe']
                ]
            else:
                geogrid_command = [
                    'sbatch',
                    self.config['options_slurm']['slurm_geogrid.exe']
                ]
            # check the slurm script itself (always the last element);
            # the previous index [1] wrongly tested the dependency flag
            # whenever j_id was given
            utils.check_file_exists(geogrid_command[-1])
            utils.silentremove(
                os.path.join(self.wps_workdir, 'geogrid', 'geogrid.exe'))
            os.symlink(
                os.path.join(self.config['filesystem']['wps_dir'],
                             'geogrid', 'geogrid.exe'),
                os.path.join(self.wps_workdir, 'geogrid', 'geogrid.exe'))
            try:
                res = subprocess.check_output(geogrid_command,
                                              cwd=self.wps_workdir,
                                              stderr=utils.devnull())
                j_id = int(res.split()[-1])  # slurm job-id
            except subprocess.CalledProcessError:
                raise  # re-raise exception
            utils.waitJobToFinish(j_id)
        else:
            # run locally, blocking until geogrid.exe finishes
            geogrid_command = os.path.join(
                self.config['filesystem']['wps_dir'], 'geogrid',
                'geogrid.exe')
            utils.check_file_exists(geogrid_command)
            try:
                subprocess.check_call(geogrid_command, cwd=self.wps_workdir,
                                      stdout=utils.devnull(),
                                      stderr=utils.devnull())
            except subprocess.CalledProcessError:
                raise  # re-raise exception
def _link_vtable(self):
    '''
    link the required Vtable into the WPS work directory
    '''
    link_target = os.path.join(self.wps_workdir, 'Vtable')
    # remove a stale link first so os.symlink cannot fail on EEXIST
    utils.silentremove(link_target)
    vtable_path = os.path.join(
        self.config['filesystem']['wps_dir'], 'ungrib', 'Variable_Tables',
        self.config['options_wps']['vtable'])
    os.symlink(vtable_path, link_target)
def _initialize(self):
    '''
    Check if archive dir exists, create if not. The archive dir is used
    to ...
    '''
    archive_dir = self.config['filesystem']['upp_archive_dir']
    # create archive dir
    utils._create_directory(archive_dir)
    # recreate post_dir from scratch: drop any old one, then create fresh
    utils.silentremove(self.post_dir)
    utils._create_directory(self.post_dir)
def __init__(self, args):
    '''
    Copy the observation file for the month of the given cylc datestring
    into the observation location configured in config.json.
    '''
    config.__init__(self)
    target = os.path.join(self.config['filesystem']['obs_dir'],
                          self.config['filesystem']['obs_filename'])
    # startdate from the cylc time string
    dt = utils.convert_cylc_time(args.datestring)
    # first day of that month, as a YYYYMMDD string: input files are
    # grouped in per-month directories
    month_start = datetime.datetime(dt.year, dt.month, 1).strftime('%Y%m%d')
    source = os.path.join(args.inputdir, month_start, args.inputfile)
    # remove existing file before copying the new one into place
    utils.silentremove(target)
    shutil.copyfile(source, target)
def _save_namelists(self):
    '''
    write coarse and fine WRF namelist.input to the respective run
    directories as namelist.forecast
    '''
    # define namelist directories
    work_dir = self.config['filesystem']['work_dir']
    coarse_namelist_dir = os.path.join(work_dir, 'wrf_coarse')
    fine_namelist_dir = os.path.join(work_dir, 'wrf_fine')
    # create directories and remove stale namelist.forecast files;
    # plain loops instead of side-effect list comprehensions
    for directory in (coarse_namelist_dir, fine_namelist_dir):
        utils._create_directory(directory)
        utils.silentremove(os.path.join(directory, 'namelist.forecast'))
    # write namelists
    self.nml_coarse.write(
        os.path.join(coarse_namelist_dir, 'namelist.forecast'))
    self.nml_fine.write(
        os.path.join(fine_namelist_dir, 'namelist.forecast'))
def cleanup(self):
    '''
    cleanup files in WRF run directory
    '''
    archive_vars = self.hour_var + self.minute_var + ['wrfout',
                                                      'wrfvar_input']
    # every 2 hours in the run window, end date excluded;
    # computed once since it does not depend on domain or variable
    dates = pandas.date_range(self.startdate, self.enddate,
                              freq='2H')[:-1]
    # loop over all domains and all archived variables
    for dom in range(1, self.ndoms + 1):
        for var in archive_vars:
            for cdate in dates:
                fname = (var + '_d0' + str(dom) + '_' +
                         cdate.strftime('%Y-%m-%d_%H:%M:%S'))
                utils.silentremove(os.path.join(self.rundir, fname))
def _run_ungrib(self, j_id=None):
    '''
    run ungrib.exe (locally or using slurm script defined in config.json)
    '''
    slurm_script = self.config['options_slurm']['slurm_ungrib.exe']
    if len(slurm_script):
        # run using slurm, optionally chained to a previous job
        if j_id:
            ungrib_command = ['sbatch',
                              "--dependency=afterok:%d" % j_id,
                              slurm_script]
        else:
            ungrib_command = ['sbatch', slurm_script]
        utils.check_file_exists(ungrib_command[-1])
        # refresh the ungrib.exe symlink, creating the ungrib
        # subdirectory if it does not exist yet
        ungrib_dir = os.path.join(self.wps_workdir, 'ungrib')
        exe_link = os.path.join(ungrib_dir, 'ungrib.exe')
        utils.silentremove(exe_link)
        if not os.path.isdir(ungrib_dir):
            utils._create_directory(ungrib_dir)
        os.symlink(os.path.join(self.config['filesystem']['wps_dir'],
                                'ungrib', 'ungrib.exe'), exe_link)
        try:
            res = subprocess.check_output(ungrib_command,
                                          cwd=self.wps_workdir,
                                          stderr=utils.devnull())
            j_id = int(res.split()[-1])  # slurm job-id
        except subprocess.CalledProcessError:
            raise  # re-raise exception
        utils.waitJobToFinish(j_id)
    else:
        # run locally and block until done
        ungrib_command = os.path.join(self.config['filesystem']['wps_dir'],
                                      'ungrib', 'ungrib.exe')
        utils.check_file_exists(ungrib_command)
        try:
            subprocess.check_call(ungrib_command, cwd=self.wps_workdir,
                                  stdout=utils.devnull(),
                                  stderr=utils.devnull())
        except subprocess.CalledProcessError:
            raise  # re-raise exception
def obsproc_init(self, datestart):
    '''
    Sync obsproc namelist with WRF namelist.input

    For every unique observation pair in self.obs.values() — presumably
    (workdir, filename); confirm against caller — an obsproc working
    directory is prepared and a namelist.obsproc is written that is
    consistent with the WRF domain configuration.

    Args:
        datestart (datetime): analysis time; the assimilation time
            window is datestart +/- 15 minutes

    Raises:
        TypeError: if datestart is not a datetime instance
    '''
    # (unused "from shutil import copyfile" removed; shutil.copyfile
    # is called directly below)
    from datetime import timedelta
    from datetime import datetime
    # validate datestart up front, before any directories are created
    # or files copied
    if not isinstance(datestart, datetime):
        raise TypeError("datestart must be an instance of datetime")
    # convert to unique list
    obslist = list(set(self.obs.values()))
    # read WRF namelist in WRF work_dir
    wrf_nml = f90nml.read(self.config['options_wrf']['namelist.input'])
    for obs in obslist:
        # read obsproc namelist template
        obsproc_nml = f90nml.read(
            os.path.join(self.obsproc_dir,
                         'namelist.obsproc.3dvar.wrfvar-tut'))
        # create obsproc workdir
        self.create_obsproc_dir(obs[0])
        # copy observation in LITTLE_R format to obsproc_dir
        shutil.copyfile(
            os.path.join(self.config['filesystem']['obs_dir'], obs[1]),
            os.path.join(obs[0], obs[1]))
        # sync obsproc namelist variables with wrf namelist.input
        obsproc_nml['record1']['obs_gts_filename'] = obs[1]
        obsproc_nml['record8']['nesti'] = wrf_nml['domains'][
            'i_parent_start']
        obsproc_nml['record8']['nestj'] = wrf_nml['domains'][
            'j_parent_start']
        obsproc_nml['record8']['nestix'] = wrf_nml['domains']['e_we']
        obsproc_nml['record8']['nestjx'] = wrf_nml['domains']['e_sn']
        obsproc_nml['record8']['numc'] = wrf_nml['domains']['parent_id']
        obsproc_nml['record8']['dis'] = wrf_nml['domains']['dx']
        obsproc_nml['record8']['maxnes'] = wrf_nml['domains']['max_dom']
        # set time_analysis, time_window_min, time_window_max
        obsproc_nml['record2']['time_analysis'] = datetime.strftime(
            datestart, '%Y-%m-%d_%H:%M:%S')
        obsproc_nml['record2']['time_window_min'] = datetime.strftime(
            datestart - timedelta(minutes=15), '%Y-%m-%d_%H:%M:%S')
        obsproc_nml['record2']['time_window_max'] = datetime.strftime(
            datestart + timedelta(minutes=15), '%Y-%m-%d_%H:%M:%S')
        # save obsproc_nml, replacing any stale namelist
        utils.silentremove(os.path.join(obs[0], 'namelist.obsproc'))
        obsproc_nml.write(os.path.join(obs[0], 'namelist.obsproc'))
def run_wrf(self, j_id=None):
    '''
    run wrf.exe (locally or using slurm script defined in config.json)
    '''
    slurm_script = self.config['options_slurm']['slurm_wrf.exe']
    # check if slurm_wrf.exe is defined
    if len(slurm_script):
        # submit via slurm, optionally dependent on a previous job
        if j_id:
            wrf_command = ['sbatch',
                           "--dependency=afterok:%d" % j_id,
                           slurm_script]
        else:
            wrf_command = ['sbatch', slurm_script]
        utils.check_file_exists(wrf_command[-1])
        # refresh the wrf.exe symlink in the run directory
        exe_link = os.path.join(self.wrf_rundir, 'wrf.exe')
        utils.silentremove(exe_link)
        os.symlink(os.path.join(self.config['filesystem']['wrf_dir'],
                                'main', 'wrf.exe'), exe_link)
        try:
            res = subprocess.check_output(wrf_command, cwd=self.wrf_rundir,
                                          stderr=utils.devnull())
            j_id = int(res.split()[-1])  # slurm job-id
        except subprocess.CalledProcessError:
            logger.error('Wrf failed %s:' % wrf_command)
            raise  # re-raise exception
        utils.waitJobToFinish(j_id)
    else:
        # run locally
        wrf_command = os.path.join(self.config['filesystem']['wrf_dir'],
                                   'main', 'wrf.exe')
        utils.check_file_exists(wrf_command)
        try:
            subprocess.check_call(wrf_command, cwd=self.wrf_rundir,
                                  stdout=utils.devnull(),
                                  stderr=utils.devnull())
        except subprocess.CalledProcessError:
            logger.error('wrf.exe failed %s:' % wrf_command)
            raise  # re-raise exception
def create_obsproc_dir(self, workdir):
    '''
    symlink all files required to run obsproc.exe into obsproc workdir
    '''
    # start from a clean work directory
    utils.silentremove(workdir)
    utils._create_directory(workdir)
    # symlink observation error files
    for fl in ('DIR.txt', 'HEIGHT.txt', 'PRES.txt', 'RH.txt', 'TEMP.txt',
               'UV.txt', 'obserr.txt'):
        os.symlink(os.path.join(self.obsproc_dir, fl),
                   os.path.join(workdir, fl))
    # symlink obsproc.exe
    os.symlink(os.path.join(self.obsproc_dir, 'src', 'obsproc.exe'),
               os.path.join(workdir, 'obsproc.exe'))
def _clean_boundaries_wps(self):
    '''
    clean old leftover boundary files in WPS directory
    '''
    # remove files silently, one glob pattern at a time; a plain loop
    # replaces the side-effect list comprehensions
    for ext in ['GRIBFILE.*', 'FILE:', 'PFILE:', 'PRES:']:
        for filename in glob.glob(os.path.join(self.wps_workdir, ext)):
            utils.silentremove(filename)
def prepare_updatebc_type(self, boundary_type, datestart, domain):
    '''
    Prepare the parame.in file for da_update_bc for either the 'lower'
    or the 'lateral' boundary of the given domain.
    '''
    wrfda_workdir = os.path.join(self.wrfda_workdir, "d0" + str(domain))
    parame_path = os.path.join(wrfda_workdir, 'parame.in')
    if boundary_type == 'lower':
        # define parame.in file
        self.create_parame(boundary_type, domain)
        # copy first guess (wrfout in wrfinput format) for WRFDA;
        # fall back to the plain wrfinput file when it is unavailable
        first_guess = os.path.join(
            self.rundir,
            'wrfvar_input_d0' + str(domain) + '_' +
            datetime.strftime(datestart, '%Y-%m-%d_%H:%M:%S'))
        try:
            shutil.copyfile(first_guess, os.path.join(wrfda_workdir, 'fg'))
        except Exception:
            shutil.copyfile(
                os.path.join(self.rundir, 'wrfinput_d0' + str(domain)),
                os.path.join(wrfda_workdir, 'fg'))
        # point parame.in at this domain and its IC file from WPS/real
        parame = f90nml.read(parame_path)
        parame['control_param']['domain_id'] = domain
        parame['control_param']['wrf_input'] = str(
            os.path.join(self.rundir, 'wrfinput_d0' + str(domain)))
        # save changes to parame.in file
        utils.silentremove(parame_path)
        parame.write(parame_path)
    elif boundary_type == 'lateral':
        # define parame.in file
        self.create_parame(boundary_type, domain)
        # set output file from WRFDA in parame.in
        parame = f90nml.read(parame_path)
        parame['control_param']['da_file'] = os.path.join(wrfda_workdir,
                                                          'wrfvar_output')
        # save changes to parame.in file
        utils.silentremove(parame_path)
        parame.write(parame_path)
    else:
        raise Exception('unknown boundary type')
def create_parame(self, parame_type, domain):
    '''
    Write a fresh parame.in configuration file for da_update_bc into the
    domain specific WRFDA work directory.

    Args:
        parame_type (str): 'lower' writes the lower-boundary-condition
            config, anything else writes the lateral-boundary config
        domain (int): domain number, used for the d0X workdir name
    '''
    # set domain specific workdir
    wrfda_workdir = os.path.join(self.wrfda_workdir, "d0" + str(domain))
    filename = os.path.join(wrfda_workdir, 'parame.in')
    utils.silentremove(filename)
    # use a context manager so the file is closed even if a write fails
    with open(filename, 'w') as parame:
        if parame_type == 'lower':
            ## start config file lower boundary conditions
            # NOTE(review): contains a hard-coded absolute wrf_input path
            # (second wrf_input line) — looks like leftover debug config
            parame.write("""&control_param
da_file = './fg'
wrf_input = './wrfinput_d01'
wrf_input = '/home/WUR/haren009/sources/WRFV3/run/wrfinput_d01'
domain_id = 1
cycling = .true.
debug = .true.
low_bdy_only = .true.
update_lsm = .false.
var4d_lbc = .false.
iswater = 16
/
""")
            ## end config file lower boundary conditions
        else:
            ## start config file lateral boundary conditions
            # NOTE(review): da_file here is also a hard-coded absolute path
            parame.write("""&control_param
da_file = '/home/haren/model/WRFV3/run2/wrfinput_d01'
wrf_bdy_file = './wrfbdy_d01'
domain_id = 1
cycling = .true.
debug = .true.
update_low_bdy = .false.
update_lateral_bdy = .true.
update_lsm = .false.
var4d_lbc = .false.
/
""")
            ## end config file lateral boundary conditions
def _cleanup_output_files(self):
    '''
    Clean up old output files in post_dir
    '''
    # remove anything matching the known unipost output patterns;
    # a plain loop replaces the side-effect list comprehension
    for ext in ['*.out', '*.tm00', 'fort.110', 'itag']:
        for fl in glob.glob(os.path.join(self.post_dir, ext)):
            utils.silentremove(fl)
def __init__(self, datestring, cylc_suite_def_path):
    '''
    Archive WRF output for the given cylc datestring: convert wrfout
    files to netcdf4, extract a small GIS variable subset, render
    surface plots with NCL, and update the "latest" plot symlink.
    '''
    config.__init__(self)
    dt = utils.convert_cylc_time(datestring)
    wrfout_time = datetime.datetime.strftime(dt, '%Y-%m-%d_%H:%M:%S')
    nml = self.config['options_wrf']['namelist.input']
    max_dom = utils.get_max_dom(nml)
    rundir = self.config['filesystem']['wrf_run_dir']
    archivedir = self.config['filesystem']['archive_dir']
    gis_archive = os.path.join(archivedir, 'gis', wrfout_time)
    utils._create_directory(gis_archive)
    for dom in range(1, max_dom + 1):
        wrfout = os.path.join(rundir,
                              'wrfout_d0' + str(dom) + '_' + wrfout_time)
        archived = os.path.join(archivedir,
                                'wrfout_d0' + str(dom) + '_' + wrfout_time)
        utils.silentremove(archived)
        # convert to netcdf4 format
        os.system('nc3tonc4 ' + wrfout + ' ' + archived)
        try:
            gis_out = os.path.join(
                gis_archive, 'meteo_gis_d0' + str(dom) + '_' + wrfout_time)
            # NOTE(review): os.system does not raise on command failure,
            # so this try/except only guards the path construction
            os.system('cdo -f nc4c -z zip_4 selvar,Q2,T2,U10,V10 ' +
                      wrfout + ' ' + gis_out)
        except Exception:
            pass
        plot_archive = os.path.join(archivedir, 'plot', wrfout_time)
        utils._create_directory(plot_archive)
        wrfncl = os.path.join(cylc_suite_def_path, 'bin',
                              'wrf_Surface3.ncl')
        os.system('ncl ' + wrfncl + ' inputfile=' + r'\"' + archived +
                  r'\" outputfile=\"' + plot_archive + r'/surface_d0' +
                  str(dom) + '.png' + r'\"')
    plot_latest = os.path.join(archivedir, 'plot', 'latest')
    try:
        os.symlink(plot_archive, plot_latest)
    except OSError as e:
        # fixed from Python 2-only "except OSError, e:" syntax, which is
        # a SyntaxError on Python 3
        if e.errno == errno.EEXIST:
            # replace the existing "latest" symlink
            os.remove(plot_latest)
            os.symlink(plot_archive, plot_latest)
def prepare_wrfda_namelist(self, domain):
    '''
    Write a domain specific WRFDA namelist.input into the WRFDA workdir,
    synced with the WRF namelist and the obsproc namelist.
    '''
    # set domain specific workdir
    wrfda_workdir = os.path.join(self.wrfda_workdir, "d0" + str(domain))
    # read WRFDA namelist, use namelist.wrfda as supplied in config.json
    # if not supplied, fall back to default from WRFDA
    if utils.check_file_exists(
            self.config['options_wrfda']['namelist.wrfda'], boolean=True):
        wrfda_namelist = self.config['options_wrfda']['namelist.wrfda']
    else:
        wrfda_namelist = os.path.join(
            self.config['filesystem']['wrfda_dir'],
            'var/test/tutorial/namelist.input')
    wrfda_nml = f90nml.read(wrfda_namelist)
    # read WRF namelist in WRF work_dir
    wrf_nml = f90nml.read(
        os.path.join(self.config['filesystem']['wrf_run_dir'],
                     'namelist.input'))
    # set domain specific information in namelist
    for var in ['e_we', 'e_sn', 'e_vert', 'dx', 'dy']:
        # get variable from ${RUNDIR}/namelist.input
        var_value = wrf_nml['domains'][var]
        # set domain specific variable in WRDFA_WORKDIR/namelist.input
        # (per-domain lists in the WRF namelist are indexed by domain)
        wrfda_nml['domains'][var] = var_value[domain - 1]
    for var in [
            'mp_physics', 'ra_lw_physics', 'ra_sw_physics', 'radt',
            'sf_sfclay_physics', 'sf_surface_physics', 'bl_pbl_physics',
            'cu_physics', 'cudt', 'num_soil_layers'
    ]:
        # get variable from ${RUNDIR}/namelist.input
        var_value = wrf_nml['physics'][var]
        # set domain specific variable in WRDFA_WORKDIR/namelist.input
        try:
            wrfda_nml['physics'][var] = var_value[domain - 1]
        except TypeError:
            # scalar value: shared by all domains
            wrfda_nml['physics'][var] = var_value
    # obsproc namelist for this domain; self.obs[domain][0] is
    # presumably the obsproc workdir — confirm against obsproc_init
    obsproc_nml = f90nml.read(
        os.path.join(self.obs[domain][0], 'namelist.obsproc'))
    # sync wrfda namelist with obsproc namelist
    wrfda_nml['wrfvar18']['analysis_date'] = (
        obsproc_nml['record2']['time_analysis'])
    wrfda_nml['wrfvar21']['time_window_min'] = (
        obsproc_nml['record2']['time_window_min'])
    wrfda_nml['wrfvar22']['time_window_max'] = (
        obsproc_nml['record2']['time_window_max'])
    # cv_options from config when cv5/cv7 background errors are usable,
    # otherwise the WRFDA default cv_options=3
    if self.check_cv5_cv7():
        wrfda_nml['wrfvar7']['cv_options'] = int(
            self.config['options_wrfda']['cv_type'])
        wrfda_nml['wrfvar6']['max_ext_its'] = 2
        wrfda_nml['wrfvar5']['check_max_iv'] = True
    else:
        wrfda_nml['wrfvar7']['cv_options'] = 3
    # parse the analysis time (seconds stripped) and copy it into the
    # time_control start/end fields
    tana = utils.return_validate(
        obsproc_nml['record2']['time_analysis'][:-6])
    wrfda_nml['time_control']['start_year'] = tana.year
    wrfda_nml['time_control']['start_month'] = tana.month
    wrfda_nml['time_control']['start_day'] = tana.day
    wrfda_nml['time_control']['start_hour'] = tana.hour
    wrfda_nml['time_control']['end_year'] = tana.year
    wrfda_nml['time_control']['end_month'] = tana.month
    wrfda_nml['time_control']['end_day'] = tana.day
    wrfda_nml['time_control']['end_hour'] = tana.hour
    # save changes to wrfda_nml
    utils.silentremove(os.path.join(wrfda_workdir, 'namelist.input'))
    wrfda_nml.write(os.path.join(wrfda_workdir, 'namelist.input'))