# Module-level imports assumed by the functions below: os, subprocess, time,
# datetime and OrderedDict are all referenced in the code that follows, as is
# the project's own `shared` helper module.
import os
import subprocess
import time
import datetime
from collections import OrderedDict

import shared

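# `MissingFile` is raised by hyperslab() and compress() below but not defined
# in this section; a minimal sketch is provided here on the assumption that no
# richer version is importable from `shared`.
class MissingFile(IOError):
    """Raised when an expected input or output file is absent."""
    pass
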
def run_wrf(config):
    """Run wrf.exe and check output was successful

    Arguments:
    config -- dictionary containing various configuration options

    """
    logger = shared.get_logger()
    logger.info('\n*** RUNNING WRF ***')

    queue = config['queue']
    wrf_run_dir = config['wrf_run_dir']
    log_file = '%s/wrf.log' % wrf_run_dir

    executable = '%s/wrf.exe' % wrf_run_dir
    shared.run(executable, config, from_dir=wrf_run_dir)

    #
    # Check for success
    #
    cmd = 'grep "SUCCESS COMPLETE" %s/rsl.error.0000' % wrf_run_dir
    ret = shared.run_cmd(cmd, config)
    if ret != 0:
        raise IOError('wrf.exe did not complete')

    logger.info('*** SUCCESS WRF ***\n')

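# A minimal driver sketch (hypothetical, not part of the original module):
# the run_* steps all share one `config` dictionary, so a forecast cycle
# simply chains them in order and lets IOError propagate on a failed step.
def example_forecast_step(config):
    run_real(config)  # defined below: prepares initial/boundary conditions
    run_wrf(config)   # integrates the model and checks rsl.error.0000
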
def run_geogrid(config):
    """Runs geogrid.exe and checks output was successful

    Arguments:
    config -- dictionary specifying configuration options

    """
    logger = shared.get_logger()
    logger.info("\n*** RUNNING GEOGRID ***")

    wps_run_dir = config['wps_run_dir']
    os.chdir(wps_run_dir)
    queue = config['queue']
    log_file = '%s/geogrid.log' % wps_run_dir

    geogrid_wps = '%(wps_run_dir)s/GEOGRID.TBL' % config

    if not os.path.exists(geogrid_wps):
        raise IOError("Could not find GEOGRID.TBL at: %s" % geogrid_wps)

    cmd = '%s/geogrid.exe' % wps_run_dir
    shared.run(cmd, config, wps_run_dir)

    cmd = 'grep "Successful completion" %s/geogrid.log*' % wps_run_dir
    ret = shared.run_cmd(cmd, config)
    if ret != 0:
        raise IOError('geogrid.exe did not complete')

    logger.info('*** SUCCESS GEOGRID ***\n')

def hyperslab(config):
    """Hyperslabs (subsets) wrfout files using the NCO operator ncks,
    writing each result to a temporary file and only replacing the
    original on success."""
    logger = shared.get_logger()
    logger.info("*** Hyperslabbing wrfout files ***")

    wrfout_dir = config['wrfout_dir']
    init_time = config['init_time']
    max_dom = config['max_dom']
    dimspec = config['post.hyperslab.dimspec']

    wrfout_files = ['%s/wrfout_d%02d_%s' % (wrfout_dir, d,
                    init_time.strftime('%Y-%m-%d_%H:%M:%S'))
                    for d in range(1, max_dom + 1)]

    for f in wrfout_files:
        if not os.path.exists(f):
            raise MissingFile("could not find %s" % f)
        tmp_name = f + '.tmp'
        logger.debug("compressing %s to temporary file: %s" % (f, tmp_name))

        cmd = 'ncks -4 -O %s %s %s' % (dimspec, f, tmp_name)
        shared.run(cmd, config)

        if not os.path.exists(tmp_name):
            raise IOError("compression failed for %s" % f)

        os.remove(f)
        os.rename(tmp_name, f)

    logger.info("*** Done hyperslabbing wrfout files ***")

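# Hedged example of the dimension spec consumed by hyperslab(): ncks's
# standard "-d dim,min,max" hyperslab syntax. The dimension names match
# wrfout conventions, but the index ranges here are purely illustrative:
# config['post.hyperslab.dimspec'] = '-d west_east,10,90 -d south_north,10,90'
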
def run_metgrid(config):
    """Runs metgrid.exe and checks output was successful

    Arguments:
    config -- dictionary specifying configuration options

    """
    logger = shared.get_logger()
    logger.info("\n*** RUNNING METGRID ***")

    queue = config['queue']
    wps_run_dir = config['wps_run_dir']
    log_file = '%s/metgrid.log' % wps_run_dir
    bdy_conditions = config['bdy_conditions']
    namelist_wps = config['namelist_wps']
    namelist = shared.read_namelist(namelist_wps)

    met_em_dir = shared.sub_date(config['met_em_dir'], config['init_time'])

    #
    # vtable may be a dictionary to support running ungrib multiple
    # times. In which case, we need to put multiple prefixes into
    # the namelist.wps file
    #
    vtable = config['vtable']

    if isinstance(vtable, dict):
        prefixes = vtable.keys()
    else:
        prefixes = [bdy_conditions]

    namelist.update('fg_name', prefixes)
    namelist.update('opt_output_from_metgrid_path', met_em_dir, section='metgrid')
    if not config['sst']:
        namelist.remove('constants_name')

    namelist.to_file(namelist_wps)

    logger.debug('met_em_dir: %s' % met_em_dir)
    if not os.path.exists(met_em_dir):
        logger.debug('creating met_em_dir: %s' % met_em_dir)
        os.makedirs(met_em_dir)

    os.chdir(wps_run_dir)
    cmd = "%s/metgrid.exe" % wps_run_dir
    shared.run(cmd, config, from_dir=wps_run_dir)

    cmd = 'grep "Successful completion" %s/metgrid.log*' % wps_run_dir
    ret = shared.run_cmd(cmd, config)
    if ret != 0:
        raise IOError('metgrid.exe did not complete')

    logger.info('*** SUCCESS METGRID ***\n')

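# Hedged illustration of the vtable-as-dictionary convention used by
# run_metgrid (and run_ungrib below): each key becomes an ungrib prefix /
# fg_name entry and each value names the Vtable to link. Paths and keys
# below are examples only:
# config['vtable'] = {'GFS': '/path/to/Vtable.GFS',
#                     'SST': '/path/to/Vtable.SST'}
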
def build_qtbase(srcbase, buildbase, configureFlags, target_makeflags):
    srcdir = os.path.join(srcbase, 'qtbase')
    if osx:
        # out-of-source doesn't work on OS X (August 7 2012)
        builddir = srcdir
        run('git', ['clean', '-xdf'], cwd=builddir)
    else:
        builddir = os.path.join(buildbase, 'qtbase')
        create_empty(builddir)
    configure = 'configure.bat' if windows else 'configure'
    absConfigure = os.path.join(srcdir, configure)
    run(absConfigure, configureFlags, cwd=builddir)
    if target_makeflags != "":
        run(make, ['sub-src-qmake_all'], cwd=builddir)
        run(make, cwd=os.path.join(builddir, 'src', 'tools'))
        os.environ['MAKEFLAGS'] = os.environ['MAKEFLAGS'] + ' ' + target_makeflags
    run(make, cwd=builddir)
    makeInstall(cwd=builddir)

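# Hypothetical call sketch for the Qt build helper above; the paths and
# configure flags are illustrative, and `osx`, `windows`, `make`, `run` and
# `create_empty` are assumed to be defined elsewhere in this build script.
# build_qtbase('/src/qt5', '/build/qt5',
#              ['-opensource', '-confirm-license', '-nomake', 'tests'],
#              target_makeflags='')
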
def run_real(config):
    """Run real.exe and check output was successful

    Arguments:
    config -- dictionary containing various configuration options

    """
    logger = shared.get_logger()
    logger.info('\n*** RUNNING REAL ***')

    queue = config['queue']
    working_dir = config['working_dir']
    wrf_run_dir = config['wrf_run_dir']
    wps_dir = config['wps_dir']
    domain = config['domain']
    model_run = config['model_run']
    init_time = config['init_time']
    log_file = '%s/real.log' % wrf_run_dir

    # Log files from real appear in the current directory,
    # so we need to change directory first.
    os.chdir(wrf_run_dir)
    cmd = "%s/real.exe" % wrf_run_dir
    shared.run(cmd, config, from_dir=wrf_run_dir)

    rsl = '%s/rsl.error.0000' % wrf_run_dir
    if not os.path.exists(rsl):
        raise IOError('No log file found for real.exe')

    # now copy rsl file to a log directory
    cmd = 'cp %s %s/rsl/rsl.error.%s.%s.%s' % (rsl, working_dir, domain,
                                               model_run,
                                               init_time.strftime('%y-%m-%d_%H'))
    shared.run_cmd(cmd, config)

    cmd = 'grep "SUCCESS COMPLETE" %s/rsl.error.0000' % wrf_run_dir
    ret = shared.run_cmd(cmd, config)
    if ret != 0:
        raise IOError('real.exe did not complete')

    logger.info('*** SUCCESS REAL ***\n')

def run_gribmaster(config):
    """Runs the gribmaster programme to download the most recent boundary conditions"""
    logger = shared.get_logger()
    gm_dir = config['gm_dir']
    gm_transfer = config['gm_transfer']
    gm_dataset = config['gm_dataset']
    start = config['init_time']
    fcst_hours = config['fcst_hours']
    gm_log = config['gm_log']
    gm_sleep = config['gm_sleep']  # this is in minutes
    gm_max_attempts = int(config['gm_max_attempts'])

    log_dir = '/home/slha/forecasting'

    cmd = '%s/gribmaster --verbose --%s --dset %s --date %s --cycle %s --length %s > %s' % (
        gm_dir, gm_transfer, gm_dataset, start.strftime('%Y%m%d'),
        start.strftime('%H'), fcst_hours, gm_log)

    for attempt in range(gm_max_attempts):
        logger.info('*** RUNNING GRIBMASTER, attempt %d ***' % (attempt + 1))
        shared.run(cmd, config)

        # Check for failure: if we positively find the string BUMMER, we know
        # we have failed. The grep uses a separate variable so that retries
        # re-run gribmaster rather than the grep itself.
        check_cmd = 'grep "BUMMER" %s' % gm_log
        ret = subprocess.call(check_cmd, shell=True)
        if ret == 0:
            logger.error('*** FAIL GRIBMASTER: Attempt %d of %d ***' %
                         (attempt + 1, gm_max_attempts))
            logger.info('Sleeping for %s minutes' % gm_sleep)
            time.sleep(float(gm_sleep) * 60)
        # else we check for definite success
        else:
            check_cmd = 'grep "ENJOY" %s' % gm_log
            ret = subprocess.call(check_cmd, shell=True)
            if ret == 0:
                logger.info('*** SUCCESS GRIBMASTER ***')
                return

    raise IOError('gribmaster did not find files after %d attempts' % gm_max_attempts)

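# Illustrative gribmaster settings (example values, not project defaults):
# config['gm_transfer'] = 'ftp'   # becomes the --ftp transfer flag
# config['gm_dataset'] = 'gfs'    # passed to --dset
# config['gm_sleep'] = 10         # minutes to sleep between attempts
# config['gm_max_attempts'] = 3
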
def build_qtmodule(srcbase, buildbase, module, buildmode, installTarget):
    srcdir = os.path.join(srcbase, module)
    builddir = os.path.join(buildbase, module)
    create_empty(builddir)
    if buildmode == 'debug':
        configOptions = ['CONFIG-=release', 'CONFIG+=debug', 'CONFIG-=debug_and_release']
    elif buildmode == 'release':
        configOptions = ['CONFIG+=release', 'CONFIG-=debug', 'CONFIG-=debug_and_release']
    else:
        configOptions = ['CONFIG-=release', 'CONFIG-=debug', 'CONFIG+=debug_and_release']
    run(os.path.join(installTarget, 'bin', 'qmake'), configOptions + [srcdir],
        cwd=builddir)
    run(make, cwd=builddir)
    makeInstall(cwd=builddir)

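# Hypothetical usage: build and install a single Qt module in release mode
# against an already-installed qtbase (module name and paths illustrative).
# build_qtmodule('/src/qt5', '/build/qt5', 'qtdeclarative', 'release', '/opt/qt5')
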
def compress(config):
    """Compresses netcdf files to netcdf4 format using the nccopy utility.

    Will try and compress all output netcdf files associated with the
    current initial time, based on the standard WRF naming convention.
    If a simulation produces multiple wrfout files for an initial time
    (i.e. one file per day for three days), then only the first file will
    be compressed under the current configuration.

    nccopy does not support the -O overwrite flag, so we need to manually
    rename the files, and remove the originals on success"""
    logger = shared.get_logger()
    logger.info("*** Compressing wrfout files ***")

    wrfout_dir = config['wrfout_dir']
    init_time = config['init_time']
    max_dom = config['max_dom']
    comp_level = config['compression_level']

    wrfout_files = ['%s/wrfout_d%02d_%s' % (wrfout_dir, d,
                    init_time.strftime('%Y-%m-%d_%H:%M:%S'))
                    for d in range(1, max_dom + 1)]

    for f in wrfout_files:
        if not os.path.exists(f):
            raise MissingFile("could not find %s" % f)
        tmp_name = f + '.tmp'
        logger.debug("compressing %s to temporary file: %s" % (f, tmp_name))

        cmd = 'nccopy -k4 -d %s %s %s' % (comp_level, f, tmp_name)
        shared.run(cmd, config)

        if not os.path.exists(tmp_name):
            raise IOError("compression failed for %s" % f)

        os.remove(f)
        os.rename(tmp_name, f)

    logger.info("*** Done compressing wrfout files ***")

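# nccopy's -d flag takes a deflate level from 0 (no compression) to 9
# (maximum), so compression_level is expected to be a small integer:
# config['compression_level'] = 5   # illustrative value
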
def run_ndown(config):
    logger = shared.get_logger()
    logger.info('*** RUNNING NDOWN ***')

    wrf_run_dir = config['wrf_run_dir']
    queue = config['queue']
    log_file = '%s/ndown.log' % wrf_run_dir

    cmd = '%s/ndown.exe' % wrf_run_dir

    nprocs = config['num_procs']
    poll_interval = config['poll_interval']
    logger.debug(poll_interval)
    logger.debug(nprocs)
    logger.debug(nprocs['ndown.exe'])

    shared.run(cmd, config, wrf_run_dir)

    cmd = 'grep "Successful completion" %s' % log_file  # check for success
    ret = shared.run_cmd(cmd, config)
    if ret != 0:
        raise IOError('ndown.exe did not complete')

    logger.info('*** SUCCESS NDOWN ***')

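# The debug lines above imply num_procs maps executable names to process
# counts; a hedged example of that shape (the counts are illustrative):
# config['num_procs'] = {'ndown.exe': 1, 'wrf.exe': 24}
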
def makeInstall(cwd):
    if osx:
        # parallel make install seems fragile on OS X
        run(make, ['-j1', 'install'], cwd=cwd)
    else:
        run(make, ['install'], cwd=cwd)

def prepare_wps(config):
    """Runs all the pre-processing steps necessary for running WPS.

    Reads the current value of init_time from config, and links boundary
    condition files into the correct directory. Creates an output directory
    for the met_em files.

    Arguments:
    config -- dictionary containing various configuration options"""
    logger = shared.get_logger()
    logger.debug('*** PREPARING FILES FOR WPS ***')

    wps_dir = config['wps_dir']          # the base installation of WPS
    wps_run_dir = config['wps_run_dir']  # the directory to run WPS from
    working_dir = config['working_dir']  # model run directory
    met_em_dir = config['met_em_dir']
    init_time = config['init_time']

    grb_input_fmt = config['grb_input_fmt']
    vtable = config['vtable']
    bdy_times = shared.get_bdy_times(config)

    if isinstance(grb_input_fmt, dict):
        logger.debug(grb_input_fmt)
        fmts = grb_input_fmt.values()
    else:
        fmts = [grb_input_fmt]

    for fmt in fmts:
        #
        # Generate filelist based on the initial time, and the forecast hour
        #
        filelist = shared.get_bdy_filenames(fmt, bdy_times)

        #
        # Check the boundary files exist
        #
        logger.debug('checking boundary condition files exist')
        for f in filelist:
            if not os.path.exists(f):
                raise IOError('cannot find file: %s' % f)

    logger.debug('all boundary condition files exist')

    #
    # Run the link_grib script to link the FNL files
    #
    logger.debug('running link_grib.csh script to link grib files to GRIBFILE.AAA etc')
    os.chdir(wps_run_dir)
    args = ' '.join(filelist)
    cmd = '%s/link_grib.csh %s' % (wps_run_dir, args)
    shared.run(cmd, config)

    logger.debug('Path for met_em files is %s' % met_em_dir)
    if not os.path.exists(met_em_dir):
        os.makedirs(met_em_dir)

    logger.debug('*** FINISHED PREPARING FILES FOR WPS ***')

def run_ungrib(config):
    """Runs ungrib.exe and checks output was successful.

    If vtable and grb_input_fmt are NOT dictionaries, then dictionaries
    will be constructed from them using the key bdy_conditions from the
    metadata.

    Arguments:
    config -- dictionary specifying configuration options

    """
    logger = shared.get_logger()
    wps_dir = config['wps_dir']
    wps_run_dir = config['wps_run_dir']
    namelist_wps = config['namelist_wps']
    working_dir = config['working_dir']
    met_em_dir = config['met_em_dir']
    init_time = config['init_time']
    log_file = '%s/ungrib.log' % wps_run_dir
    vtable = config['vtable']
    grb_input_fmt = config['grb_input_fmt']
    grb_input_delay = config.get("grb_input_delay")  # this allows None to be returned
    bdy_conditions = config['bdy_conditions']

    logger.info("\n*** RUNNING UNGRIB ***")

    namelist = shared.read_namelist(namelist_wps)
    bdy_times = shared.get_bdy_times(config)

    if not isinstance(grb_input_fmt, dict):
        grb_input_fmt = {bdy_conditions: grb_input_fmt}

    if not isinstance(vtable, dict):
        vtable = {bdy_conditions: vtable}

    #
    # Check that boundary conditions exist
    #
    for key in vtable.keys():
        if grb_input_delay and key in grb_input_delay:
            logger.debug("applying delay")
            delay = datetime.timedelta(0, grb_input_delay[key] * 60 * 60)
            new_bdy_times = [b - delay for b in bdy_times]
        else:
            logger.debug("no delay applied")
            new_bdy_times = bdy_times

        fmt = grb_input_fmt[key]

        #
        # Generate filelist based on the initial time, and the forecast hour
        #
        filelist = list(OrderedDict.fromkeys(shared.get_bdy_filenames(fmt, new_bdy_times)))

        #
        # Check the boundary files exist
        #
        logger.debug('checking boundary condition files exist')
        for f in filelist:
            if not os.path.exists(f):
                raise IOError('cannot find file: %s' % f)

    logger.debug('all boundary condition files exist')

    #
    # Now process boundary conditions
    #
    for key in vtable.keys():
        if grb_input_delay and key in grb_input_delay:
            logger.debug("applying delay")
            delay = datetime.timedelta(0, grb_input_delay[key] * 60 * 60)
            new_bdy_times = [b - delay for b in bdy_times]
        else:
            logger.debug("no delay applied")
            new_bdy_times = bdy_times

        fmt = grb_input_fmt[key]

        #
        # Generate filelist based on the initial time, and the forecast hour
        #
        filelist = list(OrderedDict.fromkeys(shared.get_bdy_filenames(fmt, new_bdy_times)))

        logger.debug('running link_grib.csh script to link grib files to GRIBFILE.AAA etc')
        os.chdir(wps_run_dir)
        args = ' '.join(filelist)
        cmd = '%s/link_grib.csh %s' % (wps_run_dir, args)
        shared.run_cmd(cmd, config)

        vtab_path = vtable[key]
        prefix = key
        namelist.update('prefix', key)
        namelist.to_file(namelist_wps)
        link_namelist_wps(config)

        vtab_wps = wps_run_dir + '/Vtable'
        if os.path.exists(vtab_wps):
            os.remove(vtab_wps)
        cmd = 'ln -sf %s %s' % (vtab_path, vtab_wps)
        logger.debug(cmd)
        subprocess.call(cmd, shell=True)

        cmd = '%s/ungrib.exe' % wps_run_dir
        logger.debug(cmd)
        shared.run(cmd, config, wps_run_dir)

    cmd = 'grep "Successful completion" %s/ungrib.log*' % wps_run_dir  # check for success
    ret = shared.run_cmd(cmd, config)
    if ret != 0:
        raise IOError('ungrib.exe did not complete')

    logger.info('*** SUCCESS UNGRIB ***\n')

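# Hedged example of grb_input_delay: keyed like vtable, with values in hours
# (converted to seconds above via datetime.timedelta). Illustrative only:
# config['grb_input_delay'] = {'SST': 24}
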
def produce_ncl_plots(config):
    """Calls a series of ncl scripts to produce visualisations.

    Need to think about how to define a flexible visualisation framework.
    Currently communication with NCL is via environment variables.

    Perhaps in future we should move to PyNGL for easier (direct)
    integration, as then we could simply pass in the config dictionary,
    use the same logging framework, and make use of a vtable-like mapping
    to forecast vars.

    However, for the time being we design each ncl script to expect certain
    environment variables. Then the list of ncl scripts to run can simply
    be specified somewhere in the config file, e.g. wrf_basic_plots.ncl,
    wrf_vertical_plots.ncl etc.

    Arguments:
    config -- dictionary containing various configuration options

    """
    logger = shared.get_logger()
    domain = config['domain']
    model_run = config['model_run']
    working_dir = config['working_dir']
    ncl_code_dir = config['ncl_code_dir']
    ncl_files = config['ncl_code']
    ncl_code = ncl_files
    ncl_log = config['ncl_log']
    wrfout_dir = config['wrfout_dir']
    init_time = config['init_time']
    dom = config['dom']
    fcst_file = '%s/wrfout_d%02d_%s:00:00.nc' % (
        wrfout_dir, dom, init_time.strftime("%Y-%m-%d_%H"))
    ncl_in_file = fcst_file
    ncl_loc_file = config['locations_file']
    ncl_out_dir = shared.sub_date(config['ncl_out_dir'], init_time=init_time)
    ncl_out_type = config['ncl_out_type']
    nest_id = '%02d' % dom
    ncl_opt_template = config['ncl_opt_template']
    ncl_opt_file = config['ncl_opt_file']
    extract_hgts = config['extract_hgts']

    logger.info('*** RUNNING NCL SCRIPTS FOR DOMAIN d%02d ***' % dom)

    if not os.path.exists(ncl_out_dir):
        os.makedirs(ncl_out_dir)

    if not ncl_in_file.endswith('.nc'):
        ncl_in_file = ncl_in_file + '.nc'

    # NCL array literal, e.g. (/10,50,100/)
    ncl_hgts = '(/%s/)' % ','.join(map(str, extract_hgts))

    replacements = {
        '<ncl_in_file>': ncl_in_file,
        '<ncl_out_dir>': ncl_out_dir,
        '<ncl_out_type>': ncl_out_type,
        '<ncl_loc_file>': ncl_loc_file,
        '<extract_heights>': ncl_hgts
    }
    fill_template(ncl_opt_template, ncl_opt_file, replacements)

    logger.debug('ncl_opt_template: %s' % ncl_opt_template)
    logger.debug('ncl_in_file  ----> %s' % ncl_in_file)
    logger.debug('ncl_out_dir  ----> %s' % ncl_out_dir)
    logger.debug('ncl_out_type ----> %s' % ncl_out_type)
    logger.debug('ncl_loc_file ----> %s' % ncl_loc_file)
    logger.debug('ncl_opt_file: %s' % ncl_opt_file)

    for script in ncl_code:
        logger.debug(script)
        queue = config['queue']
        cmd = "ncl %s" % script

        # Earlier variants passed the options on the ncl command line instead
        # of via an options file; kept for reference:
        # cmd = """ncl ncl_in_file="%s" ncl_out_dir="%s" ncl_out_type="%s" ncl_loc_file="%s" %s""" \
        #     % (ncl_in_file, ncl_out_dir, ncl_out_type, ncl_loc_file, script)

        env_vars = {'NCL_OPT_FILE': ncl_opt_file}
        ret = shared.run(cmd, config, env_vars=env_vars)

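# `fill_template` is called above but not defined in this section; a minimal
# sketch follows, under the assumption that it does plain string substitution
# of the <placeholder> tokens from a template file into the options file.
# The real helper may differ.
def fill_template(template, outfile, replacements):
    """Copy `template` to `outfile`, replacing each key in `replacements`
    with its value (assumed behaviour)."""
    with open(template) as fin:
        text = fin.read()
    for token, value in replacements.items():
        text = text.replace(token, str(value))
    with open(outfile, 'w') as fout:
        fout.write(text)
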
def ungrib_sst(config):
    """Runs ungrib.exe for SST fields, makes and modifies a copy of
    namelist.wps, then restores the original namelist.wps"""
    logger = shared.get_logger()
    wps_dir = config['wps_dir']
    wps_run_dir = config['wps_run_dir']
    tmp_dir = config['tmp_dir']
    working_dir = config['working_dir']
    init_time = config['init_time']
    max_dom = config['max_dom']
    sst_local_dir = config['sst_local_dir']
    sst_time = shared.get_sst_time(config)
    sst_filename = shared.get_sst_filename(config)
    vtable_sst = config['sst_vtable']
    vtable = wps_run_dir + '/Vtable'
    queue = config['queue']
    log_file = '%s/ungrib.sst.log' % wps_run_dir
    namelist_wps = config['namelist_wps']
    namelist_sst = '%s/namelist.sst' % working_dir
    namelist = shared.read_namelist(namelist_wps)

    #
    # update one line to point to the new SST field
    # ungrib.exe will name the SST field as e.g.
    # SST:2013-04-24_00
    #
    constants_name = '%s/SST:%s' % (wps_run_dir, sst_time.strftime('%Y-%m-%d_%H'))
    logger.debug('Updating constants_name ----> %s' % constants_name)
    namelist.update('constants_name', constants_name, section='metgrid')

    # Write the changes into the original
    namelist.to_file(namelist_wps)

    #
    # Update start and end time to process SST
    #
    start_str = sst_time.strftime("%Y-%m-%d_%H:%M:%S")
    end_str = sst_time.strftime("%Y-%m-%d_%H:%M:%S")
    logger.debug("Updating namelist.sst")
    logger.debug('PREFIX ------> SST')
    logger.debug('start_date --> ' + start_str)
    logger.debug('end_date ----> ' + end_str)

    namelist.update('prefix', 'SST')
    namelist.update('start_date', [start_str] * max_dom)
    namelist.update('end_date', [end_str] * max_dom)
    logger.debug('writing modified namelist.sst to file -------> %s' % namelist_sst)
    namelist.to_file(namelist_sst)

    # remove any linked namelist.wps
    logger.debug('removing namelist.wps')
    namelist_run = '%s/namelist.wps' % wps_run_dir
    if os.path.exists(namelist_run):
        os.remove(namelist_run)

    # link namelist.sst to namelist.wps in WPS run dir
    logger.debug('linking namelist.sst -----> namelist.wps')
    cmd = 'ln -sf %s %s' % (namelist_sst, namelist_run)
    shared.run_cmd(cmd, config)

    logger.debug('removing Vtable')
    if os.path.exists(vtable):
        os.remove(vtable)
    logger.debug('linking Vtable.SST ----> Vtable')
    cmd = 'ln -sf %s %s' % (vtable_sst, vtable)
    shared.run_cmd(cmd, config)

    # run link_grib to link SST grib files
    logger.debug('Linking SST GRIB files')
    cmd = '%s/link_grib.csh %s/%s' % (wps_dir, sst_local_dir, sst_filename)
    shared.run_cmd(cmd, config)

    logger.info('*** RUNNING UNGRIB FOR SST ***')
    cmd = '%s/ungrib.exe' % wps_run_dir
    shared.run(cmd, config, wps_run_dir)

    cmd = 'grep "Successful completion" ./ungrib.log*'  # check for success
    ret = shared.run_cmd(cmd, config)
    if ret != 0:
        raise IOError('Ungrib failed for SST')

    logger.info('*** SUCCESS UNGRIB SST ***')

    logger.debug('Removing namelist.wps')
    if os.path.exists(namelist_run):
        os.remove(namelist_run)

    # link in original (unmodified) namelist.wps
    cmd = 'ln -sf %s %s' % (namelist_wps, namelist_run)
    shared.run_cmd(cmd, config)
