def main():
    """Runs the GSI data assimilation for the domain named in the
    $GSI_DOMAIN environment variable ("d02" or "d03"), exiting early
    when GSI is disabled in the configuration or the GSI status file."""
    environ = os.environ
    gsi_domain = environ['GSI_DOMAIN'].lower()
    if gsi_domain not in ('d02', 'd03'):
        fail('Aborting: gsi_domain="%s" must be "d02" or "d03"' % (gsi_domain, ))
    import hwrf_expt
    hwrf_expt.init_module()
    logger = hwrf_expt.conf.log('exhwrf_gsi')
    # Global configuration switch: skip the whole job if GSI is off.
    if not hwrf_expt.conf.getbool('config', 'run_gsi'):
        jlogger.info('GSI is disabled. This job need not be run.')
        sys.exit(0)
    # WCOSS (gyre/tide) needs machine-specific environment tweaks.
    if produtil.cluster.name() in ['gyre', 'tide']:
        hwrf_wcoss.set_vars_for_gsi(logger)
    else:
        logger.info('Not on WCOSS, so not setting WCOSS-specific vars.')
    # Per-domain switch written by an earlier job into the status file.
    if hwrf.gsi.get_gsistatus(hwrf_expt.conf, 'gsi_' + gsi_domain, logger):
        logger.info('GSI is enabled for %s.' % (gsi_domain, ))
    else:
        jlogger.info('GSI is disabled for %s. This job need not be run.'
                     % (gsi_domain, ))
        sys.exit(0)
    runner = hwrf_expt.gsi_d02 if gsi_domain == 'd02' else hwrf_expt.gsi_d03
    runner.run()
def main():
    """Verifies that every product of every ENSDA ensemble member was
    delivered to COM, then writes the cycle's "done file".  Exits with
    status 1 if any non-optional product is missing or empty."""
    import hwrf_expt
    hwrf_expt.init_module(make_ensemble_da=True)
    conf = hwrf_expt.conf
    run_ensemble_da = conf.getbool('config', 'run_ensemble_da')
    ensda_flag_file = conf.getstr('tdrcheck', 'tdr_flag_file')
    # Flag file written by an earlier TDR-check job: did ENSDA actually run?
    run_ensda = read_ensda_flag_file(ensda_flag_file)
    if run_ensemble_da and run_ensda:
        ensda_size = conf.getint('hwrf_da_ens', 'ensda_size')
    else:
        jlogger.info('ENSDA was not run.')
        # Zero members => the verification loop below is skipped entirely,
        # and we fall straight through to donefile creation.
        ensda_size = 0
    logger = conf.log('output')
    bad = False
    for ens in xrange(ensda_size):
        imemb = ens + 1  # members are numbered from 1, not 0
        omemb = hwrf_expt.ensda.member(hwrf_expt.conf.cycle, imemb)
        for prod in omemb.products():
            if not prod.location:
                logger.warning('ensda %03d: No product: %s' % (
                    imemb,
                    prod.did,
                ))
                bad = True
            elif not prod.available:
                logger.warning(
                    'ensda %03d: product %s not available (location %s)' %
                    (imemb, repr(prod.did), repr(prod.location)))
                bad = True
            else:
                # Expected delivery location in COM for this member's product:
                dest = '%s/%s.ensda_%03d.%s' % (
                    hwrf_expt.conf.getdir('com'),
                    hwrf_expt.conf.getstr('config', 'out_prefix'), imemb,
                    os.path.basename(prod.location))
                if not os.path.exists(dest):
                    logger.warning('ensda %03d: %s: does not exist' % (
                        imemb,
                        dest,
                    ))
                    bad = True
                elif os.path.getsize(dest) < 1:
                    logger.warning('ensda %03d: %s: is empty' % (imemb, dest))
                    bad = True
                else:
                    logger.info('ensda %03d: %s exists and is non-empty.' %
                                (imemb, dest))
    if bad:
        logger.critical(
            'HWRF data assimilation ensemble products are missing.')
        sys.exit(1)
    jlogger.info('Creating donefile.')
    # NOTE(review): os.path.join is called with a single argument here,
    # so it is a no-op wrapper around the interpolated path.
    donefile = os.path.join(
        conf.strinterp('config', '{com}/{stormlabel}.done'))
    with open(donefile, 'wt') as f:
        f.write('Cycle is complete.')
def doit():
    """Entry point for the products job.  At the top level it re-arms
    alerts, re-fires completed-product callbacks, then launches parallel
    copies of itself; inside a subprocess (SCR_COMM_RANK set) it runs
    only the gribber/tracker worker via slave_main()."""
    produtil.setup.setup()
    # SCR_COMM_RANK is set only inside the mpi_serial subprocesses that
    # this very script launches; LAUNCH_SELF=no also forces worker mode.
    if 'SCR_COMM_RANK' not in os.environ \
            and os.environ.get('LAUNCH_SELF', 'yes') == 'yes':
        # This is the top level of the job: we are NOT inside an
        # mpi_serial call.
        # Initialize the hwrf_expt and re-call any callbacks for
        # completed products:
        hwrf_expt.init_module()
        logger = logging.getLogger('exhwrf_products')
        hwrf_wcoss.set_vars_for_products(logger)
        logger.info('Ensure incomplete products are marked as such...')
        hwrf_expt.gribber.uncomplete()
        logger.info('Add alerts and delveries...')
        hwrf_alerts.add_nhc_alerts()
        hwrf_alerts.add_regrib_alerts()
        hwrf_alerts.add_wave_alerts()
        logger.warning('''Rerunning dbn_alert for prior jobs' posted files.''')
        hwrf_expt.gribber.call_completed_callbacks()

        # We're in the top-level job.  Launch copies of ourself to run the
        # gribber and tracker:
        logger.warning('---------------------------------------------------')
        logger.warning('LAUNCH PARALLEL PORTION OF SCRIPT------------------')
        logger.warning('---------------------------------------------------')
        launchself()
        logger.warning('---------------------------------------------------')
        logger.warning('PARALLEL PORTION OF SCRIPT HAS ENDED---------------')
        logger.warning('---------------------------------------------------')
        # Gribber and tracker succeeded.  Run the products job:
        products()
    else:
        # We're in a subprocess.  Just run the gribber and tracker and return:
        slave_main()
def main():
    """Runs the POM ocean initialization.  Records the outcome in the
    ocean status file: True on success, False when the ocean is disabled,
    the basin is unsupported, or (with allow_fallbacks) the init fails."""
    import hwrf_expt
    hwrf_expt.init_module()
    conf = hwrf_expt.conf
    logger = conf.log('exhwrf_ocean_init')

    def mark_uncoupled():
        # Record "run without ocean" in the status file.
        hwrf.mpipomtc.set_ocstatus(conf, False, logger)

    # Clear any stale status before deciding the new one.
    hwrf.mpipomtc.unset_ocstatus(conf, logger)
    try:
        if not conf.getbool('config', 'run_ocean'):
            jlogger.info('Ocean is disabled. This job need not be run.')
            mark_uncoupled()
            return
        hwrf_expt.pominit.run()
        hwrf.mpipomtc.set_ocstatus(conf, True, logger)
    except pom.exceptions.POMUnsupportedBasin as e:
        # Expected situation, not an error: this basin has no ocean model.
        produtil.log.postmsg('Unsupported basin: will run without ocean.')
        mark_uncoupled()
        return
    except Exception as e:
        # With fallbacks enabled, degrade to an uncoupled forecast
        # instead of failing the workflow.
        if conf.getbool('config', 'allow_fallbacks', False):
            logger.error('Could not run ocean init: will run without ocean.'
                         ' Unhandled exception: ' + str(e),
                         exc_info=True)
            mark_uncoupled()
            return
        raise
def main():
    """Runs the merge step for the model named in $INIT_MODEL
    (gfs or gdas1), skipping the job when GSI is disabled."""
    ENV=os.environ
    init_model=ENV.get('INIT_MODEL','GDAS1').lower()
    if init_model!='gfs' and init_model!='gdas1':
        fail('Aborting: init_model="%s" must be "gfs" or "gdas1"'
             %(init_model,))
    import hwrf_expt
    hwrf_expt.init_module()
    conf=hwrf_expt.conf
    logger=conf.log('exhwrf_merge')
    if init_model=='gfs':
        # NOTE(review): the message says the merge need not run for GFS,
        # yet gfs_merge.run() is invoked immediately after -- confirm
        # whether the message, the call, or both are intended.
        jlogger.info('MERGE does not need to be run for INIT_MODEL=GFS')
        hwrf_expt.gfs_merge.run()
    elif not hwrf_expt.conf.getbool('config','run_gsi'):
        # GSI turned off in the static configuration.
        jlogger.info('GSI is disabled via configuration settings. '
                     'This job need not be run.')
        sys.exit(0)
    elif not hwrf.gsi.get_gsistatus(conf,'gsi_d02',logger) and \
            not hwrf.gsi.get_gsistatus(conf,'gsi_d03',logger):
        # GSI enabled in config but disabled at runtime for both domains.
        jlogger.info('GSI status file claims GSI is disabled for both '
                     'domains. This job need not be run.')
        sys.exit(0)
    else:
        hwrf_expt.gdas_merge.run()
def main():
    """Runs the ENSDA pre-processing step, or writes a False flag file
    and exits when the ensemble DA is disabled in the configuration."""
    import hwrf_expt
    hwrf_expt.init_module(make_ensemble_da=True)
    conf = hwrf_expt.conf
    run_ensemble_da = conf.getbool('config', 'run_ensemble_da')
    if run_ensemble_da:
        hwrf_expt.ensda_pre.run()
    else:
        jlogger.info('ENSDA is disabled for this configuration. '
                     'This job need not be run.')
        # Record the decision so downstream jobs know ENSDA was skipped.
        hwrf_expt.ensda_pre.write_flag_file(False)
def post():
    """Runs the WRF post-processors (non-satellite, satellite, and
    optionally the wrfcopier) in a polling loop until every enabled
    task reports done."""
    produtil.setup.setup()
    jlogger.info('starting post')
    import hwrf_expt
    hwrf_expt.init_module()
    run_copier = hwrf_expt.conf.getbool('config', 'post_runs_wrfcopier',
                                        False)
    run_satpost = hwrf_expt.conf.getbool('config', 'run_satpost', True)
    # Make sure we check all tasks to see if they're posted:
    hwrf_expt.nonsatpost.state = UNSTARTED
    hwrf_expt.satpost.state = UNSTARTED
    if run_copier:
        hwrf_expt.wrfcopier.state = UNSTARTED
    logger = logging.getLogger('exhwrf_post')
    # Change to a temp directory to run the post:
    with NamedDir(hwrf_expt.WORKhwrf, logger=logger) as t:
        #hwrf_expt.ds.dump() # dump entire database state to stdout
        alldone = False
        while not alldone:
            before = int(time.time())
            if run_copier:
                if not done(hwrf_expt.wrfcopier):
                    hwrf_expt.wrfcopier.runpart()
            # NOTE(review): nonsatpost.runpart() is invoked up to three
            # times per iteration (here, below, and after satpost) --
            # presumably to give it more work slices per cycle; confirm
            # the repetition is intentional.
            if not done(hwrf_expt.nonsatpost):
                hwrf_expt.nonsatpost.runpart()
            if not done(hwrf_expt.nonsatpost):
                hwrf_expt.nonsatpost.runpart()
            if run_satpost:
                if not done(hwrf_expt.satpost):
                    hwrf_expt.satpost.runpart()
            if not done(hwrf_expt.nonsatpost):
                hwrf_expt.nonsatpost.runpart()
            # Done only when every *enabled* task reports done.
            alldone = ( done(hwrf_expt.satpost) or not run_satpost ) \
                and done(hwrf_expt.nonsatpost) \
                and ( not run_copier or done(hwrf_expt.wrfcopier) )
            after = int(time.time())
            took = after - before
            threshold = 5   # seconds: "iteration did no real work" cutoff
            sleeptime = 20  # seconds: back off when there was nothing to do
            if took < threshold:
                logger.info(
                    'Post loop iteration took only %d seconds, which is '
                    'less than the threshold of %d seconds. Will sleep '
                    '%d seconds.' % (took, threshold, sleeptime))
                time.sleep(sleeptime)
            else:
                logger.info('Post loop iteration took %d seconds, '
                            'which is above the threshold of %d. '
                            'Sleeping only one second.' % (took, threshold))
                time.sleep(1)  # avoid thrash loop in case of logic error
            logger.info('Done sleeping.')
    jlogger.info('completed post')
def main():
    """Checks the results of all initialization jobs (ocean, GSI, FGAT
    and GFS relocation) and the forecast's input files.  Exits nonzero
    when inputs are missing or an init job failed."""
    produtil.setup.setup()
    hwrf_expt.init_module()
    conf = hwrf_expt.conf
    logger = conf.log("check_init")
    ocean_flag = conf.getbool('config', 'run_ocean')
    gsi_flag = conf.getbool('config', 'run_gsi')
    reloc_flag = conf.getbool('config', 'run_relocation')
    okay = True
    if ocean_flag:
        if check_ocean_init(logger):
            logger.info('Ocean init succeeded.')
        elif conf.syndat.basin1 in hwrf_expt.non_ocean_basins:
            # Failure is tolerated in basins with no ocean coupling.
            logger.info('Ocean init aborted, but basin is not supported.')
        else:
            logger.error('Ocean init failed.')
            okay = False
    else:
        logger.info('Ocean is disabled. Skipping ocean checks.')
    if gsi_flag:
        if not check_gsi(logger):
            logger.error('GSI failed.')
            okay = False
        if not check_fgat_relocate(conf, logger):
            logger.error('FGAT relocate failed.')
            okay = False
    elif reloc_flag:
        logger.info('GSI is disabled. Skipping GSI and FGAT '
                    'relocation checks.')
        if not check_gfs_relocate(conf, logger):
            logger.error('GFS relocate failed.')
            # NOTE(review): unlike the GSI/FGAT checks above, a GFS
            # relocate failure does not set okay=False, so it is logged
            # but does not fail the job -- confirm this is intended.
    else:
        logger.info('Relocation and GSI are disabled. '
                    'Skipping relocation checks.')
    logger.info('Asking the forecast object to check if all input files '
                'are available.')
    # NOTE(review): Python 2 debug print left in -- writes the forecast
    # task's class name to stdout.
    print type(hwrf_expt.runwrf).__name__
    have_input = hwrf_expt.runwrf.check_all_inputs()
    if not have_input:
        okay = False
        logger.error('FAILURE: WRF or POM inputs are missing')
    # Missing inputs take precedence over failed init jobs in the exit path.
    if not have_input:
        logger.critical('FORECAST INPUTS ARE MISSING!!')
        sys.exit(1)
    elif not okay:
        logger.critical('INIT JOBS DID NOT SUCCEED!!')
        sys.exit(1)
def main():
    """Marks the forecast and all post-processing tasks as not-run so
    they can be re-executed, inside a single database transaction."""
    produtil.setup.setup()
    hwrf_expt.init_module()
    jlogger = produtil.log.jlogger
    jlogger.info('unpost starting')
    # One transaction so the task states change atomically.
    with hwrf_expt.ds.transaction():
        hwrf_expt.runwrf.state = UNSTARTED
        hwrf_expt.runwrf.unrun()
        for task in (hwrf_expt.wrfcopier, hwrf_expt.satpost,
                     hwrf_expt.nonsatpost, hwrf_expt.gribber):
            task.unrun()
    jlogger.info('unpost completed')
def main():
    """Runs the bufrprep step when GSI is enabled, then updates the
    GSI status file either way."""
    import hwrf_expt
    hwrf_expt.init_module()
    conf = hwrf_expt.conf
    logger = conf.log('exhwrf_bufrprep')
    # Clear any stale status before this cycle's run.
    unset_gsistatus(conf, logger)
    if not hwrf_expt.conf.getbool('config', 'run_gsi'):
        jlogger.info('GSI is disabled. This job need not be run.')
    else:
        hwrf_expt.bufrprep.run()
    # Status is rewritten regardless of whether bufrprep ran.
    set_gsistatus(conf, logger)
def main():
    """Runs the requested part(s) of the HWRF initialization for the
    model given in $INIT_MODEL (gfs or gdas1) at forecast hour
    $INIT_FHR, as selected by $INIT_PARTS (parent, 3dvar, bdy or all).

    Fix: the gdas1 sanity-check message claimed init_fhr "must be > 1"
    while the condition (init_fhr < 1) accepts 1; the message now reads
    ">= 1", matching both the check and the sibling relocate script."""
    ENV = os.environ
    init_model = ENV['INIT_MODEL'].lower()
    init_fhr = int(ENV.get('INIT_FHR', '0'))
    init_parts = ENV['INIT_PARTS'].lower()
    if init_model != 'gfs' and init_model != 'gdas1':
        fail('Aborting: init_model="%s" must be "gfs" or "gdas1"' %
             (init_model, ))
    if init_model == 'gdas1' and init_fhr < 1:
        fail(
            'Aborting: when init_model=gdas1, init_fhr must be >= 1 (init_fhr=%d)'
            % (init_fhr, ))
    if init_model == 'gfs':
        init_fhr = 0  # GFS init is always at the analysis time
    import hwrf_expt
    hwrf_expt.init_module()
    os.chdir(hwrf_expt.conf.getdir('WORKhwrf'))
    if init_model == 'gfs':
        init = hwrf_expt.gfs_init
    elif not hwrf_expt.conf.getbool('config', 'run_gsi'):
        # FGAT initialization is only needed to feed GSI.
        jlogger.info('GSI is disabled. This job need not be run.')
        sys.exit(0)
    else:
        # Find the FGAT init object whose hour matches init_fhr.
        init = None
        logger = hwrf_expt.fgat_init.log()
        logger.info('search for fgat hour %d' % (init_fhr, ))
        for fhr, init in hwrf_expt.fgat_init.fhr_and_init():
            if abs(fhr - init_fhr) < 0.01:
                logger.info('fhr %d is init_fhr %d' % (fhr, init_fhr))
                #init.run()
                break
            else:
                logger.info('fhr %d is not init_fhr %d' % (fhr, init_fhr))
    assert (init is not None)
    # Run the requested portion(s) of the initialization:
    if init_parts == 'parent':
        init.run_through_anl()
    elif init_parts == '3dvar':
        init.run_through_anl()
        init.run_init_after_anl()
    elif init_parts == 'bdy':
        init.run_real_bdy()
    elif init_parts == 'all':
        init.run_through_anl()
        init.run_init_after_anl()
        init.run_real_bdy()
    else:
        fail(
            'Aborting: invalid value of INIT_PARTS: "%s" (must be "parent," "3dvar" or "bdy")'
            % (init_parts, ))
def slave_main():
    """This is run multiple times in parallel, once in each subprocess.

    Decides from the dry-run starter whether this rank is the tracker or
    a regribber/copier, redirects stdout/stderr accordingly, then runs
    the real worker via starter(dryrun=False)."""
    rank = int(os.environ['SCR_COMM_RANK'])
    count = int(os.environ['SCR_COMM_SIZE'])
    print 'MPI communicator: rank=%d size=%d' % (rank, count)
    hwrf_expt.init_module()
    hwrf_alerts.add_regrib_alerts()
    hwrf_alerts.add_tracker_alerts()
    # Substitution values for the %-formatted log path templates below.
    subdict = {
        'RANK': rank,
        'COUNT': count,
        'WHO': 'regribber',
        'jobid': produtil.batchsystem.jobid(),
        'WORKhwrf': hwrf_expt.conf.getdir('WORKhwrf')
    }
    # Dry run: only asks the starter what role this rank would play.
    whoami = starter(dryrun=True)
    subdict['THREAD_WHOAMI'] = whoami
    if whoami.find('tracker') >= 0:
        # Redirect stdout and stderr to one stream for tracker job:
        if 'TRACKER_LOGS' in os.environ:
            r = os.environ.get('TRACKER_LOGS')
        else:
            r = hwrf_expt.conf.strinterp(
                'config', '%(WORKhwrf)s/%(jobid)s-%(THREAD_WHOAMI)s.log')
        rstdout = r % dict(subdict, WHO='tracker', STREAM='out')
        # NOTE(review): rstderr is computed but unused in this branch;
        # stderrfile=None below merges stderr into stdout.
        rstderr = r % dict(subdict, WHO='tracker', STREAM='err')
        produtil.log.mpi_redirect(stdoutfile=rstdout,
                                  stderrfile=None,
                                  threadname='tracker')
    else:
        # Regribber and copier have one file per stream (out, err).
        if 'REGRIBBER_LOGS' in os.environ:
            r = os.environ['REGRIBBER_LOGS']
        else:
            r = hwrf_expt.conf.strinterp(
                'config',
                '%(WORKhwrf)s/%(jobid)s-%(THREAD_WHOAMI)s.%(STREAM)s',
                threadwhoami=whoami)
        rstdout = r % dict(subdict, WHO='regribber', STREAM='out')
        rstderr = r % dict(subdict, WHO='regribber', STREAM='err')
        logging.getLogger('hwrf').warning(
            'Redirecting regribber %d to: stderr=%s stdout=%s' %
            (rank, rstderr, rstdout))
        produtil.log.mpi_redirect(stdoutfile=rstdout,
                                  stderrfile=rstderr,
                                  threadname='regrib%d' % (rank, ))
    # Real run: actually perform this rank's work.
    whoami = starter(dryrun=False)
def doit():
    """Runs the HWRF coupled (or uncoupled) forecast.  Decides coupling
    from the ocean status file and the pom_init completion flag, with
    uncoupled fallbacks for unsupported basins and (when allowed)
    failed ocean inits."""
    produtil.setup.setup()
    import hwrf_expt
    hwrf_expt.init_module()
    conf = hwrf_expt.conf
    logger = hwrf_expt.conf.log('exhwrf_forecast')
    ocean_flag = conf.getbool('config', 'run_ocean')
    fallbacks_flag = conf.getbool('config', 'allow_fallbacks')
    wrf_ranks = conf.getint('runwrf', 'wrf_ranks')
    if ocean_flag:
        ocean_success = hwrf.mpipomtc.get_ocstatus(conf, logger)
        if not hwrf_expt.pominit.is_completed():
            # Status file may say success, but the task DB is authoritative.
            logger.warning('The pom_init completion flag is off. '
                           'Ocean init failed.')
            ocean_success = False
        if not ocean_success:
            basin1 = conf.syndat.basin1
            if basin1 in hwrf_expt.non_ocean_basins:
                # Expected: no ocean coupling in this basin.
                produtil.log.postmsg(
                    'Cannot run ocean in this basin- run uncoupled.')
                set_vars(False, logger, wrf_ranks)
                hwrf_expt.runwrf.run(coupled=False)
                return
            elif fallbacks_flag:
                logger.critical(
                    'CRITICAL FAILURE: HWRF ocean init failed, but '
                    'fallbacks are enabled. Running uncoupled.')
                set_vars(False, logger, wrf_ranks)
                hwrf_expt.runwrf.run(coupled=False)
                return
            else:
                logger.critical(
                    'CRITICAL FAILURE: HWRF ocean init failed, and '
                    'fallbacks are disabled. Aborting.')
                sys.exit(1)
        else:
            produtil.log.postmsg('Ocean init succeeded. Running coupled.')
            set_vars(True, logger, wrf_ranks)
    else:
        produtil.log.postmsg('Ocean is disabled. Running uncoupled.')
        set_vars(False, logger, wrf_ranks)
    # Reached by the coupled-success and ocean-disabled paths; the
    # uncoupled fallback paths above return early after run(coupled=False).
    hwrf_expt.runwrf.run()
    produtil.log.postmsg('Forecast complete.')
def main():
    """Collects the wrfout files for every domain at 6-hourly times and
    archives them to HPSS with htar.  Exits quietly when no [archive]
    wrfout option is configured."""
    hwrf_expt.init_module()
    conf = hwrf_expt.conf
    if not conf.has_option('archive', 'wrfout'):
        jlogger.info(
            'No wrfout option in [archive] section. Will not make wrfout archive.'
        )
        sys.exit(0)
    logger = conf.log()
    files = list()
    dt = hwrf.numerics.to_timedelta('6:00:00')
    t0 = conf.cycle
    wrf = hwrf_expt.runwrf.wrf()
    with produtil.cd.NamedDir(hwrf_expt.runwrf.location):
        # 22 six-hourly times starting at the cycle time.
        for i in xrange(22):
            for dom in wrf:
                t = t0 + dt * i
                # Prefer auxhist3, then history, then auxhist2 output.
                out = dom.get_output('auxhist3', t)
                if out is None:
                    out = dom.get_output('history', t)
                if out is None:
                    out = dom.get_output('auxhist2', t)
                if out is None:
                    logger.error('%s: could not determine wrfout for '
                                 'domain %s' %
                                 (t.strftime('%Y%m%d%H'), str(dom)))
                # NOTE(review): if out is still None the out.path() calls
                # below raise AttributeError; also missing/empty files are
                # only logged but still appended to the htar file list --
                # confirm both behaviors are intended.
                if not os.path.exists(out.path()):
                    logger.error('%s: does not exist' % (out.path(), ))
                if not produtil.fileop.isnonempty(out.path(), ):
                    logger.error('%s: is empty' % (out.path(), ))
                files.append(out.path())
        thearchive = conf.timestrinterp('archive', '{wrfout}', 0)
        if thearchive[0:5] != 'hpss:':
            logger.error('The wrfout archive path must begin with "hpss:": '
                         + thearchive)
            sys.exit(1)
        thearchive = thearchive[5:]
        adir = os.path.dirname(thearchive)
        # Create the destination directory on HPSS, then archive.
        mkdir = exe(conf.getexe('hsi'))['-P', 'mkdir', '-p', adir]
        run(mkdir, logger=logger)
        cmd = exe(conf.getexe('htar'))['-cpf', thearchive][files]
        checkrun(cmd, logger=logger)
def main():
    """Pulls all input data needed by this cycle into the "hwrfdata"
    catalog on disk, using htar/hsi for HPSS sources.  Exits 0 without
    doing anything when the configured input catalog is not "hwrfdata"
    (data is then expected to be pre-staged).

    Fix: the "Input catalog is %s" log message contained a %s
    placeholder but was never given an argument, so the literal "%s"
    was logged; the catalog name is now passed as a lazy logging arg."""
    import hwrf_expt
    hwrf_expt.init_module(make_ensemble_da=True)
    conf = hwrf_expt.conf
    cycle = hwrf_expt.conf.cycle
    input_catalog = conf.get('config', 'input_catalog')
    input_sources = conf.get('config', 'input_sources')
    logger = conf.log('exhwrf_input')
    WORKhwrf = conf.getdir('WORKhwrf')
    if input_catalog != 'hwrfdata':
        jlogger.info(
            "Input catalog is %s, not \"hwrfdata\" so data should "
            "be staged on disk already. I have nothing to do, so "
            "I'll just exit. This is not an error.", input_catalog)
        sys.exit(0)
    # Make sure we're in the cycle's work directory, otherwise we might
    # pull archives and other big things to $HOME.
    produtil.fileop.chdir(WORKhwrf, logger=logger)
    # Figure out how to run htar:
    htar = exe(conf.getexe('htar'))
    # Figure out how to run hsi:
    hsi = exe(conf.getexe('hsi'))
    # Get the list of data to pull:
    data = list(d for d in hwrf_expt.inputiter())
    # Decide where to put the data:
    cat = hwrf.input.DataCatalog(conf, "hwrfdata", cycle)
    # Now pull the data:
    getem = hwrf.input.InputSource(conf,
                                   input_sources,
                                   conf.cycle,
                                   htar=htar,
                                   hsi=hsi,
                                   logger=logger)
    bad = not getem.get(data, cat)
    if bad:
        jlogger.error('Missing data in exhwrf_input. Workflow may fail.')
        sys.exit(1)
def main():
    """Re-runs the GSI post-processor and its gribber, delivering the
    resulting GRIB output to COM.  Exits early when GSI is disabled."""
    hwrf_expt.init_module()
    conf = hwrf_expt.conf
    logger = conf.log('exhwrf_gsi_post')
    if not conf.getbool('config', 'run_gsi'):
        jlogger.info('GSI is disabled. This job need not be run.')
        sys.exit(0)
    produtil.fileop.chdir(conf.getdir('WORKhwrf'), logger=logger)
    # Force both tasks back to a not-run state before re-running.
    logger.info('Unrun GSI post and gribber')
    hwrf_expt.gsipost.unrun()
    hwrf_expt.gsigribber.unrun()
    logger.info('Run GSI post')
    hwrf_expt.gsipost.run()
    logger.info('Run GSI gribber, and deliver to com.')
    hwrf_expt.gsigribber.run(now=True)
def main():
    """Runs one ENSDA ensemble member (chosen by $ENSDA_MEMB) and
    delivers its products to COM, compressing NetCDF 3 files."""
    logger = logging.getLogger('exhwrf_ensda')
    ENV = os.environ
    memb = ENV.get('ENSDA_MEMB', 'NOPE').lower()
    if memb == 'nope':
        fail('Aborting: you must specify ENSDA_MEMB')
    imemb = int(memb, 10)  # force base 10 even with leading zeros
    jlogger.info('HWRF ensda member %03d starting' % imemb)
    set_vars(logger)
    import hwrf_expt
    hwrf_expt.init_module(make_ensemble_da=True)
    omemb = hwrf_expt.ensda.member(hwrf_expt.conf.cycle, imemb)
    omemb.run()
    # Deliver every available product of this member to COM.
    for prod in omemb.products():
        if not prod.location:
            logger.error('No product: %s' % (prod.did, ))
        elif not prod.available:
            logger.error('Product %s not available (location %s)' %
                         (repr(prod.did), repr(prod.location)))
        else:
            dest = '%s/%s.ensda_%03d.%s' % (hwrf_expt.conf.getdir('com'),
                                            hwrf_expt.conf.getstr(
                                                'config', 'out_prefix'),
                                            imemb,
                                            os.path.basename(prod.location))
            logger.info('%s %s: send to %s' %
                        (str(prod.did), repr(imemb), str(dest)))
            assert (os.path.isabs(dest))
            # Compressing copier; None means the file is not NetCDF 3.
            copier = hwrf_expt.wrfcopier.compression_copier(prod.location)
            if copier is None:
                logger.error('%s %s: not a NetCDF 3 file.' %
                             (str(prod.did), str(prod.location)))
                sys.exit(1)
            produtil.fileop.deliver_file(prod.location,
                                         dest,
                                         logger=logger,
                                         copier=copier)
    jlogger.info('HWRF ensda member %03d has completed' % imemb)
def main():
    """Runs the vortex relocation for the model given in $INIT_MODEL
    (gfs or gdas1); for gdas1, relocates only the FGAT hour matching
    $INIT_FHR.  Skipped when GSI is disabled (gdas1 only)."""
    ENV = os.environ
    init_model = ENV['INIT_MODEL'].lower()
    init_fhr = int(ENV.get('INIT_FHR', '0'))
    if init_model != 'gfs' and init_model != 'gdas1':
        fail('Aborting: init_model="%s" must be "gfs" or "gdas1"' %
             (init_model, ))
    if init_model == 'gdas1' and init_fhr < 1:
        fail(
            'Aborting: when init_model=gdas1, init_fhr must be >= 1 (init_fhr=%d)'
            % (init_fhr, ))
    if init_model == 'gfs':
        init_fhr = 0  # GFS relocation is always at the analysis time
    import hwrf_expt
    hwrf_expt.init_module()
    if init_model == 'gfs':
        jlogger.info('HWRF relocation for GFS fhr starting')
        # NOTE(review): the assert at the end requires run_relocate()
        # to return a non-None value here -- confirm its return value.
        init = hwrf_expt.gfs_init.run_relocate()
        jlogger.info('HWRF relocation for GFS fhr completed')
    elif not hwrf_expt.conf.getbool('config', 'run_gsi'):
        # FGAT relocation is only needed to feed GSI.
        jlogger.info('GSI is disabled. This job need not be run.')
        sys.exit(0)
    else:
        # Find and relocate the FGAT init whose hour matches init_fhr.
        init = None
        logger = hwrf_expt.fgat_init.log()
        logger.info('search for fgat hour %d' % (init_fhr, ))
        for fhr, init in hwrf_expt.fgat_init.fhr_and_init():
            if abs(fhr - init_fhr) < 0.01:
                logger.info('fhr %d is init_fhr %d' % (fhr, init_fhr))
                jlogger.info('HWRF relocation for GDAS1 fhr %d starting' %
                             fhr)
                init.run_relocate()
                jlogger.info('HWRF relocation for GDAS1 fhr %d completed' %
                             fhr)
                break
            else:
                logger.info('fhr %d is not init_fhr %d' % (fhr, init_fhr))
    assert (init is not None)
cmd = exe(conf.getexe('htar'))[flags + 'f', path, '-L', '-'] << files elif archive[0:5] == 'hpsz:': topath = archive[5:] frompath = conf.strinterp('config', '{WORKhwrf}/stage-archive.tar.gz') cmd = exe(conf.getexe('hsi'))['put', frompath, ':', topath] checkrun(cmd, logger=logger) postmsg('hwrf_archive tape step completed') if __name__ == '__main__': try: acase = os.environ.get('ARCHIVE_STEP', 'BOTH').upper() produtil.setup.setup() hwrf_expt.init_module() if acase == 'DISK': main_disk() elif acase == 'TAPE': main_tape() elif acase == 'BOTH': main_disk() main_tape() else: postmsg('INVALID JHWRF_ARCHIVE STEP %s!! ABORTING!' % (repr(acase), )) except Exception as e: jlogger.critical('hwrf_archive is aborting: ' + str(e), exc_info=True) sys.exit(2)
def main():
    """The output job: delivers all per-cycle products to COM (vitals,
    POM ocean files, logs, GSI originals/first-guesses, bias files,
    wrfdiag), then optionally mirrors selected products to the noscrub
    directories, and finally creates the cycle "done file" unless the
    ENSDA output job will do so instead."""
    import hwrf_expt
    hwrf_expt.init_module(make_ensemble_da=True)
    # Make sure DBN alerts and other such things are triggered:
    hwrf_alerts.add_nhc_alerts()
    hwrf_alerts.add_regrib_alerts()
    hwrf_alerts.add_wave_alerts()
    global copier
    copier = hwrf_expt.wrfcopier.compression_copier
    # PARAFLAG=NO means an operational (NCO) run: email the AFOS track.
    if 'NO' == os.environ.get('PARAFLAG', 'YES'):
        jlogger.info(
            'Calling email_afos_to_sdm from output job to email the track.')
        afos = hwrf_expt.nhcp.product('afos')
        hwrf_alerts.email_afos_to_sdm(afos)
        jlogger.info(
            'Done with email_afos_to_sdm. Will now celebrate by delivering many things to COM.'
        )
    conf = hwrf_expt.conf
    relocation = conf.getbool('config', 'run_relocation', True)
    coupled = conf.getbool('config', 'run_ocean', True)
    GSI = conf.getbool('config', 'run_gsi')
    run_ensemble_da = conf.getbool('config', 'run_ensemble_da', False)
    extra_trackers = conf.getbool('config', 'extra_trackers', False)
    fcstlen = conf.getint('config', 'forecast_length', 126)
    logger = conf.log('output')
    # Treat the run as uncoupled if the ocean init did not succeed.
    if coupled and not hwrf.mpipomtc.get_ocstatus(conf, logger):
        coupled = False
    hwrf_expt.wrfcopier.run()
    D = Deliverer(logger, conf)
    D['wrfdir'] = hwrf_expt.runwrf.workdir
    D.deliver_file('{WORKhwrf}/tmpvit', '{out_prefix}.storm_vit')
    if GSI:
        D['gsi_d02'] = hwrf_expt.gsi_d02.outdir
        if hwrf_expt.gsid03_flag:
            D['gsi_d03'] = hwrf_expt.gsi_d03.outdir
    logger.info('WRF run directory is %s' % (repr(D['wrfdir']), ))
    D.deliver_file('{WORKhwrf}/jlogfile', optional=True)
    d01 = hwrf_expt.moad
    d02 = hwrf_expt.storm1outer
    d03 = hwrf_expt.storm1inner
    if coupled:
        # POM initial-condition and daily restart files:
        D.deliver_file('{wrfdir}/MDstatus', optional=True)
        for ocrest in ('el_initial.nc', 'grid.nc', 'ts_clim.nc',
                       'ts_initial.nc', 'uv_initial.nc'):
            D.deliver_file('{wrfdir}/{vit[stormname]}.{ocrest}',
                           '{out_prefix}.pom.{ocrest}',
                           ocrest=ocrest)
        # One file per forecast day:
        for iday in xrange(int(math.floor(fcstlen / 24.0 + 0.01))):
            ocrest = "%04d.nc" % iday
            D.deliver_file('{wrfdir}/{vit[stormname]}.{ocrest}',
                           '{out_prefix}.pom.{ocrest}',
                           ocrest=ocrest)
    # Deliver every .log/.out/.err file from the work directory:
    logcount = 0
    for ext in ('log', 'out', 'err'):
        globme = conf.getdir('WORKhwrf') + '/*.' + ext
        logger.info('Globbing for %s log files' % (globme, ))
        for log in glob.glob(globme):
            logcount += 1
            D.deliver_file(log)
    logger.info('Found %d log file(s)' % (logcount, ))
    # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
    # Deliver GSI stuff next.
    # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
    gsi_flag = conf.getbool('config', 'run_gsi')
    # gsiop: True means GSI products are optional (GSI failed or is off).
    gsiop = True
    if gsi_flag:
        gsiop = ((not hwrf_expt.gsi_d02.completed)
                 or (not hwrf_expt.gsi_d03.completed))
        if gsiop:
            logger.warning('GSI failed, so all GSI products are optional.')
        else:
            logger.info('GSI ran, so its products are mandatory.')
    if GSI:
        # Copy the original wrfinput file before DA:
        org_d01 = hwrf_expt.gfs_init.realinit.wrfinput_at_time(
            hwrf_expt.cycle, d01)
        D.deliver_file(org_d01, '{out_prefix}.wrforg_d01', optional=gsiop)
    if GSI:
        # Get the FGAT initialization at the analysis time:
        ceninit = hwrf_expt.fgat_init.init_at_time(hwrf_expt.conf.cycle)
        # Copy the original wrfanl files before relocation:
        org_d02 = ceninit.runwrfanl.wrfanl_at_time(hwrf_expt.conf.cycle, d02)
        org_d03 = ceninit.runwrfanl.wrfanl_at_time(hwrf_expt.conf.cycle, d03)
        D.deliver_file(org_d02, '{out_prefix}.wrforg_d02', optional=gsiop)
        D.deliver_file(org_d03, '{out_prefix}.wrforg_d03', optional=gsiop)
        if relocation:
            # Copy the wrfanl files after relocation, but before GSI:
            ges_d02 = ceninit.rstage3.wrfanl_at_time(hwrf_expt.conf.cycle,
                                                     d02)
            ges_d03 = ceninit.rstage3.wrfanl_at_time(hwrf_expt.conf.cycle,
                                                     d03)
            D.deliver_file(ges_d02, '{out_prefix}.wrfges_d02',
                           optional=gsiop)
            D.deliver_file(ges_d03, '{out_prefix}.wrfges_d03',
                           optional=gsiop)
    # for domain in hwrf_expt.gfs_init.runwrfanl.sim:
    #     if not domain.is_moad():
    #         org_prod=hwrf_expt.gfs_init.runwrfanl.wrfanl_at_time(
    #             hwrf_expt.cycle,domain)
    #         D.deliver_file(org_prod,'{out_prefix}.wrfanl_d{gid:02d}_org',
    #                        gid=int(domain.get_grid_id()))
    if GSI:
        # GSI satellite bias correction output:
        D.deliver_file('{gsi_d02}/satbias_out',
                       '{out_prefix}.gsi_cvs2.biascr',
                       optional=gsiop)
        if hwrf_expt.gsid03_flag:
            D.deliver_file('{gsi_d03}/satbias_out',
                           '{out_prefix}.gsi_cvs3.biascr',
                           optional=gsiop)
    # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
    # Lastly, deliver the diag files
    # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
    jlogger.info('Delivering wrfdiag files to com.')
    hwrf_expt.nhcp.deliver_wrfdiag()
    if D.failures > 0:
        jlogger.critical(
            'HWRF: unable to deliver %d non-optional products to com.' %
            int(D.failures))
        sys.exit(1)
    # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
    # Deliver things to noscrub for non-NCO runs
    # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
    # NOTE(review): only the log message depends on PARAFLAG here; the
    # noscrub deliveries below run either way (NCO runs are expected to
    # have no out* directories configured, making havedir() False).
    if conf.getbool('config', 'PARAFLAG'):
        logger.info('You are not NCO, so I will deliver files to noscrub.')
    else:
        logger.info('You are NCO so I will skip NOSCRUB deliveries.')
    D.reset()

    def fromcom(workpath, compath, optional=False):
        # Deliver from COM to a noscrub destination.
        D.deliver_file(workpath, compath, from_com=True, optional=optional)

    def havedir(sdir):
        # True (and the directory exists) iff [dir] section defines sdir.
        there = conf.get('dir', sdir, 'NOPE')
        if there == 'NOPE':
            return False
        produtil.fileop.makedirs(there)
        return True

    if havedir('outatcf'):
        fromcom('{outatcf}', '{out_prefix}.trak.hwrf.atcfunix')
    if havedir('outdiag'):
        fromcom('{outdiag}', '{out_prefix}.trak.hwrf.3hourly*')
        fromcom('{outdiag}', '{out_prefix}*resolution', True)
        fromcom('{outdiag}', '{out_prefix}*htcf*stats', True)
        fromcom('{outdiag}', '{out_prefix}*htcf', True)
        fromcom('{outdiag}', 'a*.dat')
        fromcom('{outdiag}', '{out_prefix}.stats.tpc', optional=True)
        if extra_trackers:
            fromcom('{outdiag}', '{com}/{out_prefix}.trak.hwrfd01.atcfunix')
            fromcom('{outdiag}', '{com}/{out_prefix}.trak.hwrfd02.atcfunix')
    if havedir('outships'):
        fromcom('{outships}', 'figures/*.txt', optional=True)
    if havedir('outstatus'):
        fromcom('{outstatus}', '{WORKhwrf}/submit.out', optional=True)
        # Collect all TIMING lines from the job .out files into one file:
        timings = conf.strinterp('config',
                                 '{outstatus}/{out_prefix}.timings')
        inout = conf.strinterp('config', '{WORKhwrf}/hwrf_*.out')
        with open(timings, 'wt') as outf:
            for inoutfile in glob.glob(inout):
                if not os.path.exists(inoutfile):
                    logger.warning('%s: file does not exist; skipping' %
                                   (inoutfile, ))
                with open(inoutfile, 'rt') as inf:
                    for line in inf:
                        if line.find('TIMING') >= 0:
                            print >> outf, line.rstrip()
    if havedir('outatcfcorrected'):
        inatcf = conf.strinterp('config',
                                '{com}/{out_prefix}.trak.hwrf.atcfunix')
        outatcf = conf.strinterp(
            'config', '{outatcfcorrected}/{out_prefix}.trak.hwrf.atcfunix')
        hwrf.tracker.jtwc_rewrite(inatcf, outatcf, logger)
    ####################################################################
    # Create the "done file" if ensda is entirely disabled. This is
    # used by the workflow layer to know when the cycle is entirely
    # complete, and can be deleted.
    # NOTE FOR FUTURE DEVELOPMENT: When the graphics are added to the
    # workflow, we will need to move the creation of this "done file"
    # to a later step, after the graphics.  The logical candidate
    # would be a new job whose purpose is to check the cycle's entire
    # workflow to make sure it is finished.
    make_done = True
    if run_ensemble_da:
        flag_file = conf.strinterp('tdrcheck', '{tdr_flag_file}')
        try:
            ensda_flag = hwrf.ensda.read_ensda_flag_file(flag_file)
        except (EnvironmentError) as e:
            logger.error('%s: unable to get ensda_flag; assume False: %s' %
                         (flag_file, str(e)),
                         exc_info=True)
            ensda_flag = False
        if ensda_flag:
            jlogger.info(
                'Not creating donefile: ensda_output will do it instead.')
        make_done = not ensda_flag
    else:
        jlogger.info('ensda disabled: make donefile now')
    if make_done:
        donefile = os.path.join(
            conf.strinterp('config', '{com}/{stormlabel}.done'))
        with open(donefile, 'wt') as f:
            f.write('Cycle is complete.')