def __init__(self, args):
    """
    Initialize the job state from the arguments dictionary.

    :param args: the forecast job arguments
    """
    super(JobState, self).__init__(args)
    self.grib_source = self.resolve_grib_source(self.grib_source)
    self.start_utc = round_time_to_hour(self.start_utc, up=False, period_hours=self.grib_source.period_hours)
    self.end_utc = round_time_to_hour(self.end_utc, up=True, period_hours=self.grib_source.period_hours)
    self.fc_hrs = compute_fc_hours(self.start_utc, self.end_utc)
    if 'job_id' in args:
        logging.info('job_id given in the job description.')
        self.job_id = args['job_id']
    else:
        logging.warning('job_id not given, creating.')
        self.job_id = 'wfc-' + self.grid_code + '-' + utc_to_esmf(self.start_utc) + '-{0:02d}'.format(self.fc_hrs)
    self.emails = self.parse_emails(args)
    self.domains = args['domains']
    self.ignitions = args.get('ignitions', None)
    self.fmda = self.parse_fmda(args)
    self.postproc = args['postproc']
    self.wrfxpy_dir = args['sys_install_path']
    self.args = args

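# Illustrative sketch (not from the original source): the auto-generated job id above
# follows the pattern 'wfc-<grid_code>-<ESMF start time>-<forecast hours>'. This
# standalone snippet only reproduces that naming convention; the strftime format is
# assumed to match what utc_to_esmf produces.
from datetime import datetime

def example_job_id(grid_code, start_utc, fc_hrs):
    esmf = start_utc.strftime('%Y-%m-%d_%H:%M:%S')  # assumed utc_to_esmf format
    return 'wfc-' + grid_code + '-' + esmf + '-{0:02d}'.format(fc_hrs)

print(example_job_id('experiment', datetime(2016, 3, 30, 16), 5))
# -> wfc-experiment-2016-03-30_16:00:00-05
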
def postprocess_cycle(cycle, region_cfg, wksp_path):
    """
    Build rasters from the computed fuel moisture.

    :param cycle: the UTC cycle time
    :param region_cfg: the region configuration
    :param wksp_path: the workspace path
    :return: the postprocessing path
    """
    data_path = compute_model_path(cycle, region_cfg.code, wksp_path)
    year_month = '%04d%02d' % (cycle.year, cycle.month)
    cycle_dir = 'fmda-%s-%04d%02d%02d-%02d' % (region_cfg.code, cycle.year, cycle.month, cycle.day, cycle.hour)
    postproc_path = osp.join(wksp_path, year_month, cycle_dir)

    # open and read in the fuel moisture values
    d = netCDF4.Dataset(data_path)
    fmc_gc = d.variables['FMC_GC'][:, :, :]
    d.close()

    # read in the longitudes and latitudes
    geo_path = osp.join(wksp_path, '%s-geo.nc' % region_cfg.code)
    d = netCDF4.Dataset(geo_path)
    lats = d.variables['XLAT'][:, :]
    lons = d.variables['XLONG'][:, :]
    d.close()

    fm_wisdom = {
        'native_unit': '-',
        'colorbar': '-',
        'colormap': 'jet_r',
        'scale': [0.0, 0.4]
    }

    esmf_cycle = utc_to_esmf(cycle)
    mf = {"1": {esmf_cycle: {}}}
    manifest_name = 'fmda-%s-%04d%02d%02d-%02d.json' % (region_cfg.code, cycle.year, cycle.month, cycle.day, cycle.hour)
    ensure_dir(osp.join(postproc_path, manifest_name))

    for i, name in [(0, '1-hr'), (1, '10-hr'), (2, '100-hr')]:
        fm_wisdom['name'] = '%s fuel moisture' % name
        raster_png, coords, cb_png = scalar_field_to_raster(fmc_gc[:, :, i], lats, lons, fm_wisdom)
        raster_name = 'fmda-%s-raster.png' % name
        cb_name = 'fmda-%s-raster-cb.png' % name
        with open(osp.join(postproc_path, raster_name), 'w') as f:
            f.write(raster_png)
        with open(osp.join(postproc_path, cb_name), 'w') as f:
            f.write(cb_png)
        mf["1"][esmf_cycle][name] = {'raster': raster_name, 'coords': coords, 'colorbar': cb_name}

    logging.info('writing manifest file %s' % osp.join(postproc_path, manifest_name))
    json.dump(mf, open(osp.join(postproc_path, manifest_name), 'w'))

    return postproc_path

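# Illustrative call sketch (not part of the original source): invoking postprocess_cycle
# for a single UTC cycle. RegionConfig is a stand-in for whatever object carries the
# region `code` attribute; the workspace is assumed to already contain the model file
# and the '<code>-geo.nc' file referenced above.
from collections import namedtuple
from datetime import datetime

RegionConfig = namedtuple('RegionConfig', ['code'])

cycle = datetime(2020, 6, 1, 12)          # 2020-06-01 12:00 UTC (hypothetical)
region = RegionConfig(code='colorado')    # hypothetical region code
postproc_dir = postprocess_cycle(cycle, region, 'wksp')
print('rasters and manifest written to', postproc_dir)
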
def __init__(self, args):
    """
    Initialize the job state from the arguments dictionary.

    :param args: the forecast job arguments
    """
    super(JobState, self).__init__(args)
    self.fc_hrs = compute_fc_hours(self.start_utc, self.end_utc)
    self.grib_source = self.resolve_grib_source(self.grib_source)
    self.job_id = 'wfc-' + self.grid_code + '-' + utc_to_esmf(self.start_utc) + '-{0:02d}'.format(self.fc_hrs)
    self.emails = self.parse_emails(args)
    self.domains = args['domains']
    self.ignitions = args.get('ignitions', None)
    self.fmda = self.parse_fmda(args)
    self.postproc = args['postproc']
    self.wrfxpy_dir = args['sys_install_path']

def __init__(self, args):
    """
    Initialize the job state from the arguments dictionary.

    :param args: the forecast job arguments
    """
    super(JobState, self).__init__(args)
    # self.grib_source = [self.grib_source] if isinstance(self.grib_source, basestring) else self.grib_source
    # self.grib_source = [self.resolve_grib_source(g, args) for g in self.grib_source]
    self.grib_source = self.resolve_grib_source(self.grib_source, args)
    logging.info('Simulation requested from %s to %s' % (str(self.start_utc), str(self.end_utc)))
    self.start_utc = round_time_to_hour(self.start_utc, up=False, period_hours=self.grib_source[0].period_hours)
    self.end_utc = round_time_to_hour(self.end_utc, up=True, period_hours=self.grib_source[0].period_hours)
    self.cycle_start_utc = round_time_to_hour(self.get('cycle_start_utc', None), period_hours=self.grib_source[0].cycle_hours)
    logging.info('Simulation times rounded %s to %s' % (str(self.start_utc), str(self.end_utc)))
    # self.start_utc = round_time_to_hour(self.start_utc, up=False, period_hours=self.grib_source.period_hours)
    # self.end_utc = round_time_to_hour(self.end_utc, up=True, period_hours=self.grib_source.period_hours)
    self.fc_hrs = timedelta_hours(self.end_utc - self.start_utc)
    if 'job_id' in args:
        logging.info('job_id %s given in the job description' % args['job_id'])
        self.job_id = args['job_id']
    else:
        logging.warning('job_id not given, creating.')
        self.job_id = 'wfc-' + self.grid_code + '-' + utc_to_esmf(self.start_utc) + '-{0:02d}'.format(self.fc_hrs)
    self.emails = self.parse_emails(args)
    self.domains = args['domains']
    self.ignitions = args.get('ignitions', None)
    self.fmda = self.parse_fmda(args)
    self.postproc = args['postproc']
    self.wrfxpy_dir = args['sys_install_path']
    self.args = args
    logging.debug('JobState initialized: ' + str(self))

def execute(args):
    """
    Executes a weather/fire simulation.

    The args dictionary contains

    :param args: a dictionary with the following keys
    :param grid_code: the (unique) code of the grid that is used
    :param sys_install_path: system installation directory
    :param start_utc: start time of simulation in UTC
    :param end_utc: end time of simulation in UTC
    :param workspace_path: workspace directory
    :param wps_install_path: installation directory of WPS that will be used
    :param wrf_install_path: installation directory of WRF that will be used
    :param grib_source: a string identifying a valid GRIB2 source
    :param wps_namelist_path: the path to the namelist.wps file that will be used as template
    :param wrf_namelist_path: the path to the namelist.input file that will be used as template
    :param fire_namelist_path: the path to the namelist.fire file that will be used as template
    :param wps_geog_path: the path to the geogrid data directory providing terrain/fuel data
    :param email_notification: dictionary containing keys address and events indicating when a mail should be fired off
    """
    logging.basicConfig(level=logging.INFO)

    # initialize the job state from the arguments
    js = JobState(args)

    logging.info("job %s starting [%d hours to forecast]." % (js.job_id, js.fc_hrs))
    send_email(js, 'start', 'Job %s started.' % js.job_id)

    # read in all namelists
    js.wps_nml = f90nml.read(args['wps_namelist_path'])
    js.wrf_nml = f90nml.read(args['wrf_namelist_path'])
    js.fire_nml = f90nml.read(args['fire_namelist_path'])
    js.ems_nml = None
    if 'emissions_namelist_path' in args:
        js.ems_nml = f90nml.read(args['emissions_namelist_path'])

    # Parse and setup the domain configuration
    js.domain_conf = WPSDomainConf(js.domains)

    num_doms = len(js.domain_conf)
    js.wps_nml['share']['start_date'] = [utc_to_esmf(js.start_utc)] * num_doms
    js.wps_nml['share']['end_date'] = [utc_to_esmf(js.end_utc)] * num_doms
    js.wps_nml['share']['interval_seconds'] = 3600

    logging.info("number of domains defined is %d." % num_doms)

    # build directories in workspace
    js.wps_dir = osp.abspath(osp.join(js.workspace_path, js.job_id, 'wps'))
    js.wrf_dir = osp.abspath(osp.join(js.workspace_path, js.job_id, 'wrf'))

    logging.info("cloning WPS into %s" % js.wps_dir)

    # step 1: clone WPS and WRF directories
    cln = WRFCloner(args)
    cln.clone_wps(js.wps_dir, js.grib_source.vtables(), [])

    # step 2: process domain information and patch namelist for geogrid
    js.wps_nml['geogrid']['geog_data_path'] = args['wps_geog_path']
    js.domain_conf.prepare_for_geogrid(js.wps_nml, js.wrf_nml, js.wrfxpy_dir, js.wps_dir)
    f90nml.write(js.wps_nml, osp.join(js.wps_dir, 'namelist.wps'), force=True)

    # do steps 2 & 3 & 4 in parallel (two execution streams)
    #  -> GEOGRID ->
    #  -> GRIB2 download -> UNGRIB ->
    proc_q = Queue()
    geogrid_proc = Process(target=run_geogrid, args=(js, proc_q))
    grib_proc = Process(target=retrieve_gribs_and_run_ungrib, args=(js, proc_q))

    geogrid_proc.start()
    grib_proc.start()

    # wait until both tasks are done
    geogrid_proc.join()
    grib_proc.join()

    if proc_q.get() != 'SUCCESS':
        return

    if proc_q.get() != 'SUCCESS':
        return

    proc_q.close()

    # step 5: execute metgrid after ensuring all grids will be processed
    js.domain_conf.prepare_for_metgrid(js.wps_nml)
    f90nml.write(js.wps_nml, osp.join(js.wps_dir, 'namelist.wps'), force=True)

    logging.info("running METGRID")
    Metgrid(js.wps_dir).execute().check_output()

    send_email(js, 'metgrid', 'Job %s - metgrid complete.' % js.job_id)
    logging.info("cloning WRF into %s" % js.wrf_dir)

    # step 6: clone wrf directory, symlink all met_em* files
    cln.clone_wrf(js.wrf_dir, [])
    symlink_matching_files(js.wrf_dir, js.wps_dir, "met_em*")

    logging.info("running REAL")

    # step 7: patch input namelist, fire namelist, emissions namelist (if required)
    # and execute real.exe
    time_ctrl = update_time_control(js.start_utc, js.end_utc, num_doms)
    js.wrf_nml['time_control'].update(time_ctrl)
    update_namelist(js.wrf_nml, js.grib_source.namelist_keys())
    if 'ignitions' in args:
        update_namelist(js.wrf_nml, render_ignitions(js, num_doms))

    # if we have an emissions namelist, automatically turn on the tracers
    if js.ems_nml is not None:
        f90nml.write(js.ems_nml, osp.join(js.wrf_dir, 'namelist.fire_emissions'), force=True)
        js.wrf_nml['dynamics']['tracer_opt'] = [2] * num_doms

    f90nml.write(js.wrf_nml, osp.join(js.wrf_dir, 'namelist.input'), force=True)
    f90nml.write(js.fire_nml, osp.join(js.wrf_dir, 'namelist.fire'), force=True)

    # try to run Real twice as it sometimes fails the first time
    # it's not clear why this error happens
    try:
        Real(js.wrf_dir).execute().check_output()
    except Exception as e:
        logging.error('Real step failed with exception %s, retrying ...' % str(e))
        Real(js.wrf_dir).execute().check_output()

    # step 7b: if requested, do fuel moisture DA
    if js.fmda is not None:
        logging.info('running fuel moisture data assimilation')
        for dom in js.fmda.domains:
            assimilate_fm10_observations(osp.join(js.wrf_dir, 'wrfinput_d%02d' % dom), None, js.fmda.token)

    logging.info('submitting WRF job')
    send_email(js, 'wrf_submit', 'Job %s - wrf job submitted.' % js.job_id)

    # step 8: execute wrf.exe on parallel backend
    js.task_id = "sim-" + js.grid_code + "-" + utc_to_esmf(js.start_utc)[:10]
    WRF(js.wrf_dir, js.qsys).submit(js.task_id, js.num_nodes, js.ppn, js.wall_time_hrs)

    send_email(js, 'wrf_exec', 'Job %s - wrf job starting now with id %s.' % (js.job_id, js.task_id))
    logging.info("WRF job submitted with id %s, waiting for rsl.error.0000" % js.task_id)

    # step 9: wait for appearance of rsl.error.0000 and open it
    wrf_out = None
    while wrf_out is None:
        try:
            wrf_out = open(osp.join(js.wrf_dir, 'rsl.error.0000'))
            break
        except IOError:
            logging.info('forecast: waiting 5 seconds for rsl.error.0000 file')
            time.sleep(5)

    logging.info('Detected rsl.error.0000')

    # step 10: track log output and check for history writes from WRF
    pp = None
    already_sent_files, max_pp_dom = [], -1
    if js.postproc is not None:
        js.pp_dir = osp.join(js.workspace_path, js.job_id, "products")
        make_dir(js.pp_dir)
        pp = Postprocessor(js.pp_dir, 'wfc-' + js.grid_code)
        max_pp_dom = max([int(x) for x in filter(lambda x: len(x) == 1, js.postproc)])

    while True:
        line = wrf_out.readline().strip()
        if not line:
            time.sleep(0.2)
            continue

        if "SUCCESS COMPLETE WRF" in line:
            send_email(js, 'complete', 'Job %s - wrf job complete SUCCESS.' % js.job_id)
            logging.info("WRF completion detected.")
            break

        if "Timing for Writing wrfout" in line:
            esmf_time, domain_str = re.match(r'.*wrfout_d.._([0-9_\-:]{19}) for domain\ +(\d+):', line).groups()
            dom_id = int(domain_str)
            logging.info("Detected history write for domain %d for time %s." % (dom_id, esmf_time))
            if js.postproc is not None and str(dom_id) in js.postproc:
                var_list = [str(x) for x in js.postproc[str(dom_id)]]
                logging.info("Executing postproc instructions for vars %s for domain %d." % (str(var_list), dom_id))
                wrfout_path = find_fresh_wrfout(js.wrf_dir, dom_id)
                try:
                    pp.process_vars(wrfout_path, dom_id, esmf_time, var_list)
                except Exception as e:
                    logging.warning('Failed to postprocess for time %s with error %s.' % (esmf_time, str(e)))

            # if this is the last processed domain for this timestamp in incremental mode, upload to server
            if dom_id == max_pp_dom and js.postproc.get('shuttle', None) == 'incremental':
                desc = js.postproc['description'] if 'description' in js.postproc else js.job_id
                sent_files_1 = send_product_to_server(args, js.pp_dir, js.job_id, js.job_id, desc, already_sent_files)
                logging.info('sent %d files to visualization server.' % len(sent_files_1))
                already_sent_files = list(filter(lambda x: not x.endswith('json'), already_sent_files + sent_files_1))

    # if we are to send out the postprocessed files after completion, this is the time
    if js.postproc.get('shuttle', None) == 'on_completion':
        desc = js.postproc['description'] if 'description' in js.postproc else js.job_id
        send_product_to_server(args, js.pp_dir, js.job_id, js.job_id, desc)

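# Illustrative driver sketch (an assumption, not the project's actual entry point):
# reading a job description from a JSON file and handing it to execute(). The file name
# is arbitrary, and the dictionary is assumed to already carry the keys documented in
# the docstring above (including any pre-parsed datetime values expected by JobState).
import json
import sys

if __name__ == '__main__':
    with open(sys.argv[1]) as f:      # e.g. jobs/experiment.json (hypothetical path)
        job_args = json.load(f)
    execute(job_args)
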
def execute(args, job_args):
    """
    Executes a weather/fire simulation.

    :param args: a dictionary with all keys needed to start the simulation (listed below)
    :param job_args: the original JSON job description given to the forecast

    Keys in args:
    :param grid_code: the (unique) code of the grid that is used
    :param sys_install_path: system installation directory
    :param start_utc: start time of simulation in UTC
    :param end_utc: end time of simulation in UTC
    :param workspace_path: workspace directory
    :param wps_install_path: installation directory of WPS that will be used
    :param wrf_install_path: installation directory of WRF that will be used
    :param grib_source: a string identifying a valid GRIB2 source
    :param wps_namelist_path: the path to the namelist.wps file that will be used as template
    :param wrf_namelist_path: the path to the namelist.input file that will be used as template
    :param fire_namelist_path: the path to the namelist.fire file that will be used as template
    :param wps_geog_path: the path to the geogrid data directory providing terrain/fuel data
    :param email_notification: dictionary containing keys address and events indicating when a mail should be fired off
    """
    logging.info('step 0 initialize the job state from the arguments')
    ## logging.info('args = %s' % json.dumps(jargs, open(osp.join(jobdir,'input.json'),'w'), indent=4, separators=(',', ': ')))
    js = JobState(args)
    ## logging.info('js = %s' % json.dumps(js, open(osp.join(jobdir,'input.json'),'w'), indent=4, separators=(',', ': ')))

    jobdir = osp.abspath(osp.join(js.workspace_path, js.job_id))
    make_clean_dir(jobdir)

    json.dump(job_args, open(osp.join(jobdir, 'input.json'), 'w'), indent=4, separators=(',', ': '))
    jsub = make_job_file(js)
    json.dump(jsub, open(jsub.jobfile, 'w'), indent=4, separators=(',', ': '))

    logging.info("job %s starting [%d hours to forecast]." % (js.job_id, js.fc_hrs))
    sys.stdout.flush()
    send_email(js, 'start', 'Job %s started.' % js.job_id)

    # read in all namelists
    js.wps_nml = read_namelist(js.args['wps_namelist_path'])
    js.wrf_nml = read_namelist(js.args['wrf_namelist_path'])
    js.fire_nml = read_namelist(js.args['fire_namelist_path'])
    js.ems_nml = None
    if 'emissions_namelist_path' in js.args:
        js.ems_nml = read_namelist(js.args['emissions_namelist_path'])

    # Parse and setup the domain configuration
    js.domain_conf = WPSDomainConf(js.domains)
    js.num_doms = len(js.domain_conf)

    js.wps_nml['share']['interval_seconds'] = js.grib_source[0].interval_seconds

    logging.info("number of domains defined is %d." % js.num_doms)

    # build directories in workspace
    js.wps_dir = osp.abspath(osp.join(js.workspace_path, js.job_id, 'wps'))
    js.wrf_dir = osp.abspath(osp.join(js.workspace_path, js.job_id, 'wrf'))

    #check_obj(args,'args')
    #check_obj(js,'Initial job state')

    logging.info("step 1: clone WPS and WRF directories")
    logging.info("cloning WPS into %s" % js.wps_dir)
    cln = WRFCloner(js.args)
    cln.clone_wps(js.wps_dir, [])
    js.grib_source[0].clone_vtables(js.wps_dir)

    logging.info("step 2: process domain information and patch namelist for geogrid")
    js.wps_nml['share']['start_date'] = [utc_to_esmf(js.start_utc)] * js.num_doms
    js.wps_nml['share']['end_date'] = [utc_to_esmf(js.end_utc)] * js.num_doms
    js.wps_nml['geogrid']['geog_data_path'] = js.args['wps_geog_path']
    js.domain_conf.prepare_for_geogrid(js.wps_nml, js.wrf_nml, js.wrfxpy_dir, js.wps_dir)
    f90nml.write(js.wps_nml, osp.join(js.wps_dir, 'namelist.wps'), force=True)

    # do steps 2 & 3 & 4 in parallel (two execution streams)
    #  -> GEOGRID ->
    #  -> GRIB2 download -> UNGRIB ->
    proc_q = Queue()

    geogrid_proc = Process(target=run_geogrid, args=(js, proc_q))
    # grib_proc = Process(target=retrieve_gribs_and_run_ungrib_all, args=(js, proc_q, ref_utc))
    grib_proc = {}
    for grib_source in js.grib_source:
        grib_proc[grib_source.id] = Process(target=retrieve_gribs_and_run_ungrib, args=(js, grib_source, proc_q))

    logging.info('starting GEOGRID and GRIB2/UNGRIB')
    if js.ungrib_only:
        logging.info('ungrib_only set, skipping GEOGRID, will exit after UNGRIB')
    else:
        geogrid_proc.start()
    for grib_source in js.grib_source:
        grib_proc[grib_source.id].start()

    # wait until all tasks are done
    logging.info('waiting until all tasks are done')
    for grib_source in js.grib_source:
        grib_proc[grib_source.id].join()

    if js.ungrib_only:
        return
    else:
        geogrid_proc.join()

    for grib_source in js.grib_source:
        if proc_q.get() != 'SUCCESS':
            return

    if proc_q.get() != 'SUCCESS':
        return

    proc_q.close()

    logging.info("step 5: execute metgrid after ensuring all grids will be processed")
    update_namelist(js.wps_nml, js.grib_source[0].namelist_wps_keys())
    js.domain_conf.prepare_for_metgrid(js.wps_nml)
    logging.info("namelist.wps for METGRID: %s" % json.dumps(js.wps_nml, indent=4, separators=(',', ': ')))
    f90nml.write(js.wps_nml, osp.join(js.wps_dir, 'namelist.wps'), force=True)

    logging.info("running METGRID")
    Metgrid(js.wps_dir).execute().check_output()

    send_email(js, 'metgrid', 'Job %s - metgrid complete.' % js.job_id)
    logging.info("METGRID complete")

    logging.info("cloning WRF into %s" % js.wrf_dir)

    logging.info("step 6: clone wrf directory, symlink all met_em* files, make namelists")
    cln.clone_wrf(js.wrf_dir, [])
    symlink_matching_files(js.wrf_dir, js.wps_dir, "met_em*")
    time_ctrl = update_time_control(js.start_utc, js.end_utc, js.num_doms)
    js.wrf_nml['time_control'].update(time_ctrl)
    js.wrf_nml['time_control']['interval_seconds'] = js.grib_source[0].interval_seconds
    update_namelist(js.wrf_nml, js.grib_source[0].namelist_keys())
    if 'ignitions' in js.args:
        update_namelist(js.wrf_nml, render_ignitions(js, js.num_doms))

    # if we have an emissions namelist, automatically turn on the tracers
    if js.ems_nml is not None:
        logging.debug('namelist.fire_emissions given, turning on tracers')
        f90nml.write(js.ems_nml, osp.join(js.wrf_dir, 'namelist.fire_emissions'), force=True)
        js.wrf_nml['dynamics']['tracer_opt'] = [2] * js.num_doms

    f90nml.write(js.wrf_nml, osp.join(js.wrf_dir, 'namelist.input'), force=True)
    f90nml.write(js.fire_nml, osp.join(js.wrf_dir, 'namelist.fire'), force=True)

    # step 7: execute real.exe
    logging.info("running REAL")

    # try to run Real twice as it sometimes fails the first time
    # it's not clear why this error happens
    try:
        Real(js.wrf_dir).execute().check_output()
    except Exception as e:
        logging.error('Real step failed with exception %s, retrying ...' % str(e))
        Real(js.wrf_dir).execute().check_output()

    logging.info('step 7b: if requested, do fuel moisture DA')
    logging.info('fmda = %s' % js.fmda)
    if js.fmda is not None:
        logging.info('running fuel moisture data assimilation')
        for dom in js.fmda.domains:
            logging.info('assimilate_fm10_observations for domain %s' % dom)
            assimilate_fm10_observations(osp.join(js.wrf_dir, 'wrfinput_d%02d' % int(dom)), None, js.fmda.token)

    # step 8: execute wrf.exe on parallel backend
    logging.info('submitting WRF job')
    send_email(js, 'wrf_submit', 'Job %s - wrf job submitted.' % js.job_id)

    js.task_id = "sim-" + js.grid_code + "-" + utc_to_esmf(js.start_utc)[:10]
    jsub.job_num = WRF(js.wrf_dir, js.qsys).submit(js.task_id, js.num_nodes, js.ppn, js.wall_time_hrs)

    send_email(js, 'wrf_exec', 'Job %s - wrf job starting now with id %s.' % (js.job_id, js.task_id))
    logging.info("WRF job %s submitted with id %s, waiting for rsl.error.0000" % (jsub.job_num, js.task_id))

    jobfile = osp.abspath(osp.join(js.workspace_path, js.job_id, 'job.json'))
    json.dump(jsub, open(jobfile, 'w'), indent=4, separators=(',', ': '))

    process_output(js.job_id)

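# Illustrative driver sketch (assumption): the two-argument variant receives both the
# processed argument dictionary and the original JSON job description, so the latter can
# be archived verbatim as input.json in the job directory. In the real system the raw
# JSON is typically expanded/validated before the call; here the same dictionary is
# passed for both parameters purely as a minimal example.
import json
import sys

if __name__ == '__main__':
    with open(sys.argv[1]) as f:      # hypothetical job description file
        job_json = json.load(f)
    execute(job_json, job_json)
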
def retrieve_gribs_and_run_ungrib(js, grib_source, q):
    """
    This function retrieves required GRIB files and runs ungrib.

    It returns either 'SUCCESS' or 'FAILURE' on completion.

    :param js: the JobState object containing the forecast configuration
    :param grib_source: the GribSource object containing ungrib configuration
    :param q: the multiprocessing Queue into which we will send either 'SUCCESS' or 'FAILURE'
    """
    wps_dir = osp.abspath(js.wps_dir)
    grib_dir = osp.join(wps_dir, grib_source.id)
    make_clean_dir(grib_dir)
    wps_nml = js.wps_nml
    try:
        logging.info("retrieving GRIB files from %s" % grib_source.id)
        download_whole_cycle = js.get('download_whole_cycle', False)
        manifest = grib_source.retrieve_gribs(js.start_utc, js.end_utc, js.ref_utc, js.cycle_start_utc, download_whole_cycle)
        # logging.info('manifest: ' + str(manifest))

        cache_colmet = len(manifest) > 1
        have_all_colmet = False
        if cache_colmet:
            have_all_colmet = len(manifest.colmet_missing) == 0
            colmet_dir = osp.join(grib_source.cache_dir, manifest.colmet_prefix)

        logging.info('cache colmet %s, have all colmet %s' % (cache_colmet, have_all_colmet))

        if not have_all_colmet:
            # this is also if we do not cache
            grib_source.symlink_gribs(manifest.grib_files, grib_dir)

            send_email(js, 'grib2', 'Job %s - %d GRIB2 files downloaded.' % (js.job_id, len(manifest)))
            logging.info("running UNGRIB for %s" % grib_source.id)
            logging.info("step 4: patch namelist for ungrib and execute ungrib on %s files" % grib_source.id)

            update_namelist(wps_nml, grib_source.namelist_wps_keys())
            if cache_colmet:
                wps_nml['share']['start_date'] = [utc_to_esmf(manifest.colmet_files_utc[0])] * js.num_doms
                wps_nml['share']['end_date'] = [utc_to_esmf(manifest.colmet_files_utc[-1])] * js.num_doms

            # logging.info("namelist.wps for UNGRIB: %s" % json.dumps(wps_nml, indent=4, separators=(',', ': ')))
            f90nml.write(wps_nml, osp.join(grib_dir, 'namelist.wps'), force=True)

            grib_source.clone_vtables(grib_dir)
            symlink_unless_exists(osp.join(wps_dir, 'ungrib.exe'), osp.join(grib_dir, 'ungrib.exe'))

            print(grib_dir + ':')
            os.system('ls -l %s' % grib_dir)

            Ungrib(grib_dir).execute().check_output()

            print(grib_dir + ':')
            os.system('ls -l %s' % grib_dir)

            if cache_colmet:
                # move output to cache directory
                make_dir(colmet_dir)
                for f in manifest.colmet_files:
                    move(osp.join(grib_dir, f), osp.join(colmet_dir, f))

        # now all colmet files should be in the cache
        if cache_colmet:
            for f in manifest.colmet_files:
                symlink_unless_exists(osp.join(colmet_dir, f), osp.join(wps_dir, f))
        else:
            # move output
            for f in glob.glob(osp.join(grib_dir, grib_source.prefix() + '*')):
                move(f, wps_dir)

        send_email(js, 'ungrib', 'Job %s - ungrib complete.' % js.job_id)
        logging.info('UNGRIB complete for %s' % grib_source.id)
        q.put('SUCCESS')

    except Exception as e:
        logging.error('GRIB2/UNGRIB step failed with exception %s' % repr(e))
        traceback.print_exc()
        q.put('FAILURE')

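# Minimal sketch (an assumption, not the project's driver) of how this worker is
# typically launched: one Process per GRIB source, with a shared Queue collecting one
# 'SUCCESS'/'FAILURE' message per worker. `js` and `grib_sources` stand for an
# already-initialized JobState and a list of GribSource objects.
from multiprocessing import Process, Queue

def run_ungrib_sources(js, grib_sources):
    q = Queue()
    procs = [Process(target=retrieve_gribs_and_run_ungrib, args=(js, gs, q)) for gs in grib_sources]
    for p in procs:
        p.start()
    for p in procs:
        p.join()
    # one status message is expected per worker
    return all(q.get() == 'SUCCESS' for _ in procs)
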
def postprocess_cycle(cycle, region_cfg, wksp_path, bounds=None):
    """
    Build rasters from the computed fuel moisture.

    :param cycle: the UTC cycle time
    :param region_cfg: the region configuration
    :param wksp_path: the workspace path
    :param bounds: bounding box of the post-processing
    :return: the postprocessing path
    """
    prev_cycle = cycle - timedelta(hours=1)
    post_cycle = cycle + timedelta(hours=1)
    model_path = compute_model_path(cycle, region_cfg.code, wksp_path)
    year_month = '%04d%02d' % (cycle.year, cycle.month)
    prev_year_month = '%04d%02d' % (prev_cycle.year, prev_cycle.month)
    cycle_dir = 'fmda-%s-%04d%02d%02d-%02d' % (region_cfg.code, cycle.year, cycle.month, cycle.day, cycle.hour)
    prev_cycle_dir = 'fmda-%s-%04d%02d%02d-%02d' % (region_cfg.code, prev_cycle.year, prev_cycle.month, prev_cycle.day, prev_cycle.hour)
    postproc_path = osp.join(wksp_path, year_month, cycle_dir)
    prev_postproc_path = osp.join(wksp_path, prev_year_month, prev_cycle_dir)
    manifest_name = cycle_dir + '.json'
    complete_manifest_name = 'fmda-%s.json' % region_cfg.code

    if not is_cycle_computed(cycle, region_cfg, wksp_path) and not osp.exists(prev_postproc_path):
        logging.warning('CYCLER postprocessing failed for time {}'.format(str(cycle)))
        return None

    var_wisdom = {
        'dfm': {
            'native_unit': '-',
            'colorbar': '-',
            'colormap': 'jet_r',
            'scale': [0.0, 0.4]
        },
        'lfm': {
            'native_unit': '-',
            'colorbar': '-',
            'colormap': 'jet_r',
            'scale': [0.0, 3.0],
            'marker': '^'
        },
        'EQUILd FM': {
            'name': 'Drying equilibrium FM',
            'native_unit': '-',
            'colorbar': 'i-',
            'colormap': 'jet_r',
            'scale': [0.0, 0.4]
        },
        'EQUILw FM': {
            'name': 'Wetting equilibrium FM',
            'native_unit': '-',
            'colorbar': 'i-',
            'colormap': 'jet_r',
            'scale': [0.0, 0.4]
        },
        'RH': {
            'name': 'Relative humidity',
            'native_unit': '%',
            'colorbar': '%',
            'colormap': 'jet_r',
            'scale': [0.0, 100.0]
        },
        'TD': {
            'name': 'Dew point temperature at 2m',
            'native_unit': 'K',
            'colorbar': 'F',
            'colormap': 'jet',
            'scale': [270.0, 320.0]
        },
        'T2': {
            'name': 'Temperature at 2m',
            'native_unit': 'K',
            'colorbar': 'F',
            'colormap': 'jet',
            'scale': [270.0, 320.0]
        },
        'PRECIPA': {
            'name': 'RTMA precipa',
            'native_unit': 'kg/m^2/h',
            'colorbar': 'kg/m^2/h',
            'colormap': 'jet_r',
            'scale': [0.0, 2.0]
        },
        'PRECIP': {
            'name': 'Precipitation',
            'native_unit': 'mm/h',
            'colorbar': 'mm/h',
            'colormap': 'jet_r',
            'scale': [0.0, 2.0]
        },
        'HGT': {
            'name': 'Terrain height',
            'native_unit': 'm',
            'colorbar': 'm',
            'colormap': 'jet_r',
            'scale': [-86.0, 4500.0]
        },
    }

    show = ['TD', 'PRECIPA', 'T2', 'HGT', 'PRECIP', 'RH', 'EQUILd FM', 'EQUILw FM']
    show = ['T2', 'HGT', 'PRECIP', 'RH']

    esmf_cycle = utc_to_esmf(cycle)
    mf = {"1": {esmf_cycle: {}}}
    ensure_dir(osp.join(postproc_path, manifest_name))

    if not is_cycle_computed(cycle, region_cfg, wksp_path):
        logging.info('CYCLER copying postprocessing from cycle {} to cycle {}'.format(str(prev_cycle), str(cycle)))
        prev_manifest_name = prev_cycle_dir + '.json'
        prev_esmf_cycle = utc_to_esmf(prev_cycle)
        prev_mf = json.load(open(osp.join(prev_postproc_path, prev_manifest_name), 'r'))
        for name in prev_mf['1'][prev_esmf_cycle].keys():
            prev_raster_name = prev_mf['1'][prev_esmf_cycle][name]['raster']
            prev_cb_name = prev_mf['1'][prev_esmf_cycle][name]['colorbar']
            raster_name = cycle_dir + '-%s-raster.png' % name
            cb_name = cycle_dir + '-%s-raster-cb.png' % name
            coords = prev_mf['1'][prev_esmf_cycle][name]['coords']
            alpha = prev_mf['1'][prev_esmf_cycle][name].get('alpha', None)
            force_copy(osp.join(prev_postproc_path, prev_raster_name), osp.join(postproc_path, raster_name))
            force_copy(osp.join(prev_postproc_path, prev_cb_name), osp.join(postproc_path, cb_name))
            if alpha:
                mf["1"][esmf_cycle][name] = {'raster': raster_name, 'coords': coords, 'colorbar': cb_name, 'alpha': alpha}
            else:
                mf["1"][esmf_cycle][name] = {'raster': raster_name, 'coords': coords, 'colorbar': cb_name}
    else:
        if bounds is None:
            bounds = (region_cfg.bbox[1], region_cfg.bbox[3], region_cfg.bbox[0], region_cfg.bbox[2])

        # read in the longitudes and latitudes
        geo_path = osp.join(wksp_path, '%s-geo.nc' % region_cfg.code)
        logging.info('CYCLER reading longitudes and latitudes from NetCDF file %s' % geo_path)
        d = netCDF4.Dataset(geo_path)
        lats = d.variables['XLAT'][:, :]
        lons = d.variables['XLONG'][:, :]
        d.close()

        # read and process model variables
        with netCDF4.Dataset(model_path) as d:
            for name in show:
                raster_png, coords, cb_png, levels = scalar_field_to_raster(d.variables[name][:, :], lats, lons, var_wisdom[name])
                write_postprocess(mf, postproc_path, cycle_dir, esmf_cycle, name, raster_png, coords, cb_png, levels, .5)
            for i, name in [(0, '1-hr DFM'), (1, '10-hr DFM'), (2, '100-hr DFM')]:
                fm_wisdom = var_wisdom['dfm']
                fm_wisdom['name'] = 'Estimated %s' % name
                raster_png, coords, cb_png, levels = scalar_field_to_raster(d.variables['FMC_GC'][:, :, i], lats, lons, fm_wisdom)
                write_postprocess(mf, postproc_path, cycle_dir, esmf_cycle, name, raster_png, coords, cb_png, levels, .5)

    # MesoWest observations
    if osp.exists('src/ingest/MesoDB'):
        from ingest.MesoDB.mesoDB import mesoDB
        db = mesoDB('ingest/MesoDB')
        db.update['startTime'] = cycle - timedelta(hours=1)
        db.update['endTime'] = cycle + timedelta(hours=1)
        db.params['startTime'] = cycle - timedelta(hours=1)
        db.params['endTime'] = cycle + timedelta(hours=1)
        db.params['longitude1'], db.params['longitude2'], db.params['latitude1'], db.params['latitude2'] = bounds
        if is_cycle_computed(cycle, region_cfg, wksp_path):
            db.params['updateDB'] = False
        df = db.get_DB()
        st = db.sites()
        data = df.groupby('STID').mean().join(st[['LONGITUDE', 'LATITUDE']])
        meso_wisdom = var_wisdom['dfm']
        meso_wisdom['name'] = 'MesoWest 10-hr DFM'
        meso_wisdom['bbox'] = bounds
        meso_wisdom['text'] = False
        raster_png, coords, cb_png, levels = scatter_to_raster(
            np.array(data['fm10']) / 100.,
            np.array(data['LATITUDE']).astype(float),
            np.array(data['LONGITUDE']).astype(float), meso_wisdom)
        name = 'MESO 10-hr DFM'
        write_postprocess(mf, postproc_path, cycle_dir, esmf_cycle, name, raster_png, coords, cb_png, levels, 1.)

    # NFMDB observations
    if osp.exists('src/ingest/FMDB'):
        from ingest.FMDB.FMDB import FMDB
        from ingest.FMDB.utils import filter_outliers
        period_length = 7  # period in days
        period_num = np.ceil(cycle.day / period_length)
        db = FMDB('ingest/NFMDB')
        db.params['startYear'] = 2019
        data = db.get_data()
        data = filter_outliers(data)
        data['fuel_type'] = data['fuel_type'].fillna('None').str.upper()
        data['fuel_variation'] = data['fuel_variation'].fillna('None').str.upper()
        sts = db.sites()
        data = data.join(sts[['lng', 'lat']], 'site_number')
        # mask space
        lats = data['lat']
        lons = data['lng']
        data = data[np.logical_and(
            lats <= bounds[3],
            np.logical_and(lats >= bounds[2],
                           np.logical_and(lons <= bounds[1], lons >= bounds[0])))]
        dates = data['date'].dt.tz_localize(pytz.UTC)
        # calculate top 5 LFM to always plot the same
        top = 5
        hist_data = data[dates.dt.year <= 2020]
        hist_dfm_mask = np.array(['-HOUR' in ft for ft in np.array(hist_data['fuel_type'])]).astype(bool)
        hist_df_lfm = hist_data[~hist_dfm_mask].reset_index(drop=True)
        fts = np.array(hist_df_lfm[['fuel_type', 'percent']].groupby('fuel_type').count().sort_values(by='percent', ascending=False).index[:top])
        # mask time
        start = cycle.replace(day=int(period_length * (period_num - 1) + 1), hour=0, minute=0, second=0, microsecond=0)
        end = cycle
        data = data[np.logical_and(dates >= start, dates <= end)]
        cycle_dir = 'fmda-%s-%04d%02d%02d-%02d' % (region_cfg.code, start.year, start.month, start.day, start.hour)
        # mask dead and live fuel moisture
        dfm_mask = np.array(['-HOUR' in ft for ft in np.array(data['fuel_type'])]).astype(bool)
        df_dfm = data[dfm_mask].reset_index(drop=True)
        df_lfm = data[~dfm_mask].reset_index(drop=True)
        # plot NFMDB dead fuel moisture
        for i, name in [('1-HOUR', 'NFMDB 1-hr DFM'), ('10-HOUR', 'NFMDB 10-hr DFM'),
                        ('100-HOUR', 'NFMDB 100-hr DFM'), ('1000-HOUR', 'NFMDB 1000-hr DFM')]:
            fmdb_wisdom = var_wisdom['dfm']
            fmdb_wisdom['name'] = name
            fmdb_wisdom['bbox'] = bounds
            fmdb_wisdom['text'] = True
            fmdb_wisdom['size'] = 40
            fmdb_wisdom['linewidth'] = 1.
            data = df_dfm[df_dfm['fuel_type'] == i]
            raster_png, coords, cb_png, levels = scatter_to_raster(
                np.array(data['percent']) / 100., np.array(data['lat']),
                np.array(data['lng']), fmdb_wisdom)
            write_postprocess(mf, postproc_path, cycle_dir, esmf_cycle, name, raster_png, coords, cb_png, levels, 1.)
        # plot NFMDB live fuel moisture
        df_lfm = df_lfm.sort_values('date').groupby(['site_number', 'fuel_type']).last().reset_index()
        for ft in fts:
            name = 'NFMDB {} LFM'.format(ft)
            fmdb_wisdom = var_wisdom['lfm']
            fmdb_wisdom['name'] = name
            fmdb_wisdom['bbox'] = bounds
            fmdb_wisdom['text'] = True
            fmdb_wisdom['size'] = 40
            fmdb_wisdom['linewidth'] = 1.
            data = df_lfm[df_lfm['fuel_type'] == ft]
            raster_png, coords, cb_png, levels = scatter_to_raster(
                np.array(data['percent']) / 100., np.array(data['lat']),
                np.array(data['lng']), fmdb_wisdom)
            write_postprocess(mf, postproc_path, cycle_dir, esmf_cycle, name, raster_png, coords, cb_png, levels, 1.)
        name = 'NFMDB OTHERS LFM'
        fmdb_wisdom = var_wisdom['lfm']
        fmdb_wisdom['name'] = name
        fmdb_wisdom['bbox'] = bounds
        fmdb_wisdom['text'] = True
        fmdb_wisdom['size'] = 40
        fmdb_wisdom['linewidth'] = 1.
        data = df_lfm[~df_lfm['fuel_type'].isin(fts)]
        data = data.groupby('site_number').mean()
        raster_png, coords, cb_png, levels = scatter_to_raster(
            np.array(data['percent']) / 100., np.array(data['lat']),
            np.array(data['lng']), fmdb_wisdom)
        write_postprocess(mf, postproc_path, cycle_dir, esmf_cycle, name, raster_png, coords, cb_png, levels, 1.)

    logging.info('writing manifest file %s' % osp.join(postproc_path, manifest_name))
    json.dump(mf, open(osp.join(postproc_path, manifest_name), 'w'), indent=1, separators=(',', ':'))
    logging.info(json.dumps(mf))
    if osp.exists(osp.join(prev_postproc_path, complete_manifest_name)):
        complete_mf = json.load(open(osp.join(prev_postproc_path, complete_manifest_name), 'r'))
        complete_mf['1'].update(mf['1'])
        json.dump(complete_mf, open(osp.join(postproc_path, complete_manifest_name), 'w'), indent=1, separators=(',', ':'))
    else:
        json.dump(mf, open(osp.join(postproc_path, complete_manifest_name), 'w'), indent=1, separators=(',', ':'))

    return postproc_path

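# Illustrative call sketch (assumption): judging from how `bounds` is unpacked into
# longitude1/longitude2/latitude1/latitude2 above, it is a
# (min_lon, max_lon, min_lat, max_lat) tuple; when omitted it is derived from
# region_cfg.bbox. RegionConfig is a stand-in for the real region configuration object.
from collections import namedtuple
from datetime import datetime

RegionConfig = namedtuple('RegionConfig', ['code', 'bbox'])
# bbox ordering below is inferred from the bounds construction above (only consulted when bounds is None)
region = RegionConfig(code='colorado', bbox=(36.9, -109.1, 41.0, -102.0))
cycle = datetime(2021, 8, 15, 0)
path = postprocess_cycle(cycle, region, 'wksp', bounds=(-109.1, -102.0, 36.9, 41.0))
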
def questionnaire():
    """
    Give a questionnaire to the user (with sensible default) to create a simple domain configuration.

    :return: a dictionary with the configuration of a fire simulation
    """
    cfg = {}
    cfg['wps_namelist_path'] = 'etc/nlists/default.wps'
    cfg['wrf_namelist_path'] = 'etc/nlists/default.input'
    cfg['fire_namelist_path'] = 'etc/nlists/default.fire'
    cfg['emissions_namelist_path'] = 'etc/nlists/default.fire_emissions'

    print_question('Enter a name for your job [default = experiment]:')
    cfg['grid_code'] = read_string('experiment')
    print_answer('Name is %s' % cfg['grid_code'])

    print_header('IGNITION section')
    newline()
    print_question('Enter the ignition point as lat, lon [default = 39.1, -104.3]:')
    ign_latlon = read_location('39.1, -104.3')
    print_answer('Ignition point is at latlon %g %g' % ign_latlon)

    print_question('Enter the ignition time in UTC timezone as an ESMF string or relative time')
    print('Examples: 2016-03-30_16:00:00 or T-60 or T+30 (minutes), [default = now]')
    ign_utc = read_time_indicator('T+0')
    print_answer('Ignition time is %s\n' % str(ign_utc))

    print_question('Enter the duration of the ignition process in seconds [default = 240]')
    ign_dur = read_integer('240')
    print_answer('The ignition will remain active for %d seconds.' % ign_dur)

    newline()
    print_header('SIMULATION section')

    start_utc = utils.round_time_to_hour(ign_utc - timedelta(minutes=30))
    while True:
        print_question('Enter the start time of the simulation in UTC timezone [default = 30 mins before ignition time]')
        start_utc = read_time_indicator(utils.utc_to_esmf(start_utc))
        start_utc = utils.round_time_to_hour(start_utc)
        if start_utc < ign_utc:
            break
        print('Simulation start must be before ignition time %s' % utils.utc_to_esmf(ign_utc))
    cfg['start_utc'] = utils.utc_to_esmf(start_utc)
    print_answer('Simulation will start at %s.' % cfg['start_utc'])

    end_utc = start_utc + timedelta(hours=5)
    while True:
        print_question('Enter the end time of the simulation [default = start_time + 5 hours]')
        end_utc = read_time_indicator(utils.utc_to_esmf(end_utc))
        end_utc = utils.round_time_to_hour(end_utc, True)
        if end_utc > ign_utc:
            break
        print('Simulation end must be after ignition time %s' % utils.utc_to_esmf(ign_utc))
    cfg['end_utc'] = utils.utc_to_esmf(end_utc)
    print_answer('Simulation will end at %s.' % cfg['end_utc'])

    print_question('Please enter the cell size in meters for the atmospheric mesh [default 1000]')
    cell_size = read_integer('1000')
    print_answer('The cell size is %d meters.' % cell_size)

    print_question('Enter the number of grid cells in the longitudinal and latitudinal position [default 61, 61]')
    domain_size = read_size('61, 61')
    print_answer('The domain size is %d x %d grid points.' % domain_size)

    print_question('Enter the refinement ratio for the fire grid [default=40]')
    refinement = read_integer('40')
    print_answer('The refinement ratio is %d for a fire mesh size of %g meters.' % (refinement, float(cell_size) / refinement))

    print_question('Enter the interval between output frames in minutes [default=15]')
    history_interval = read_integer('15')
    print_answer('The interval between output frames is %d minutes.' % history_interval)

    cfg['grib_source'] = select_grib_source(start_utc)
    print_answer('Selected GRIB2 source %s' % cfg['grib_source'])

    print_question('Process satellite data? [default=no]')
    sat = read_boolean('no')
    if sat:
        cfg['satellite_source'] = ["Aqua", "Terra", "SNPP"]
        print_answer('Selected Satellite sources %s' % cfg['satellite_source'])
    else:
        print_answer('No Satellite sources selected.')

    def_geog_path = None
    try:
        def_geog_path = json.load(open('etc/conf.json'))['wps_geog_path']
    except Exception as e:
        print(e)
    print_question('Enter the path to geogrid information (WPS-GEOG) [default=%s]' % def_geog_path)
    cfg['wps_geog_path'] = read_string(def_geog_path)
    print_answer('The WPS-GEOG path is %s' % cfg['wps_geog_path'])

    cfg['domains'] = {
        '1': {
            'cell_size': (cell_size, cell_size),
            'domain_size': domain_size,
            'subgrid_ratio': (refinement, refinement),
            'geog_res': '.3s',
            'center_latlon': ign_latlon,
            'truelats': (ign_latlon[0], ign_latlon[0]),
            'stand_lon': ign_latlon[1],
            'history_interval': history_interval,
            'time_step': max(1, 5 * cell_size // 1000)
        }
    }

    cfg['ignitions'] = {
        '1': [{
            'time_utc': utils.utc_to_esmf(ign_utc),
            'duration_s': ign_dur,
            'latlon': ign_latlon
        }]
    }

    print_header('PARALLEL JOB configuration')

    print_question('Enter number of parallel nodes [default=8]')
    cfg['num_nodes'] = read_integer('8')
    print_answer('Parallel job will use %d nodes.' % cfg['num_nodes'])

    print_question('Enter number of cores per node [default=12]')
    cfg['ppn'] = read_integer('12')
    print_answer('Parallel job will use %d cores per node.' % cfg['ppn'])

    print_question('Enter the max walltime in hours [default=2]')
    cfg['wall_time_hrs'] = read_integer('2')
    print_answer('Parallel job will reserve %d hours of walltime.' % cfg['wall_time_hrs'])

    qsys_opts = queuing_systems()
    while True:
        def_qsys = socket.gethostname().split('.')[0]
        print('Enter queuing system [choices are %s, default is %s]' % (qsys_opts, def_qsys))
        cfg['qsys'] = read_string(def_qsys)
        if cfg['qsys'] in qsys_opts:
            break
        print('Invalid queuing system selected, please try again')
    print_answer('Parallel job will submit for %s' % cfg['qsys'])

    print_header('POSTPROCESSING')
    print_question('Which variables should wrfxpy postprocess? [default T2,PSFC,WINDSPD,WINDVEC,FIRE_AREA,FGRNHFX,FLINEINT,SMOKE_INT]')
    pp_vars = read_string('T2,PSFC,WINDSPD,WINDVEC,FIRE_AREA,FGRNHFX,FLINEINT,SMOKE_INT').split(',')
    print_answer('Will postprocess %d variables.' % len(pp_vars))

    print_question('Send variables to visualization server? [default=no]')
    shuttle = read_boolean('no')
    desc = ''
    if shuttle:
        print_question('Enter a short description of your job [default=experimental run]')
        desc = read_string('experimental run')

    cfg['postproc'] = {'1': pp_vars}
    if shuttle:
        cfg['postproc']['shuttle'] = 'incremental'
        cfg['postproc']['description'] = desc

    return cfg

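# Illustrative follow-up sketch (assumption): persisting the questionnaire answers as a
# JSON job description that can later be fed to the forecast pipeline. The output file
# name is arbitrary.
import json

if __name__ == '__main__':
    cfg = questionnaire()
    with open('job.json', 'w') as f:
        json.dump(cfg, f, indent=4, separators=(',', ': '))
    print('job configuration written to job.json')
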
def execute(args, job_args):
    """
    Executes a weather/fire simulation.

    :param args: a dictionary with all keys needed to start the simulation (listed below)
    :param job_args: the original JSON job description given to the forecast

    Keys in args:
    :param grid_code: the (unique) code of the grid that is used
    :param sys_install_path: system installation directory
    :param start_utc: start time of simulation in UTC
    :param end_utc: end time of simulation in UTC
    :param workspace_path: workspace directory
    :param wps_install_path: installation directory of WPS that will be used
    :param wrf_install_path: installation directory of WRF that will be used
    :param grib_source: a string identifying a valid GRIB2 source
    :param wps_namelist_path: the path to the namelist.wps file that will be used as template
    :param wrf_namelist_path: the path to the namelist.input file that will be used as template
    :param fire_namelist_path: the path to the namelist.fire file that will be used as template
    :param wps_geog_path: the path to the geogrid data directory providing terrain/fuel data
    :param email_notification: dictionary containing keys address and events indicating when a mail should be fired off
    """
    # step 0: initialize the job state from the arguments
    js = JobState(args)

    jobdir = osp.abspath(osp.join(js.workspace_path, js.job_id))
    make_clean_dir(jobdir)

    json.dump(job_args, open(osp.join(jobdir, 'input.json'), 'w'), indent=4, separators=(',', ': '))
    jsub = make_job_file(js)
    json.dump(jsub, open(jsub.jobfile, 'w'), indent=4, separators=(',', ': '))

    logging.info("job %s starting [%d hours to forecast]." % (js.job_id, js.fc_hrs))
    sys.stdout.flush()
    send_email(js, 'start', 'Job %s started.' % js.job_id)

    # read in all namelists
    js.wps_nml = f90nml.read(js.args['wps_namelist_path'])
    js.wrf_nml = f90nml.read(js.args['wrf_namelist_path'])
    js.fire_nml = f90nml.read(js.args['fire_namelist_path'])
    js.ems_nml = None
    if 'emissions_namelist_path' in js.args:
        js.ems_nml = f90nml.read(js.args['emissions_namelist_path'])

    # Parse and setup the domain configuration
    js.domain_conf = WPSDomainConf(js.domains)

    num_doms = len(js.domain_conf)
    js.wps_nml['share']['start_date'] = [utc_to_esmf(js.start_utc)] * num_doms
    js.wps_nml['share']['end_date'] = [utc_to_esmf(js.end_utc)] * num_doms
    js.wps_nml['share']['interval_seconds'] = 3600

    logging.info("number of domains defined is %d." % num_doms)

    # build directories in workspace
    js.wps_dir = osp.abspath(osp.join(js.workspace_path, js.job_id, 'wps'))
    js.wrf_dir = osp.abspath(osp.join(js.workspace_path, js.job_id, 'wrf'))

    #check_obj(args,'args')
    #check_obj(js,'Initial job state')

    # step 1: clone WPS and WRF directories
    logging.info("cloning WPS into %s" % js.wps_dir)
    cln = WRFCloner(js.args)
    cln.clone_wps(js.wps_dir, js.grib_source.vtables(), [])

    # step 2: process domain information and patch namelist for geogrid
    js.wps_nml['geogrid']['geog_data_path'] = js.args['wps_geog_path']
    js.domain_conf.prepare_for_geogrid(js.wps_nml, js.wrf_nml, js.wrfxpy_dir, js.wps_dir)
    f90nml.write(js.wps_nml, osp.join(js.wps_dir, 'namelist.wps'), force=True)

    # do steps 2 & 3 & 4 in parallel (two execution streams)
    #  -> GEOGRID ->
    #  -> GRIB2 download -> UNGRIB ->
    proc_q = Queue()
    geogrid_proc = Process(target=run_geogrid, args=(js, proc_q))
    grib_proc = Process(target=retrieve_gribs_and_run_ungrib, args=(js, proc_q))

    logging.info('starting GEOGRID and GRIB2/UNGRIB')
    geogrid_proc.start()
    grib_proc.start()

    # wait until both tasks are done
    logging.info('waiting until both tasks are done')
    grib_proc.join()
    geogrid_proc.join()

    if proc_q.get() != 'SUCCESS':
        return

    if proc_q.get() != 'SUCCESS':
        return

    proc_q.close()

    # step 5: execute metgrid after ensuring all grids will be processed
    js.domain_conf.prepare_for_metgrid(js.wps_nml)
    f90nml.write(js.wps_nml, osp.join(js.wps_dir, 'namelist.wps'), force=True)

    logging.info("running METGRID")
    Metgrid(js.wps_dir).execute().check_output()

    send_email(js, 'metgrid', 'Job %s - metgrid complete.' % js.job_id)
    logging.info("cloning WRF into %s" % js.wrf_dir)

    # step 6: clone wrf directory, symlink all met_em* files, make namelists
    cln.clone_wrf(js.wrf_dir, [])
    symlink_matching_files(js.wrf_dir, js.wps_dir, "met_em*")
    time_ctrl = update_time_control(js.start_utc, js.end_utc, num_doms)
    js.wrf_nml['time_control'].update(time_ctrl)
    update_namelist(js.wrf_nml, js.grib_source.namelist_keys())
    if 'ignitions' in js.args:
        update_namelist(js.wrf_nml, render_ignitions(js, num_doms))

    # if we have an emissions namelist, automatically turn on the tracers
    if js.ems_nml is not None:
        logging.debug('namelist.fire_emissions given, turning on tracers')
        f90nml.write(js.ems_nml, osp.join(js.wrf_dir, 'namelist.fire_emissions'), force=True)
        js.wrf_nml['dynamics']['tracer_opt'] = [2] * num_doms

    f90nml.write(js.wrf_nml, osp.join(js.wrf_dir, 'namelist.input'), force=True)
    f90nml.write(js.fire_nml, osp.join(js.wrf_dir, 'namelist.fire'), force=True)

    # step 7: execute real.exe
    logging.info("running REAL")

    # try to run Real twice as it sometimes fails the first time
    # it's not clear why this error happens
    try:
        Real(js.wrf_dir).execute().check_output()
    except Exception as e:
        logging.error('Real step failed with exception %s, retrying ...' % str(e))
        Real(js.wrf_dir).execute().check_output()

    # step 7b: if requested, do fuel moisture DA
    if js.fmda is not None:
        logging.info('running fuel moisture data assimilation')
        for dom in js.fmda.domains:
            assimilate_fm10_observations(osp.join(js.wrf_dir, 'wrfinput_d%02d' % dom), None, js.fmda.token)

    # step 8: execute wrf.exe on parallel backend
    logging.info('submitting WRF job')
    send_email(js, 'wrf_submit', 'Job %s - wrf job submitted.' % js.job_id)

    js.task_id = "sim-" + js.grid_code + "-" + utc_to_esmf(js.start_utc)[:10]
    jsub.job_num = WRF(js.wrf_dir, js.qsys).submit(js.task_id, js.num_nodes, js.ppn, js.wall_time_hrs)

    send_email(js, 'wrf_exec', 'Job %s - wrf job starting now with id %s.' % (js.job_id, js.task_id))
    logging.info("WRF job %s submitted with id %s, waiting for rsl.error.0000" % (jsub.job_num, js.task_id))

    jobfile = osp.abspath(osp.join(js.workspace_path, js.job_id, 'job.json'))
    json.dump(jsub, open(jobfile, 'w'), indent=4, separators=(',', ': '))

    process_output(js.job_id)

def questionnaire():
    """
    Give a questionnaire to the user (with sensible defaults) to create a simple domain configuration.

    :return: a dictionary with the configuration of a fire simulation
    """
    cfg = {}
    cfg['wps_namelist_path'] = 'etc/nlists/default.wps'
    cfg['wrf_namelist_path'] = 'etc/nlists/default.input'
    cfg['fire_namelist_path'] = 'etc/nlists/default.fire'
    cfg['emissions_namelist_path'] = 'etc/nlists/default.fire_emissions'

    print_question('Enter a name for your job [default = experiment]:')
    cfg['grid_code'] = read_string('experiment')
    print_answer('Name is %s' % cfg['grid_code'])

    print_header('IGNITION section')
    newline()
    print_question('Enter the ignition point as lat, lon [default = 39.1, -104.3]:')
    ign_latlon = read_location('39.1, -104.3')
    print_answer('Ignition point is at latlon %g %g' % ign_latlon)

    print_question('Enter the ignition time in UTC timezone as an ESMF string or relative time')
    print('Examples: 2016-03-30_16:00:00 or T-60 or T+30 (minutes), [default = now]')
    ign_utc = read_time_indicator('T+0')
    print_answer('Ignition time is %s\n' % str(ign_utc))

    print_question('Enter the duration of the ignition process in seconds [default = 240]')
    ign_dur = read_integer('240')
    print_answer('The ignition will remain active for %d seconds.' % ign_dur)
    newline()

    print_header('SIMULATION section')
    start_utc = utils.round_time_to_hour(ign_utc - timedelta(minutes=30))
    while True:
        print_question('Enter the start time of the simulation in UTC timezone [default = 30 mins before ignition time]')
        start_utc = read_time_indicator(utils.utc_to_esmf(start_utc))
        start_utc = utils.round_time_to_hour(start_utc)
        if start_utc < ign_utc:
            break
        print('Simulation start must be before ignition time %s' % utils.utc_to_esmf(ign_utc))
    cfg['start_utc'] = utils.utc_to_esmf(start_utc)
    print_answer('Simulation will start at %s.' % cfg['start_utc'])

    end_utc = start_utc + timedelta(hours=5)
    while True:
        print_question('Enter the end time of the simulation [default = start_time + 5 hours]')
        end_utc = read_time_indicator(utils.utc_to_esmf(end_utc))
        end_utc = utils.round_time_to_hour(end_utc, True)
        if end_utc > ign_utc:
            break
        print('Simulation end must be after ignition time %s' % utils.utc_to_esmf(ign_utc))
    cfg['end_utc'] = utils.utc_to_esmf(end_utc)
    print_answer('Simulation will end at %s.' % cfg['end_utc'])

    print_question('Please enter the cell size in meters for the atmospheric mesh [default 1000]')
    cell_size = read_integer('1000')
    print_answer('The cell size is %d meters.' % cell_size)

    print_question('Enter the number of grid cells in the longitudinal and latitudinal directions [default 61, 61]')
    domain_size = read_size('61, 61')
    print_answer('The domain size is %d x %d grid points.' % domain_size)

    print_question('Enter the refinement ratio for the fire grid [default=40]')
    refinement = read_integer('40')
    print_answer('The refinement ratio is %d for a fire mesh size of %g meters.' % (refinement, float(cell_size)/refinement))

    print_question('Enter the interval between output frames in minutes [default=15]')
    history_interval = read_integer('15')
    print_answer('The interval between output frames is %d minutes.' % history_interval)

    cfg['grib_source'] = select_grib_source(start_utc)
    print_answer('Selected GRIB2 source %s' % cfg['grib_source'])

    def_geog_path = None
    try:
        def_geog_path = json.load(open('etc/conf.json'))['wps_geog_path']
    except Exception as e:
        print(e)
    print_question('Enter the path to geogrid information (WPS-GEOG) [default=%s]' % def_geog_path)
    cfg['wps_geog_path'] = read_string(def_geog_path)
    print_answer('The WPS-GEOG path is %s' % cfg['wps_geog_path'])

    cfg['domains'] = {
        '1': {
            'cell_size': (cell_size, cell_size),
            'domain_size': domain_size,
            'subgrid_ratio': (refinement, refinement),
            'geog_res': '.3s',
            'center_latlon': ign_latlon,
            'truelats': (ign_latlon[0], ign_latlon[0]),
            'stand_lon': ign_latlon[1],
            'history_interval': history_interval,
            # integer division keeps time_step a whole number of seconds
            'time_step': max(1, 5 * cell_size // 1000)
        }
    }

    cfg['ignitions'] = {
        '1': [
            {
                'time_utc': utils.utc_to_esmf(ign_utc),
                'duration_s': ign_dur,
                'latlon': ign_latlon
            }
        ]
    }

    print_header('PARALLEL JOB configuration')

    print_question('Enter number of parallel nodes [default=8]')
    cfg['num_nodes'] = read_integer('8')
    print_answer('Parallel job will use %d nodes.' % cfg['num_nodes'])

    print_question('Enter number of cores per node [default=12]')
    cfg['ppn'] = read_integer('12')
    print_answer('Parallel job will use %d cores per node.' % cfg['ppn'])

    print_question('Enter the max walltime in hours [default=2]')
    cfg['wall_time_hrs'] = read_integer('2')
    print_answer('Parallel job will reserve %d hours of walltime.' % cfg['wall_time_hrs'])

    qsys_opts = queuing_systems()
    while True:
        def_qsys = socket.gethostname().split('.')[0]
        print('Enter queuing system [choices are %s, default is %s]' % (qsys_opts, def_qsys))
        cfg['qsys'] = read_string(def_qsys)
        if cfg['qsys'] in qsys_opts:
            break
        print('Invalid queuing system selected, please try again')
    print_answer('Parallel job will submit for %s' % cfg['qsys'])

    print_header('POSTPROCESSING')
    print_question('Which variables should wrfxpy postprocess? [default T2,PSFC,WINDSPD,WINDVEC,FIRE_AREA,FGRNHFX,FLINEINT,SMOKE_INT]')
    pp_vars = read_string('T2,PSFC,WINDSPD,WINDVEC,FIRE_AREA,FGRNHFX,FLINEINT,SMOKE_INT').split(',')
    print_answer('Will postprocess %d variables.' % len(pp_vars))

    print_question('Send variables to visualization server? [default=no]')
    shuttle = read_boolean('no')

    desc = ''
    if shuttle:
        print_question('Enter a short description of your job [default=experimental run]')
        desc = read_string('experimental run')

    cfg['postproc'] = {'1': pp_vars}
    if shuttle:
        cfg['postproc']['shuttle'] = 'incremental'
        cfg['postproc']['description'] = desc

    return cfg
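The returned dictionary is plain data, so it can be persisted and re-used to launch the same job later. A minimal sketch follows; the output file name is an assumption, not part of the questionnaire itself.

# hypothetical: save the interactive answers as a job description file
import json

cfg = questionnaire()
with open('experiment.json', 'w') as f:
    json.dump(cfg, f, indent=4)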
def postprocess_cycle(cycle, region_cfg, wksp_path):
    """
    Build rasters from the computed fuel moisture.

    :param cycle: the UTC cycle time
    :param region_cfg: the region configuration
    :param wksp_path: the workspace path
    :return: the postprocessing path
    """
    model_path = compute_model_path(cycle, region_cfg.code, wksp_path)
    year_month = '%04d%02d' % (cycle.year, cycle.month)
    cycle_dir = 'fmda-%s-%04d%02d%02d-%02d' % (region_cfg.code, cycle.year, cycle.month, cycle.day, cycle.hour)
    postproc_path = osp.join(wksp_path, year_month, cycle_dir)

    # read in the longitudes and latitudes
    geo_path = osp.join(wksp_path, '%s-geo.nc' % region_cfg.code)
    logging.info('CYCLER reading longitudes and latitudes from NetCDF file %s' % geo_path)
    d = netCDF4.Dataset(geo_path)
    lats = d.variables['XLAT'][:,:]
    lons = d.variables['XLONG'][:,:]
    d.close()

    var_wisdom = {
        'fm': {
            'native_unit': '-',
            'colorbar': '-',
            'colormap': 'jet_r',
            'scale': [0.0, 0.4]
        },
        'EQUILd FM': {
            'name': 'Drying equilibrium FM',
            'native_unit': '-',
            'colorbar': 'i-',
            'colormap': 'jet_r',
            'scale': [0.0, 0.4]
        },
        'EQUILw FM': {
            'name': 'Wetting equilibrium FM',
            'native_unit': '-',
            'colorbar': 'i-',
            'colormap': 'jet_r',
            'scale': [0.0, 0.4]
        },
        'RH': {
            'name': 'Relative humidity',
            'native_unit': '%',
            'colorbar': '%',
            'colormap': 'jet_r',
            'scale': [0.0, 100.0]
        },
        'TD': {
            'name': 'Dew point temperature at 2m',
            'native_unit': 'K',
            'colorbar': 'F',
            'colormap': 'jet',
            'scale': [270.0, 320.0]
        },
        'T2': {
            'name': 'Temperature at 2m',
            'native_unit': 'K',
            'colorbar': 'F',
            'colormap': 'jet',
            'scale': [270.0, 320.0]
        },
        'PRECIPA': {
            'name': 'RTMA precipa',
            'native_unit': 'kg/m^2/h',
            'colorbar': 'kg/m^2/h',
            'colormap': 'jet_r',
            'scale': [0.0, 2.0]
        },
        'PRECIP': {
            'name': 'Precipitation',
            'native_unit': 'mm/h',
            'colorbar': 'mm/h',
            'colormap': 'jet_r',
            'scale': [0.0, 2.0]
        },
        'HGT': {
            'name': 'Terrain height',
            'native_unit': 'm',
            'colorbar': 'm',
            'colormap': 'jet_r',
            'scale': [-86.0, 4500.0]
        },
    }

    # all variables with wisdom entries; only the subset below is currently rendered
    # show = ['TD', 'PRECIPA', 'T2', 'HGT', 'PRECIP', 'RH', 'EQUILd FM', 'EQUILw FM']
    show = ['T2', 'HGT', 'PRECIP', 'RH']

    esmf_cycle = utc_to_esmf(cycle)
    mf = {"1": {esmf_cycle: {}}}
    manifest_name = 'fmda-%s-%04d%02d%02d-%02d.json' % (region_cfg.code, cycle.year, cycle.month, cycle.day, cycle.hour)
    ensure_dir(osp.join(postproc_path, manifest_name))

    # read and process model variables
    with netCDF4.Dataset(model_path) as d:
        for i, name in [(0, '1-hr FM'), (1, '10-hr FM'), (2, '100-hr FM')]:
            fm_wisdom = var_wisdom['fm']
            fm_wisdom['name'] = '%s fuel moisture' % name
            raster_png, coords, cb_png = scalar_field_to_raster(d.variables['FMC_GC'][:,:,i], lats, lons, fm_wisdom)
            raster_name = 'fmda-%s-raster.png' % name
            cb_name = 'fmda-%s-raster-cb.png' % name
            with open(osp.join(postproc_path, raster_name), 'wb') as f:
                f.write(raster_png)
            with open(osp.join(postproc_path, cb_name), 'wb') as f:
                f.write(cb_png)
            mf["1"][esmf_cycle][name] = {'raster': raster_name, 'coords': coords, 'colorbar': cb_name}
        for name in show:
            raster_png, coords, cb_png = scalar_field_to_raster(d.variables[name][:,:], lats, lons, var_wisdom[name])
            raster_name = 'fmda-%s-raster.png' % name
            cb_name = 'fmda-%s-raster-cb.png' % name
            with open(osp.join(postproc_path, raster_name), 'wb') as f:
                f.write(raster_png)
            with open(osp.join(postproc_path, cb_name), 'wb') as f:
                f.write(cb_png)
            mf["1"][esmf_cycle][name] = {'raster': raster_name, 'coords': coords, 'colorbar': cb_name}

    logging.info('writing manifest file %s' % osp.join(postproc_path, manifest_name))
    with open(osp.join(postproc_path, manifest_name), 'w') as f:
        json.dump(mf, f, indent=1, separators=(',', ':'))
    logging.info(json.dumps(mf))

    return postproc_path
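A minimal sketch of a consumer of the manifest written above, assuming only the structure built in this function (domain id, ESMF cycle time, variable name, then raster/coords/colorbar entries); the helper name is hypothetical.

import json
import os.path as osp

def list_rasters(postproc_path, manifest_name):
    # hypothetical reader that walks the manifest produced by postprocess_cycle
    with open(osp.join(postproc_path, manifest_name)) as f:
        mf = json.load(f)
    for dom_id, times in mf.items():
        for esmf_time, variables in times.items():
            for var, entry in variables.items():
                print(dom_id, esmf_time, var, entry['raster'], entry['colorbar'])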