def do_L1_batch(main_ui, cf_level):
    """
    Purpose:
     Run L1 processing for every control file listed in cf_level.
     Checks main_ui.stop_flag before each file so the user can interrupt
     the batch.  Errors in a single file are logged and the batch moves
     on to the next file.
    Returns:
     0 if a control file does not exist, 1 otherwise.
    """
    for key in cf_level:
        # bail out if the user has requested a stop
        if main_ui.stop_flag:
            break
        cf_path = cf_level[key]
        short_name = os.path.split(cf_path)[1]
        logger.info("Starting L1 processing with " + short_name)
        if not check_file_exits(cf_path):
            return 0
        try:
            cf_l1 = pfp_io.get_controlfilecontents(cf_path)
            if not pfp_compliance.l1_update_controlfile(cf_l1):
                continue
            ds1 = pfp_levels.l1qc(cf_l1)
            outfilename = pfp_io.get_outfilenamefromcf(cf_l1)
            pfp_io.NetCDFWrite(outfilename, ds1)
            logger.info("Finished L1 processing with " + short_name)
            logger.info("")
        except Exception:
            # log the traceback and carry on with the next control file
            logger.error("Error occurred during L1 processing " + short_name)
            logger.error(traceback.format_exc())
            continue
    return 1
def do_climatology_batch(main_ui, cf_level):
    """
    Purpose:
     Run the climatology utility for every control file listed in
     cf_level.  main_ui.stop_flag is polled before each file so the
     user can interrupt the batch.
    Returns:
     0 if a control file does not exist, 1 otherwise.
    """
    for _, cf_uri in cf_level.items():
        if main_ui.stop_flag:
            # user requested a stop, abandon the remaining files
            break
        file_name = os.path.split(cf_uri)[1]
        logger.info("Starting climatology with " + file_name)
        if not check_file_exits(cf_uri):
            return 0
        try:
            cf_ct = pfp_io.get_controlfilecontents(cf_uri)
            if pfp_compliance.climatology_update_controlfile(cf_ct):
                pfp_clim.climatology(cf_ct)
                logger.info("Finished climatology with " + file_name)
                logger.info("")
        except Exception:
            logger.error("Error occurred during climatology with " + file_name)
            logger.error(traceback.format_exc())
            continue
    return 1
def do_mpt_batch(main_ui, cf_level):
    """
    Purpose:
     Run the MPT u* threshold routine for every control file listed in
     cf_level, forcing batch call mode with plots suppressed.
    Returns:
     0 if a control file does not exist, 1 otherwise.
    """
    for key in cf_level:
        # stop if the user has asked us to
        if main_ui.stop_flag:
            break
        cf_uri = cf_level[key]
        short_name = os.path.split(cf_uri)[1]
        logger.info("Starting MPT with " + short_name)
        if not check_file_exits(cf_uri):
            return 0
        try:
            cf = pfp_io.get_controlfilecontents(cf_uri)
            if not pfp_compliance.mpt_update_controlfile(cf):
                continue
            # force batch mode and suppress plot windows
            if "Options" not in cf:
                cf["Options"] = {}
            cf["Options"]["call_mode"] = "batch"
            cf["Options"]["show_plots"] = "No"
            pfp_mpt.mpt_main(cf)
            logger.info("Finished MPT with " + short_name)
            logger.info("")
        except Exception:
            logger.error("Error occurred during MPT with " + short_name)
            logger.error(traceback.format_exc())
            continue
    return 1
def do_L2_batch(main_ui, cf_level):
    """
    Purpose:
     Run L2 quality control for every control file listed in cf_level
     and, when the control file has a [Plots] section, plot the L1 and
     L2 data.  main_ui.stop_flag is checked before each file so the
     user can interrupt the batch.
    Returns:
     0 if a control file or its input netCDF file could not be used,
     1 otherwise.
    """
    for i in list(cf_level.keys()):
        # check the stop flag
        if main_ui.stop_flag:
            # break out of the loop if user requested stop
            break
        cf_file_name = os.path.split(cf_level[i])
        msg = "Starting L2 processing with " + cf_file_name[1]
        logger.info(msg)
        if not check_file_exits(cf_level[i]):
            return 0
        try:
            cf_l2 = pfp_io.get_controlfilecontents(cf_level[i])
            if not pfp_compliance.l2_update_controlfile(cf_l2):
                continue
            # force batch mode and suppress plot windows
            if "Options" not in cf_l2:
                cf_l2["Options"] = {}
            cf_l2["Options"]["call_mode"] = "batch"
            cf_l2["Options"]["show_plots"] = "No"
            infilename = pfp_io.get_infilenamefromcf(cf_l2)
            ds1 = pfp_io.NetCDFRead(infilename)
            # a non-zero return code means the read failed; was a bare
            # "return" (None) - now 0 for consistency with the 0/1
            # failure/success convention used by these batch routines
            if ds1.info["returncodes"]["value"] != 0:
                return 0
            ds2 = pfp_levels.l2qc(cf_l2, ds1)
            outfilename = pfp_io.get_outfilenamefromcf(cf_l2)
            pfp_io.NetCDFWrite(outfilename, ds2)
            msg = "Finished L2 processing with " + cf_file_name[1]
            logger.info(msg)
            if "Plots" in list(cf_l2.keys()):
                logger.info("Plotting L1 and L2 data")
                for nFig in list(cf_l2['Plots'].keys()):
                    # skip plots the user has disabled
                    if "(disabled)" in nFig:
                        continue
                    plt_cf = cf_l2['Plots'][str(nFig)]
                    # "xy" plots get plotxy(), everything else (including a
                    # missing "type" key) is a time series plot
                    if 'type' in plt_cf.keys():
                        if str(plt_cf['type']).lower() == 'xy':
                            pfp_plot.plotxy(cf_l2, nFig, plt_cf, ds1, ds2)
                        else:
                            pfp_plot.plottimeseries(cf_l2, nFig, ds1, ds2)
                    else:
                        pfp_plot.plottimeseries(cf_l2, nFig, ds1, ds2)
                logger.info("Finished plotting L1 and L2 data")
            logger.info("")
        except Exception:
            msg = "Error occurred during L2 processing " + cf_file_name[1]
            logger.error(msg)
            error_message = traceback.format_exc()
            logger.error(error_message)
            continue
    return 1
def do_reddyproc_batch(main_ui, cf_level):
    """
    Purpose:
     Write REddyProc-format TSV output for every control file listed in
     cf_level.  main_ui.stop_flag is checked before each file so the
     user can interrupt the batch.
    Returns:
     0 if a control file does not exist, 1 otherwise.
    """
    for i in list(cf_level.keys()):
        # check the stop flag
        if main_ui.stop_flag:
            # break out of the loop if user requested stop
            break
        cf_file_name = os.path.split(cf_level[i])
        msg = "Starting REddyProc output with " + cf_file_name[1]
        logger.info(msg)
        if not check_file_exits(cf_level[i]):
            return 0
        try:
            cf = pfp_io.get_controlfilecontents(cf_level[i])
            pfp_io.write_tsv_reddyproc(cf)
            msg = "Finished REddyProc output with " + cf_file_name[1]
            logger.info(msg)
            logger.info("")
        except Exception:
            # match the other batch routines: log the error and move on to
            # the next control file instead of aborting the whole batch
            msg = "Error occurred during REddyProc output with " + cf_file_name[1]
            logger.error(msg)
            error_message = traceback.format_exc()
            logger.error(error_message)
            continue
    return 1
def do_batch_fingerprints(cfg):
    """
    Purpose:
     Plot fingerprints at the end of concatenation, L4 and L5.
     Builds a fingerprint control file from the standard template and
     the output file named in cfg, then calls pfp_plot.plot_fingerprint.
    Author: PRI
    Date: Back in the day
    """
    template_uri = os.path.join("controlfiles", "standard", "fingerprint.txt")
    fp_cfg = pfp_io.get_controlfilecontents(template_uri)
    out_name = pfp_io.get_outfilenamefromcf(cfg)
    out_dir, out_base = os.path.split(out_name)
    plot_path = pfp_utils.get_keyvaluefromcf(cfg, ["Files"], "plot_path", default="plots/")
    # point the fingerprint control file at the output of this level
    fp_cfg["Files"] = {"file_path": os.path.join(out_dir, ""),
                       "in_filename": out_base,
                       "plot_path": plot_path}
    # force batch mode and suppress plot windows
    fp_cfg["Options"] = {"call_mode": "batch", "show_plots": "No"}
    logger.info("Doing fingerprint plots using " + fp_cfg["Files"]["in_filename"])
    pfp_plot.plot_fingerprint(fp_cfg)
    logger.info("Finished fingerprint plots")
    return
def do_L5_batch(main_ui, cf_level):
    """
    Purpose:
     Run L5 processing for every control file listed in cf_level.  The
     control file keys are processed in ascending numeric order.
     main_ui.stop_flag is checked before each file so the user can
     interrupt the batch.
    Returns:
     0 if a control file or its input netCDF file could not be used,
     1 otherwise.
    """
    sites = sorted(list(cf_level.keys()), key=int)
    for i in sites:
        # check the stop flag
        if main_ui.stop_flag:
            # break out of the loop if user requested stop
            break
        cf_file_name = os.path.split(cf_level[i])
        msg = "Starting L5 processing with " + cf_file_name[1]
        logger.info(msg)
        if not check_file_exits(cf_level[i]):
            return 0
        try:
            cf_l5 = pfp_io.get_controlfilecontents(cf_level[i])
            if not pfp_compliance.l5_update_controlfile(cf_l5):
                continue
            # force batch mode and suppress plot windows
            if "Options" not in cf_l5:
                cf_l5["Options"] = {}
            cf_l5["Options"]["call_mode"] = "batch"
            cf_l5["Options"]["show_plots"] = "No"
            infilename = pfp_io.get_infilenamefromcf(cf_l5)
            ds4 = pfp_io.NetCDFRead(infilename)
            # a non-zero return code means the read failed; was a bare
            # "return" (None) - now 0 for consistency with the 0/1
            # failure/success convention used by these batch routines
            if ds4.info["returncodes"]["value"] != 0:
                return 0
            ds5 = pfp_levels.l5qc(None, cf_l5, ds4)
            outfilename = pfp_io.get_outfilenamefromcf(cf_l5)
            pfp_io.NetCDFWrite(outfilename, ds5)
            msg = "Finished L5 processing with " + cf_file_name[1]
            logger.info(msg)
            # plot the L5 fingerprints
            do_batch_fingerprints(cf_l5)
            logger.info("")
        except Exception:
            msg = "Error occurred during L5 with " + cf_file_name[1]
            logger.error(msg)
            error_message = traceback.format_exc()
            logger.error(error_message)
            continue
    return 1
def do_concatenate_batch(main_ui, cf_level):
    """
    Purpose:
     Concatenate netCDF files for every control file listed in cf_level,
     processing the keys in ascending numeric order, then plot the
     fingerprints for the concatenated files.
    Returns:
     0 if a control file does not exist, 1 otherwise.
    """
    for key in sorted(cf_level, key=int):
        if main_ui.stop_flag:
            # stop requested by the user
            break
        cf_uri = cf_level[key]
        short_name = os.path.split(cf_uri)[1]
        logger.info("Starting concatenation with " + short_name)
        if not check_file_exits(cf_uri):
            return 0
        try:
            cf_cc = pfp_io.get_controlfilecontents(cf_uri)
            if not pfp_compliance.concatenate_update_controlfile(cf_cc):
                continue
            info = pfp_compliance.ParseConcatenateControlFile(cf_cc)
            if not info["NetCDFConcatenate"]["OK"]:
                logger.error(" Error occurred parsing the control file " + short_name)
                continue
            pfp_io.NetCDFConcatenate(info)
            logger.info("Finished concatenation with " + short_name)
            # and then plot the fingerprints for the concatenated files
            do_batch_fingerprints(cf_cc)
            logger.info("")
        except Exception:
            logger.error("Error occurred during concatenation with " + short_name)
            logger.error(traceback.format_exc())
            continue
    return 1
def do_ecostress_batch(main_ui, cf_level):
    """
    Purpose:
     Write ECOSTRESS CSV output for every control file listed in
     cf_level.  main_ui.stop_flag is polled before each file so the
     user can interrupt the batch.
    Returns:
     0 if a control file does not exist, 1 otherwise.
    """
    for key in cf_level:
        if main_ui.stop_flag:
            # user asked us to stop
            break
        cf_uri = cf_level[key]
        short_name = os.path.split(cf_uri)[1]
        logger.info("Starting ECOSTRESS output with " + short_name)
        if not check_file_exits(cf_uri):
            return 0
        try:
            cf = pfp_io.get_controlfilecontents(cf_uri)
            pfp_io.write_csv_ecostress(cf)
            logger.info("Finished ECOSTRESS output with " + short_name)
            logger.info("")
        except Exception:
            logger.error("Error occurred during ECOSTRESS output with " + short_name)
            logger.error(traceback.format_exc())
            continue
    return 1
if len(sys.argv) == 1: # not on the command line, so ask the user cfg_file_path = input("Enter the control file name: ") # exit if nothing selected if len(cfg_file_path) == 0: sys.exit() else: # control file name on the command line if not os.path.exists(sys.argv[1]): # control file doesn't exist logger.error("Control file %s does not exist", sys.argv[1]) sys.exit() else: cfg_file_path = sys.argv[1] cfg = pfp_io.get_controlfilecontents(cfg_file_path, mode="verbose") xl_file_path = cfg["Files"]["xl_file_path"] xl_sheet_name = cfg["Files"]["xl_sheet_name"] isd_base_path = cfg["Files"]["isd_base_path"] out_base_path = cfg["Files"]["out_base_path"] # read the site master spreadsheet site_info = read_site_master(xl_file_path, xl_sheet_name) # get a list of sites site_list = list(site_info.keys()) # creat a dictionary to hold the ISD site time steps isd_time_steps = OrderedDict() for site in site_list: # construct the output file path fluxnet_id = site_info[site]["FluxNet ID"] if len(fluxnet_id) == 0:
"standard_name": "wind_from_direction", "long_name": "Wind direction", "units": "degrees", "statistic_type": "average" } pfp_utils.CreateVariable(ds, Ws) pfp_utils.CreateVariable(ds, Wd) return logger = logging.getLogger("pfp_log") # read the control file cfg_file_path = "process_access2nc.txt" msg = " Loading the control file" logger.info(msg) cfg = pfp_io.get_controlfilecontents(cfg_file_path) cfg_labels = [ l for l in list(cfg["Variables"].keys()) if "nc" in list(cfg["Variables"][l].keys()) ] # read the site master workbook site_info = read_site_master(cfg["Files"]["site_master_file_path"], cfg["Files"]["xl_sheet_name"]) sites = list(site_info.keys()) #sites = ["Calperum"] # build the information dictionary for concatenation concatenation_info = build_concatenation_dictionary(site_info, cfg) dt_utc = [] data = init_data(cfg, site_info) new_access_base_path = cfg["Files"]["new_access_base_path"]
def do_sites_batch_dispatcher(item):
    """
    Purpose:
     This function loops over the control files for a given site listed
     in the batch control file, opens each control file to get the
     processing level and then calls the appropriate processing routine.
    Usage:
    Side effects:
    Author: PRI
    Date: April 2022
    """
    # map lower-case processing levels to the names of the pfp_batch
    # routines; the routine is looked up lazily with getattr so only
    # the routine actually requested needs to exist
    routine_names = {"l1": "do_L1_batch",
                     "l2": "do_L2_batch",
                     "l3": "do_L3_batch",
                     "concatenate": "do_concatenate_batch",
                     "climatology": "do_climatology_batch",
                     "cpd_barr": "do_cpd_barr_batch",
                     "cpd_mchugh": "do_cpd_mchugh_batch",
                     "cpd_mcnew": "do_cpd_mcnew_batch",
                     "mpt": "do_mpt_batch",
                     "l4": "do_L4_batch",
                     "l5": "do_L5_batch",
                     "l6": "do_L6_batch"}
    # the control files for this site
    cfg_site = item.cfg["Sites"][item.site]
    # process the control files in ascending numeric order of their keys
    for n in sorted(list(cfg_site.keys()), key=int):
        # open the control file to find its processing level
        cfg = pfp_io.get_controlfilecontents(cfg_site[n])
        level = str(cfg["level"])
        # the batch routines expect a single-entry dictionary mapping the
        # key to the control file name
        cf_level = {n: cfg_site[n]}
        routine_name = routine_names.get(level.lower())
        if routine_name is None:
            msg = " Unrecognised batch processing level " + str(level)
            logger.error(msg)
        else:
            getattr(pfp_batch, routine_name)(item, cf_level)