Example #1
def do_L3_batch(cf_level):
    #logger = pfp_log.change_logger_filename("pfp_log", "L3")
    for i in list(cf_level.keys()):
        cf_file_name = os.path.split(cf_level[i])
        msg = "Starting L3 processing with " + cf_file_name[1]
        logger.info(msg)
        try:
            cf = pfp_io.get_controlfilecontents(cf_level[i])
            infilename = pfp_io.get_infilenamefromcf(cf)
            ds2 = pfp_io.nc_read_series(infilename)
            if ds2.returncodes["value"] != 0: return
            ds3 = pfp_levels.l3qc(cf, ds2)
            outfilename = pfp_io.get_outfilenamefromcf(cf)
            nc_file = pfp_io.nc_open_write(outfilename)
            if nc_file is None: return
            pfp_io.nc_write_series(nc_file, ds3)
            msg = "Finished L3 processing with " + cf_file_name[1]
            logger.info(msg)
            logger.info("")
        except Exception:
            msg = "Error occurred during L3 processing with " + cf_file_name[1]
            logger.error(msg)
            error_message = traceback.format_exc()
            logger.error(error_message)
            continue
    return
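A minimal usage sketch for the batch helper above, assuming the pfp_* modules are importable; the mapping of an index to a control file path mirrors how do_L3_batch iterates over cf_level, and the paths are hypothetical.

# hypothetical control file paths for two years of L3 processing
cf_level = {"0": "controlfiles/Example/L3_2016.txt",
            "1": "controlfiles/Example/L3_2017.txt"}
do_L3_batch(cf_level)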
Example #2
def do_plot_fcvsustar():
    """
    Purpose:
     Plot Fc versus u*.
    Usage:
     pfp_top_level.do_plot_fcvsustar()
    Side effects:
     Plots annual and seasonal Fc versus u* to the screen and creates .PNG
     hardcopies of the plots.
    Author: PRI
    Date: Back in the day
    Mods:
     December 2017: rewrite for use with new GUI
    """
    logger.info("Starting Fc versus u* plots")
    try:
        file_path = pfp_io.get_filename_dialog(file_path="../Sites",
                                               title="Choose a netCDF file")
        if len(file_path) == 0 or not os.path.isfile(file_path):
            return
        # read the netCDF file
        ds = pfp_io.nc_read_series(file_path)
        if ds.returncodes["value"] != 0: return
        logger.info("Plotting Fc versus u* ...")
        pfp_plot.plot_fcvsustar(ds)
        logger.info(" Finished plotting Fc versus u*")
        logger.info("")
    except Exception:
        error_message = " An error occurred while plotting Fc versus u*, see below for details ..."
        logger.error(error_message)
        error_message = traceback.format_exc()
        logger.error(error_message)
    return
Example #3
def do_audit_analysis(base_path):
    # keep only the entries in base_path that are directories; filtering with a
    # comprehension avoids removing items from a list while iterating over it
    sites = [item for item in sorted(os.listdir(base_path))
             if os.path.isdir(os.path.join(base_path, item))]

    site_info = OrderedDict()
    all_sites = {"start_date":datetime.datetime(3000,1,1,0,0),
                 "end_date": datetime.datetime(2000,1,1,0,0)}
    n = 0
    for site in sites:
        #portal_dir = os.path.join(base_path, site, "Data", "Processed")
        portal_dir = os.path.join(base_path, site, "Data", "All")
        file_mask = os.path.join(portal_dir, "*.nc")
        files = sorted(glob.glob(file_mask))
        l3_name = os.path.join(portal_dir, site + "_L3.nc")
        if os.path.isfile(l3_name):
            print("Processing ", site)
            site_info[site] = {"file_name":l3_name}
            ds = pfp_io.nc_read_series(l3_name)
            site_info[site]["site_name"] = ds.globalattributes["site_name"]
            start_date = dateutil.parser.parse(ds.globalattributes["start_date"])
            site_info[site]["start_date"] = start_date
            end_date = dateutil.parser.parse(ds.globalattributes["end_date"])
            site_info[site]["end_date"] = end_date
            site_info[site]["X"] = numpy.array([start_date, end_date])
            site_info[site]["Y"] = numpy.array([n+1, n+1])
            n = n + 1
            all_sites["start_date"] = min([all_sites["start_date"], site_info[site]["start_date"]])
            all_sites["end_date"] = max([all_sites["end_date"], site_info[site]["end_date"]])

    with open('audit_analysis.pickle', 'wb') as handle:
        pickle.dump([all_sites, site_info], handle, protocol=pickle.HIGHEST_PROTOCOL)

    return all_sites, site_info
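do_audit_analysis pickles its two return values, so a later session can restore them without re-reading the netCDF files. A minimal sketch of reloading the pickle written above:

import pickle

with open("audit_analysis.pickle", "rb") as handle:
    all_sites, site_info = pickle.load(handle)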
Example #4
def do_L6_batch(cf_level):
    logger = pfp_log.change_logger_filename("pfp_log", "L6")
    for i in cf_level.keys():
        if not os.path.isfile(cf_level[i]):
            msg = " Control file " + cf_level[i] + " not found"
            logger.error(msg)
            continue
        cf_file_name = os.path.split(cf_level[i])
        msg = "Starting L6 processing with " + cf_file_name[1]
        logger.info(msg)
        try:
            cf = pfp_io.get_controlfilecontents(cf_level[i])
            if "Options" not in cf:
                cf["Options"] = {}
            cf["Options"]["call_mode"] = "batch"
            cf["Options"]["show_plots"] = "No"
            infilename = pfp_io.get_infilenamefromcf(cf)
            ds5 = pfp_io.nc_read_series(infilename)
            ds6 = pfp_levels.l6qc(None, cf, ds5)
            outfilename = pfp_io.get_outfilenamefromcf(cf)
            ncFile = pfp_io.nc_open_write(outfilename)
            pfp_io.nc_write_series(ncFile, ds6)
            msg = "Finished L6 processing with " + cf_file_name[1]
            logger.info(msg)
            logger.info("")
        except Exception:
            msg = "Error occurred during L6 with " + cf_file_name[1]
            logger.error(msg)
            error_message = traceback.format_exc()
            logger.error(error_message)
            continue
    return
Example #5
def ImportSeries(cf,ds):
    # check to see if there is an Imports section
    if "Imports" not in cf.keys(): return
    # number of records
    nRecs = int(ds.globalattributes["nc_nrecs"])
    # get the start and end datetime
    ldt = ds.series["DateTime"]["Data"]
    start_date = ldt[0]
    end_date = ldt[-1]
    # loop over the series in the Imports section
    for label in cf["Imports"].keys():
        import_filename = pfp_utils.get_keyvaluefromcf(cf,["Imports",label],"file_name",default="")
        if import_filename=="":
            msg = " ImportSeries: import filename not found in control file, skipping ..."
            logger.warning(msg)
            continue
        var_name = pfp_utils.get_keyvaluefromcf(cf,["Imports",label],"var_name",default="")
        if var_name=="":
            msg = " ImportSeries: variable name not found in control file, skipping ..."
            logger.warning(msg)
            continue
        ds_import = pfp_io.nc_read_series(import_filename)
        ts_import = ds_import.globalattributes["time_step"]
        ldt_import = ds_import.series["DateTime"]["Data"]
        si = pfp_utils.GetDateIndex(ldt_import,str(start_date),ts=ts_import,default=0,match="exact")
        ei = pfp_utils.GetDateIndex(ldt_import,str(end_date),ts=ts_import,default=len(ldt_import)-1,match="exact")
        data = numpy.ma.ones(nRecs)*float(c.missing_value)
        flag = numpy.ma.ones(nRecs)
        data_import,flag_import,attr_import = pfp_utils.GetSeriesasMA(ds_import,var_name,si=si,ei=ei)
        ldt_import = ldt_import[si:ei+1]
        index = pfp_utils.FindIndicesOfBInA(ldt_import,ldt)
        data[index] = data_import
        flag[index] = flag_import
        pfp_utils.CreateSeries(ds,label,data,flag,attr_import)
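ImportSeries reads only two keys per entry, file_name and var_name. A hedged sketch of the corresponding [Imports] section in a ConfigObj-style control file; the label and paths are hypothetical:

[Imports]
    [[Sws_external]]
        file_name = ../Sites/Example/Data/Example_soil.nc
        var_name = Sws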
Example #6
def mpt_main(cf):
    base_file_path = cf["Files"]["file_path"]
    nc_file_name = cf["Files"]["in_filename"]
    nc_file_path = os.path.join(base_file_path, nc_file_name)
    ds = pfp_io.nc_read_series(nc_file_path)
    out_file_paths = run_mpt_code(ds, nc_file_name)
    ustar_results = read_mpt_output(out_file_paths)
    mpt_file_path = nc_file_path.replace(".nc", "_MPT.xls")
    xl_write_mpt(mpt_file_path, ustar_results)
    return
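mpt_main only reads the [Files] section of the control file, so it can be driven with a minimal mapping. A usage sketch, assuming hypothetical paths:

# cf can be any mapping with a "Files" section; the paths are hypothetical
cf = {"Files": {"file_path": "../Sites/Example/Data/Processed",
                "in_filename": "Example_L3.nc"}}
mpt_main(cf)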
Example #7
def do_L5_batch(cf_level):
    #logger = pfp_log.change_logger_filename("pfp_log", "L5")
    for i in list(cf_level.keys()):
        if not os.path.isfile(cf_level[i]):
            msg = " Control file " + cf_level[i] + " not found"
            logger.error(msg)
            continue
        cf_file_name = os.path.split(cf_level[i])
        msg = "Starting L5 processing with " + cf_file_name[1]
        logger.info(msg)
        try:
            cf_l5 = pfp_io.get_controlfilecontents(cf_level[i])
            if "Options" not in cf_l5:
                cf_l5["Options"] = {}
            cf_l5["Options"]["call_mode"] = "batch"
            cf_l5["Options"]["show_plots"] = "No"
            infilename = pfp_io.get_infilenamefromcf(cf_l5)
            ds4 = pfp_io.nc_read_series(infilename)
            if ds4.returncodes["value"] != 0: return
            ds5 = pfp_levels.l5qc(None, cf_l5, ds4)
            outfilename = pfp_io.get_outfilenamefromcf(cf_l5)
            nc_file = pfp_io.nc_open_write(outfilename)
            if nc_file is None: return
            pfp_io.nc_write_series(nc_file, ds5)
            msg = "Finished L5 processing with " + cf_file_name[1]
            logger.info(msg)
            # now plot the fingerprints for the L5 files
            cf_fp = pfp_io.get_controlfilecontents("controlfiles/standard/fingerprint.txt")
            if "Files" not in cf_fp:
                cf_fp["Files"] = {}
            file_name = pfp_io.get_outfilenamefromcf(cf_l5)
            file_path = ntpath.split(file_name)[0] + "/"
            cf_fp["Files"]["file_path"] = file_path
            cf_fp["Files"]["in_filename"] = ntpath.split(file_name)[1]
            if "plot_path" in cf_l5["Files"]:
                cf_fp["Files"]["plot_path"] = cf_l5["Files"]["plot_path"]
            else:
                cf_fp["Files"]["plot_path"] = file_path[:file_path.index("Data")] + "Plots/"
            if "Options" not in cf_fp:
                cf_fp["Options"] = {}
            cf_fp["Options"]["call_mode"] = "batch"
            cf_fp["Options"]["show_plots"] = "No"
            msg = "Doing fingerprint plots using " + cf_fp["Files"]["in_filename"]
            logger.info(msg)
            pfp_plot.plot_fingerprint(cf_fp)
            msg = "Finished fingerprint plots"
            logger.info(msg)
            logger.info("")
        except Exception:
            msg = "Error occurred during L5 with " + cf_file_name[1]
            logger.error(msg)
            error_message = traceback.format_exc()
            logger.error(error_message)
            continue
    return
Example #8
def do_run_l3(cfg=None):
    """
    Purpose:
     Top level routine for running the L3 post-processing.
    Usage:
     pfp_top_level.do_run_l3()
    Side effects:
     Creates an L3 netCDF file.
    Author: PRI
    Date: Back in the day
    Mods:
     December 2017: rewrite for use with new GUI
    """
    try:
        logger.info("Starting L3 processing")
        if not cfg:
            cfg = pfp_io.load_controlfile()
            if len(cfg) == 0:
                logger.info("Quitting L3 processing (no control file)")
                return
        in_filepath = pfp_io.get_infilenamefromcf(cfg)
        if not pfp_utils.file_exists(in_filepath):
            in_filename = os.path.split(in_filepath)
            logger.error("File " + in_filename[1] + " not found")
            return
        ds2 = pfp_io.nc_read_series(in_filepath)
        ds3 = pfp_levels.l3qc(cfg, ds2)
        if ds3.returncodes["value"] != 0:
            logger.error("An error occurred during L3 processing")
            logger.error("")
            return
        out_filepath = pfp_io.get_outfilenamefromcf(cfg)
        nc_file = pfp_io.nc_open_write(out_filepath)
        pfp_io.nc_write_series(nc_file, ds3)
        logger.info("Finished L3 processing")
        if "Plots" in list(cfg.keys()):
            logger.info("Plotting L3 data")
            for nFig in cfg['Plots'].keys():
                plt_cf = cfg['Plots'][str(nFig)]
                if 'Type' in plt_cf.keys():
                    if str(plt_cf['Type']).lower() == 'xy':
                        pfp_plot.plotxy(cfg, nFig, plt_cf, ds2, ds3)
                    else:
                        pfp_plot.plottimeseries(cfg, nFig, ds2, ds3)
                else:
                    pfp_plot.plottimeseries(cfg, nFig, ds2, ds3)
            logger.info("Finished plotting L3 data")
    except Exception:
        msg = " Error running L3, see below for details ..."
        logger.error(msg)
        error_message = traceback.format_exc()
        logger.error(error_message)
    logger.info("")
    return
Example #9
def do_run_l5(main_gui, cfg):
    """
    Purpose:
     Top level routine for running the L5 gap filling.
    Usage:
     pfp_top_level.do_run_l5()
    Side effects:
     Creates an L5 netCDF file with gap filled fluxes.
    Author: PRI
    Date: Back in the day
    Mods:
     December 2017: rewrite for use with new GUI
    """
    try:
        logger.info("Starting L5 processing")
        in_filepath = pfp_io.get_infilenamefromcf(cfg)
        if not pfp_utils.file_exists(in_filepath):
            in_filename = os.path.split(in_filepath)
            logger.error("File " + in_filename[1] + " not found")
            return
        ds4 = pfp_io.nc_read_series(in_filepath)
        if ds4.returncodes["value"] != 0: return
        #ds4.globalattributes['controlfile_name'] = cfg['controlfile_name']
        sitename = ds4.globalattributes['site_name']
        if "Options" not in cfg:
            cfg["Options"] = {}
        cfg["Options"]["call_mode"] = "interactive"
        ds5 = pfp_levels.l5qc(main_gui, cfg, ds4)
        # check to see if all went well
        if ds5.returncodes["value"] != 0:
            # tell the user something went wrong
            logger.info("Quitting L5: " + sitename)
            # delete the output file if it exists
            out_filepath = pfp_io.get_outfilenamefromcf(cfg)
            if os.path.isfile(out_filepath):
                os.remove(out_filepath)
        else:
            # tell the user we are finished
            logger.info("Finished L5: " + sitename)
            # get the output file name from the control file
            out_filepath = pfp_io.get_outfilenamefromcf(cfg)
            # open it for writing
            nc_file = pfp_io.nc_open_write(out_filepath)
            if nc_file is None: return
            # write the output file
            pfp_io.nc_write_series(nc_file, ds5)
            logger.info("Finished saving L5 gap filled data")
        logger.info("")
    except Exception:
        msg = " Error running L5, see below for details ..."
        logger.error(msg)
        error_message = traceback.format_exc()
        logger.error(error_message)
    return
Example #10
def do_L4_batch(cf_level):
    for i in cf_level.keys():
        if not os.path.isfile(cf_level[i]):
            msg = " Control file " + cf_level[i] + " not found"
            logger.error(msg)
            continue
        cf_file_name = os.path.split(cf_level[i])
        msg = "Starting L4 processing with " + cf_file_name[1]
        logger.info(msg)
        try:
            cf_l4 = pfp_io.get_controlfilecontents(cf_level[i])
            if "Options" not in cf_l4:
                cf_l4["Options"] = {}
            cf_l4["Options"]["call_mode"] = "batch"
            cf_l4["Options"]["show_plots"] = "No"
            infilename = pfp_io.get_infilenamefromcf(cf_l4)
            ds3 = pfp_io.nc_read_series(infilename)
            ds4 = pfp_levels.l4qc(None, cf_l4, ds3)
            outfilename = pfp_io.get_outfilenamefromcf(cf_l4)
            outputlist = pfp_io.get_outputlistfromcf(cf_l4, "nc")
            ncFile = pfp_io.nc_open_write(outfilename)
            pfp_io.nc_write_series(ncFile, ds4, outputlist=outputlist)
            msg = "Finished L4 processing with " + cf_file_name[1]
            logger.info(msg)
            # now plot the fingerprints for the L4 files
            cf_fp = pfp_io.get_controlfilecontents("controlfiles/standard/fingerprint.txt")
            if "Files" not in cf_fp:
                cf_fp["Files"] = {}
            file_name = pfp_io.get_outfilenamefromcf(cf_l4)
            file_path = ntpath.split(file_name)[0] + "/"
            cf_fp["Files"]["file_path"] = file_path
            cf_fp["Files"]["in_filename"] = ntpath.split(file_name)[1]
            if "plot_path" in cf_l4["Files"]:
                cf_fp["Files"]["plot_path"] = cf_l4["Files"]["plot_path"]
            else:
                cf_fp["Files"]["plot_path"] = file_path[:file_path.index("Data")] + "Plots/"
            if "Options" not in cf_fp:
                cf_fp["Options"] = {}
            cf_fp["Options"]["call_mode"] = "batch"
            cf_fp["Options"]["show_plots"] = "No"
            msg = "Doing fingerprint plots using " + cf_fp["Files"]["in_filename"]
            logger.info(msg)
            pfp_plot.plot_fingerprint(cf_fp)
            logger.info("Finished fingerprint plots")
            logger.info("")
        except Exception:
            msg = "Error occurred during L4 with " + cf_file_name[1]
            logger.error(msg)
            error_message = traceback.format_exc()
            logger.error(error_message)
            continue
    return
Example #11
def do_run_l6(main_gui, cfg=None):
    """
    Purpose:
     Top level routine for running the L6 gap filling.
    Usage:
     pfp_top_level.do_run_l6()
    Side effects:
     Creates an L6 netCDF file with NEE partitioned into GPP and ER.
    Author: PRI
    Date: Back in the day
    Mods:
     December 2017: rewrite for use with new GUI
    """
    try:
        logger.info("Starting L6 processing")
        if not cfg:
            cfg = pfp_io.load_controlfile(path='controlfiles')
            if len(cfg) == 0:
                logger.info("Quitting L6 processing (no control file)")
                return
        in_filepath = pfp_io.get_infilenamefromcf(cfg)
        if not pfp_utils.file_exists(in_filepath):
            in_filename = os.path.split(in_filepath)
            logger.error("File " + in_filename[1] + " not found")
            return
        ds5 = pfp_io.nc_read_series(in_filepath)
        #ds5.globalattributes['controlfile_name'] = cfg['controlfile_name']
        sitename = ds5.globalattributes['site_name']
        if "Options" not in cfg:
            cfg["Options"] = {}
        cfg["Options"]["call_mode"] = "interactive"
        cfg["Options"]["show_plots"] = "Yes"
        ds6 = pfp_levels.l6qc(main_gui, cfg, ds5)
        if ds6.returncodes["value"] != 0:
            logger.info("Quitting L6: " + sitename)
        else:
            logger.info("Finished L6: " + sitename)
            out_filepath = pfp_io.get_outfilenamefromcf(cfg)
            nc_file = pfp_io.nc_open_write(out_filepath)
            pfp_io.nc_write_series(nc_file, ds6)
            logger.info("Finished saving L6 gap filled data")
        logger.info("")
    except Exception:
        msg = " Error running L6, see below for details ..."
        logger.error(msg)
        error_message = traceback.format_exc()
        logger.error(error_message)
    return
Example #12
def nc_update(cfg):
    """
    Purpose:
     Update a PFP-style netCDF file by changing variable names and attributes.
    Usage:
    Author: PRI
    Date: October 2018
    """
    nc_file_path = pfp_io.get_infilenamefromcf(cfg)
    ds = pfp_io.nc_read_series(nc_file_path)
    change_variable_names(cfg, ds)
    copy_ws_wd(ds)
    remove_variables(cfg, ds)
    change_global_attributes(cfg, ds)
    nc_file = pfp_io.nc_open_write(nc_file_path)
    pfp_io.nc_write_series(nc_file, ds)
    return 0
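A minimal sketch of driving nc_update from a control file; the path is hypothetical and the control file is assumed to contain the [Files] section plus the rename/remove/attribute maps the helper functions above expect.

cfg = pfp_io.get_controlfilecontents("controlfiles/Example/nc_update.txt")
nc_update(cfg)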
Example #13
def compare_eddypro():
    epname = pfp_io.get_filename_dialog(title='Choose an EddyPro full output file')
    ofname = pfp_io.get_filename_dialog(title='Choose an L3 output file')

    ds_ep = pfp_io.read_eddypro_full(epname)
    ds_of = pfp_io.nc_read_series(ofname)

    dt_ep = ds_ep.series['DateTime']['Data']
    dt_of = ds_of.series['DateTime']['Data']

    start_datetime = max([dt_ep[0],dt_of[0]])
    end_datetime = min([dt_ep[-1],dt_of[-1]])

    si_of = pfp_utils.GetDateIndex(dt_of, str(start_datetime), ts=30, default=0, match='exact')
    ei_of = pfp_utils.GetDateIndex(dt_of, str(end_datetime), ts=30, default=len(dt_of), match='exact')
    si_ep = pfp_utils.GetDateIndex(dt_ep, str(start_datetime), ts=30, default=0, match='exact')
    ei_ep = pfp_utils.GetDateIndex(dt_ep, str(end_datetime), ts=30, default=len(dt_ep), match='exact')

    us_of = pfp_utils.GetVariable(ds_of,'ustar',start=si_of,end=ei_of)
    us_ep = pfp_utils.GetVariable(ds_ep,'ustar',start=si_ep,end=ei_ep)
    Fh_of = pfp_utils.GetVariable(ds_of,'Fh',start=si_of,end=ei_of)
    Fh_ep = pfp_utils.GetVariable(ds_ep,'Fh',start=si_ep,end=ei_ep)
    Fe_of = pfp_utils.GetVariable(ds_of,'Fe',start=si_of,end=ei_of)
    Fe_ep = pfp_utils.GetVariable(ds_ep,'Fe',start=si_ep,end=ei_ep)
    Fc_of = pfp_utils.GetVariable(ds_of,'Fc',start=si_of,end=ei_of)
    Fc_ep = pfp_utils.GetVariable(ds_ep,'Fc',start=si_ep,end=ei_ep)
    # copy the range check values from the OFQC attributes to the EP attributes
    for of, ep in zip([us_of, Fh_of, Fe_of, Fc_of], [us_ep, Fh_ep, Fe_ep, Fc_ep]):
        for item in ["rangecheck_upper", "rangecheck_lower"]:
            if item in of["Attr"]:
                ep["Attr"][item] = of["Attr"][item]
    # apply QC to the EddyPro data
    pfp_ck.ApplyRangeCheckToVariable(us_ep)
    pfp_ck.ApplyRangeCheckToVariable(Fc_ep)
    pfp_ck.ApplyRangeCheckToVariable(Fe_ep)
    pfp_ck.ApplyRangeCheckToVariable(Fh_ep)
    # plot the comparison
    plt.ion()
    fig = plt.figure(1,figsize=(8,8))
    pfp_plot.xyplot(us_ep["Data"],us_of["Data"],sub=[2,2,1],regr=2,xlabel='u*_EP (m/s)',ylabel='u*_OF (m/s)')
    pfp_plot.xyplot(Fh_ep["Data"],Fh_of["Data"],sub=[2,2,2],regr=2,xlabel='Fh_EP (W/m2)',ylabel='Fh_OF (W/m2)')
    pfp_plot.xyplot(Fe_ep["Data"],Fe_of["Data"],sub=[2,2,3],regr=2,xlabel='Fe_EP (W/m2)',ylabel='Fe_OF (W/m2)')
    pfp_plot.xyplot(Fc_ep["Data"],Fc_of["Data"],sub=[2,2,4],regr=2,xlabel='Fc_EP (umol/m2/s)',ylabel='Fc_OF (umol/m2/s)')
    plt.tight_layout()
    plt.draw()
    plt.ioff()
Example #14
def do_run_l4(main_gui, cfg):
    """
    Purpose:
     Top level routine for running the L4 gap filling.
    Usage:
     pfp_top_level.do_run_l4()
    Side effects:
     Creates an L4 netCDF file with gap filled meteorology.
    Author: PRI
    Date: Back in the day
    Mods:
     December 2017: rewrite for use with new GUI
    """
    try:
        logger.info("Starting L4 processing")
        in_filepath = pfp_io.get_infilenamefromcf(cfg)
        if not pfp_utils.file_exists(in_filepath):
            in_filename = os.path.split(in_filepath)
            logger.error("File " + in_filename[1] + " not found")
            return
        ds3 = pfp_io.nc_read_series(in_filepath)
        if ds3.returncodes["value"] != 0: return
        #ds3.globalattributes['controlfile_name'] = cfg['controlfile_name']
        sitename = ds3.globalattributes['site_name']
        if "Options" not in cfg:
            cfg["Options"] = {}
        cfg["Options"]["call_mode"] = "interactive"
        ds4 = pfp_levels.l4qc(main_gui, cfg, ds3)
        if ds4.returncodes["value"] != 0:
            logger.info("Quitting L4: " + sitename)
        else:
            logger.info("Finished L4: " + sitename)
            out_filepath = pfp_io.get_outfilenamefromcf(cfg)
            nc_file = pfp_io.nc_open_write(out_filepath)
            if nc_file is None: return
            pfp_io.nc_write_series(nc_file, ds4)  # save the L4 data
            logger.info("Finished saving L4 gap filled data")
        logger.info("")
    except Exception:
        msg = " Error running L4, see below for details ..."
        logger.error(msg)
        error_message = traceback.format_exc()
        logger.error(error_message)
    return
Example #15
def do_L2_batch(cf_level):
    for i in cf_level.keys():
        cf_file_name = os.path.split(cf_level[i])
        msg = "Starting L2 processing with " + cf_file_name[1]
        logger.info(msg)
        try:
            cf = pfp_io.get_controlfilecontents(cf_level[i])
            infilename = pfp_io.get_infilenamefromcf(cf)
            ds1 = pfp_io.nc_read_series(infilename)
            ds2 = pfp_levels.l2qc(cf, ds1)
            outfilename = pfp_io.get_outfilenamefromcf(cf)
            ncFile = pfp_io.nc_open_write(outfilename)
            pfp_io.nc_write_series(ncFile, ds2)
            msg = "Finished L2 processing with " + cf_file_name[1]
            logger.info(msg)
            logger.info("")
        except Exception:
            msg = "Error occurred during L2 processing with " + cf_file_name[1]
            logger.error(msg)
            error_message = traceback.format_exc()
            logger.error(error_message)
            continue
    return
Example #16
def climatology(cf):
    nc_filename = pfp_io.get_infilenamefromcf(cf)
    if not pfp_utils.file_exists(nc_filename): return
    xl_filename = nc_filename.replace(".nc","_Climatology.xls")
    xlFile = xlwt.Workbook()
    ds = pfp_io.nc_read_series(nc_filename)
    # calculate Fa if it is not in the data structure
    got_Fa = True
    if "Fa" not in ds.series.keys():
        if "Fn" in ds.series.keys() and "Fg" in ds.series.keys():
            pfp_ts.CalculateAvailableEnergy(ds,Fa_out='Fa',Fn_in='Fn',Fg_in='Fg')
        else:
            got_Fa = False
            logger.warning(" Fn or Fg not in data structure")
    # get the time step
    ts = int(ds.globalattributes['time_step'])
    # get the site name
    SiteName = ds.globalattributes['site_name']
    # get the datetime series
    dt = ds.series['DateTime']['Data']
    Hdh = numpy.array([(d.hour + d.minute/float(60)) for d in dt])
    Month = numpy.array([d.month for d in dt])
    # get the initial start and end dates
    StartDate = str(dt[0])
    EndDate = str(dt[-1])
    # find the start index of the first whole day (time=00:30)
    si = pfp_utils.GetDateIndex(dt,StartDate,ts=ts,default=0,match='startnextday')
    # find the end index of the last whole day (time=00:00)
    ei = pfp_utils.GetDateIndex(dt,EndDate,ts=ts,default=-1,match='endpreviousday')
    # get local views of the datetime series
    ldt = dt[si:ei+1]
    Hdh = Hdh[si:ei+1]
    Month = Month[si:ei+1]
    # get the number of time steps in a day and the number of days in the data
    ntsInDay = int(24.0*60.0/float(ts))
    nDays = len(ldt)//ntsInDay

    for ThisOne in cf['Variables'].keys():
        if "AltVarName" in cf['Variables'][ThisOne].keys(): ThisOne = cf['Variables'][ThisOne]["AltVarName"]
        if ThisOne in ds.series.keys():
            logger.info(" Doing climatology for "+ThisOne)
            data,f,a = pfp_utils.GetSeriesasMA(ds,ThisOne,si=si,ei=ei)
            if numpy.ma.count(data)==0:
                logger.warning(" No data for "+ThisOne+", skipping ...")
                continue
            fmt_str = get_formatstring(cf,ThisOne,fmt_def='')
            xlSheet = xlFile.add_sheet(ThisOne)
            Av_all = do_diurnalstats(Month,Hdh,data,xlSheet,format_string=fmt_str,ts=ts)
            # now do it for each day
            # we want to preserve any data that has been truncated by the use of the "startnextday"
            # and "endpreviousday" match options used above.  Here we revisit the start and end indices
            # and adjust these backwards and forwards respectively if data has been truncated.
            nDays_daily = nDays
            ei_daily = ei
            si_daily = si
            sdate = ldt[0]
            edate = ldt[-1]
            # is there data after the current end date?
            if dt[-1]>ldt[-1]:
                # if so, push the end index forward by 1 day so it is included
                ei_daily = ei + ntsInDay
                nDays_daily = nDays_daily + 1
                edate = ldt[-1]+datetime.timedelta(days=1)
            # is there data before the current start date?
            if dt[0]<ldt[0]:
                # if so, push the start index back by 1 day so it is included
                si_daily = si - ntsInDay
                nDays_daily = nDays_daily + 1
                sdate = ldt[0]-datetime.timedelta(days=1)
            # get the data and use the "pad" option to add missing data if required to
            # complete the extra days
            data,f,a = pfp_utils.GetSeriesasMA(ds,ThisOne,si=si_daily,ei=ei_daily,mode="pad")
            data_daily = data.reshape(nDays_daily,ntsInDay)
            xlSheet = xlFile.add_sheet(ThisOne+'(day)')
            write_data_1columnpertimestep(xlSheet, data_daily, ts, startdate=sdate, format_string=fmt_str)
            data_daily_i = do_2dinterpolation(data_daily)
            xlSheet = xlFile.add_sheet(ThisOne+'i(day)')
            write_data_1columnpertimestep(xlSheet, data_daily_i, ts, startdate=sdate, format_string=fmt_str)
        else:
            logger.warning(" Requested variable "+ThisOne+" not in data structure")
            continue
    logger.info(" Saving Excel file "+os.path.split(xl_filename)[1])
    xlFile.save(xl_filename)
Example #17
for site in sites:
    sp = os.path.join(rp, site, "Data", "Portal")
    op = os.path.join(rp, site, "Data", "Processed")
    if not os.path.isdir(sp):
        print(sp + " , skipping site ...")
        continue
    files = sorted([f for f in os.listdir(sp) if ("L3" in f and ".nc" in f)])
    if len(files) == 0:
        print("No files found in " + sp + " , skipping ...")
        continue
    for fn in files:
        ifp = os.path.join(sp, fn)
        print("Converting " + fn)
        cfg["Files"]["in_filename"] = ifp
        # read the input file
        ds1 = pfp_io.nc_read_series(ifp)
        # update the variable names
        change_variable_names(cfg, ds1)
        # make sure there are Ws and Wd series
        copy_ws_wd(ds1)
        # make sure we have all the variables we want ...
        ds2 = include_variables(cfg, ds1)
        # ... but not the ones we don't
        exclude_variables(cfg, ds2)
        # update the global attributes
        change_global_attributes(cfg, ds2)
        # update the variable attributes
        change_variable_attributes(cfg, ds2)
        # Fc single point storage
        consistent_Fc_storage(cfg, ds2, site)
        ofp = os.path.join(op, fn)
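        # the snippet ends before the converted file is written; a hedged
        # completion, following the write pattern used in example #18:
        nc_file = pfp_io.nc_open_write(ofp)
        pfp_io.nc_write_series(nc_file, ds2)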
Example #18

cfg_name = os.path.join("..", "controlfiles", "standard", "map_old_to_new.txt")
if os.path.exists(cfg_name):
    cfg = ConfigObj(cfg_name)
else:
    print(" 'map_old_to_new' control file not found")

rp = os.path.join(os.sep, "mnt", "OzFlux", "Sites")
sites = sorted(
    [d for d in os.listdir(rp) if os.path.isdir(os.path.join(rp, d))])

for site in sites:
    sp = os.path.join(rp, site, "Data", "Portal")
    if not os.path.isdir(sp):
        print(sp + " , skipping site ...")
        continue
    files = sorted([f for f in os.listdir(sp) if ("L3" in f and ".nc" in f)])
    if len(files) == 0:
        print("No files found in " + sp + " , skipping ...")
        continue
    for fn in files:
        fp = os.path.join(sp, fn)
        print("Converting " + fn)
        ds = pfp_io.nc_read_series(fp)
        change_variable_names(cfg, ds)
        copy_ws_wd(ds)
        remove_variables(cfg, ds)
        change_global_attributes(cfg, ds)
        nf = pfp_io.nc_open_write(fp)
        pfp_io.nc_write_series(nf, ds)
Example #19
def climatology(cf):
    nc_filename = pfp_io.get_infilenamefromcf(cf)
    if not pfp_utils.file_exists(nc_filename): return
    xl_filename = nc_filename.replace(".nc", "_Climatology.xls")
    xlFile = xlwt.Workbook()
    ds = pfp_io.nc_read_series(nc_filename)
    # calculate Fa if it is not in the data structure
    got_Fa = True
    if "Fa" not in ds.series.keys():
        if "Fn" in ds.series.keys() and "Fg" in ds.series.keys():
            pfp_ts.CalculateAvailableEnergy(ds,
                                            Fa_out='Fa',
                                            Fn_in='Fn',
                                            Fg_in='Fg')
        else:
            got_Fa = False
            logger.warning(" Fn or Fg not in data structure")
    # get the time step
    ts = int(ds.globalattributes['time_step'])
    # get the site name
    SiteName = ds.globalattributes['site_name']
    # get the datetime series
    dt = ds.series['DateTime']['Data']
    Hdh = numpy.array([(d.hour + d.minute / float(60)) for d in dt])
    Month = numpy.array([d.month for d in dt])
    # get the initial start and end dates
    StartDate = str(dt[0])
    EndDate = str(dt[-1])
    # find the start index of the first whole day (time=00:30)
    si = pfp_utils.GetDateIndex(dt,
                                StartDate,
                                ts=ts,
                                default=0,
                                match='startnextday')
    # find the end index of the last whole day (time=00:00)
    ei = pfp_utils.GetDateIndex(dt,
                                EndDate,
                                ts=ts,
                                default=-1,
                                match='endpreviousday')
    # get local views of the datetime series
    ldt = dt[si:ei + 1]
    Hdh = Hdh[si:ei + 1]
    Month = Month[si:ei + 1]
    # get the number of time steps in a day and the number of days in the data
    ntsInDay = int(24.0 * 60.0 / float(ts))
    nDays = len(ldt) // ntsInDay

    for ThisOne in cf['Variables'].keys():
        if "AltVarName" in cf['Variables'][ThisOne].keys():
            ThisOne = cf['Variables'][ThisOne]["AltVarName"]
        if ThisOne in ds.series.keys():
            logger.info(" Doing climatology for " + ThisOne)
            data, f, a = pfp_utils.GetSeriesasMA(ds, ThisOne, si=si, ei=ei)
            if numpy.ma.count(data) == 0:
                logger.warning(" No data for " + ThisOne + ", skipping ...")
                continue
            fmt_str = get_formatstring(cf, ThisOne, fmt_def='')
            xlSheet = xlFile.add_sheet(ThisOne)
            Av_all = do_diurnalstats(Month,
                                     Hdh,
                                     data,
                                     xlSheet,
                                     format_string=fmt_str,
                                     ts=ts)
            # now do it for each day
            # we want to preserve any data that has been truncated by the use of the "startnextday"
            # and "endpreviousday" match options used above.  Here we revisit the start and end indices
            # and adjust these backwards and forwards respectively if data has been truncated.
            nDays_daily = nDays
            ei_daily = ei
            si_daily = si
            sdate = ldt[0]
            edate = ldt[-1]
            # is there data after the current end date?
            if dt[-1] > ldt[-1]:
                # if so, push the end index forward by 1 day so it is included
                ei_daily = ei + ntsInDay
                nDays_daily = nDays_daily + 1
                edate = ldt[-1] + datetime.timedelta(days=1)
            # is there data before the current start date?
            if dt[0] < ldt[0]:
                # if so, push the start index back by 1 day so it is included
                si_daily = si - ntsInDay
                nDays_daily = nDays_daily + 1
                sdate = ldt[0] - datetime.timedelta(days=1)
            # get the data and use the "pad" option to add missing data if required to
            # complete the extra days
            data, f, a = pfp_utils.GetSeriesasMA(ds,
                                                 ThisOne,
                                                 si=si_daily,
                                                 ei=ei_daily,
                                                 mode="pad")
            data_daily = data.reshape(nDays_daily, ntsInDay)
            xlSheet = xlFile.add_sheet(ThisOne + '(day)')
            write_data_1columnpertimestep(xlSheet,
                                          data_daily,
                                          ts,
                                          startdate=sdate,
                                          format_string=fmt_str)
            data_daily_i = do_2dinterpolation(data_daily)
            xlSheet = xlFile.add_sheet(ThisOne + 'i(day)')
            write_data_1columnpertimestep(xlSheet,
                                          data_daily_i,
                                          ts,
                                          startdate=sdate,
                                          format_string=fmt_str)
        elif ThisOne == "EF" and got_Fa:
            logger.info(" Doing evaporative fraction")
            EF = numpy.ma.zeros([48, 12]) + float(c.missing_value)
            Hdh, f, a = pfp_utils.GetSeriesasMA(ds, 'Hdh', si=si, ei=ei)
            Fa, f, a = pfp_utils.GetSeriesasMA(ds, 'Fa', si=si, ei=ei)
            Fe, f, a = pfp_utils.GetSeriesasMA(ds, 'Fe', si=si, ei=ei)
            for m in range(1, 13):
                mi = numpy.where(Month == m)[0]
                Fa_Num, Hr, Fa_Av, Sd, Mx, Mn = get_diurnalstats(
                    Hdh[mi], Fa[mi], ts)
                Fe_Num, Hr, Fe_Av, Sd, Mx, Mn = get_diurnalstats(
                    Hdh[mi], Fe[mi], ts)
                index = numpy.ma.where((Fa_Num > 4) & (Fe_Num > 4))
                EF[:, m - 1][index] = Fe_Av[index] / Fa_Av[index]
            # reject EF values greater than upper limit or less than lower limit
            upr, lwr = get_rangecheck_limit(cf, 'EF')
            EF = numpy.ma.filled(
                numpy.ma.masked_where((EF > upr) | (EF < lwr), EF),
                float(c.missing_value))
            # write the EF to the Excel file
            xlSheet = xlFile.add_sheet('EF')
            write_data_1columnpermonth(xlSheet, EF, ts, format_string='0.00')
            # do the 2D interpolation to fill missing EF values
            EFi = do_2dinterpolation(EF)
            xlSheet = xlFile.add_sheet('EFi')
            write_data_1columnpermonth(xlSheet, EFi, ts, format_string='0.00')
            # now do EF for each day
            Fa, f, a = pfp_utils.GetSeriesasMA(ds, 'Fa', si=si, ei=ei)
            Fe, f, a = pfp_utils.GetSeriesasMA(ds, 'Fe', si=si, ei=ei)
            EF = Fe / Fa
            EF = numpy.ma.filled(
                numpy.ma.masked_where((EF > upr) | (EF < lwr), EF),
                float(c.missing_value))
            EF_daily = EF.reshape(nDays, ntsInDay)
            xlSheet = xlFile.add_sheet('EF(day)')
            write_data_1columnpertimestep(xlSheet,
                                          EF_daily,
                                          ts,
                                          startdate=ldt[0],
                                          format_string='0.00')
            EFi = do_2dinterpolation(EF_daily)
            xlSheet = xlFile.add_sheet('EFi(day)')
            write_data_1columnpertimestep(xlSheet,
                                          EFi,
                                          ts,
                                          startdate=ldt[0],
                                          format_string='0.00')
        elif ThisOne == "BR":
            logger.info(" Doing Bowen ratio")
            BR = numpy.ma.zeros([48, 12]) + float(c.missing_value)
            Fe, f, a = pfp_utils.GetSeriesasMA(ds, 'Fe', si=si, ei=ei)
            Fh, f, a = pfp_utils.GetSeriesasMA(ds, 'Fh', si=si, ei=ei)
            for m in range(1, 13):
                mi = numpy.where(Month == m)[0]
                Fh_Num, Hr, Fh_Av, Sd, Mx, Mn = get_diurnalstats(
                    Hdh[mi], Fh[mi], ts)
                Fe_Num, Hr, Fe_Av, Sd, Mx, Mn = get_diurnalstats(
                    Hdh[mi], Fe[mi], ts)
                index = numpy.ma.where((Fh_Num > 4) & (Fe_Num > 4))
                BR[:, m - 1][index] = Fh_Av[index] / Fe_Av[index]
            # reject BR values greater than upper limit or less than lower limit
            upr, lwr = get_rangecheck_limit(cf, 'BR')
            BR = numpy.ma.filled(
                numpy.ma.masked_where((BR > upr) | (BR < lwr), BR),
                float(c.missing_value))
            # write the BR to the Excel file
            xlSheet = xlFile.add_sheet('BR')
            write_data_1columnpermonth(xlSheet, BR, ts, format_string='0.00')
            # do the 2D interpolation to fill missing EF values
            BRi = do_2dinterpolation(BR)
            xlSheet = xlFile.add_sheet('BRi')
            write_data_1columnpermonth(xlSheet, BRi, ts, format_string='0.00')
            # now do BR for each day ...
            Fe, f, a = pfp_utils.GetSeriesasMA(ds, 'Fe', si=si, ei=ei)
            Fh, f, a = pfp_utils.GetSeriesasMA(ds, 'Fh', si=si, ei=ei)
            BR = Fh / Fe
            BR = numpy.ma.filled(
                numpy.ma.masked_where((BR > upr) | (BR < lwr), BR),
                float(c.missing_value))
            BR_daily = BR.reshape(nDays, ntsInDay)
            xlSheet = xlFile.add_sheet('BR(day)')
            write_data_1columnpertimestep(xlSheet,
                                          BR_daily,
                                          ts,
                                          startdate=ldt[0],
                                          format_string='0.00')
            BRi = do_2dinterpolation(BR_daily)
            xlSheet = xlFile.add_sheet('BRi(day)')
            write_data_1columnpertimestep(xlSheet,
                                          BRi,
                                          ts,
                                          startdate=ldt[0],
                                          format_string='0.00')
        elif ThisOne == "WUE":
            logger.info(" Doing ecosystem WUE")
            WUE = numpy.ma.zeros([48, 12]) + float(c.missing_value)
            Fe, f, a = pfp_utils.GetSeriesasMA(ds, 'Fe', si=si, ei=ei)
            Fc, f, a = pfp_utils.GetSeriesasMA(ds, 'Fc', si=si, ei=ei)
            for m in range(1, 13):
                mi = numpy.where(Month == m)[0]
                Fc_Num, Hr, Fc_Av, Sd, Mx, Mn = get_diurnalstats(
                    Hdh[mi], Fc[mi], ts)
                Fe_Num, Hr, Fe_Av, Sd, Mx, Mn = get_diurnalstats(
                    Hdh[mi], Fe[mi], ts)
                index = numpy.ma.where((Fc_Num > 4) & (Fe_Num > 4))
                WUE[:, m - 1][index] = Fc_Av[index] / Fe_Av[index]
            # reject WUE values greater than upper limit or less than lower limit
            upr, lwr = get_rangecheck_limit(cf, 'WUE')
            WUE = numpy.ma.filled(
                numpy.ma.masked_where((WUE > upr) | (WUE < lwr), WUE),
                float(c.missing_value))
            # write the WUE to the Excel file
            xlSheet = xlFile.add_sheet('WUE')
            write_data_1columnpermonth(xlSheet,
                                       WUE,
                                       ts,
                                       format_string='0.00000')
            # do the 2D interpolation to fill missing EF values
            WUEi = do_2dinterpolation(WUE)
            xlSheet = xlFile.add_sheet('WUEi')
            write_data_1columnpermonth(xlSheet,
                                       WUEi,
                                       ts,
                                       format_string='0.00000')
            # now do WUE for each day ...
            Fe, f, a = pfp_utils.GetSeriesasMA(ds, 'Fe', si=si, ei=ei)
            Fc, f, a = pfp_utils.GetSeriesasMA(ds, 'Fc', si=si, ei=ei)
            WUE = Fc / Fe
            WUE = numpy.ma.filled(
                numpy.ma.masked_where((WUE > upr) | (WUE < lwr), WUE),
                float(c.missing_value))
            WUE_daily = WUE.reshape(nDays, ntsInDay)
            xlSheet = xlFile.add_sheet('WUE(day)')
            write_data_1columnpertimestep(xlSheet,
                                          WUE_daily,
                                          ts,
                                          startdate=ldt[0],
                                          format_string='0.00000')
            WUEi = do_2dinterpolation(WUE_daily)
            xlSheet = xlFile.add_sheet('WUEi(day)')
            write_data_1columnpertimestep(xlSheet,
                                          WUEi,
                                          ts,
                                          startdate=ldt[0],
                                          format_string='0.00000')
        else:
            logger.warning(" Requested variable " + ThisOne +
                           " not in data structure")
            continue
    logger.info(" Saving Excel file " + os.path.split(xl_filename)[1])
    xlFile.save(xl_filename)
Example #20
def gfalternate_createdict(cf, ds, series, ds_alt):
    """
    Purpose:
     Creates a dictionary in ds to hold information about the alternate data used to gap fill the tower data.
    Usage:
    Side effects:
    Author: PRI
    Date: August 2014
    """
    # get the section of the control file containing the series
    section = pfp_utils.get_cfsection(cf, series=series, mode="quiet")
    # return without doing anything if the series isn't in a control file section
    if len(section)==0:
        msg = "GapFillFromAlternate: Series %s not found in control file, skipping ..." % series
        logger.error(msg)
        return
    # create the alternate directory in the data structure
    if "alternate" not in dir(ds):
        ds.alternate = {}
    # name of alternate output series in ds
    output_list = cf[section][series]["GapFillFromAlternate"].keys()
    # loop over the outputs listed in the control file
    for output in output_list:
        # create the dictionary keys for this output
        ds.alternate[output] = {}
        ds.alternate[output]["label_tower"] = series
        # source name
        ds.alternate[output]["source"] = cf[section][series]["GapFillFromAlternate"][output]["source"]
        # site name
        ds.alternate[output]["site_name"] = ds.globalattributes["site_name"]
        # alternate data file name
        # first, look in the [Files] section for a generic file name
        file_list = cf["Files"].keys()
        lower_file_list = [item.lower() for item in file_list]
        if ds.alternate[output]["source"].lower() in lower_file_list:
            # found a generic file name
            i = lower_file_list.index(ds.alternate[output]["source"].lower())
            ds.alternate[output]["file_name"] = cf["Files"][file_list[i]]
        else:
            # no generic file name found, look for a file name in the variable section
            ds.alternate[output]["file_name"] = cf[section][series]["GapFillFromAlternate"][output]["file_name"]
        # if the file has not already been read, do it now
        if ds.alternate[output]["file_name"] not in ds_alt:
            ds_alternate = pfp_io.nc_read_series(ds.alternate[output]["file_name"],fixtimestepmethod="round")
            gfalternate_matchstartendtimes(ds,ds_alternate)
            ds_alt[ds.alternate[output]["file_name"]] = ds_alternate
        # get the type of fit
        ds.alternate[output]["fit_type"] = "OLS"
        if "fit" in cf[section][series]["GapFillFromAlternate"][output]:
            if cf[section][series]["GapFillFromAlternate"][output]["fit"].lower() in ["ols","ols_thru0","mrev","replace","rma","odr"]:
                ds.alternate[output]["fit_type"] = cf[section][series]["GapFillFromAlternate"][output]["fit"]
            else:
                logger.info("gfAlternate: unrecognised fit option for series %s, used OLS", output)
        # correct for lag?
        if "lag" in cf[section][series]["GapFillFromAlternate"][output]:
            if cf[section][series]["GapFillFromAlternate"][output]["lag"].lower() in ["no","false"]:
                ds.alternate[output]["lag"] = "no"
            elif cf[section][series]["GapFillFromAlternate"][output]["lag"].lower() in ["yes","true"]:
                ds.alternate[output]["lag"] = "yes"
            else:
                logger.info("gfAlternate: unrecognised lag option for series %s", output)
        else:
            ds.alternate[output]["lag"] = "yes"
        # choose specific alternate variable?
        if "usevars" in cf[section][series]["GapFillFromAlternate"][output]:
            ds.alternate[output]["usevars"] = ast.literal_eval(cf[section][series]["GapFillFromAlternate"][output]["usevars"])
        # alternate data variable name if different from name used in control file
        if "alternate_name" in cf[section][series]["GapFillFromAlternate"][output]:
            ds.alternate[output]["alternate_name"] = cf[section][series]["GapFillFromAlternate"][output]["alternate_name"]
        else:
            ds.alternate[output]["alternate_name"] = series
        # results of best fit for plotting later on
        ds.alternate[output]["results"] = {"startdate":[],"enddate":[],"No. points":[],"No. filled":[],
                                           "r":[],"Bias":[],"RMSE":[],"Frac Bias":[],"NMSE":[],
                                           "Avg (Tower)":[],"Avg (Alt)":[],
                                           "Var (Tower)":[],"Var (Alt)":[],"Var ratio":[]}
        # create an empty series in ds if the alternate output series doesn't exist yet
        if output not in ds.series.keys():
            data,flag,attr = pfp_utils.MakeEmptySeries(ds,output)
            pfp_utils.CreateSeries(ds,output,data,flag,attr)
            pfp_utils.CreateSeries(ds,series+"_composite",data,flag,attr)
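gfalternate_createdict pulls everything it needs from the variable's GapFillFromAlternate subsection of the control file. A hedged sketch of that part of a ConfigObj-style control file; the section, series and output names are hypothetical, and only keys read above are shown:

[Fluxes]
    [[Fc]]
        [[[GapFillFromAlternate]]]
            [[[[Fc_aws]]]]
                source = AWS
                file_name = ../Sites/Example/Data/Example_AWS.nc
                fit = ols
                lag = yes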
Example #21
def CPD_run(cf):
    # Set input file and output path and create directories for plots and results
    path_out = cf['Files']['file_path']
    file_in = os.path.join(cf['Files']['file_path'],cf['Files']['in_filename'])
    #
    if "out_filename" in cf['Files']:
        file_out = os.path.join(cf['Files']['file_path'],cf['Files']['out_filename'])
    else:
        file_out = os.path.join(cf['Files']['file_path'],cf['Files']['in_filename'].replace(".nc","_CPD.xls"))
    plot_path = "plots/"
    if "plot_path" in cf["Files"]: plot_path = os.path.join(cf["Files"]["plot_path"],"CPD/")
    if not os.path.isdir(plot_path): os.makedirs(plot_path)
    results_path = path_out
    if not os.path.isdir(results_path): os.makedirs(results_path)
    # get a dictionary of the variable names
    var_list = cf["Variables"].keys()
    names = {}
    for item in var_list:
        if "AltVarName" in cf["Variables"][item].keys():
            names[item] = cf["Variables"][item]["AltVarName"]
        else:
            names[item] = item
    # read the netcdf file
    logger.info(' Reading netCDF file '+file_in)
    ds = pfp_io.nc_read_series(file_in)
    nrecs = int(ds.globalattributes["nc_nrecs"])
    ts = int(ds.globalattributes["time_step"])
    # get the datetime
    dt = ds.series["DateTime"]["Data"]
    # adjust the datetime so that the last time period in a year is correctly assigned.
    # e.g. last period for 2013 is 2014-01-01 00:00, here we make the year 2013
    dt = dt - datetime.timedelta(minutes=ts)
    # now get the data
    d = {}
    f = {}
    for item in names.keys():
        data,flag,attr = pfp_utils.GetSeries(ds,names[item])
        d[item] = np.where(data==c.missing_value,np.nan,data)
        f[item] = flag
    # set all data to NaNs if any flag is not 0 or 10
    for item in f.keys():
        idx = np.where((f[item] != 0) & (f[item] != 10))[0]
        if len(idx) != 0:
            for itemd in d.keys():
                d[itemd][idx] = np.nan
    d["Year"] = np.array([ldt.year for ldt in dt])
    df=pd.DataFrame(d,index=dt)
    # replace missing values with NaN (DataFrame.replace returns a new frame)
    df = df.replace(c.missing_value, np.nan)
    # Build dictionary of additional configs
    d={}
    d['radiation_threshold']=int(cf['Options']['Fsd_threshold'])
    d['num_bootstraps']=int(cf['Options']['Num_bootstraps'])
    d['flux_period']=int(ds.globalattributes["time_step"])
    d['site_name']=ds.globalattributes["site_name"]
    d["call_mode"]=pfp_utils.get_keyvaluefromcf(cf,["Options"],"call_mode",default="interactive",mode="quiet")
    d["show_plots"] = pfp_utils.get_optionskeyaslogical(cf, "show_plots", default=True)
    d['plot_tclass'] = False
    if cf['Options']['Plot_TClass'] == 'True': d['plot_tclass'] = True
    if cf['Options']['Output_plots']=='True':
        d['plot_path']=plot_path
    if cf['Options']['Output_results']=='True':
        d['results_path']=results_path
        d["file_out"]=file_out

    return df,d
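A minimal sketch of driving CPD_run, assuming a hypothetical control file that supplies the [Files], [Variables] and [Options] entries read above; it returns the data frame and the options dictionary used by the rest of the CPD code.

cf = pfp_io.get_controlfilecontents("controlfiles/Example/cpd.txt")
df, d = CPD_run(cf)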
Example #22
        cf = pfp_io.get_controlfilecontents(cfname)
        ds1 = pfp_ls.l1qc(cf)
        outfilename = pfp_io.get_outfilenamefromcf(cf)
        ncFile = pfp_io.nc_open_write(outfilename)
        pfp_io.nc_write_series(ncFile, ds1)
        logger.info('Finished L1 processing with ' + cf_file_name[1])
        logger.info('')
elif level.lower() == "l2":
    # L2 processing
    for i in cf_batch["Levels"][level].keys():
        cfname = cf_batch["Levels"][level][i]
        cf_file_name = os.path.split(cfname)
        logger.info('Starting L2 processing with ' + cf_file_name[1])
        cf = pfp_io.get_controlfilecontents(cfname)
        infilename = pfp_io.get_infilenamefromcf(cf)
        ds1 = pfp_io.nc_read_series(infilename)
        ds2 = pfp_ls.l2qc(cf, ds1)
        outfilename = pfp_io.get_outfilenamefromcf(cf)
        ncFile = pfp_io.nc_open_write(outfilename)
        pfp_io.nc_write_series(ncFile, ds2)
        logger.info('Finished L2 processing with ' + cf_file_name[1])
        logger.info('')
elif level.lower() == "l3":
    # L3 processing
    for i in cf_batch["Levels"][level].keys():
        cfname = cf_batch["Levels"][level][i]
        cf_file_name = os.path.split(cfname)
        logger.info('Starting L3 processing with ' + cf_file_name[1])
        cf = pfp_io.get_controlfilecontents(cfname)
        infilename = pfp_io.get_infilenamefromcf(cf)
        ds2 = pfp_io.nc_read_series(infilename)