Code example #1
def MRfromRH(ds, MR_out, RH_in, Ta_in, ps_in):
    """
    Purpose:
     Calculate H2O mixing ratio from RH.
    """
    nRecs = int(ds.globalattributes["nc_nrecs"])
    zeros = numpy.zeros(nRecs, dtype=numpy.int32)
    ones = numpy.ones(nRecs, dtype=numpy.int32)
    for item in [RH_in, Ta_in, ps_in]:
        if item not in ds.series.keys():
            msg = " MRfromRH: Requested series " + item + " not found, " + MR_out + " not calculated"
            logger.error(msg)
            return 0
    if MR_out in ds.series.keys():
        msg = " MRfromRH: Output series " + MR_out + " already exists, skipping ..."
        logger.error(msg)
        return 0
    RH_data, RH_flag, RH_attr = pfp_utils.GetSeriesasMA(ds, RH_in)
    Ta_data, Ta_flag, Ta_attr = pfp_utils.GetSeriesasMA(ds, Ta_in)
    Ah_data = pfp_mf.absolutehumidityfromRH(Ta_data, RH_data)
    ps_data, ps_flag, ps_attr = pfp_utils.GetSeriesasMA(ds, ps_in)
    MR_data = pfp_mf.h2o_mmolpmolfromgpm3(Ah_data, Ta_data, ps_data)
    MR_attr = pfp_utils.MakeAttributeDictionary(
        long_name="H2O mixing ratio calculated from " + RH_in + ", " + Ta_in +
        " and " + ps_in,
        height=RH_attr["height"],
        units="mmol/mol")
    flag = numpy.where(numpy.ma.getmaskarray(MR_data) == True, ones, zeros)
    pfp_utils.CreateSeries(ds, MR_out, MR_data, flag, MR_attr)
    return 1
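
The conversion chain above is RH to absolute humidity (pfp_mf.absolutehumidityfromRH) to mole fraction (pfp_mf.h2o_mmolpmolfromgpm3). As a rough standalone sketch of the ideal-gas conversion the second helper is assumed to perform (illustrative only; the exact formulation and constants in pfp_mf may differ):

import numpy

def h2o_mmolpmol_from_gpm3(ah, ta_c, ps_kpa):
    # ah: absolute humidity (g/m3), ta_c: air temperature (degC),
    # ps_kpa: pressure (kPa); returns the H2O mole fraction in mmol/mol
    Rv = 461.5                                 # gas constant for water vapour (J/kg/K)
    e = (ah / 1000.0) * Rv * (ta_c + 273.15)   # vapour pressure (Pa) from the ideal gas law
    return 1000.0 * e / (ps_kpa * 1000.0)      # mole fraction, Pa/Pa scaled to mmol/mol

print(h2o_mmolpmol_from_gpm3(10.0, 20.0, 100.0))  # ~13.5 mmol/mol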
Code example #2
def DateTimeFromDoY(ds, dt_out, Year_in, DoY_in, Hdh_in):
    """
    Purpose:
     Build a datetime series from the Year, DoY (day of year) and Hdh
     (decimal hour) series and discard records where any component is masked.
    """
    year, f, a = pfp_utils.GetSeriesasMA(ds, Year_in)
    doy, f, a = pfp_utils.GetSeriesasMA(ds, DoY_in)
    hdh, f, a = pfp_utils.GetSeriesasMA(ds, Hdh_in)
    idx = numpy.ma.where((numpy.ma.getmaskarray(year) == False)
                         & (numpy.ma.getmaskarray(doy) == False)
                         & (numpy.ma.getmaskarray(hdh) == False))[0]
    year = year[idx]
    doy = doy[idx]
    hdh = hdh[idx]
    hour = numpy.array(hdh, dtype=numpy.int32)
    minute = numpy.array((hdh - hour) * 60, dtype=numpy.int32)
    dt = [
        datetime.datetime(int(y), 1, 1, h, m) + datetime.timedelta(int(d) - 1)
        for y, d, h, m in zip(year, doy, hour, minute)
    ]
    nRecs = len(dt)
    ds.series[dt_out] = {}
    ds.series[dt_out]["Data"] = dt
    ds.series[dt_out]["Flag"] = numpy.zeros(len(dt), dtype=numpy.int32)
    ds.series[dt_out]["Attr"] = {}
    ds.series[dt_out]["Attr"]["long_name"] = "Datetime in local timezone"
    ds.series[dt_out]["Attr"]["units"] = "None"
    # now remove the data at records where the datetime components were masked
    series_list = list(ds.series.keys())
    if dt_out in series_list: series_list.remove(dt_out)
    for item in series_list:
        ds.series[item]["Data"] = ds.series[item]["Data"][idx]
        ds.series[item]["Flag"] = ds.series[item]["Flag"][idx]
    ds.globalattributes["nc_nrecs"] = nRecs
    return 1
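
The core of DateTimeFromDoY is the year/DoY/decimal-hour to datetime conversion. A minimal self-contained illustration of that step (sample values invented for the demo):

import datetime
import numpy

year = numpy.array([2015, 2015])
doy = numpy.array([1, 32])        # 1 January and 1 February
hdh = numpy.array([0.5, 13.5])    # decimal hours
hour = numpy.array(hdh, dtype=numpy.int32)
minute = numpy.array((hdh - hour) * 60, dtype=numpy.int32)
dt = [datetime.datetime(int(y), 1, 1, int(h), int(m)) + datetime.timedelta(int(d) - 1)
      for y, d, h, m in zip(year, doy, hour, minute)]
print(dt)  # [2015-01-01 00:30, 2015-02-01 13:30]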
Code example #3
def AhfromRH(ds, Ah_out, RH_in, Ta_in):
    """
    Purpose:
     Function to calculate absolute humidity given relative humidity and
     air temperature.  Absolute humidity is not calculated if any of the
     input series are missing or if the specified output series already
     exists in the data structure.
     The calculated absolute humidity is created as a new series in the
     data structure.
    Usage:
     pfp_func.AhfromRH(ds,"Ah_HMP_2m","RH_HMP_2m","Ta_HMP_2m")
    Author: PRI
    Date: September 2015
    """
    nRecs = int(ds.globalattributes["nc_nrecs"])
    zeros = numpy.zeros(nRecs,dtype=numpy.int32)
    ones = numpy.ones(nRecs,dtype=numpy.int32)
    for item in [RH_in,Ta_in]:
        if item not in ds.series.keys():
            msg = " AhfromRH: Requested series "+item+" not found, "+Ah_out+" not calculated"
            logger.error(msg)
            return 0
    if Ah_out in ds.series.keys():
        msg = " AhfromRH: Output series "+Ah_out+" already exists, skipping ..."
        logger.error(msg)
        return 0
    RH_data,RH_flag,RH_attr = pfp_utils.GetSeriesasMA(ds,RH_in)
    Ta_data,Ta_flag,Ta_attr = pfp_utils.GetSeriesasMA(ds,Ta_in)
    Ah_data = pfp_mf.absolutehumidityfromRH(Ta_data,RH_data)
    Ah_attr = pfp_utils.MakeAttributeDictionary(long_name="Absolute humidity calculated from "+RH_in+" and "+Ta_in,
                                              height=RH_attr["height"],
                                              units="g/m3")
    flag = numpy.where(numpy.ma.getmaskarray(Ah_data)==True,ones,zeros)
    pfp_utils.CreateSeries(ds,Ah_out,Ah_data,flag,Ah_attr)
    return 1
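
For reference, a standalone sketch of the conversion pfp_mf.absolutehumidityfromRH is assumed to perform, using a Magnus-type saturation vapour pressure and the ideal gas law (illustrative only; the constants and formulation in pfp_mf may differ):

import numpy

def absolute_humidity_from_rh(ta_c, rh):
    # ta_c: air temperature (degC), rh: relative humidity (%)
    es = 0.6106 * numpy.exp(17.27 * ta_c / (ta_c + 237.3))  # saturation vapour pressure (kPa)
    e = (rh / 100.0) * es                                   # actual vapour pressure (kPa)
    Rv = 461.5                                              # gas constant for water vapour (J/kg/K)
    return 1000.0 * e * 1000.0 / (Rv * (ta_c + 273.15))     # vapour density (g/m3)

print(absolute_humidity_from_rh(20.0, 50.0))  # ~8.6 g/m3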
Code example #4
def ImportSeries(cf,ds):
    # check to see if there is an Imports section
    if "Imports" not in cf.keys(): return
    # number of records
    nRecs = int(ds.globalattributes["nc_nrecs"])
    # get the start and end datetime
    ldt = ds.series["DateTime"]["Data"]
    start_date = ldt[0]
    end_date = ldt[-1]
    # loop over the series in the Imports section
    for label in cf["Imports"].keys():
        import_filename = pfp_utils.get_keyvaluefromcf(cf,["Imports",label],"file_name",default="")
        if import_filename=="":
            msg = " ImportSeries: import filename not found in control file, skipping ..."
            logger.warning(msg)
            continue
        var_name = pfp_utils.get_keyvaluefromcf(cf,["Imports",label],"var_name",default="")
        if var_name=="":
            msg = " ImportSeries: variable name not found in control file, skipping ..."
            logger.warning(msg)
            continue
        ds_import = pfp_io.nc_read_series(import_filename)
        ts_import = ds_import.globalattributes["time_step"]
        ldt_import = ds_import.series["DateTime"]["Data"]
        si = pfp_utils.GetDateIndex(ldt_import,str(start_date),ts=ts_import,default=0,match="exact")
        ei = pfp_utils.GetDateIndex(ldt_import,str(end_date),ts=ts_import,default=len(ldt_import)-1,match="exact")
        data = numpy.ma.ones(nRecs)*float(c.missing_value)
        flag = numpy.ma.ones(nRecs)
        data_import,flag_import,attr_import = pfp_utils.GetSeriesasMA(ds_import,var_name,si=si,ei=ei)
        ldt_import = ldt_import[si:ei+1]
        index = pfp_utils.FindIndicesOfBInA(ldt_import,ldt)
        data[index] = data_import
        flag[index] = flag_import
        pfp_utils.CreateSeries(ds,label,data,flag,attr_import)
Code example #5
def gfClimatology_interpolateddaily(ds,series,output,xlbooks):
    """
    Gap fill using data interpolated over a 2D array where the days are
    the rows and the time of day is the columns.
    """
    # gap fill from interpolated 30 minute data
    xlfilename = ds.climatology[output]["file_name"]
    sheet_name = series+'i(day)'
    if sheet_name not in xlbooks[xlfilename].sheet_names():
        msg = " gfClimatology: sheet "+sheet_name+" not found, skipping ..."
        logger.warning(msg)
        return
    ldt = ds.series["DateTime"]["Data"]
    thissheet = xlbooks[xlfilename].sheet_by_name(sheet_name)
    datemode = xlbooks[xlfilename].datemode
    basedate = datetime.datetime(1899, 12, 30)
    nts = thissheet.ncols - 1
    ndays = thissheet.nrows - 2
    # read the time stamp values from the climatology worksheet
    tsteps = thissheet.row_values(1,start_colx=1,end_colx=nts+1)
    # read the data from the climatology workbook
    val1d = numpy.ma.zeros(ndays*nts,dtype=numpy.float64)
    # initialise an array for the datetime of the climatological values
    cdt = [None]*nts*ndays
    # loop over the rows (days) of data
    for xlRow in range(ndays):
        # get the Excel datetime value
        xldatenumber = int(thissheet.cell_value(xlRow+2,0))
        # convert this to a Python Datetime
        xldatetime = basedate+datetime.timedelta(days=xldatenumber+1462*datemode)
        # fill the climatology datetime array
        cdt[xlRow*nts:(xlRow+1)*nts] = [xldatetime+datetime.timedelta(hours=hh) for hh in tsteps]
        # fill the climatological value array
        val1d[xlRow*nts:(xlRow+1)*nts] = thissheet.row_values(xlRow+2,start_colx=1,end_colx=nts+1)
    # get the data to be filled with climatological values
    data,flag,attr = pfp_utils.GetSeriesasMA(ds,series)
    # get an index of missing values
    idx = numpy.where(numpy.ma.getmaskarray(data)==True)[0]
    #idx = numpy.ma.where(numpy.ma.getmaskarray(data)==True)[0]
    # there must be a better way to do this ...
    # simply using the index (idx) to set a slice of the data array to the gap filled values in val1d
    # does not seem to work (mask stays true on replaced values in data), the work around is to
    # step through the indices, find the time of the missing value in data, find the same time in the
    # gap filled values val1d and set the missing element of data to this element of val1d
    # actually ...
    # this may not be the fastest but it may be the most robust because it matches dates of missing data
    # to dates in the climatology file
    for ii in idx:
        try:
            jj = pfp_utils.find_nearest_value(cdt, ldt[ii])
            data[ii] = val1d[jj]
            flag[ii] = numpy.int32(40)
        except ValueError:
            data[ii] = numpy.float64(c.missing_value)
            flag[ii] = numpy.int32(41)
    # put the gap filled data back into the data structure
    pfp_utils.CreateSeries(ds,output,data,flag,attr)
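
The date matching in the loop above relies on pfp_utils.find_nearest_value. A plausible sketch of such a nearest-neighbour lookup on a sorted datetime list, implemented with bisect (the real pfp_utils routine may differ):

import bisect
import datetime

def find_nearest_value(sorted_values, value):
    # return the index of the element of sorted_values closest to value
    i = bisect.bisect_left(sorted_values, value)
    if i == 0:
        return 0
    if i == len(sorted_values):
        return len(sorted_values) - 1
    before, after = sorted_values[i - 1], sorted_values[i]
    return i if after - value < value - before else i - 1

base = datetime.datetime(2015, 1, 1)
cdt = [base + datetime.timedelta(minutes=30 * k) for k in range(48)]
print(find_nearest_value(cdt, base + datetime.timedelta(minutes=44)))  # 1 (00:30)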
Code example #6
File: pfp_ck.py Project: OzFlux/PFP_Classic
def do_lowercheck(cf, ds, section, series, code=2):
    """
    Purpose:
    Usage:
    Author: PRI
    Date: February 2017
    """
    # check to see if LowerCheck requested for this variable
    if "LowerCheck" not in cf[section][series]:
        return
    # Check to see if limits have been specified
    if len(cf[section][series]["LowerCheck"].keys()) == 0:
        msg = "do_lowercheck: no date ranges specified"
        logger.info(msg)
        return

    ldt = ds.series["DateTime"]["Data"]
    ts = ds.globalattributes["time_step"]
    data, flag, attr = pfp_utils.GetSeriesasMA(ds, series)

    lc_list = list(cf[section][series]["LowerCheck"].keys())
    for n, item in enumerate(lc_list):
        # this should be a list and we should probably check for compliance
        lwr_info = cf[section][series]["LowerCheck"][item]
        attr["lowercheck_" + str(n)] = str(lwr_info)
        start_date = dateutil.parser.parse(lwr_info[0])
        su = float(lwr_info[1])
        end_date = dateutil.parser.parse(lwr_info[2])
        eu = float(lwr_info[3])
        # get the start and end indices
        si = pfp_utils.GetDateIndex(ldt,
                                    start_date,
                                    ts=ts,
                                    default=0,
                                    match="exact")
        ei = pfp_utils.GetDateIndex(ldt,
                                    end_date,
                                    ts=ts,
                                    default=len(ldt) - 1,
                                    match="exact")
        # get the segment of data between this start and end date
        seg_data = data[si:ei + 1]
        seg_flag = flag[si:ei + 1]
        x = numpy.arange(si, ei + 1, 1)
        lower = numpy.interp(x, [si, ei], [su, eu])
        index = numpy.ma.where((seg_data < lower))[0]
        seg_data[index] = numpy.ma.masked
        seg_flag[index] = numpy.int32(code)
        data[si:ei + 1] = seg_data
        flag[si:ei + 1] = seg_flag
    # now put the data back into the data structure
    pfp_utils.CreateSeries(ds, series, data, Flag=flag, Attr=attr)
    return
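
Note that the lower limit is not constant: numpy.interp ramps it linearly from su at the start date to eu at the end date. A tiny demonstration of the ramp and the masking it drives (values invented):

import numpy

si, ei = 0, 4
su, eu = 10.0, 20.0                       # lower limits at the start and end dates
x = numpy.arange(si, ei + 1, 1)
lower = numpy.interp(x, [si, ei], [su, eu])
print(lower)                              # [10.  12.5 15.  17.5 20. ]
seg_data = numpy.ma.array([12.0, 11.0, 16.0, 16.0, 25.0])
seg_data[numpy.ma.where(seg_data < lower)[0]] = numpy.ma.masked
print(seg_data)                           # [12.0 -- 16.0 -- 25.0]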
Code example #7
def AhfromMR(ds, Ah_out, MR_in, Ta_in, ps_in):
    """
    Purpose:
     Function to calculate absolute humidity given the water vapour mixing
     ratio, air temperature and pressure.  Absolute humidity is not calculated
     if any of the input series are missing or if the specified output series
     already exists in the data structure.
     The calculated absolute humidity is created as a new series in the
     data structure.
    Usage:
     pfp_func.AhfromMR(ds,"Ah_IRGA_Av","H2O_IRGA_Av","Ta_HMP_2m","ps")
    Author: PRI
    Date: September 2015
    """
    nRecs = int(ds.globalattributes["nc_nrecs"])
    zeros = numpy.zeros(nRecs, dtype=numpy.int32)
    ones = numpy.ones(nRecs, dtype=numpy.int32)
    for item in [MR_in, Ta_in, ps_in]:
        if item not in ds.series.keys():
            msg = " AhfromMR: Requested series " + item + " not found, " + Ah_out + " not calculated"
            logger.error(msg)
            return 0
    if Ah_out in ds.series.keys():
        msg = " AhfromMR: Output series " + Ah_out + " already exists, skipping ..."
        logger.error(msg)
        return 0
    MR_data, MR_flag, MR_attr = pfp_utils.GetSeriesasMA(ds, MR_in)
    Ta_data, Ta_flag, Ta_attr = pfp_utils.GetSeriesasMA(ds, Ta_in)
    ps_data, ps_flag, ps_attr = pfp_utils.GetSeriesasMA(ds, ps_in)
    Ah_data = pfp_mf.h2o_gpm3frommmolpmol(MR_data, Ta_data, ps_data)
    long_name = "Absolute humidity calculated from " + MR_in + ", " + Ta_in + " and " + ps_in
    Ah_attr = pfp_utils.MakeAttributeDictionary(long_name=long_name,
                                                height=MR_attr["height"],
                                                units="g/m3")
    flag = numpy.where(numpy.ma.getmaskarray(Ah_data) == True, ones, zeros)
    pfp_utils.CreateSeries(ds, Ah_out, Ah_data, flag, Ah_attr)
    return 1
Code example #8
File: pfp_ck.py Project: OzFlux/PFP_Classic
def CoordinateFluxGaps(cf, ds, Fc_in='Fc', Fe_in='Fe', Fh_in='Fh'):
    if not pfp_utils.cfoptionskeylogical(cf, Key='CoordinateFluxGaps'): return
    if pfp_utils.cfkeycheck(cf, Base='FunctionArgs', ThisOne='gapsvars'):
        vars = ast.literal_eval(cf['FunctionArgs']['gapsvars'])
        Fc_in = vars[0]
        Fe_in = vars[1]
        Fh_in = vars[2]
    Fc, f, a = pfp_utils.GetSeriesasMA(ds, Fc_in)
    Fe, f, a = pfp_utils.GetSeriesasMA(ds, Fe_in)
    Fh, f, a = pfp_utils.GetSeriesasMA(ds, Fh_in)
    # April 2015 PRI - changed numpy.ma.where to numpy.where
    index = numpy.where((numpy.ma.getmaskarray(Fc) == True)
                        | (numpy.ma.getmaskarray(Fe) == True)
                        | (numpy.ma.getmaskarray(Fh) == True))[0]
    #index = numpy.ma.where((numpy.ma.getmaskarray(Fc)==True)|
    #(numpy.ma.getmaskarray(Fe)==True)|
    #(numpy.ma.getmaskarray(Fh)==True))[0]
    # the following for ... in loop is not necessary
    for i in range(len(index)):
        j = index[i]
        if Fc.mask[j] == False:
            Fc.mask[j] = True
            Fc[j] = numpy.float64(c.missing_value)
            ds.series[Fc_in]['Flag'][j] = numpy.int32(19)
        if Fe.mask[j] == False:
            Fe.mask[j] = True
            Fe[j] = numpy.float64(c.missing_value)
            ds.series[Fe_in]['Flag'][j] = numpy.int32(19)
        if Fh.mask[j] == False:
            Fh.mask[j] = True
            Fh[j] = numpy.float64(c.missing_value)
            ds.series[Fh_in]['Flag'][j] = numpy.int32(19)
    ds.series[Fc_in]['Data'] = numpy.ma.filled(Fc, float(c.missing_value))
    ds.series[Fe_in]['Data'] = numpy.ma.filled(Fe, float(c.missing_value))
    ds.series[Fh_in]['Data'] = numpy.ma.filled(Fh, float(c.missing_value))
    logger.info(' Finished gap co-ordination')
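
As the comment in the function notes, the element-by-element loop is not strictly necessary; the same co-ordination can be written by applying the combined mask to all three fluxes at once. A vectorized sketch on standalone masked arrays (missing_value stands in for c.missing_value):

import numpy

missing_value = -9999.0
Fc = numpy.ma.masked_values([1.0, missing_value, 3.0], missing_value)
Fe = numpy.ma.masked_values([10.0, 20.0, missing_value], missing_value)
Fh = numpy.ma.masked_values([100.0, 200.0, 300.0], missing_value)
# True wherever any one of the three fluxes is missing
mask = (numpy.ma.getmaskarray(Fc) | numpy.ma.getmaskarray(Fe)
        | numpy.ma.getmaskarray(Fh))
for flux in [Fc, Fe, Fh]:
    flux.mask = mask                       # mask all three at the co-ordinated gaps
print(numpy.ma.filled(Fc, missing_value))  # [1.0 -9999.0 -9999.0]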
Code example #9
File: pfp_ck.py Project: OzFlux/PFP_Classic
def do_rangecheck(cf, ds, section, series, code=2):
    """
    Purpose:
     Applies a range check to data series listed in the control file.  Data values that
     are less than the lower limit or greater than the upper limit are replaced with
     c.missing_value and the corresponding QC flag element is set to 2.
    Usage:
    Author: PRI
    Date: Back in the day
    """
    # check that RangeCheck has been requested for this series
    if 'RangeCheck' not in cf[section][series].keys():
        return
    # check that the upper and lower limits have been given
    if ("Lower" not in cf[section][series]["RangeCheck"].keys()
            or "Upper" not in cf[section][series]["RangeCheck"].keys()):
        msg = "RangeCheck: key not found in control file for " + series + ", skipping ..."
        logger.warning(msg)
        return
    # get the upper and lower limits
    upr = numpy.array(eval(cf[section][series]['RangeCheck']['Upper']))
    valid_upper = numpy.min(upr)
    upr = upr[ds.series['Month']['Data'] - 1]
    lwr = numpy.array(eval(cf[section][series]['RangeCheck']['Lower']))
    valid_lower = numpy.min(lwr)
    lwr = lwr[ds.series['Month']['Data'] - 1]
    # get the data, flag and attributes
    data, flag, attr = pfp_utils.GetSeriesasMA(ds, series)
    # convert the data from a masked array to an ndarray so the range check works
    data = numpy.ma.filled(data, fill_value=c.missing_value)
    # get the indices of elements outside this range
    idx = numpy.where((data < lwr) | (data > upr))[0]
    # set elements outside range to missing and set the QC flag
    data[idx] = numpy.float64(c.missing_value)
    flag[idx] = numpy.int32(code)
    # update the variable attributes
    attr["rangecheck_lower"] = cf[section][series]["RangeCheck"]["Lower"]
    attr["rangecheck_upper"] = cf[section][series]["RangeCheck"]["Upper"]
    attr["valid_range"] = str(valid_lower) + "," + str(valid_upper)
    # and now put the data back into the data structure
    pfp_utils.CreateSeries(ds, series, data, Flag=flag, Attr=attr)
    # now we can return
    return
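
The Lower and Upper entries evaluate to 12-element lists, one value per calendar month, and indexing with Month - 1 expands them to one limit per data record. A small demonstration of that expansion (invented limits):

import numpy

lwr_monthly = numpy.array([0, 0, 0, 5, 5, 5, 5, 5, 5, 0, 0, 0])  # Jan..Dec
Month = numpy.array([1, 4, 7, 12])   # month of each data record
lwr = lwr_monthly[Month - 1]         # per-record lower limits
print(lwr)                           # [0 5 5 0]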
Code example #10
File: pfp_clim.py Project: OzFlux/PyFluxPro2.7
def climatology(cf):
    nc_filename = pfp_io.get_infilenamefromcf(cf)
    if not pfp_utils.file_exists(nc_filename): return
    xl_filename = nc_filename.replace(".nc","_Climatology.xls")
    xlFile = xlwt.Workbook()
    ds = pfp_io.nc_read_series(nc_filename)
    # calculate Fa if it is not in the data structure
    got_Fa = True
    if "Fa" not in ds.series.keys():
        if "Fn" in ds.series.keys() and "Fg" in ds.series.keys():
            pfp_ts.CalculateAvailableEnergy(ds,Fa_out='Fa',Fn_in='Fn',Fg_in='Fg')
        else:
            got_Fa = False
            logger.warning(" Fn or Fg not in data structure")
    # get the time step
    ts = int(ds.globalattributes['time_step'])
    # get the site name
    SiteName = ds.globalattributes['site_name']
    # get the datetime series
    dt = ds.series['DateTime']['Data']
    Hdh = numpy.array([(d.hour + d.minute/float(60)) for d in dt])
    Month = numpy.array([d.month for d in dt])
    # get the initial start and end dates
    StartDate = str(dt[0])
    EndDate = str(dt[-1])
    # find the start index of the first whole day (time=00:30)
    si = pfp_utils.GetDateIndex(dt,StartDate,ts=ts,default=0,match='startnextday')
    # find the end index of the last whole day (time=00:00)
    ei = pfp_utils.GetDateIndex(dt,EndDate,ts=ts,default=-1,match='endpreviousday')
    # get local views of the datetime series
    ldt = dt[si:ei+1]
    Hdh = Hdh[si:ei+1]
    Month = Month[si:ei+1]
    # get the number of time steps in a day and the number of days in the data
    ntsInDay = int(24.0*60.0/float(ts))
    nDays = len(ldt) // ntsInDay

    for ThisOne in cf['Variables'].keys():
        if "AltVarName" in cf['Variables'][ThisOne].keys(): ThisOne = cf['Variables'][ThisOne]["AltVarName"]
        if ThisOne in ds.series.keys():
            logger.info(" Doing climatology for "+ThisOne)
            data,f,a = pfp_utils.GetSeriesasMA(ds,ThisOne,si=si,ei=ei)
            if numpy.ma.count(data)==0:
                logger.warning(" No data for "+ThisOne+", skipping ...")
                continue
            fmt_str = get_formatstring(cf,ThisOne,fmt_def='')
            xlSheet = xlFile.add_sheet(ThisOne)
            Av_all = do_diurnalstats(Month,Hdh,data,xlSheet,format_string=fmt_str,ts=ts)
            # now do it for each day
            # we want to preserve any data that has been truncated by the use of the "startnextday"
            # and "endpreviousday" match options used above.  Here we revisit the start and end indices
            # and adjust these backwards and forwards respectively if data has been truncated.
            nDays_daily = nDays
            ei_daily = ei
            si_daily = si
            sdate = ldt[0]
            edate = ldt[-1]
            # is there data after the current end date?
            if dt[-1]>ldt[-1]:
                # if so, push the end index back by 1 day so it is included
                ei_daily = ei + ntsInDay
                nDays_daily = nDays_daily + 1
                edate = ldt[-1]+datetime.timedelta(days=1)
            # is there data before the current start date?
            if dt[0]<ldt[0]:
                # if so, push the start index back by 1 day so it is included
                si_daily = si - ntsInDay
                nDays_daily = nDays_daily + 1
                sdate = ldt[0]-datetime.timedelta(days=1)
            # get the data and use the "pad" option to add missing data if required to
            # complete the extra days
            data,f,a = pfp_utils.GetSeriesasMA(ds,ThisOne,si=si_daily,ei=ei_daily,mode="pad")
            data_daily = data.reshape(nDays_daily,ntsInDay)
            xlSheet = xlFile.add_sheet(ThisOne+'(day)')
            write_data_1columnpertimestep(xlSheet, data_daily, ts, startdate=sdate, format_string=fmt_str)
            data_daily_i = do_2dinterpolation(data_daily)
            xlSheet = xlFile.add_sheet(ThisOne+'i(day)')
            write_data_1columnpertimestep(xlSheet, data_daily_i, ts, startdate=sdate, format_string=fmt_str)
        else:
            logger.warning(" Requested variable "+ThisOne+" not in data structure")
            continue
    logger.info(" Saving Excel file "+os.path.split(xl_filename)[1])
    xlFile.save(xl_filename)
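
The per-day sheets are produced by reshaping the 1D time series into a 2D (nDays, ntsInDay) array, one row per day and one column per time of day. A minimal sketch of that step for a 30 minute time step:

import numpy

ts = 30                                   # time step (minutes)
ntsInDay = int(24.0 * 60.0 / float(ts))   # 48 values per day
nDays = 3
data = numpy.arange(nDays * ntsInDay, dtype=numpy.float64)
data_daily = data.reshape(nDays, ntsInDay)
print(data_daily.shape)                   # (3, 48)
print(data_daily[1, 0])                   # first value of day 2 -> 48.0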
Code example #11
def rpLT_plot(pd, ds, output, drivers, target, iel, si=0, ei=-1):
    """ Plot the results of the Lloyd-Taylor run. """
    ieli = iel["info"]
    ielo = iel["outputs"]
    # get a local copy of the datetime series
    if ei == -1:
        dt = ds.series['DateTime']['Data'][si:]
    else:
        dt = ds.series['DateTime']['Data'][si:ei + 1]
    xdt = numpy.array(dt)
    #Hdh, f, a = pfp_utils.GetSeriesasMA(ds, 'Hdh', si=si, ei=ei)
    Hdh = numpy.array(
        [d.hour + (d.minute + d.second / float(60)) / float(60) for d in xdt])
    # get the observed and modelled values
    obs, f, a = pfp_utils.GetSeriesasMA(ds, target, si=si, ei=ei)
    mod, f, a = pfp_utils.GetSeriesasMA(ds, output, si=si, ei=ei)
    # make the figure
    if iel["gui"]["show_plots"]:
        plt.ion()
    else:
        plt.ioff()
    fig = plt.figure(pd["fig_num"], figsize=(13, 8))
    fig.clf()
    fig.canvas.set_window_title(target + " (LT): " + pd["startdate"] + " to " +
                                pd["enddate"])
    plt.figtext(0.5, 0.95, pd["title"], ha='center', size=16)
    # XY plot of the diurnal variation
    rect1 = [0.10, pd["margin_bottom"], pd["xy_width"], pd["xy_height"]]
    ax1 = plt.axes(rect1)
    # get the diurnal stats of the observations
    mask = numpy.ma.mask_or(obs.mask, mod.mask)
    obs_mor = numpy.ma.array(obs, mask=mask)
    dstats = pfp_utils.get_diurnalstats(xdt, obs_mor, ieli)
    ax1.plot(dstats["Hr"], dstats["Av"], 'b-', label="Obs")
    # get the diurnal stats of all SOLO predictions
    dstats = pfp_utils.get_diurnalstats(xdt, mod, ieli)
    ax1.plot(dstats["Hr"], dstats["Av"], 'r-', label="LT(all)")
    mod_mor = numpy.ma.masked_where(numpy.ma.getmaskarray(obs) == True,
                                    mod,
                                    copy=True)
    dstats = pfp_utils.get_diurnalstats(xdt, mod_mor, ieli)
    ax1.plot(dstats["Hr"], dstats["Av"], 'g-', label="LT(obs)")
    plt.xlim(0, 24)
    plt.xticks([0, 6, 12, 18, 24])
    ax1.set_ylabel(target)
    ax1.set_xlabel('Hour')
    ax1.legend(loc='upper right', frameon=False, prop={'size': 8})
    # XY plot of the 30 minute data
    rect2 = [0.40, pd["margin_bottom"], pd["xy_width"], pd["xy_height"]]
    ax2 = plt.axes(rect2)
    ax2.plot(mod, obs, 'b.')
    ax2.set_ylabel(target + '_obs')
    ax2.set_xlabel(target + '_LT')
    # plot the best fit line
    coefs = numpy.ma.polyfit(numpy.ma.copy(mod), numpy.ma.copy(obs), 1)
    xfit = numpy.ma.array(
        [numpy.ma.minimum.reduce(mod),
         numpy.ma.maximum.reduce(mod)])
    yfit = numpy.polyval(coefs, xfit)
    r = numpy.ma.corrcoef(mod, obs)
    ax2.plot(xfit, yfit, 'r--', linewidth=3)
    eqnstr = 'y = %.3fx + %.3f, r = %.3f' % (coefs[0], coefs[1], r[0][1])
    ax2.text(0.5,
             0.875,
             eqnstr,
             fontsize=8,
             horizontalalignment='center',
             transform=ax2.transAxes)
    # write the fit statistics to the plot
    numpoints = numpy.ma.count(obs)
    numfilled = numpy.ma.count(mod) - numpy.ma.count(obs)
    diff = mod - obs
    bias = numpy.ma.average(diff)
    ielo[output]["results"]["Bias"].append(bias)
    rmse = numpy.ma.sqrt(numpy.ma.mean((obs - mod) * (obs - mod)))
    plt.figtext(0.725, 0.225, 'No. points')
    plt.figtext(0.825, 0.225, str(numpoints))
    ielo[output]["results"]["No. points"].append(numpoints)
    plt.figtext(0.725, 0.200, 'No. filled')
    plt.figtext(0.825, 0.200, str(numfilled))
    plt.figtext(0.725, 0.175, 'Slope')
    plt.figtext(0.825, 0.175, str(pfp_utils.round2sig(coefs[0], sig=4)))
    ielo[output]["results"]["m_ols"].append(coefs[0])
    plt.figtext(0.725, 0.150, 'Offset')
    plt.figtext(0.825, 0.150, str(pfp_utils.round2sig(coefs[1], sig=4)))
    ielo[output]["results"]["b_ols"].append(coefs[1])
    plt.figtext(0.725, 0.125, 'r')
    plt.figtext(0.825, 0.125, str(pfp_utils.round2sig(r[0][1], sig=4)))
    ielo[output]["results"]["r"].append(r[0][1])
    plt.figtext(0.725, 0.100, 'RMSE')
    plt.figtext(0.825, 0.100, str(pfp_utils.round2sig(rmse, sig=4)))
    ielo[output]["results"]["RMSE"].append(rmse)
    var_obs = numpy.ma.var(obs)
    ielo[output]["results"]["Var (obs)"].append(var_obs)
    var_mod = numpy.ma.var(mod)
    ielo[output]["results"]["Var (LT)"].append(var_mod)
    ielo[output]["results"]["Var ratio"].append(var_obs / var_mod)
    ielo[output]["results"]["Avg (obs)"].append(numpy.ma.average(obs))
    ielo[output]["results"]["Avg (LT)"].append(numpy.ma.average(mod))
    # time series of drivers and target
    ts_axes = []
    rect = [
        pd["margin_left"], pd["ts_bottom"], pd["ts_width"], pd["ts_height"]
    ]
    ts_axes.append(plt.axes(rect))
    #ts_axes[0].plot(xdt,obs,'b.',xdt,mod,'r-')
    ts_axes[0].scatter(xdt, obs, c=Hdh)
    ts_axes[0].plot(xdt, mod, 'r-')
    plt.axhline(0)
    ts_axes[0].set_xlim(xdt[0], xdt[-1])
    TextStr = target + '_obs (' + ds.series[target]['Attr']['units'] + ')'
    ts_axes[0].text(0.05,
                    0.85,
                    TextStr,
                    color='b',
                    horizontalalignment='left',
                    transform=ts_axes[0].transAxes)
    TextStr = output + '(' + ds.series[output]['Attr']['units'] + ')'
    ts_axes[0].text(0.85,
                    0.85,
                    TextStr,
                    color='r',
                    horizontalalignment='right',
                    transform=ts_axes[0].transAxes)
    for ThisOne, i in zip(drivers, range(1, pd["nDrivers"] + 1)):
        this_bottom = pd["ts_bottom"] + i * pd["ts_height"]
        rect = [
            pd["margin_left"], this_bottom, pd["ts_width"], pd["ts_height"]
        ]
        ts_axes.append(plt.axes(rect, sharex=ts_axes[0]))
        data, flag, attr = pfp_utils.GetSeriesasMA(ds, ThisOne, si=si, ei=ei)
        data_notgf = numpy.ma.masked_where(flag != 0, data)
        data_gf = numpy.ma.masked_where(flag == 0, data)
        ts_axes[i].plot(xdt, data_notgf, 'b-')
        ts_axes[i].plot(xdt, data_gf, 'r-')
        plt.setp(ts_axes[i].get_xticklabels(), visible=False)
        TextStr = ThisOne + '(' + ds.series[ThisOne]['Attr']['units'] + ')'
        ts_axes[i].text(0.05,
                        0.85,
                        TextStr,
                        color='b',
                        horizontalalignment='left',
                        transform=ts_axes[i].transAxes)
    # save a hard copy of the plot
    sdt = xdt[0].strftime("%Y%m%d")
    edt = xdt[-1].strftime("%Y%m%d")
    if not os.path.exists(ieli["plot_path"]):
        os.makedirs(ieli["plot_path"])
    figname = ieli["plot_path"] + pd["site_name"].replace(
        " ", "") + "_LT_" + pd["label"]
    figname = figname + "_" + sdt + "_" + edt + '.png'
    fig.savefig(figname, format='png')
    # draw the plot on the screen
    if iel["gui"]["show_plots"]:
        plt.draw()
        #plt.pause(1)
        mypause(1)
        plt.ioff()
    else:
        plt.close(fig)
        plt.ion()
Code example #12
File: pfp_ck.py Project: OzFlux/PFP_Classic
def ApplyTurbulenceFilter(cf, ds, ustar_threshold=None):
    """
    Purpose:
    Usage:
    Author:
    Date:
    """
    opt = ApplyTurbulenceFilter_checks(cf, ds)
    if not opt["OK"]: return
    # local pointer to the datetime series
    ldt = ds.series["DateTime"]["Data"]
    # time step
    ts = int(ds.globalattributes["time_step"])
    # dictionary of ustar threshold values
    if ustar_threshold is None:
        ustar_dict = pfp_rp.get_ustar_thresholds(cf, ldt)
    else:
        ustar_dict = pfp_rp.get_ustar_thresholds_annual(ldt, ustar_threshold)
    # initialise a dictionary for the indicator series
    indicators = {}
    # get data for the indicator series
    ustar, ustar_flag, ustar_attr = pfp_utils.GetSeriesasMA(ds, "ustar")
    Fsd, f, a = pfp_utils.GetSeriesasMA(ds, "Fsd")
    if "solar_altitude" not in ds.series.keys():
        pfp_ts.get_synthetic_fsd(ds)
    Fsd_syn, f, a = pfp_utils.GetSeriesasMA(ds, "Fsd_syn")
    sa, f, a = pfp_utils.GetSeriesasMA(ds, "solar_altitude")
    # get the day/night indicator series
    # indicators["day"] = 1 ==> day time, indicators["day"] = 0 ==> night time
    indicators["day"] = pfp_rp.get_day_indicator(cf, Fsd, Fsd_syn, sa)
    ind_day = indicators["day"]["values"]
    # get the turbulence indicator series
    if opt["turbulence_filter"].lower() == "ustar":
        # indicators["turbulence"] = 1 ==> turbulent, indicators["turbulence"] = 0 ==> not turbulent
        indicators["turbulence"] = pfp_rp.get_turbulence_indicator_ustar(
            ldt, ustar, ustar_dict, ts)
    elif opt["turbulence_filter"].lower() == "ustar_evg":
        # ustar >= threshold ==> ind_ustar = 1, ustar < threshold == ind_ustar = 0
        indicators["ustar"] = pfp_rp.get_turbulence_indicator_ustar(
            ldt, ustar, ustar_dict, ts)
        ind_ustar = indicators["ustar"]["values"]
        # ustar >= threshold during day AND ustar has been >= threshold since sunset ==> indicators["turbulence"] = 1
        # indicators["turbulence"] = 0 during night once ustar has dropped below threshold even if it
        # increases above the threshold later in the night
        indicators["turbulence"] = pfp_rp.get_turbulence_indicator_ustar_evg(
            ldt, ind_day, ind_ustar, ustar, ustar_dict, ts)
    elif opt["turbulence_filter"].lower() == "l":
        #indicators["turbulence"] = get_turbulence_indicator_l(ldt, L, z, d, zmdonL_threshold)
        indicators["turbulence"] = numpy.ones(len(ldt))
        msg = " Use of L as turbulence indicator not implemented, no filter applied"
        logger.warning(msg)
    else:
        msg = " Unrecognised turbulence filter option ("
        msg = msg + opt["turbulence_filter"] + "), no filter applied"
        logger.error(msg)
        return
    # initialise the final indicator series as the turbulence indicator
    # subsequent filters will modify the final indicator series
    # we must use copy.deepcopy() otherwise the "values" array will only
    # be copied by reference not value.  Damn Python's default of copy by reference!
    indicators["final"] = copy.deepcopy(indicators["turbulence"])
    # check to see if the user wants to accept all day time observations
    # regardless of ustar value
    if opt["accept_day_times"].lower() == "yes":
        # if yes, then we force the final indicator to be 1
        # if ustar is below the threshold during the day.
        idx = numpy.where(indicators["day"]["values"] == 1)[0]
        indicators["final"]["values"][idx] = numpy.int32(1)
        indicators["final"]["attr"].update(indicators["day"]["attr"])
    # get the evening indicator series
    indicators["evening"] = pfp_rp.get_evening_indicator(
        cf, Fsd, Fsd_syn, sa, ts)
    indicators["dayevening"] = {
        "values": indicators["day"]["values"] + indicators["evening"]["values"]
    }
    indicators["dayevening"]["attr"] = indicators["day"]["attr"].copy()
    indicators["dayevening"]["attr"].update(indicators["evening"]["attr"])
    if opt["use_evening_filter"].lower() == "yes":
        idx = numpy.where(indicators["dayevening"]["values"] == 0)[0]
        indicators["final"]["values"][idx] = numpy.int32(0)
        indicators["final"]["attr"].update(indicators["dayevening"]["attr"])
    # save the indicator series
    ind_flag = numpy.zeros(len(ldt))
    long_name = "Turbulence indicator, 1 for turbulent, 0 for non-turbulent"
    ind_attr = pfp_utils.MakeAttributeDictionary(long_name=long_name,
                                                 units="None")
    pfp_utils.CreateSeries(ds, "turbulence_indicator",
                           indicators["turbulence"]["values"], ind_flag,
                           ind_attr)
    long_name = "Day indicator, 1 for day time, 0 for night time"
    ind_attr = pfp_utils.MakeAttributeDictionary(long_name=long_name,
                                                 units="None")
    pfp_utils.CreateSeries(ds, "day_indicator", indicators["day"]["values"],
                           ind_flag, ind_attr)
    long_name = "Evening indicator, 1 for evening, 0 for not evening"
    ind_attr = pfp_utils.MakeAttributeDictionary(long_name=long_name,
                                                 units="None")
    pfp_utils.CreateSeries(ds, "evening_indicator",
                           indicators["evening"]["values"], ind_flag, ind_attr)
    long_name = "Day/evening indicator, 1 for day/evening, 0 for not day/evening"
    ind_attr = pfp_utils.MakeAttributeDictionary(long_name=long_name,
                                                 units="None")
    pfp_utils.CreateSeries(ds, "dayevening_indicator",
                           indicators["dayevening"]["values"], ind_flag,
                           ind_attr)
    long_name = "Final indicator, 1 for use data, 0 for don't use data"
    ind_attr = pfp_utils.MakeAttributeDictionary(long_name=long_name,
                                                 units="None")
    pfp_utils.CreateSeries(ds, "final_indicator",
                           indicators["final"]["values"], ind_flag, ind_attr)
    # loop over the series to be filtered
    for series in opt["filter_list"]:
        msg = " Applying " + opt["turbulence_filter"] + " filter to " + series
        logger.info(msg)
        # get the data
        data, flag, attr = pfp_utils.GetSeriesasMA(ds, series)
        # continue to next series if this series has been filtered before
        if "turbulence_filter" in attr:
            msg = " Series " + series + " has already been filtered, skipping ..."
            logger.warning(msg)
            continue
        # save the non-filtered data
        pfp_utils.CreateSeries(ds, series + "_nofilter", data, flag, attr)
        # now apply the filter
        data_filtered = numpy.ma.masked_where(
            indicators["final"]["values"] == 0, data, copy=True)
        flag_filtered = numpy.copy(flag)
        idx = numpy.where(indicators["final"]["values"] == 0)[0]
        flag_filtered[idx] = numpy.int32(61)
        # update the series attributes
        for item in indicators["final"]["attr"].keys():
            attr[item] = indicators["final"]["attr"][item]
        # and write the filtered data to the data structure
        pfp_utils.CreateSeries(ds, series, data_filtered, flag_filtered, attr)
        # and write a copy of the filtered data to the data structure so it
        # will still exist once the gap filling has been done
        pfp_utils.CreateSeries(ds, series + "_filtered", data_filtered,
                               flag_filtered, attr)
    return
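
Stripped of the bookkeeping, the filter reduces to an indicator series: 1 where data are kept, 0 where they are masked and flagged with 61. A compact sketch of the ustar-threshold case, simplified to a single constant threshold (the real pfp_rp indicator functions handle per-year thresholds and the day/evening logic):

import numpy

ustar = numpy.ma.array([0.05, 0.30, 0.12, 0.45])
ustar_threshold = 0.25
# 1 ==> turbulent (keep), 0 ==> not turbulent (mask and flag)
indicator = numpy.where(ustar >= ustar_threshold, 1, 0)
data = numpy.ma.array([1.0, 2.0, 3.0, 4.0])
flag = numpy.zeros(len(data), dtype=numpy.int32)
data_filtered = numpy.ma.masked_where(indicator == 0, data, copy=True)
flag[numpy.where(indicator == 0)[0]] = numpy.int32(61)
print(data_filtered)  # [-- 2.0 -- 4.0]
print(flag)           # [61  0 61  0]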
Code example #13
def gfalternate_matchstartendtimes(ds,ds_alternate):
    """
    Purpose:
     Match the start and end times of the alternate and tower data.
     The logic is as follows:
      - if there is no overlap between the alternate and tower data then
        dummy series with missing data are created for the alternate data
        for the period of the tower data
      - if the alternate and tower data overlap then truncate or pad (with
        missing values) the alternate data series so that the periods of the
        tower data and alternate data match.
    Usage:
     gfalternate_matchstartendtimes(ds,ds_alternate)
     where ds is the data structure containing the tower data
           ds_alternate is the data structure containing the alternate data
    Author: PRI
    Date: July 2015
    """
    # check the time steps are the same
    ts_tower = int(ds.globalattributes["time_step"])
    ts_alternate = int(ds_alternate.globalattributes["time_step"])
    if ts_tower!=ts_alternate:
        msg = " GapFillFromAlternate: time step for tower and alternate data are different, returning ..."
        logger.error(msg)
        ds.returncodes["GapFillFromAlternate"] = "error"
        return
    # get the start and end times of the tower and the alternate data and see if they overlap
    ldt_alternate = ds_alternate.series["DateTime"]["Data"]
    start_alternate = ldt_alternate[0]
    ldt_tower = ds.series["DateTime"]["Data"]
    end_tower = ldt_tower[-1]
    # since the datetime is monotonically increasing we need only check the start datetime
    overlap = start_alternate<=end_tower
    # do the alternate and tower data overlap?
    if overlap:
        # index of alternate datetimes that are also in tower datetimes
        #alternate_index = pfp_utils.FindIndicesOfBInA(ldt_tower,ldt_alternate)
        #alternate_index = [pfp_utils.find_nearest_value(ldt_tower, dt) for dt in ldt_alternate]
        # index of tower datetimes that are also in alternate datetimes
        #tower_index = pfp_utils.FindIndicesOfBInA(ldt_alternate,ldt_tower)
        #tower_index = [pfp_utils.find_nearest_value(ldt_alternate, dt) for dt in ldt_tower]
        tower_index, alternate_index = pfp_utils.FindMatchingIndices(ldt_tower, ldt_alternate)
        # check that the indices point to the same times
        ldta = [ldt_alternate[i] for i in alternate_index]
        ldtt = [ldt_tower[i] for i in tower_index]
        if ldta!=ldtt:
            # and exit with a helpful message if they don't
            msg = " Something went badly wrong and I'm giving up"
            logger.error(msg)
            sys.exit()
        # get a list of alternate series
        alternate_series_list = [item for item in ds_alternate.series.keys() if "_QCFlag" not in item]
        # number of records in truncated or padded alternate data
        nRecs_tower = len(ldt_tower)
        # force the alternate datetime to be the tower datetime
        ds_alternate.series["DateTime"] = ds.series["DateTime"]
        # loop over the alternate series and truncate or pad as required
        # truncation or padding is handled by the indices
        for series in alternate_series_list:
            if series in ["DateTime","DateTime_UTC"]: continue
            # get the alternate data
            data,flag,attr = pfp_utils.GetSeriesasMA(ds_alternate,series)
            # create an array of missing data of the required length
            data_overlap = numpy.full(nRecs_tower,c.missing_value,dtype=numpy.float64)
            flag_overlap = numpy.ones(nRecs_tower,dtype=numpy.int32)
            # replace missing data with alternate data where times match
            data_overlap[tower_index] = data[alternate_index]
            flag_overlap[tower_index] = flag[alternate_index]
            # write the truncated or padded series back into the alternate data structure
            pfp_utils.CreateSeries(ds_alternate,series,data_overlap,flag_overlap,attr)
        # update the number of records in the file
        ds_alternate.globalattributes["nc_nrecs"] = nRecs_tower
    else:
        # there is no overlap between the alternate and tower data, create dummy series
        nRecs = len(ldt_tower)
        ds_alternate.globalattributes["nc_nrecs"] = nRecs
        ds_alternate.series["DateTime"] = ds.series["DateTime"]
        alternate_series_list = [item for item in ds_alternate.series.keys() if "_QCFlag" not in item]
        for series in alternate_series_list:
            if series in ["DateTime","DateTime_UTC"]:
                continue
            _,  _, attr = pfp_utils.GetSeriesasMA(ds_alternate, series)
            data = numpy.full(nRecs, c.missing_value, dtype=numpy.float64)
            flag = numpy.ones(nRecs, dtype=numpy.int32)
            pfp_utils.CreateSeries(ds_alternate, series, data, flag, attr)
    ds.returncodes["GapFillFromAlternate"] = "normal"
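
pfp_utils.FindMatchingIndices returns, for two datetime series, the indices in each at which the timestamps coincide. A plausible dictionary-based sketch of that behaviour (hypothetical helper; the real pfp_utils implementation may differ):

import datetime

def find_matching_indices(a, b):
    # indices ia, ib such that a[ia[k]] == b[ib[k]] for every k
    positions = {v: i for i, v in enumerate(a)}
    ia, ib = [], []
    for j, v in enumerate(b):
        if v in positions:
            ia.append(positions[v])
            ib.append(j)
    return ia, ib

base = datetime.datetime(2015, 7, 1)
ldt_tower = [base + datetime.timedelta(minutes=30 * k) for k in range(4)]
ldt_alternate = [base + datetime.timedelta(minutes=30 * k) for k in range(2, 6)]
tower_index, alternate_index = find_matching_indices(ldt_tower, ldt_alternate)
print(tower_index, alternate_index)  # [2, 3] [0, 1]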
Code example #14
File: pfp_clim.py Project: mdekauwe/PyFluxPro-1
def climatology(cf):
    nc_filename = pfp_io.get_infilenamefromcf(cf)
    if not pfp_utils.file_exists(nc_filename): return
    xl_filename = nc_filename.replace(".nc", "_Climatology.xls")
    xlFile = xlwt.Workbook()
    ds = pfp_io.nc_read_series(nc_filename)
    # calculate Fa if it is not in the data structure
    got_Fa = True
    if "Fa" not in ds.series.keys():
        if "Fn" in ds.series.keys() and "Fg" in ds.series.keys():
            pfp_ts.CalculateAvailableEnergy(ds,
                                            Fa_out='Fa',
                                            Fn_in='Fn',
                                            Fg_in='Fg')
        else:
            got_Fa = False
            logger.warning(" Fn or Fg not in data structure")
    # get the time step
    ts = int(ds.globalattributes['time_step'])
    # get the site name
    SiteName = ds.globalattributes['site_name']
    # get the datetime series
    dt = ds.series['DateTime']['Data']
    Hdh = numpy.array([(d.hour + d.minute / float(60)) for d in dt])
    Month = numpy.array([d.month for d in dt])
    # get the initial start and end dates
    StartDate = str(dt[0])
    EndDate = str(dt[-1])
    # find the start index of the first whole day (time=00:30)
    si = pfp_utils.GetDateIndex(dt,
                                StartDate,
                                ts=ts,
                                default=0,
                                match='startnextday')
    # find the end index of the last whole day (time=00:00)
    ei = pfp_utils.GetDateIndex(dt,
                                EndDate,
                                ts=ts,
                                default=-1,
                                match='endpreviousday')
    # get local views of the datetime series
    ldt = dt[si:ei + 1]
    Hdh = Hdh[si:ei + 1]
    Month = Month[si:ei + 1]
    # get the number of time steps in a day and the number of days in the data
    ntsInDay = int(24.0 * 60.0 / float(ts))
    nDays = len(ldt) // ntsInDay

    for ThisOne in cf['Variables'].keys():
        if "AltVarName" in cf['Variables'][ThisOne].keys():
            ThisOne = cf['Variables'][ThisOne]["AltVarName"]
        if ThisOne in ds.series.keys():
            logger.info(" Doing climatology for " + ThisOne)
            data, f, a = pfp_utils.GetSeriesasMA(ds, ThisOne, si=si, ei=ei)
            if numpy.ma.count(data) == 0:
                logger.warning(" No data for " + ThisOne + ", skipping ...")
                continue
            fmt_str = get_formatstring(cf, ThisOne, fmt_def='')
            xlSheet = xlFile.add_sheet(ThisOne)
            Av_all = do_diurnalstats(Month,
                                     Hdh,
                                     data,
                                     xlSheet,
                                     format_string=fmt_str,
                                     ts=ts)
            # now do it for each day
            # we want to preserve any data that has been truncated by the use of the "startnextday"
            # and "endpreviousday" match options used above.  Here we revisit the start and end indices
            # and adjust these backwards and forwards respectively if data has been truncated.
            nDays_daily = nDays
            ei_daily = ei
            si_daily = si
            sdate = ldt[0]
            edate = ldt[-1]
            # is there data after the current end date?
            if dt[-1] > ldt[-1]:
                # if so, push the end index back by 1 day so it is included
                ei_daily = ei + ntsInDay
                nDays_daily = nDays_daily + 1
                edate = ldt[-1] + datetime.timedelta(days=1)
            # is there data before the current start date?
            if dt[0] < ldt[0]:
                # if so, push the start index back by 1 day so it is included
                si_daily = si - ntsInDay
                nDays_daily = nDays_daily + 1
                sdate = ldt[0] - datetime.timedelta(days=1)
            # get the data and use the "pad" option to add missing data if required to
            # complete the extra days
            data, f, a = pfp_utils.GetSeriesasMA(ds,
                                                 ThisOne,
                                                 si=si_daily,
                                                 ei=ei_daily,
                                                 mode="pad")
            data_daily = data.reshape(nDays_daily, ntsInDay)
            xlSheet = xlFile.add_sheet(ThisOne + '(day)')
            write_data_1columnpertimestep(xlSheet,
                                          data_daily,
                                          ts,
                                          startdate=sdate,
                                          format_string=fmt_str)
            data_daily_i = do_2dinterpolation(data_daily)
            xlSheet = xlFile.add_sheet(ThisOne + 'i(day)')
            write_data_1columnpertimestep(xlSheet,
                                          data_daily_i,
                                          ts,
                                          startdate=sdate,
                                          format_string=fmt_str)
        elif ThisOne == "EF" and got_Fa:
            logger.info(" Doing evaporative fraction")
            EF = numpy.ma.zeros([48, 12]) + float(c.missing_value)
            Hdh, f, a = pfp_utils.GetSeriesasMA(ds, 'Hdh', si=si, ei=ei)
            Fa, f, a = pfp_utils.GetSeriesasMA(ds, 'Fa', si=si, ei=ei)
            Fe, f, a = pfp_utils.GetSeriesasMA(ds, 'Fe', si=si, ei=ei)
            for m in range(1, 13):
                mi = numpy.where(Month == m)[0]
                Fa_Num, Hr, Fa_Av, Sd, Mx, Mn = get_diurnalstats(
                    Hdh[mi], Fa[mi], ts)
                Fe_Num, Hr, Fe_Av, Sd, Mx, Mn = get_diurnalstats(
                    Hdh[mi], Fe[mi], ts)
                index = numpy.ma.where((Fa_Num > 4) & (Fe_Num > 4))
                EF[:, m - 1][index] = Fe_Av[index] / Fa_Av[index]
            # reject EF values greater than upper limit or less than lower limit
            upr, lwr = get_rangecheck_limit(cf, 'EF')
            EF = numpy.ma.filled(
                numpy.ma.masked_where((EF > upr) | (EF < lwr), EF),
                float(c.missing_value))
            # write the EF to the Excel file
            xlSheet = xlFile.add_sheet('EF')
            write_data_1columnpermonth(xlSheet, EF, ts, format_string='0.00')
            # do the 2D interpolation to fill missing EF values
            EFi = do_2dinterpolation(EF)
            xlSheet = xlFile.add_sheet('EFi')
            write_data_1columnpermonth(xlSheet, EFi, ts, format_string='0.00')
            # now do EF for each day
            Fa, f, a = pfp_utils.GetSeriesasMA(ds, 'Fa', si=si, ei=ei)
            Fe, f, a = pfp_utils.GetSeriesasMA(ds, 'Fe', si=si, ei=ei)
            EF = Fe / Fa
            EF = numpy.ma.filled(
                numpy.ma.masked_where((EF > upr) | (EF < lwr), EF),
                float(c.missing_value))
            EF_daily = EF.reshape(nDays, ntsInDay)
            xlSheet = xlFile.add_sheet('EF(day)')
            write_data_1columnpertimestep(xlSheet,
                                          EF_daily,
                                          ts,
                                          startdate=ldt[0],
                                          format_string='0.00')
            EFi = do_2dinterpolation(EF_daily)
            xlSheet = xlFile.add_sheet('EFi(day)')
            write_data_1columnpertimestep(xlSheet,
                                          EFi,
                                          ts,
                                          startdate=ldt[0],
                                          format_string='0.00')
        elif ThisOne == "BR":
            logger.info(" Doing Bowen ratio")
            BR = numpy.ma.zeros([48, 12]) + float(c.missing_value)
            Fe, f, a = pfp_utils.GetSeriesasMA(ds, 'Fe', si=si, ei=ei)
            Fh, f, a = pfp_utils.GetSeriesasMA(ds, 'Fh', si=si, ei=ei)
            for m in range(1, 13):
                mi = numpy.where(Month == m)[0]
                Fh_Num, Hr, Fh_Av, Sd, Mx, Mn = get_diurnalstats(
                    Hdh[mi], Fh[mi], ts)
                Fe_Num, Hr, Fe_Av, Sd, Mx, Mn = get_diurnalstats(
                    Hdh[mi], Fe[mi], ts)
                index = numpy.ma.where((Fh_Num > 4) & (Fe_Num > 4))
                BR[:, m - 1][index] = Fh_Av[index] / Fe_Av[index]
            # reject BR values greater than upper limit or less than lower limit
            upr, lwr = get_rangecheck_limit(cf, 'BR')
            BR = numpy.ma.filled(
                numpy.ma.masked_where((BR > upr) | (BR < lwr), BR),
                float(c.missing_value))
            # write the BR to the Excel file
            xlSheet = xlFile.add_sheet('BR')
            write_data_1columnpermonth(xlSheet, BR, ts, format_string='0.00')
            # do the 2D interpolation to fill missing EF values
            BRi = do_2dinterpolation(BR)
            xlSheet = xlFile.add_sheet('BRi')
            write_data_1columnpermonth(xlSheet, BRi, ts, format_string='0.00')
            # now do BR for each day ...
            Fe, f, a = pfp_utils.GetSeriesasMA(ds, 'Fe', si=si, ei=ei)
            Fh, f, a = pfp_utils.GetSeriesasMA(ds, 'Fh', si=si, ei=ei)
            BR = Fh / Fe
            BR = numpy.ma.filled(
                numpy.ma.masked_where((BR > upr) | (BR < lwr), BR),
                float(c.missing_value))
            BR_daily = BR.reshape(nDays, ntsInDay)
            xlSheet = xlFile.add_sheet('BR(day)')
            write_data_1columnpertimestep(xlSheet,
                                          BR_daily,
                                          ts,
                                          startdate=ldt[0],
                                          format_string='0.00')
            BRi = do_2dinterpolation(BR_daily)
            xlSheet = xlFile.add_sheet('BRi(day)')
            write_data_1columnpertimestep(xlSheet,
                                          BRi,
                                          ts,
                                          startdate=ldt[0],
                                          format_string='0.00')
        elif ThisOne == "WUE":
            logger.info(" Doing ecosystem WUE")
            WUE = numpy.ma.zeros([48, 12]) + float(c.missing_value)
            Fe, f, a = pfp_utils.GetSeriesasMA(ds, 'Fe', si=si, ei=ei)
            Fc, f, a = pfp_utils.GetSeriesasMA(ds, 'Fc', si=si, ei=ei)
            for m in range(1, 13):
                mi = numpy.where(Month == m)[0]
                Fc_Num, Hr, Fc_Av, Sd, Mx, Mn = get_diurnalstats(
                    Hdh[mi], Fc[mi], ts)
                Fe_Num, Hr, Fe_Av, Sd, Mx, Mn = get_diurnalstats(
                    Hdh[mi], Fe[mi], ts)
                index = numpy.ma.where((Fc_Num > 4) & (Fe_Num > 4))
                WUE[:, m - 1][index] = Fc_Av[index] / Fe_Av[index]
            # reject WUE values greater than upper limit or less than lower limit
            upr, lwr = get_rangecheck_limit(cf, 'WUE')
            WUE = numpy.ma.filled(
                numpy.ma.masked_where((WUE > upr) | (WUE < lwr), WUE),
                float(c.missing_value))
            # write the WUE to the Excel file
            xlSheet = xlFile.add_sheet('WUE')
            write_data_1columnpermonth(xlSheet,
                                       WUE,
                                       ts,
                                       format_string='0.00000')
            # do the 2D interpolation to fill missing EF values
            WUEi = do_2dinterpolation(WUE)
            xlSheet = xlFile.add_sheet('WUEi')
            write_data_1columnpermonth(xlSheet,
                                       WUEi,
                                       ts,
                                       format_string='0.00000')
            # now do WUE for each day ...
            Fe, f, a = pfp_utils.GetSeriesasMA(ds, 'Fe', si=si, ei=ei)
            Fc, f, a = pfp_utils.GetSeriesasMA(ds, 'Fc', si=si, ei=ei)
            WUE = Fc / Fe
            WUE = numpy.ma.filled(
                numpy.ma.masked_where((WUE > upr) | (WUE < lwr), WUE),
                float(c.missing_value))
            WUE_daily = WUE.reshape(nDays, ntsInDay)
            xlSheet = xlFile.add_sheet('WUE(day)')
            write_data_1columnpertimestep(xlSheet,
                                          WUE_daily,
                                          ts,
                                          startdate=ldt[0],
                                          format_string='0.00000')
            WUEi = do_2dinterpolation(WUE_daily)
            xlSheet = xlFile.add_sheet('WUEi(day)')
            write_data_1columnpertimestep(xlSheet,
                                          WUEi,
                                          ts,
                                          startdate=ldt[0],
                                          format_string='0.00000')
        else:
            logger.warning(" Requested variable " + ThisOne +
                           " not in data structure")
            continue
    logger.info(" Saving Excel file " + os.path.split(xl_filename)[1])
    xlFile.save(xl_filename)
Code example #15
def rpLT_createdict(cf, ds, series):
    """
    Purpose:
     Creates a dictionary in ds to hold information about estimating ecosystem
     respiration using the Lloyd-Taylor method.
    Usage:
    Author: PRI
    Date: October 2015
    """
    # get the section of the control file containing the series
    section = pfp_utils.get_cfsection(cf, series=series, mode="quiet")
    # return without doing anything if the series isn't in a control file section
    if len(section) == 0:
        logger.error("ERUsingLloydTaylor: Series " + series +
                     " not found in control file, skipping ...")
        return
    # check that none of the drivers have missing data
    driver_list = ast.literal_eval(
        cf[section][series]["ERUsingLloydTaylor"]["drivers"])
    target = cf[section][series]["ERUsingLloydTaylor"]["target"]
    for label in driver_list:
        data, flag, attr = pfp_utils.GetSeriesasMA(ds, label)
        if numpy.ma.count_masked(data) != 0:
            logger.error("ERUsingLloydTaylor: driver " + label +
                         " contains missing data, skipping target " + target)
            return
    # create the dictionary keys for this series
    rpLT_info = {}
    # site name
    rpLT_info["site_name"] = ds.globalattributes["site_name"]
    # source series for ER
    opt = pfp_utils.get_keyvaluefromcf(cf,
                                       [section, series, "ERUsingLloydTaylor"],
                                       "source",
                                       default="Fc")
    rpLT_info["source"] = opt
    # target series name
    rpLT_info["target"] = cf[section][series]["ERUsingLloydTaylor"]["target"]
    # list of drivers
    rpLT_info["drivers"] = ast.literal_eval(
        cf[section][series]["ERUsingLloydTaylor"]["drivers"])
    # name of SOLO output series in ds
    rpLT_info["output"] = cf[section][series]["ERUsingLloydTaylor"]["output"]
    # results of best fit for plotting later on
    rpLT_info["results"] = {
        "startdate": [],
        "enddate": [],
        "No. points": [],
        "r": [],
        "Bias": [],
        "RMSE": [],
        "Frac Bias": [],
        "NMSE": [],
        "Avg (obs)": [],
        "Avg (LT)": [],
        "Var (obs)": [],
        "Var (LT)": [],
        "Var ratio": [],
        "m_ols": [],
        "b_ols": []
    }
    # create the configuration dictionary
    rpLT_info["configs_dict"] = get_configs_dict(cf, ds)
    # create an empty series in ds if the output series doesn't exist yet
    if rpLT_info["output"] not in ds.series.keys():
        data, flag, attr = pfp_utils.MakeEmptySeries(ds, rpLT_info["output"])
        pfp_utils.CreateSeries(ds, rpLT_info["output"], data, flag, attr)
    # create the merge directory in the data structure
    if "merge" not in dir(ds): ds.merge = {}
    if "standard" not in ds.merge.keys(): ds.merge["standard"] = {}
    # create the dictionary keys for this series
    ds.merge["standard"][series] = {}
    # output series name
    ds.merge["standard"][series]["output"] = series
    # source
    ds.merge["standard"][series]["source"] = ast.literal_eval(
        cf[section][series]["MergeSeries"]["Source"])
    # create an empty series in ds if the output series doesn't exist yet
    if ds.merge["standard"][series]["output"] not in ds.series.keys():
        data, flag, attr = pfp_utils.MakeEmptySeries(
            ds, ds.merge["standard"][series]["output"])
        pfp_utils.CreateSeries(ds, ds.merge["standard"][series]["output"],
                               data, flag, attr)
    return rpLT_info