Example #1
0
def GapFillUsingInterpolation(cf, ds):
    """
    Purpose:
     Gap fill variables in the data structure using interpolation.
     All variables in the [Variables], [Drivers] and [Fluxes] section
     are processed.
    Usage:
     qcgf.GapFillUsingInterpolation(cf,ds)
     where cf is a control file object
           ds is a data structure
    Author: PRI
    Date: September 2016
    """
    label_list = qcutils.get_label_list_from_cf(cf)
    # global maximum gap length to fill by interpolation, 0 disables the check
    maxlen = int(
        qcutils.get_keyvaluefromcf(cf, ["Options"],
                                   "MaxGapInterpolate",
                                   default=2))
    if maxlen == 0:
        msg = " Gap fill by interpolation disabled in control file"
        logger.info(msg)
        return
    for label in label_list:
        section = qcutils.get_cfsection(cf, series=label)
        # use a per-label copy of the global maximum so a per-variable
        # override does not leak into subsequent labels
        label_maxlen = maxlen
        if "MaxGapInterpolate" in cf[section][label]:
            # per-variable override of the global maximum gap length
            label_maxlen = int(
                qcutils.get_keyvaluefromcf(cf, [section, label],
                                           "MaxGapInterpolate",
                                           default=2))
            if label_maxlen == 0:
                msg = " Gap fill by interpolation disabled for " + label
                logger.info(msg)
                continue
        # interpolate over gaps up to label_maxlen long; previously this was
        # hard-coded to maxlen=2 and only executed for variables that had a
        # per-variable MaxGapInterpolate entry
        qcts.InterpolateOverMissing(ds, series=label, maxlen=label_maxlen)
Example #2
0
def rpLT_createdict(cf,ds,series):
    """
    Purpose:
     Creates a dictionary in ds to hold information about estimating ecosystem
     respiration using the Lloyd-Taylor method.
    Usage:
     rpLT_createdict(cf, ds, series)
     where cf is a control file object
           ds is a data structure
           series is the label of the series to be processed
    Author: PRI
    Date: October 2015
    """
    # get the section of the control file containing the series
    section = qcutils.get_cfsection(cf,series=series,mode="quiet")
    # return without doing anything if the series isn't in a control file section
    if len(section)==0:
        log.error("ERUsingLloydTaylor: Series "+series+" not found in control file, skipping ...")
        return
    # check that none of the drivers have missing data
    driver_list = ast.literal_eval(cf[section][series]["ERUsingLloydTaylor"]["drivers"])
    target = cf[section][series]["ERUsingLloydTaylor"]["target"]
    for label in driver_list:
        data,flag,attr = qcutils.GetSeriesasMA(ds,label)
        if numpy.ma.count_masked(data)!=0:
            log.error("ERUsingLloydTaylor: driver "+label+" contains missing data, skipping target "+target)
            return
    # create the Lloyd-Taylor directory in the data structure
    if "rpLT" not in dir(ds): ds.rpLT = {}
    # create the dictionary keys for this series
    ds.rpLT[series] = {}
    # site name
    ds.rpLT[series]["site_name"] = ds.globalattributes["site_name"]
    # target series name; re-use the value read above instead of re-reading the control file
    ds.rpLT[series]["target"] = target
    # list of drivers; re-use the list parsed above instead of re-parsing the control file
    ds.rpLT[series]["drivers"] = driver_list
    # name of SOLO output series in ds
    ds.rpLT[series]["output"] = cf[section][series]["ERUsingLloydTaylor"]["output"]
    # results of best fit for plotting later on
    ds.rpLT[series]["results"] = {"startdate":[],"enddate":[],"No. points":[],"r":[],
                                  "Bias":[],"RMSE":[],"Frac Bias":[],"NMSE":[],
                                  "Avg (obs)":[],"Avg (LT)":[],
                                  "Var (obs)":[],"Var (LT)":[],"Var ratio":[],
                                  "m_ols":[],"b_ols":[]}
    # create the configuration dictionary
    ds.rpLT[series]["configs_dict"] = get_configs_dict(cf,ds)
    # create an empty series in ds if the output series doesn't exist yet
    if ds.rpLT[series]["output"] not in ds.series.keys():
        data,flag,attr = qcutils.MakeEmptySeries(ds,ds.rpLT[series]["output"])
        qcutils.CreateSeries(ds,ds.rpLT[series]["output"],data,Flag=flag,Attr=attr)
    # create the merge directory in the data structure
    if "merge" not in dir(ds): ds.merge = {}
    if "standard" not in ds.merge.keys(): ds.merge["standard"] = {}
    # create the dictionary keys for this series
    ds.merge["standard"][series] = {}
    # output series name
    ds.merge["standard"][series]["output"] = series
    # list of source series to be merged
    ds.merge["standard"][series]["source"] = ast.literal_eval(cf[section][series]["MergeSeries"]["Source"])
    # create an empty series in ds if the output series doesn't exist yet
    if ds.merge["standard"][series]["output"] not in ds.series.keys():
        data,flag,attr = qcutils.MakeEmptySeries(ds,ds.merge["standard"][series]["output"])
        qcutils.CreateSeries(ds,ds.merge["standard"][series]["output"],data,Flag=flag,Attr=attr)
Example #3
0
def GapFillFluxUsingMDS(cf, ds, series=""):
    """Placeholder for MDS gap filling of a single flux series (not implemented)."""
    # locate the control file section that holds this series
    section = qcutils.get_cfsection(cf, series=series, mode="quiet")
    # nothing to do if the series is not in the control file
    if not section:
        return
    # MDS gap filling is requested but not implemented; log and bail out
    if "GapFillFluxUsingMDS" in cf[section][series].keys():
        logger.info(" GapFillFluxUsingMDS: not implemented yet")
        return
Example #4
0
def do_dependencycheck(cf,ds,section='',series='',code=23,mode="quiet"):
    """
    Purpose:
     Mask the data of a series wherever any of its precursor ("Source") series
     are masked and set the QC flag of the rejected points to "code".
    Usage:
     do_dependencycheck(cf,ds,section=section,series=series,code=code)
     where cf is a control file object
           ds is a data structure
           section is the control file section containing the series
           series is the label of the series to be checked
           code is the QC flag value assigned to rejected points
    Author: PRI
    """
    if len(section)==0 and len(series)==0: return
    if len(section)==0: section = qcutils.get_cfsection(cf,series=series,mode='quiet')
    if "DependencyCheck" not in cf[section][series].keys(): return
    if "Source" not in cf[section][series]["DependencyCheck"]:
        msg = " DependencyCheck: keyword Source not found for series "+series+", skipping ..."
        log.error(msg)
        return
    if mode=="verbose":
        msg = " Doing DependencyCheck for "+series
        log.info(msg)
    # get the precursor source list from the control file
    source_list = ast.literal_eval(cf[section][series]["DependencyCheck"]["Source"])
    # get the data
    dependent_data,dependent_flag,dependent_attr = qcutils.GetSeriesasMA(ds,series)
    # loop over the precursor source list
    for item in source_list:
        # check the precursor is in the data structure
        if item not in ds.series.keys():
            msg = " DependencyCheck: "+series+" precursor series "+item+" not found, skipping ..."
            # the message was previously built but never logged
            log.error(msg)
            continue
        # get the precursor data
        precursor_data,precursor_flag,precursor_attr = qcutils.GetSeriesasMA(ds,item)
        # mask the dependent data where the precursor is masked
        dependent_data = numpy.ma.masked_where(numpy.ma.getmaskarray(precursor_data)==True,dependent_data)
        # get an index of masked precursor data
        index = numpy.ma.where(numpy.ma.getmaskarray(precursor_data)==True)[0]
        # set the dependent QC flag
        dependent_flag[index] = numpy.int32(code)
    # put the data back into the data structure
    dependent_attr["DependencyCheck_source"] = str(source_list)
    qcutils.CreateSeries(ds,series,dependent_data,Flag=dependent_flag,Attr=dependent_attr)
    # record that the dependency check has been applied
    if 'do_dependencychecks' not in ds.globalattributes['Functions']:
        ds.globalattributes['Functions'] = ds.globalattributes['Functions']+',do_dependencychecks'
Example #5
0
def do_qcchecks_oneseries(cf,ds,series=''):
    """
    Purpose:
     Apply the range, diurnal, exclude dates and exclude hours QC checks to a
     single series, using QC flag codes appropriate to the processing level.
    Usage:
     do_qcchecks_oneseries(cf,ds,series=series)
     where cf is a control file object
           ds is a data structure
           series is the label of the series to be checked
    """
    section = qcutils.get_cfsection(cf,series=series,mode='quiet')
    if len(section)==0: return
    level = ds.globalattributes['nc_level']
    # default flag codes (these are also the L2 values); previously an
    # unrecognised level left these names undefined and raised a NameError
    range_code        = 2
    diurnal_code      = 3
    excludedates_code = 6
    excludehours_code = 7
    if level == 'L3':
        range_code        = 16
        diurnal_code      = 17
    if level == 'L4':
        range_code        = 38
        diurnal_code      = 39
    # do the range check
    do_rangecheck(cf,ds,section=section,series=series,code=range_code)
    # do the diurnal check
    do_diurnalcheck(cf,ds,section=section,series=series,code=diurnal_code)
    # do exclude dates
    do_excludedates(cf,ds,section=section,series=series,code=excludedates_code)
    # do exclude hours
    do_excludehours(cf,ds,section=section,series=series,code=excludehours_code)
    # record that the QC checks have been applied
    if 'do_qcchecks' not in ds.globalattributes['Functions']:
        ds.globalattributes['Functions'] = ds.globalattributes['Functions']+',do_qcchecks'
Example #6
0
def gfMergeSeries_createdict(cf, ds, series):
    """ Creates a dictionary in ds to hold information about the merging of gap filled
        and tower data."""
    # series that must be merged before all others so they are available when
    # Fg, Fn and Fa are re-calculated from existing tower data
    merge_prereq_list = ["Fsd", "Fsu", "Fld", "Flu", "Ts", "Sws"]
    # get the section of the control file containing the series
    section = qcutils.get_cfsection(cf, series=series, mode="quiet")
    # create the merge directory in the data structure
    if "merge" not in dir(ds): ds.merge = {}
    # check to see if this series is in the "merge first" list
    # series in the "merge first" list get merged first so they can be used with existing tower
    # data to re-calculate Fg, Fn and Fa
    merge_order = "standard"
    if series in merge_prereq_list: merge_order = "prerequisite"
    if merge_order not in ds.merge.keys(): ds.merge[merge_order] = {}
    # create the dictionary keys for this series
    ds.merge[merge_order][series] = {}
    # output series name
    ds.merge[merge_order][series]["output"] = series
    # list of source series to be merged (NB: not the site name as a previous
    # comment claimed)
    ds.merge[merge_order][series]["source"] = ast.literal_eval(
        cf[section][series]["MergeSeries"]["Source"])
    # create an empty series in ds if the output series doesn't exist yet
    # NOTE(review): CreateSeries is called with positional flag/attr here but
    # with Flag=/Attr= keywords elsewhere in this file - confirm argument order
    if ds.merge[merge_order][series]["output"] not in ds.series.keys():
        data, flag, attr = qcutils.MakeEmptySeries(
            ds, ds.merge[merge_order][series]["output"])
        qcutils.CreateSeries(ds, ds.merge[merge_order][series]["output"], data,
                             flag, attr)
Example #7
0
def gfClimatology_createdict(cf, ds, series):
    """ Creates a dictionary in ds to hold information about the climatological data used
        to gap fill the tower data."""
    # find the control file section holding this series
    section = qcutils.get_cfsection(cf, series=series, mode="quiet")
    # bail out if the series does not appear in the control file
    if len(section) == 0:
        logger.error(
            "GapFillFromClimatology: Series %s not found in control file, skipping ...",
            series)
        return
    # make sure the climatology directory exists in the data structure
    if "climatology" not in dir(ds):
        ds.climatology = {}
    # short-hand for the climatology settings of this series
    gfc = cf[section][series]["GapFillFromClimatology"]
    # process each output listed in the control file
    for output in gfc.keys():
        entry = {}
        ds.climatology[output] = entry
        entry["label_tower"] = series
        # site name
        entry["site_name"] = ds.globalattributes["site_name"]
        # climatology file name: check [Files] for a generic entry first
        file_list = cf["Files"].keys()
        lower_file_list = [item.lower() for item in file_list]
        if "climatology" in lower_file_list:
            # generic file name found in the [Files] section
            idx = lower_file_list.index("climatology")
            entry["file_name"] = cf["Files"][file_list[idx]]
        else:
            # otherwise use the file name given in the variable section
            entry["file_name"] = gfc[output]["file_name"]
        # climatology variable name, defaults to the series label
        if "climatology_name" in gfc[output]:
            entry["climatology_name"] = gfc[output]["climatology_name"]
        else:
            entry["climatology_name"] = series
        # gap filling method, defaults to "interpolated_daily"
        if "method" not in gfc[output].keys():
            entry["method"] = "interpolated_daily"
        else:
            entry["method"] = gfc[output]["method"]
        # make an empty output series in ds if one does not exist yet
        if output not in ds.series.keys():
            data, flag, attr = qcutils.MakeEmptySeries(ds, output)
            qcutils.CreateSeries(ds, output, data, flag, attr)
Example #8
0
def do_qcchecks_oneseries(cf,ds,section='',series=''):
    """Apply the standard QC checks to a single series."""
    # locate the control file section if the caller did not supply one
    if len(section)==0:
        section = qcutils.get_cfsection(cf,series=series,mode='quiet')
        if len(section)==0:
            return
    # (check function, QC flag code) pairs, applied in order
    checks = [(do_rangecheck, 2),
              (do_diurnalcheck, 5),
              (do_excludedates, 6),
              (do_excludehours, 7)]
    for check, flag_code in checks:
        check(cf,ds,section=section,series=series,code=flag_code)
    # record in the global attributes that the QC checks have been run
    if 'do_qcchecks' not in ds.globalattributes['Functions']:
        ds.globalattributes['Functions'] = ds.globalattributes['Functions']+',do_qcchecks'
Example #9
0
def do_qcchecks_oneseries(cf,ds,section='',series=''):
    """Run the range, diurnal, exclude-dates and exclude-hours checks on one series."""
    # resolve the control file section when not supplied by the caller
    if not section:
        section = qcutils.get_cfsection(cf,series=series,mode='quiet')
        if not section:
            return
    # range check, flag code 2
    do_rangecheck(cf,ds,section=section,series=series,code=2)
    # diurnal check, flag code 5
    do_diurnalcheck(cf,ds,section=section,series=series,code=5)
    # exclude dates, flag code 6
    do_excludedates(cf,ds,section=section,series=series,code=6)
    # exclude hours, flag code 7
    do_excludehours(cf,ds,section=section,series=series,code=7)
    # note in the global attributes that the QC checks were done
    functions = ds.globalattributes['Functions']
    if 'do_qcchecks' not in functions:
        ds.globalattributes['Functions'] = functions+',do_qcchecks'
Example #10
0
def UpdateVariableAttributes_QC(cf, variable):
    """
    Purpose:
     Copy the RangeCheck limits for a variable from the control file into the
     variable's attributes as "rangecheck_lower" and "rangecheck_upper".
    Usage:
     UpdateVariableAttributes_QC(cf, variable)
     where cf is a control file object
           variable is a variable dictionary with "Label" and "Attr" entries
    Side effects:
     Modifies variable["Attr"] in place.
    Author: PRI
    Date: November 2016
    """
    label = variable["Label"]
    section = qcutils.get_cfsection(cf,series=label,mode='quiet')
    # return if the label is not in any control file section; previously an
    # empty section string caused a KeyError on cf[section] below
    if len(section) == 0:
        return
    if label not in cf[section]:
        return
    if "RangeCheck" not in cf[section][label]:
        return
    if "Lower" in cf[section][label]["RangeCheck"]:
        variable["Attr"]["rangecheck_lower"] = cf[section][label]["RangeCheck"]["Lower"]
    if "Upper" in cf[section][label]["RangeCheck"]:
        variable["Attr"]["rangecheck_upper"] = cf[section][label]["RangeCheck"]["Upper"]
    return
Example #11
0
def do_qcchecks_oneseries(cf,ds,series=''):
    """
    Purpose:
     Apply the range, diurnal, exclude dates and exclude hours QC checks to a
     single series, using QC flag codes appropriate to the processing level.
    Usage:
     do_qcchecks_oneseries(cf,ds,series=series)
     where cf is a control file object
           ds is a data structure
           series is the label of the series to be checked
    """
    section = qcutils.get_cfsection(cf,series=series,mode='quiet')
    if len(section)==0: return
    level = ds.globalattributes['nc_level']
    # default flag codes (these are also the L2 values); previously an
    # unrecognised level left these names undefined and raised a NameError
    range_code        = 2
    diurnal_code      = 3
    excludedates_code = 6
    excludehours_code = 7
    if level == 'L3':
        range_code        = 16
        diurnal_code      = 17
    if level == 'L4':
        range_code        = 82
        diurnal_code      = 83
    if level == 'L5':
        range_code        = 84
        diurnal_code      = 85
    if level == 'L6':
        range_code        = 86
        diurnal_code      = 87
    # do the range check
    do_rangecheck(cf,ds,section=section,series=series,code=range_code)
    # do the diurnal check
    do_diurnalcheck(cf,ds,section=section,series=series,code=diurnal_code)
    # do exclude dates
    do_excludedates(cf,ds,section=section,series=series,code=excludedates_code)
    # do exclude hours
    do_excludehours(cf,ds,section=section,series=series,code=excludehours_code)
    # record that the QC checks have been applied; catch only KeyError (the
    # 'Functions' global attribute may not exist yet) instead of a bare except
    try:
        if 'do_qcchecks' not in ds.globalattributes['Functions']:
            ds.globalattributes['Functions'] = ds.globalattributes['Functions']+',do_qcchecks'
    except KeyError:
        ds.globalattributes['Functions'] = 'do_qcchecks'
Example #12
0
def GapFillParseControlFile(cf, ds, series, ds_alt):
    """Create the gap filling dictionaries requested for a series in the control file."""
    # find the section containing the series
    section = qcutils.get_cfsection(cf, series=series, mode="quiet")
    # return empty handed if the series is not in a section
    if len(section) == 0:
        return
    # look up the requested gap filling methods once
    series_keys = cf[section][series].keys()
    if "GapFillFromAlternate" in series_keys:
        # create the alternate dictionary in ds
        gfalternate_createdict(cf, ds, series, ds_alt)
    if "GapFillUsingSOLO" in series_keys:
        # create the SOLO dictionary in ds
        gfSOLO_createdict(cf, ds, series)
    if "GapFillUsingMDS" in series_keys:
        # create the MDS dictionary in ds
        gfMDS_createdict(cf, ds, series)
    if "GapFillFromClimatology" in series_keys:
        # create the climatology dictionary in the data structure
        gfClimatology_createdict(cf, ds, series)
    if "MergeSeries" in series_keys:
        # create the merge series dictionary in the data structure
        gfMergeSeries_createdict(cf, ds, series)
Example #13
0
def do_qcchecks_oneseries(cf, ds, section, series):
    """Apply the full set of QC checks to a single series."""
    # resolve the control file section if one was not supplied
    if len(section) == 0:
        section = qcutils.get_cfsection(cf, series=series, mode='quiet')
        if len(section) == 0:
            return
    # range check
    do_rangecheck(cf, ds, section, series, code=2)
    # lower and upper range checks share the range check flag code
    do_lowercheck(cf, ds, section, series, code=2)
    do_uppercheck(cf, ds, section, series, code=2)
    # diurnal check
    do_diurnalcheck(cf, ds, section, series, code=5)
    # EP QC flag check
    do_EPQCFlagCheck(cf, ds, section, series, code=9)
    # exclude dates and exclude hours
    do_excludedates(cf, ds, section, series, code=6)
    do_excludehours(cf, ds, section, series, code=7)
    # wind direction corrections
    do_winddirectioncorrection(cf, ds, section, series)
    # record that the QC checks have been applied
    funcs = ds.globalattributes['Functions']
    if 'do_qcchecks' not in funcs:
        ds.globalattributes['Functions'] = funcs + ',do_qcchecks'
Example #14
0
def rpLL_createdict(cf, ds, series):
    """
    Purpose:
     Creates a dictionary in ds to hold information about estimating ecosystem
     respiration using the Lasslop method.
    Usage:
     rpLL_createdict(cf, ds, series)
     where cf is a control file object
           ds is a data structure
           series is the label of the series to be processed
    Author: PRI
    Date: April 2016
    """
    # get the section of the control file containing the series
    section = qcutils.get_cfsection(cf, series=series, mode="quiet")
    # return without doing anything if the series isn't in a control file section
    if len(section) == 0:
        log.error("ERUsingLasslop: Series " + series +
                  " not found in control file, skipping ...")
        return
    # check that none of the drivers have missing data
    driver_list = ast.literal_eval(
        cf[section][series]["ERUsingLasslop"]["drivers"])
    target = cf[section][series]["ERUsingLasslop"]["target"]
    for label in driver_list:
        data, flag, attr = qcutils.GetSeriesasMA(ds, label)
        if numpy.ma.count_masked(data) != 0:
            log.error("ERUsingLasslop: driver " + label +
                      " contains missing data, skipping target " + target)
            return
    # create the Lasslop directory in the data structure
    if "rpLL" not in dir(ds): ds.rpLL = {}
    # create the dictionary keys for this series
    ds.rpLL[series] = {}
    # site name
    ds.rpLL[series]["site_name"] = ds.globalattributes["site_name"]
    # target series name; re-use the value read above instead of re-reading the control file
    ds.rpLL[series]["target"] = target
    # list of drivers; re-use the list parsed above instead of re-parsing the control file
    ds.rpLL[series]["drivers"] = driver_list
    # name of output series in ds
    ds.rpLL[series]["output"] = cf[section][series]["ERUsingLasslop"]["output"]
    # results of best fit for plotting later on
    ds.rpLL[series]["results"] = {
        "startdate": [],
        "enddate": [],
        "No. points": [],
        "r": [],
        "Bias": [],
        "RMSE": [],
        "Frac Bias": [],
        "NMSE": [],
        "Avg (obs)": [],
        "Avg (LT)": [],
        "Var (obs)": [],
        "Var (LT)": [],
        "Var ratio": [],
        "m_ols": [],
        "b_ols": []
    }
    # step size
    ds.rpLL[series]["step_size_days"] = int(
        cf[section][series]["ERUsingLasslop"]["step_size_days"])
    # window size
    ds.rpLL[series]["window_size_days"] = int(
        cf[section][series]["ERUsingLasslop"]["window_size_days"])
    # create an empty series in ds if the output series doesn't exist yet
    if ds.rpLL[series]["output"] not in ds.series.keys():
        data, flag, attr = qcutils.MakeEmptySeries(ds,
                                                   ds.rpLL[series]["output"])
        qcutils.CreateSeries(ds,
                             ds.rpLL[series]["output"],
                             data,
                             Flag=flag,
                             Attr=attr)
    # create the merge directory in the data structure
    if "merge" not in dir(ds): ds.merge = {}
    if "standard" not in ds.merge.keys(): ds.merge["standard"] = {}
    # create the dictionary keys for this series
    ds.merge["standard"][series] = {}
    # output series name
    ds.merge["standard"][series]["output"] = series
    # list of source series to be merged
    ds.merge["standard"][series]["source"] = ast.literal_eval(
        cf[section][series]["MergeSeries"]["Source"])
    # create an empty series in ds if the output series doesn't exist yet
    if ds.merge["standard"][series]["output"] not in ds.series.keys():
        data, flag, attr = qcutils.MakeEmptySeries(
            ds, ds.merge["standard"][series]["output"])
        qcutils.CreateSeries(ds,
                             ds.merge["standard"][series]["output"],
                             data,
                             Flag=flag,
                             Attr=attr)
Example #15
0
def gfalternate_createdict(cf, ds, series, ds_alt):
    """
    Purpose:
     Creates a dictionary in ds to hold information about the alternate data used to gap fill the tower data.
    Usage:
     gfalternate_createdict(cf, ds, series, ds_alt)
     where cf is a control file object
           ds is the tower data structure
           series is the label of the tower series to be gap filled
           ds_alt is a dictionary of alternate data structures keyed by file name
    Side effects:
     Populates ds.alternate[output] for each output listed in the control file,
     reads alternate data files into ds_alt and creates empty output series in ds.
    Author: PRI
    Date: August 2014
    """
    # get the section of the control file containing the series
    section = qcutils.get_cfsection(cf, series=series, mode="quiet")
    # return without doing anything if the series isn't in a control file section
    if len(section) == 0:
        logger.error(
            "GapFillFromAlternate: Series %s not found in control file, skipping ...",
            series)
        return
    # create the alternate directory in the data structure
    if "alternate" not in dir(ds):
        ds.alternate = {}
    # name of alternate output series in ds
    output_list = cf[section][series]["GapFillFromAlternate"].keys()
    # loop over the outputs listed in the control file
    for output in output_list:
        # create the dictionary keys for this output
        ds.alternate[output] = {}
        ds.alternate[output]["label_tower"] = series
        # source name
        ds.alternate[output]["source"] = cf[section][series][
            "GapFillFromAlternate"][output]["source"]
        # site name
        ds.alternate[output]["site_name"] = ds.globalattributes["site_name"]
        # alternate data file name
        # first, look in the [Files] section for a generic file name
        file_list = cf["Files"].keys()
        lower_file_list = [item.lower() for item in file_list]
        if ds.alternate[output]["source"].lower() in lower_file_list:
            # found a generic file name
            i = lower_file_list.index(ds.alternate[output]["source"].lower())
            ds.alternate[output]["file_name"] = cf["Files"][file_list[i]]
        else:
            # no generic file name found, look for a file name in the variable section
            ds.alternate[output]["file_name"] = cf[section][series][
                "GapFillFromAlternate"][output]["file_name"]
        # if the file has not already been read, do it now
        # (ds_alt caches one data structure per alternate file name)
        if ds.alternate[output]["file_name"] not in ds_alt:
            ds_alternate = qcio.nc_read_series(
                ds.alternate[output]["file_name"], fixtimestepmethod="round")
            # align the alternate data with the tower time series
            gfalternate_matchstartendtimes(ds, ds_alternate)
            ds_alt[ds.alternate[output]["file_name"]] = ds_alternate
        # get the type of fit, default is ordinary least squares
        ds.alternate[output]["fit_type"] = "OLS"
        if "fit" in cf[section][series]["GapFillFromAlternate"][output]:
            if cf[section][series]["GapFillFromAlternate"][output][
                    "fit"].lower() in [
                        "ols", "ols_thru0", "mrev", "replace", "rma", "odr"
                    ]:
                ds.alternate[output]["fit_type"] = cf[section][series][
                    "GapFillFromAlternate"][output]["fit"]
            else:
                # unrecognised fit option, keep the OLS default
                logger.info(
                    "gfAlternate: unrecognised fit option for series %s, used OLS",
                    output)
        # correct for lag? default is "yes"
        if "lag" in cf[section][series]["GapFillFromAlternate"][output]:
            if cf[section][series]["GapFillFromAlternate"][output][
                    "lag"].lower() in ["no", "false"]:
                ds.alternate[output]["lag"] = "no"
            elif cf[section][series]["GapFillFromAlternate"][output][
                    "lag"].lower() in ["yes", "true"]:
                ds.alternate[output]["lag"] = "yes"
            else:
                # NOTE: an unrecognised lag option is logged but leaves
                # ds.alternate[output]["lag"] unset
                logger.info(
                    "gfAlternate: unrecognised lag option for series %s",
                    output)
        else:
            ds.alternate[output]["lag"] = "yes"
        # choose specific alternate variable?
        if "usevars" in cf[section][series]["GapFillFromAlternate"][output]:
            ds.alternate[output]["usevars"] = ast.literal_eval(
                cf[section][series]["GapFillFromAlternate"][output]["usevars"])
        # alternate data variable name if different from name used in control file
        if "alternate_name" in cf[section][series]["GapFillFromAlternate"][
                output]:
            ds.alternate[output]["alternate_name"] = cf[section][series][
                "GapFillFromAlternate"][output]["alternate_name"]
        else:
            ds.alternate[output]["alternate_name"] = series
        # results of best fit for plotting later on
        ds.alternate[output]["results"] = {
            "startdate": [],
            "enddate": [],
            "No. points": [],
            "No. filled": [],
            "r": [],
            "Bias": [],
            "RMSE": [],
            "Frac Bias": [],
            "NMSE": [],
            "Avg (Tower)": [],
            "Avg (Alt)": [],
            "Var (Tower)": [],
            "Var (Alt)": [],
            "Var ratio": []
        }
        # create an empty series in ds if the alternate output series doesn't exist yet
        # (also creates the "<series>_composite" series used to merge outputs)
        if output not in ds.series.keys():
            data, flag, attr = qcutils.MakeEmptySeries(ds, output)
            qcutils.CreateSeries(ds, output, data, flag, attr)
            qcutils.CreateSeries(ds, series + "_composite", data, flag, attr)
Example #16
0
def gfMDS_createdict(cf, ds, series):
    """
    Purpose:
     Create an information dictionary for MDS gap filling from the contents
     of the control file.
    Usage:
     info["MDS"] = gfMDS_createdict(cf)
    Author: PRI
    Date: May 2018
    """
    # get the section of the control file containing the series
    section = qcutils.get_cfsection(cf, series=series, mode="quiet")
    # return without doing anything if the series isn't in a control file section
    if len(section) == 0:
        logger.error(
            "GapFillUsingMDS: Series %s not found in control file, skipping ...",
            series)
        return
    # create the MDS attribute (a dictionary) in ds, this will hold all MDS settings
    if "mds" not in dir(ds):
        ds.mds = {}
    # name of MDS output series in ds
    output_list = cf[section][series]["GapFillUsingMDS"].keys()
    # loop over the outputs listed in the control file
    for output in output_list:
        # create the dictionary keys for this series
        ds.mds[output] = {}
        # get the target, default is the series itself
        if "target" in cf[section][series]["GapFillUsingMDS"][output]:
            ds.mds[output]["target"] = cf[section][series]["GapFillUsingMDS"][
                output]["target"]
        else:
            ds.mds[output]["target"] = series
        # site name
        ds.mds[output]["site_name"] = ds.globalattributes["site_name"]
        # list of MDS settings
        # NOTE(review): mdss_list is parsed but never used - confirm whether
        # the settings were meant to be stored in ds.mds[output]
        if "mds_settings" in cf[section][series]["GapFillUsingMDS"][output]:
            mdss_list = ast.literal_eval(
                cf[section][series]["GapFillUsingMDS"][output]["mds_settings"])
        # list of drivers
        ds.mds[output]["drivers"] = ast.literal_eval(
            cf[section][series]["GapFillUsingMDS"][output]["drivers"])
        # list of tolerances
        ds.mds[output]["tolerances"] = ast.literal_eval(
            cf[section][series]["GapFillUsingMDS"][output]["tolerances"])
        # get the ustar filter option
        opt = qcutils.get_keyvaluefromcf(
            cf, [section, series, "GapFillUsingMDS", output],
            "turbulence_filter",
            default="")
        ds.mds[output]["turbulence_filter"] = opt
        # get the day/night filter option
        opt = qcutils.get_keyvaluefromcf(
            cf, [section, series, "GapFillUsingMDS", output],
            "daynight_filter",
            default="")
        ds.mds[output]["daynight_filter"] = opt

    # check that all requested targets and drivers have a mapping to
    # a FluxNet label, remove if they don't
    fluxnet_label_map = {
        "Fc": "NEE",
        "Fe": "LE",
        "Fh": "H",
        "Fsd": "SW_IN",
        "Ta": "TA",
        "VPD": "VPD"
    }
    # iterate over a copy of the keys because entries may be deleted below;
    # deleting while iterating the dict itself raises a RuntimeError
    for mds_label in list(ds.mds):
        ds.mds[mds_label]["mds_label"] = mds_label
        pfp_target = ds.mds[mds_label]["target"]
        if pfp_target not in fluxnet_label_map:
            msg = " Target (" + pfp_target + ") not supported for MDS gap filling"
            logger.warning(msg)
            del ds.mds[mds_label]
            # move on to the next label; the original code fell through and
            # raised a KeyError when it accessed the entry just deleted
            continue
        ds.mds[mds_label]["target_mds"] = fluxnet_label_map[pfp_target]
        # iterate over a copy of the driver list because unsupported drivers
        # are removed from the original list as we go
        for pfp_driver in list(ds.mds[mds_label]["drivers"]):
            if pfp_driver not in fluxnet_label_map:
                msg = "Driver (" + pfp_driver + ") not supported for MDS gap filling"
                logger.warning(msg)
                ds.mds[mds_label]["drivers"].remove(pfp_driver)
            else:
                if "drivers_mds" not in ds.mds[mds_label]:
                    ds.mds[mds_label]["drivers_mds"] = []
                ds.mds[mds_label]["drivers_mds"].append(
                    fluxnet_label_map[pfp_driver])
        # drop the entry altogether if no usable drivers remain
        if len(ds.mds[mds_label]["drivers"]) == 0:
            del ds.mds[mds_label]
    return
Example #17
0
def plottimeseries(cf, nFig, dsa, dsb, si, ei):
    """
    Purpose:
     Plot the series listed in the plot setup dictionary as stacked panels:
     the dsa data on the left Y axis, the dsb data on the right Y axis, plus
     a diurnal-average panel and a QC-flag histogram derived from the dsb
     data.  Optionally writes the figure to a PNG file if [Output] PNGFile
     is set to 'True' in the control file.
    Usage:
     plottimeseries(cf, nFig, dsa, dsb, si, ei)
     where cf is a control file object
           nFig is the figure number
           dsa is the data structure plotted on the left axis
           dsb is the data structure plotted on the right axis
           si, ei are the start and end indices of the data to plot
    """
    SiteName = dsa.globalattributes['site_name']
    # NOTE(review): Level is assigned but never used in this function
    Level = dsb.globalattributes['nc_level']
    # time step (minutes), passed to qcutils.get_diurnalstats below
    dt = numpy.int32(dsa.globalattributes['time_step'])
    # month of the first record; selects the monthly NumSd value below
    Month = dsa.series['Month']['Data'][0]
    p = plot_setup(cf, nFig)
    # override the default figure size if both Width and Height are given
    # in the [PlotSpec] section of the control file
    if qcutils.cfkeycheck(cf, Base='PlotSpec',
                          ThisOne='Width') and qcutils.cfkeycheck(
                              cf, Base='PlotSpec', ThisOne='Height'):
        p['PlotWidth'] = numpy.float64(cf['PlotSpec']['Width'])
        p['PlotHeight'] = numpy.float64(cf['PlotSpec']['Height'])
    log.info(' Plotting series: ' + str(p['SeriesList']))
    L1XArray = numpy.array(dsa.series['DateTime']['Data'][si:ei])
    L2XArray = numpy.array(dsb.series['DateTime']['Data'][si:ei])
    # X axis limits are taken from the dsb datetime range
    p['XAxMin'] = min(L2XArray)
    p['XAxMax'] = max(L2XArray)
    p['loc'], p['fmt'] = get_ticks(p['XAxMin'], p['XAxMax'])
    # interactive mode off while the figure is assembled
    plt.ioff()
    fig = plt.figure(numpy.int32(nFig),
                     figsize=(p['PlotWidth'], p['PlotHeight']))
    fig.clf()
    plt.figtext(0.5,
                0.95,
                SiteName + ': ' + p['PlotDescription'],
                ha='center',
                size=16)
    # one panel per series, stacked vertically (offset via yaxOrgOffset)
    for ThisOne, n in zip(p['SeriesList'], range(p['nGraphs'])):
        if ThisOne in dsa.series.keys():
            # NOTE(review): aflag is assigned but never used
            aflag = dsa.series[ThisOne]['Flag']
            p['Units'] = dsa.series[ThisOne]['Attr']['units']
            p['YAxOrg'] = p['ts_YAxOrg'] + n * p['yaxOrgOffset']
            L1YArray, p['nRecs'], p['nNotM'], p['nMskd'] = get_yarray(dsa,
                                                                      ThisOne,
                                                                      si=si,
                                                                      ei=ei)
            # check the control file to see if the Y axis minima have been specified
            nSer = p['SeriesList'].index(ThisOne)
            p['LYAxMax'], p['LYAxMin'] = get_yaxislimitsfromcf(
                cf, nFig, 'YLMax', 'YLMin', nSer, L1YArray)
            plot_onetimeseries_left(fig, n, ThisOne, L1XArray, L1YArray, p)
        if ThisOne in dsb.series.keys():
            # bflag is used below for the flag-frequency histogram
            bflag = dsb.series[ThisOne]['Flag']
            p['Units'] = dsb.series[ThisOne]['Attr']['units']
            p['YAxOrg'] = p['ts_YAxOrg'] + n * p['yaxOrgOffset']
            #Plot the Level 2 data series on the same X axis but with the scale on the right Y axis.
            L2YArray, p['nRecs'], p['nNotM'], p['nMskd'] = get_yarray(dsb,
                                                                      ThisOne,
                                                                      si=si,
                                                                      ei=ei)
            # check the control file to see if the Y axis minima have been specified
            nSer = p['SeriesList'].index(ThisOne)
            p['RYAxMax'], p['RYAxMin'] = get_yaxislimitsfromcf(
                cf, nFig, 'YRMax', 'YRMin', nSer, L2YArray)
            plot_onetimeseries_right(fig, n, ThisOne, L2XArray, L2YArray, p)

            #Plot the diurnal averages.
            Num2, Hr2, Av2, Sd2, Mx2, Mn2 = qcutils.get_diurnalstats(
                dsb.series['Hdh']['Data'][si:ei],
                dsb.series[ThisOne]['Data'][si:ei], dt)
            # mask missing values so they don't distort the diurnal curves
            Av2 = numpy.ma.masked_where(Av2 == c.missing_value, Av2)
            Sd2 = numpy.ma.masked_where(Sd2 == c.missing_value, Sd2)
            Mx2 = numpy.ma.masked_where(Mx2 == c.missing_value, Mx2)
            Mn2 = numpy.ma.masked_where(Mn2 == c.missing_value, Mn2)
            hr2_ax = fig.add_axes([
                p['hr1_XAxOrg'], p['YAxOrg'], p['hr2_XAxLen'], p['ts_YAxLen']
            ])
            # NOTE(review): Axes.hold was removed in matplotlib 3.0 — confirm
            # the pinned matplotlib version still supports it
            hr2_ax.hold(True)
            # average (yellow), max (red) and min (blue) diurnal curves
            hr2_ax.plot(Hr2, Av2, 'y-', Hr2, Mx2, 'r-', Hr2, Mn2, 'b-')
            section = qcutils.get_cfsection(cf, series=ThisOne, mode='quiet')
            if len(section) != 0:
                if 'DiurnalCheck' in cf[section][ThisOne].keys():
                    # NOTE(review): eval() on control-file text — only safe if
                    # control files are trusted; ast.literal_eval would be safer
                    NSdarr = numpy.array(eval(
                        cf[section][ThisOne]['DiurnalCheck']['NumSd']),
                                         dtype=numpy.float64)
                    # overlay the +/- NumSd*Sd rejection envelope for this month
                    nSd = NSdarr[Month - 1]
                    hr2_ax.plot(Hr2, Av2 + nSd * Sd2, 'r.', Hr2,
                                Av2 - nSd * Sd2, 'b.')
            plt.xlim(0, 24)
            plt.xticks([0, 6, 12, 18, 24])
            # only the bottom panel (n == 0) shows the X axis label and ticks
            if n == 0:
                hr2_ax.set_xlabel('Hour', visible=True)
            else:
                hr2_ax.set_xlabel('', visible=False)
                plt.setp(hr2_ax.get_xticklabels(), visible=False)
            #if n > 0: plt.setp(hr2_ax.get_xticklabels(), visible=False)

            # vertical lines to show frequency distribution of flags
            bins = numpy.arange(0.5, 23.5)
            ind = bins[:len(bins) - 1] + 0.5
            index = numpy.where(numpy.mod(
                bflag, 10) == 0)  # find the elements with flag = 0, 10, 20 etc
            bflag[index] = 0  # set them all to 0
            hist, bin_edges = numpy.histogram(bflag, bins=bins)
            ymin = hist * 0
            # small vertical offset for the count labels above each bar
            delta = 0.01 * (numpy.max(hist) - numpy.min(hist))
            bar_ax = fig.add_axes([
                p['hr2_XAxOrg'], p['YAxOrg'], p['bar_XAxLen'], p['ts_YAxLen']
            ])
            bar_ax.set_ylim(0, numpy.max(hist))
            bar_ax.vlines(ind, ymin, hist)
            # label bars holding more than 5% of the maximum count
            for i, j in zip(ind, hist):
                if j > 0.05 * numpy.max(hist):
                    bar_ax.text(i,
                                j + delta,
                                str(numpy.int32(i)),
                                ha='center',
                                size='small')
            if n == 0:
                bar_ax.set_xlabel('Flag', visible=True)
            else:
                bar_ax.set_xlabel('', visible=False)
                plt.setp(bar_ax.get_xticklabels(), visible=False)
            #if n > 0: plt.setp(bar_ax.get_xticklabels(), visible=False)
        else:
            log.error('  plttimeseries: series ' + ThisOne +
                      ' not in data structure')
    # build start/end timestamp strings (YYYY_MM_DD_HHMM) for the PNG name
    STList = []
    ETList = []
    # ei == -1 means "to the end"; re-slice so the last record is included
    if ei == -1:
        L1XArray = numpy.array(dsa.series['DateTime']['Data'][si:ei])
    else:
        L1XArray = numpy.array(dsa.series['DateTime']['Data'][si:ei + 1])
    for fmt in ['%Y', '_', '%m', '_', '%d', '_', '%H', '%M']:
        STList.append(L1XArray[0].strftime(fmt))
        if ei == -1:
            ETList.append(dsa.series['DateTime']['Data'][-1].strftime(fmt))
        else:
            ETList.append(L1XArray[-1].strftime(fmt))
    if qcutils.cfkeycheck(
            cf, Base='Output',
            ThisOne='PNGFile') and cf['Output']['PNGFile'] == 'True':
        log.info('  Generating a PNG file of the plot')
        # NOTE(review): nFig is string-concatenated here but cast with
        # numpy.int32 above — confirm callers pass it as a string
        PNGFileName = cf['Files']['PNG'][
            'PNGFilePath'] + 'Fig' + nFig + '_' + ''.join(
                STList) + '-' + ''.join(ETList) + '.png'
        plt.savefig(PNGFileName)
    fig.show()
Example #18
0
def rpLT_createdict(cf, ds, series):
    """
    Purpose:
     Creates a dictionary in ds to hold information about estimating ecosystem
     respiration using the Lloyd-Taylor method.
    Usage:
     rpLT_info = rpLT_createdict(cf, ds, series)
     where cf is a control file object
           ds is a data structure
           series is the label of the series to be processed
     Returns the information dictionary, or None if the series is not in the
     control file or any driver contains missing data.
    Author: PRI
    Date October 2015
    """
    # get the section of the control file containing the series
    section = qcutils.get_cfsection(cf, series=series, mode="quiet")
    # return without doing anything if the series isn't in a control file section
    if len(section) == 0:
        logger.error("ERUsingLloydTaylor: Series " + series +
                     " not found in control file, skipping ...")
        return
    # parse the driver list and the target once; both are re-used below
    # instead of being read from the control file a second time
    driver_list = ast.literal_eval(
        cf[section][series]["ERUsingLloydTaylor"]["drivers"])
    target = cf[section][series]["ERUsingLloydTaylor"]["target"]
    # check that none of the drivers have missing data
    for label in driver_list:
        data, flag, attr = qcutils.GetSeriesasMA(ds, label)
        if numpy.ma.count_masked(data) != 0:
            logger.error("ERUsingLloydTaylor: driver " + label +
                         " contains missing data, skipping target " + target)
            return
    # create the dictionary keys for this series
    rpLT_info = {}
    # site name
    rpLT_info["site_name"] = ds.globalattributes["site_name"]
    # source series for ER
    opt = qcutils.get_keyvaluefromcf(cf,
                                     [section, series, "ERUsingLloydTaylor"],
                                     "source",
                                     default="Fc")
    rpLT_info["source"] = opt
    # target series name (parsed above)
    rpLT_info["target"] = target
    # list of drivers (parsed above)
    rpLT_info["drivers"] = driver_list
    # name of SOLO output series in ds
    rpLT_info["output"] = cf[section][series]["ERUsingLloydTaylor"]["output"]
    # results of best fit for plotting later on
    rpLT_info["results"] = {
        "startdate": [],
        "enddate": [],
        "No. points": [],
        "r": [],
        "Bias": [],
        "RMSE": [],
        "Frac Bias": [],
        "NMSE": [],
        "Avg (obs)": [],
        "Avg (LT)": [],
        "Var (obs)": [],
        "Var (LT)": [],
        "Var ratio": [],
        "m_ols": [],
        "b_ols": []
    }
    # create the configuration dictionary
    rpLT_info["configs_dict"] = get_configs_dict(cf, ds)
    # create an empty series in ds if the output series doesn't exist yet
    if rpLT_info["output"] not in ds.series.keys():
        data, flag, attr = qcutils.MakeEmptySeries(ds, rpLT_info["output"])
        qcutils.CreateSeries(ds, rpLT_info["output"], data, flag, attr)
    # create the merge directory in the data structure
    if "merge" not in dir(ds): ds.merge = {}
    if "standard" not in ds.merge.keys(): ds.merge["standard"] = {}
    # create the dictionary keys for this series
    ds.merge["standard"][series] = {}
    # output series name
    ds.merge["standard"][series]["output"] = series
    # source
    ds.merge["standard"][series]["source"] = ast.literal_eval(
        cf[section][series]["MergeSeries"]["Source"])
    # create an empty series in ds if the output series doesn't exist yet
    if ds.merge["standard"][series]["output"] not in ds.series.keys():
        data, flag, attr = qcutils.MakeEmptySeries(
            ds, ds.merge["standard"][series]["output"])
        qcutils.CreateSeries(ds, ds.merge["standard"][series]["output"], data,
                             flag, attr)
    return rpLT_info
Example #19
0
def gfSOLO_createdict(cf, ds, series):
    """ Creates a dictionary in ds to hold information about the SOLO data used
        to gap fill the tower data."""
    # locate the control file section that holds this series
    section = qcutils.get_cfsection(cf, series=series, mode="quiet")
    # nothing to do if the series is not in the control file
    if len(section) == 0:
        logger.error(
            "GapFillUsingSOLO: Series %s not found in control file, skipping ...",
            series)
        return
    # make sure the data structure has a solo directory
    if "solo" not in dir(ds): ds.solo = {}
    # the GapFillUsingSOLO options for this series
    solo_cf = cf[section][series]["GapFillUsingSOLO"]
    # one entry per output listed in the control file
    for output in solo_cf.keys():
        entry = {}
        ds.solo[output] = entry
        # the tower series to be gap filled; defaults to the series name
        if "target" in solo_cf[output]:
            entry["label_tower"] = solo_cf[output]["target"]
        else:
            entry["label_tower"] = series
        # site name from the global attributes
        entry["site_name"] = ds.globalattributes["site_name"]
        # optional SOLO neural network settings:
        # nodes, training, factor, learning rate, iterations
        if "solo_settings" in solo_cf[output]:
            settings = ast.literal_eval(solo_cf[output]["solo_settings"])
            entry["solo_settings"] = {
                "nodes_target": int(settings[0]),
                "training": int(settings[1]),
                "factor": int(settings[2]),
                "learningrate": float(settings[3]),
                "iterations": int(settings[4])
            }
        # list of driver series
        entry["drivers"] = ast.literal_eval(solo_cf[output]["drivers"])
        # ustar (turbulence) filter option
        entry["turbulence_filter"] = qcutils.get_keyvaluefromcf(
            cf, [section, series, "GapFillUsingSOLO", output],
            "turbulence_filter",
            default="")
        # day/night filter option
        entry["daynight_filter"] = qcutils.get_keyvaluefromcf(
            cf, [section, series, "GapFillUsingSOLO", output],
            "daynight_filter",
            default="")
        # empty lists to hold the best-fit statistics for plotting later on
        entry["results"] = {
            "startdate": [],
            "enddate": [],
            "No. points": [],
            "r": [],
            "Bias": [],
            "RMSE": [],
            "Frac Bias": [],
            "NMSE": [],
            "Avg (obs)": [],
            "Avg (SOLO)": [],
            "Var (obs)": [],
            "Var (SOLO)": [],
            "Var ratio": [],
            "m_ols": [],
            "b_ols": []
        }
        # create an empty series in ds if the SOLO output series doesn't exist yet
        if output not in ds.series.keys():
            data, flag, attr = qcutils.MakeEmptySeries(ds, output)
            qcutils.CreateSeries(ds, output, data, flag, attr)
Example #20
0
def plottimeseries(cf,nFig,dsa,dsb,si,ei):
    """
    Purpose:
     Plot the series listed in the plot setup dictionary as stacked panels:
     the dsa data on the left Y axis, the dsb data on the right Y axis, plus
     a diurnal-average panel and a QC-flag histogram derived from the dsb
     data.  Saves the figure as a PNG in the 'plots/' directory.
    Usage:
     plottimeseries(cf, nFig, dsa, dsb, si, ei)
     where cf is a control file object
           nFig is the figure number
           dsa is the data structure plotted on the left axis
           dsb is the data structure plotted on the right axis
           si, ei are the start and end indices of the data to plot
    """
    SiteName = dsa.globalattributes['site_name']
    # processing level of dsb, used in the PNG file name below
    Level = dsb.globalattributes['nc_level']
    # time step (minutes), passed to get_diurnalstats below
    dt = int(dsa.globalattributes['time_step'])
    # month of the first record; selects the monthly NumSd value below
    Month = dsa.series['Month']['Data'][0]
    p = plot_setup(cf,nFig)
    log.info(' Plotting series: '+str(p['SeriesList']))
    L1XArray = numpy.array(dsa.series['DateTime']['Data'][si:ei])
    L2XArray = numpy.array(dsb.series['DateTime']['Data'][si:ei])
    # X axis limits are taken from the dsb datetime range
    p['XAxMin'] = min(L2XArray)
    p['XAxMax'] = max(L2XArray)
    p['loc'],p['fmt'] = get_ticks(p['XAxMin'],p['XAxMax'])
    # interactive mode off while the figure is assembled
    plt.ioff()
    fig = plt.figure(int(nFig),figsize=(p['PlotWidth'],p['PlotHeight']))
    fig.clf()
    plt.figtext(0.5,0.95,SiteName+': '+p['PlotDescription'],ha='center',size=16)
    # one panel per series, stacked vertically (offset via yaxOrgOffset)
    for ThisOne, n in zip(p['SeriesList'],range(p['nGraphs'])):
        if ThisOne in dsa.series.keys():
            # NOTE(review): aflag is assigned but never used
            aflag = dsa.series[ThisOne]['Flag']
            p['Units'] = dsa.series[ThisOne]['Attr']['units']
            p['YAxOrg'] = p['ts_YAxOrg'] + n*p['yaxOrgOffset']
            L1YArray,p['nRecs'],p['nNotM'],p['nMskd'] = get_yarray(dsa,ThisOne,si=si,ei=ei)
            # check the control file to see if the Y axis minima have been specified
            nSer = p['SeriesList'].index(ThisOne)
            p['LYAxMax'],p['LYAxMin'] = get_yaxislimitsfromcf(cf,nFig,'YLMax','YLMin',nSer,L1YArray)
            plot_onetimeseries_left(fig,n,ThisOne,L1XArray,L1YArray,p)
        if ThisOne in dsb.series.keys():
            # bflag is used below for the flag-frequency histogram
            bflag = dsb.series[ThisOne]['Flag']
            p['Units'] = dsb.series[ThisOne]['Attr']['units']
            p['YAxOrg'] = p['ts_YAxOrg'] + n*p['yaxOrgOffset']
            #Plot the Level 2 data series on the same X axis but with the scale on the right Y axis.
            L2YArray,p['nRecs'],p['nNotM'],p['nMskd'] = get_yarray(dsb,ThisOne,si=si,ei=ei)
            # check the control file to see if the Y axis minima have been specified
            nSer = p['SeriesList'].index(ThisOne)
            p['RYAxMax'],p['RYAxMin'] = get_yaxislimitsfromcf(cf,nFig,'YRMax','YRMin',nSer,L2YArray)
            plot_onetimeseries_right(fig,n,ThisOne,L2XArray,L2YArray,p)

            #Plot the diurnal averages.
            Hr2,Av2,Sd2,Mx2,Mn2=get_diurnalstats(dsb.series['Hdh']['Data'][si:ei],
                                                dsb.series[ThisOne]['Data'][si:ei],dt)
            # mask the -9999 missing-value sentinel so it doesn't distort the
            # diurnal curves (hard-coded here rather than using a constant)
            Av2 = numpy.ma.masked_where(Av2==-9999,Av2)
            Sd2 = numpy.ma.masked_where(Sd2==-9999,Sd2)
            Mx2 = numpy.ma.masked_where(Mx2==-9999,Mx2)
            Mn2 = numpy.ma.masked_where(Mn2==-9999,Mn2)
            hr2_ax = fig.add_axes([p['hr1_XAxOrg'],p['YAxOrg'],p['hr2_XAxLen'],p['ts_YAxLen']])
            # NOTE(review): Axes.hold was removed in matplotlib 3.0 — confirm
            # the pinned matplotlib version still supports it
            hr2_ax.hold(True)
            # average (yellow), max (red) and min (blue) diurnal curves
            hr2_ax.plot(Hr2,Av2,'y-',Hr2,Mx2,'r-',Hr2,Mn2,'b-')
            section = qcutils.get_cfsection(cf,series=ThisOne,mode='quiet')
            if len(section)!=0:
                if 'DiurnalCheck' in cf[section][ThisOne].keys():
                    # NOTE(review): eval() on control-file text — only safe if
                    # control files are trusted; ast.literal_eval would be safer
                    NSdarr = numpy.array(eval(cf[section][ThisOne]['DiurnalCheck']['NumSd']),dtype=float)
                    # overlay the +/- NumSd*Sd rejection envelope for this month
                    nSd = NSdarr[Month-1]
                    hr2_ax.plot(Hr2,Av2+nSd*Sd2,'r.',Hr2,Av2-nSd*Sd2,'b.')
            plt.xlim(0,24)
            plt.xticks([0,6,12,18,24])
            # only the bottom panel (n == 0) shows the X axis label and ticks
            if n==0:
                hr2_ax.set_xlabel('Hour',visible=True)
            else:
                hr2_ax.set_xlabel('',visible=False)
                plt.setp(hr2_ax.get_xticklabels(), visible=False)
            #if n > 0: plt.setp(hr2_ax.get_xticklabels(), visible=False)

            # vertical lines to show frequency distribution of flags
            bins = numpy.arange(0.5,23.5)
            ind = bins[:len(bins)-1]+0.5
            index = numpy.where(numpy.mod(bflag,10)==0)    # find the elements with flag = 0, 10, 20 etc
            bflag[index] = 0                               # set them all to 0
            hist, bin_edges = numpy.histogram(bflag, bins=bins)
            ymin = hist*0
            # small vertical offset for the count labels above each bar
            delta = 0.01*(numpy.max(hist)-numpy.min(hist))
            bar_ax = fig.add_axes([p['hr2_XAxOrg'],p['YAxOrg'],p['bar_XAxLen'],p['ts_YAxLen']])
            bar_ax.set_ylim(0,numpy.max(hist))
            bar_ax.vlines(ind,ymin,hist)
            # label bars holding more than 5% of the maximum count
            for i,j in zip(ind,hist):
                if j>0.05*numpy.max(hist): bar_ax.text(i,j+delta,str(int(i)),ha='center',size='small')
            if n==0:
                bar_ax.set_xlabel('Flag',visible=True)
            else:
                bar_ax.set_xlabel('',visible=False)
                plt.setp(bar_ax.get_xticklabels(), visible=False)
            #if n > 0: plt.setp(bar_ax.get_xticklabels(), visible=False)
        else:
            log.error('  plttimeseries: series '+ThisOne+' not in data structure')
    fig.show()
    # save the figure as a PNG named after site, level and plot description
    fname = 'plots/'+SiteName.replace(' ','')+'_'+Level+'_'+p['PlotDescription'].replace(' ','')+'.png'
    fig.savefig(fname,format='png')
Example #21
0
def do_dependencycheck(cf, ds, section, series, code=23, mode="quiet"):
    """
    Purpose:
     Mask elements of series wherever any of its precursor ("Source") series
     failed QC (flag not a multiple of 10) and set the QC flag of the masked
     elements to code.
    Usage:
     do_dependencycheck(cf, ds, section, series, code=23, mode="quiet")
     where cf is a control file object
           ds is a data structure
           section is the control file section containing series ("" to look it up)
           series is the label of the series to be checked
           code is the QC flag value assigned to rejected elements
           mode is "verbose" to log a progress message
    Author: PRI
    Date: Back in the day
    """
    # nothing to do if neither the section nor the series is given
    if len(section) == 0 and len(series) == 0: return
    if len(section) == 0:
        section = qcutils.get_cfsection(cf, series=series, mode='quiet')
    # nothing to do if no DependencyCheck is requested for this series
    if "DependencyCheck" not in cf[section][series].keys(): return
    if "Source" not in cf[section][series]["DependencyCheck"]:
        msg = " DependencyCheck: keyword Source not found for series " + series + ", skipping ..."
        logger.error(msg)
        return
    if mode == "verbose":
        msg = " Doing DependencyCheck for " + series
        logger.info(msg)
    # get the precursor source list from the control file
    source_list = ast.literal_eval(
        cf[section][series]["DependencyCheck"]["Source"])
    # check to see if the "ignore_missing" flag is set
    opt = qcutils.get_keyvaluefromcf(cf, [section, series, "DependencyCheck"],
                                     "ignore_missing",
                                     default="no")
    ignore_missing = opt.lower() in ["yes", "y", "true", "t"]
    # get the data
    dependent_data, dependent_flag, dependent_attr = qcutils.GetSeries(
        ds, series)
    # loop over the precursor source list
    for item in source_list:
        # check the precursor is in the data structure
        if item not in ds.series.keys():
            msg = " DependencyCheck: " + series + " precursor series " + item + " not found, skipping ..."
            logger.warning(msg)
            continue
        # get the precursor data
        precursor_data, precursor_flag, precursor_attr = qcutils.GetSeries(
            ds, item)
        # check if the user wants to ignore missing precursor data
        if ignore_missing:
            # find the indices of missing elements; numpy.isclose broadcasts
            # the scalar, so no temporary full-size array is needed
            idx = numpy.where(
                numpy.isclose(precursor_data, float(c.missing_value)))[0]
            # and set these flags to 0 so missing data is ignored
            precursor_flag[idx] = numpy.int32(0)
        # mask the dependent data where the precursor flag shows data not OK
        dependent_data = numpy.ma.masked_where(
            numpy.mod(precursor_flag, 10) != 0, dependent_data)
        # get an index where the precursor flag shows data not OK
        idx = numpy.ma.where(numpy.mod(precursor_flag, 10) != 0)[0]
        # set the dependent QC flag
        dependent_flag[idx] = numpy.int32(code)
    # record which precursors were used in the variable attributes
    dependent_attr["DependencyCheck_source"] = str(source_list)
    # put the data back into the data structure
    qcutils.CreateSeries(ds, series, dependent_data, dependent_flag,
                         dependent_attr)
    # our work here is done
    return