Example #1
def opendss(filename, server='local'):
    """
    opendss(filename, server='local'):
    opens the DSS file with the given filename and returns its contents as a group object.
    The group object behaves like an array of data references; each such data
    reference contains a data set.
    """
    return DSSUtil.createGroup(server,filename)
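A minimal usage sketch, assuming a hypothetical file name; the group is indexed like an array of data references, and getPathname() is assumed to be a DataReference accessor:

# 'sample.dss' is a hypothetical file name
g = opendss('sample.dss')
for i in range(len(g)):
    ref = g[i]                       # each entry is a DataReference
    print str(ref.getPathname())     # assumed DataReference accessor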
Example #2
def make_flag_value(flag_val):
    flag_vals = string.split(flag_val, "|")
    if len(flag_vals) != 2: raise ValueError("Invalid flag: %s" % flag_val)
    flag_type = FlagUtils.getQualityFlagId(flag_vals[0])
    user_id = DSSUtil.getUserId(string.lower(flag_vals[1]))
    _dummy_dse.setFlag(0)
    if flag_type == 0:
        FlagUtils.clearAllFlags(_dummy_dse,user_id)
    else:
        FlagUtils.setQualityFlag(_dummy_dse,flag_type,user_id)
    return _dummy_dse.getFlag()
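A hedged usage sketch; the flag name and user name below are illustrative only and must be values that FlagUtils.getQualityFlagId and DSSUtil.getUserId accept:

# '<FLAGNAME>|<username>' form; both parts are hypothetical values
flag = make_flag_value('REJECT|datachecker')
print flag    # packed flag value ready to store on a data set element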
Example #3
def writedss(filename, pathname, ds) :
    """
    writedss(filename,pathname,ds):
    writes the given data set ds to the given filename under the given pathname.
    """
    DSSUtil.writeData(filename,pathname,ds)
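A round-trip sketch built on opendss and writedss; the file names are hypothetical, and getData()/getPathname() are assumed DataReference accessors:

g = opendss('in.dss')
ref = g[0]                                    # first data reference in the group
ds = ref.getData()                            # assumed accessor for the data set
writedss('out.dss', str(ref.getPathname()), ds)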
Example #4
import re
import string
from vista.set import Constants, DefaultReference,\
     DataReference, DataSetAttr, DataType, \
     FlagUtils, FlaggedDataSetElement, \
     Pathname, \
     RegularTimeSeries, IrregularTimeSeries, \
     SetUtils, PathnamePredicate, \
     PartNamePredicate, PathPartPredicate, \
     SortMechanism
from vista.db.dss import DSSUtil
from vista.time import TimeFactory
from vista.app import MainProperties
DSSUtil.setAccessProperties(MainProperties.getProperties())

def wrap_data(ds, filename='', server='',pathname=''):
    """
    wrap_data(dataset, filename='', server='', pathname='')
    wraps the data set in a DataReference with the given filename, server name and pathname
    """
    return gen_ref(ds,filename,server,pathname)
#
def gen_ref(ds, filename='', server='', pathname=''):
    "generates a DataReference given a dataset"
    if isinstance(ds, DataReference): return ds
    if (filename == '' and server == '' and pathname == ''):
        return DefaultReference(ds)
    else :
        return DefaultReference(server, filename, pathname, ds)

#
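A sketch of wrapping a data set read from one file so it can be referenced under another file and pathname; the file, server and pathname strings are hypothetical:

ds = opendss('in.dss')[0].getData()           # assumed DataReference.getData() accessor
ref = wrap_data(ds)                           # bare DefaultReference around the data set
ref2 = wrap_data(ds, 'copy.dss', 'local', '/A/B/C//15MIN/F/')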
Example #5
def flag_data(ftype, dataset, valArray, log = 'flag.log', \
              Special = False, ResetExist = False):
    """
    flag_data(ftype, dataset, valArray, log = 'flag.log', Special = False, ResetExist = False):
    Flags a datastream's values for bad data:
    ftype='R': datavalue not within valArray[0]:valArray[1] marked as reject.
         ='D': datavalue difference from previous value not within
               valArray[0] range marked as reject. If len(valArray) > 1, 
               valArray[0] is an exceedance factor for values, and
               valArray[1] is an exceedance factor for 1st diff of values,
               both compared to Trailing Moving Average
         ='M': datavalue equals or is very close to one of valArray[0:], an array of Missing Value markers;
               DSS flag set to missing. Optional Special means marker value
               is within normal data range, check more carefully.
         ='+|*': datavalue within range valArray[0]:valArray[1] scaled by valArray[2]
               amount, that is, datavalue is added/multiplied to/by valArray[2]. Used mainly
               for CDEC EC data that switches between milli- and micro-mhos/cm and
               to add 100 to USGS stage data.
    All values marked are written to log file.
    Special=True: Special treatment for Missing values that are within the
    normal operating range of the parameter
    ResetExist=True: reset any existing flag values to Unscreened
    Flags added to timeseries if needed.
    """
    from jarray import zeros
    def nearVal (val, target, tol=.001):
        # return True if the absolute difference between val and target is less than tol
        if abs(val-target) < tol:
            return True
        else:
            return False
    if ftype == 'R':
        if len(valArray) != 2: 
            # assume hi and lo range will be calculated from percentiles
            yVals = sorted(SetUtils.createYArray(dataset))
            # lo and hi: 1st and 99th percentiles
            valArray = [yVals[int(len(yVals)*0.01)], yVals[int(len(yVals)*0.99)]]
        rej_head = 'Check range ' + str(valArray[0]) + ' - ' + str(valArray[1])
        rej_note = 'Range reject @ '
    elif ftype == 'D':
        nMoveAve = 5   # number of good elements in moving average
        moveAve = 0.0
        moveAveDiff = 0.0
        xTA = zeros(nMoveAve,'d')        
        diffFactor = False
        rej_head = 'Check diff w/ moving average ' + str(valArray[0])
        rej_note = 'Diff reject @ '
        if len(valArray) > 1: # factor
            diffFactor = True
            rej_head = 'Check vals, 1st diffs w/ MA ' + str(valArray[0])+ ', ' + str(valArray[1]) + ' factors'
#            yVals = sorted(SetUtils.createYArray(dataset))
#            # difference between 10th and 90th percentiles
#            pdiff = abs(yVals[int(len(yVals)*0.10)] - yVals[int(len(yVals)*0.90)])
            rej_note = 'Diff reject % @ '
    elif ftype == 'M':
        if len(valArray) < 1: raise ValueError('At least one value must be given for Missing check.')
        rej_head = 'Check Missing value marker ' + str(valArray)
        rej_note = 'Missing @ '
    elif ftype == '+' or ftype == '*':
        if len(valArray) != 3: raise ValueError('Three values must be given for Shift.')
        rej_head = 'Check scale ' + str(valArray[0]) + ' - ' + str(valArray[1])
        rej_note = 'Value scaled @ '
    else: raise ValueError("First arg must be a single character: R, D, M, '+' or '*'.")
    # nominal time interval in minutes to detect gaps in data
    intvls = {'IR-DAY': 15, 'IR-MON': 60, 'IR-YEAR': 1440, \
              '2MIN': 2, '10MIN': 10, '15MIN': 15, '1HOUR': 60, \
              '1DAY': 1440, '1MON': 43920}
    ePart = dataset.getName().split('/')[5]
    nomIntvl = intvls[ePart]
    # a flag to check if any flag was changed
    changedFlag = False
    # get the filter for missing values
    filter = Constants.DEFAULT_FLAG_FILTER
    # check if ds already has flags, if not, make them
    # open log file
    logfile = open(log, 'a')
    logfile.write('\n\n' + 'Name: ' + dataset.getName())
    logfile.write('\n' + 'Units: ' + dataset.getAttributes().getYUnits())
    logfile.write('\n' + rej_head)
    if dataset.isFlagged(): ds = dataset
    else: ds = ds_add_flags(dataset)
    # get user id for setting flags
    uId = DSSUtil.getUserId('datachecker')
    # create a missing data element
    ex = dataset.getElementAt(0)
    ex.setY(Constants.MISSING_VALUE)
    ex.setFlag(FlagUtils.MISSING_FLAG)
    eBad = None
    intvl = None
    nGood = 0
    for i in range(dataset.size()):
        changedEl = False
        e1 = dataset.getElementAt(i)
        if ResetExist:
            FlagUtils.clearAllFlags(e1, uId)
            dataset.putElementAt(i, e1) # put the element back into the data set
        if not filter.isAcceptable(e1) or \
               FlagUtils.getQualityFlag(e1) == FlagUtils.MISSING_FLAG: continue
        # get the data elements at the i, i-1, and i+1 positions
        if i > 0:
            e0 = dataset.getElementAt(i - 1)
            intvl = int(e1.getX() - e0.getX() + .01)
        else: 
            e0 = ex
        if i < len(dataset) - 1: 
            e2 = dataset.getElementAt(i + 1)
        else: 
            e2 = ex
        if ftype == 'R':    # Range
            if e1.y < valArray[0] or e1.y > valArray[1] :
                FlagUtils.setQualityFlag(e1, FlagUtils.REJECT_FLAG, uId)
                changedEl = True
        elif ftype == 'D':  # Difference (abs or %) between this and moving-ave value
            # if large data time gap noted, flag element as questionable and reset moving average
            if intvl > nomIntvl*6:
                FlagUtils.setQualityFlag(e1, FlagUtils.QUESTIONABLE_FLAG, uId)
                changedEl = True
                moveAve = 0
                xTA = zeros(nMoveAve,'d')
                nGood = 0        
            if not diffFactor: 
                diff1 = abs(e1.y - moveAve)
            else:
                if moveAve != 0:
                    diff1 = abs(e1.y / moveAve)
                else:
                    diff1 = 1
                if moveAveDiff != 0:
                    diff2 = abs((e1.y - e0.y) / moveAveDiff)
                else:
                    diff2 = 0
                if diff2 == 0:
                    diff2 = .1
                if diff2 < 1:
                    diff2 = 1 / diff2
            # check for spikes and longer plateaus near a bad value
            if diff1 == 0:
                diff1 = .1
            if diff1 < 1:
                diff1 = 1 / diff1
            if (nGood >= nMoveAve) and \
                ( (diff1 > valArray[0] and (not diffFactor or (diffFactor and diff2 > valArray[1]))) or \
                (eBad and nearVal(e1.y, eBad.y, abs(eBad.y*.001)))):
                FlagUtils.setQualityFlag(e1, FlagUtils.REJECT_FLAG, uId)
                changedEl = True
                if not eBad:
                    eBad = e1
            else:
                eBad = None
            if filter.isAcceptable(e1) and \
                FlagUtils.getQualityFlag(e1) != FlagUtils.MISSING_FLAG: 
                nGood += 1
                xTA.pop(0)
                xTA.append(e1.y)
            # moving average of data values
            moveAve = sum(xTA) / nMoveAve
            if diffFactor: 
                # moving average of 1st difference of data values
                xTA1 = xTA[1:nMoveAve]
                xTA0 = xTA[0:nMoveAve-1]
                moveAveDiff = sum([abs(xTA1[j]-xTA0[j]) for j in range(nMoveAve-1)]) / (nMoveAve -1)
        elif ftype == 'M':  # Missing values
            for vA in valArray:
                if nearVal(vA, e1.y):
                    # Special treatment for Missing values that are within the
                    # normal operating range of the parameter; check that the value
                    # before or after is also Missing or not acceptable before
                    # marking this value as Missing
                    if not Special or (Special and \
                     (not filter.isAcceptable(e0) or not filter.isAcceptable(e2)) or \
                     (nearVal(vA, e0.y) or nearVal(vA, e2.y))):
                        FlagUtils.setQualityFlag(e1, FlagUtils.MISSING_FLAG, uId)
                        #e1.y=Constants.MISSING_VALUE
                        changedEl = True
        elif ftype == '+':  # re-scale
            if not filter.isAcceptable(e1): continue
            if e1.y >= valArray[0] and e1.y <= valArray[1] :
                e1.y += valArray[2]
                changedEl = True
        elif ftype == '*':  # re-scale
            if not filter.isAcceptable(e1): continue
            if e1.y >= valArray[0] and e1.y <= valArray[1] :
                e1.y *= valArray[2]
                changedEl = True
        if changedEl:
            changedFlag = True
            dataset.putElementAt(i, e1) # put the element back into the data set
            logfile.write('\n' + rej_note + e1.getXString() + " : " + e1.getYString())
    # end the for loop
    logfile.close()
    if changedFlag or ResetExist:
        return ds
    else:
        return None
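An illustrative driver for flag_data; the file name, pathname and range limits are hypothetical, and getData()/getPathname() are assumed DataReference accessors:

g = opendss('screen.dss')
ts = g[0].getData()                           # time series to screen; its E part must be a key of intvls above
flagged = flag_data('R', ts, [0.0, 100.0], log='range.log')   # range check 0 - 100
if flagged:                                   # non-None only when a flag was set or ResetExist was used
    writedss('screen.dss', str(g[0].getPathname()), flagged)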
Example #6
	    System.loadLibrary("errno")
	    System.loadLibrary("posix")
except:
    pass
# check for display
from java.awt import Toolkit
display = 1
try :
    tk = Toolkit.getDefaultToolkit()
except:
    print 'Problem with display'
    display = 0
#
from vista.db.dss import DSSUtil
from vista.app import MainProperties
DSSUtil.setAccessProperties(MainProperties.getProperties())
#
from vdss import *
from vtimeseries import *
from vdisplay import *
#
def exit():
    sys.exit()
#
def flag_data(ftype, dataset, valArray, log = 'flag.log', \
              Special = False, ResetExist = False):
    """
    flag_data(ftype, dataset, valArray, log = 'flag.log', Special = False, ResetExist = False):
    Flags a datastream's values for bad data:
    ftype='R': datavalue not within valArray[0]:valArray[1] marked as reject.
         ='D': datavalue difference from previous value not within
Example #7
def flag_data(ftype, dataset, valArray, log = 'flag.log', \
              Special = False, ResetExist = False):
    """
    flag_data(ftype, dataset, valArray, log = 'flag.log', Special = False, ResetExist = False):
    Flags a datastream's values for bad data:
    ftype='R': datavalue not within valArray[0]:valArray[1] marked as reject.
         ='D': datavalue difference from previous value not within
               valArray[0] range marked as reject. If len(valArray) > 1, 
               valArray[0] is an exceedance factor for values, and
               valArray[1] is an exceedance factor for 1st diff of values,
               both compared to Trailing Moving Average
         ='M': datavalue equals or is very close to one of valArray[0:], an array of Missing Value markers;
               DSS flag set to missing. Optional Special means marker value
               is within normal data range, check more carefully.
         ='+|*': datavalue within range valArray[0]:valArray[1] scaled by valArray[2]
               amount, that is, datavalue is added/multiplied to/by valArray[2]. Used mainly
               for CDEC EC data that switches between milli- and micro-mhos/cm and
               to add 100 to USGS stage data.
    All values marked are written to log file.
    Special=True: Special treatment for Missing values that are within the
    normal operating range of the parameter
    ResetExist=True: reset any existing flag values to Unscreened
    Flags added to timeseries if needed.
    """
    from jarray import zeros

    def nearVal(val, target, tol=.001):
        # return True if the absolute difference between val and target is less than tol
        if abs(val - target) < tol:
            return True
        else:
            return False

    if ftype == 'R':
        if len(valArray) != 2:
            # assume hi and lo range will be calculated from percentiles
            yVals = sorted(SetUtils.createYArray(dataset))
            # lo and hi: 1st and 99th percentiles
            valArray = [
                yVals[int(len(yVals) * 0.01)], yVals[int(len(yVals) * 0.99)]
            ]
        rej_head = 'Check range ' + str(valArray[0]) + ' - ' + str(valArray[1])
        rej_note = 'Range reject @ '
    elif ftype == 'D':
        nMoveAve = 5  # number of good elements in moving average
        moveAve = 0.0
        moveAveDiff = 0.0
        xTA = zeros(nMoveAve, 'd')
        diffFactor = False
        rej_head = 'Check diff w/ moving average ' + str(valArray[0])
        rej_note = 'Diff reject @ '
        if len(valArray) > 1:  # factor
            diffFactor = True
            rej_head = 'Check vals, 1st diffs w/ MA ' + str(
                valArray[0]) + ', ' + str(valArray[1]) + ' factors'
            #            yVals = sorted(SetUtils.createYArray(dataset))
            #            # difference between 10th and 90th percentiles
            #            pdiff = abs(yVals[int(len(yVals)*0.10)] - yVals[int(len(yVals)*0.90)])
            rej_note = 'Diff reject % @ '
    elif ftype == 'M':
        if len(valArray) < 1:
            raise ValueError('At least one value must be given for Missing check.')
        rej_head = 'Check Missing value marker ' + str(valArray)
        rej_note = 'Missing @ '
    elif ftype == '+' or ftype == '*':
        if len(valArray) != 3: raise ValueError('Three values must be given for Shift.')
        rej_head = 'Check scale ' + str(valArray[0]) + ' - ' + str(valArray[1])
        rej_note = 'Value scaled @ '
    else:
        raise ValueError("First arg must be a single character: R, D, M, '+' or '*'.")
    # nominal time interval in minutes to detect gaps in data
    intvls = {'IR-DAY': 15, 'IR-MON': 60, 'IR-YEAR': 1440, \
              '2MIN': 2, '10MIN': 10, '15MIN': 15, '1HOUR': 60, \
              '1DAY': 1440, '1MON': 43920}
    ePart = dataset.getName().split('/')[5]
    nomIntvl = intvls[ePart]
    # a flag to check if any flag was changed
    changedFlag = False
    # get the filter for missing values
    filter = Constants.DEFAULT_FLAG_FILTER
    # check if ds already has flags, if not, make them
    # open log file
    logfile = open(log, 'a')
    logfile.write('\n\n' + 'Name: ' + dataset.getName())
    logfile.write('\n' + 'Units: ' + dataset.getAttributes().getYUnits())
    logfile.write('\n' + rej_head)
    if dataset.isFlagged(): ds = dataset
    else: ds = ds_add_flags(dataset)
    # get user id for setting flags
    uId = DSSUtil.getUserId('datachecker')
    # create a missing data element
    ex = dataset.getElementAt(0)
    ex.setY(Constants.MISSING_VALUE)
    ex.setFlag(FlagUtils.MISSING_FLAG)
    eBad = None
    intvl = None
    nGood = 0
    for i in range(dataset.size()):
        changedEl = False
        e1 = dataset.getElementAt(i)
        if ResetExist:
            FlagUtils.clearAllFlags(e1, uId)
            dataset.putElementAt(i,
                                 e1)  # put the element back into the data set
        if not filter.isAcceptable(e1) or \
               FlagUtils.getQualityFlag(e1) == FlagUtils.MISSING_FLAG:
            continue
        # get the data elements at the i, i-1, and i+1 positions
        if i > 0:
            e0 = dataset.getElementAt(i - 1)
            intvl = int(e1.getX() - e0.getX() + .01)
        else:
            e0 = ex
        if i < len(dataset) - 1:
            e2 = dataset.getElementAt(i + 1)
        else:
            e2 = ex
        if ftype == 'R':  # Range
            if e1.y < valArray[0] or e1.y > valArray[1]:
                FlagUtils.setQualityFlag(e1, FlagUtils.REJECT_FLAG, uId)
                changedEl = True
        elif ftype == 'D':  # Difference (abs or %) between this and moving-ave value
            # if large data time gap noted, flag element as questionable and reset moving average
            if intvl > nomIntvl * 6:
                FlagUtils.setQualityFlag(e1, FlagUtils.QUESTIONABLE_FLAG, uId)
                changedEl = True
                moveAve = 0
                xTA = zeros(nMoveAve, 'd')
                nGood = 0
            if not diffFactor:
                diff1 = abs(e1.y - moveAve)
            else:
                if moveAve != 0:
                    diff1 = abs(e1.y / moveAve)
                else:
                    diff1 = 1
                if moveAveDiff != 0:
                    diff2 = abs((e1.y - e0.y) / moveAveDiff)
                else:
                    diff2 = 0
                if diff2 == 0:
                    diff2 = .1
                if diff2 < 1:
                    diff2 = 1 / diff2
            # check for spikes and longer plateaus near a bad value
            if diff1 == 0:
                diff1 = .1
            if diff1 < 1:
                diff1 = 1 / diff1
            if (nGood >= nMoveAve) and \
                ( (diff1 > valArray[0] and (not diffFactor or (diffFactor and diff2 > valArray[1]))) or \
                (eBad and nearVal(e1.y, eBad.y, abs(eBad.y*.001)))):
                FlagUtils.setQualityFlag(e1, FlagUtils.REJECT_FLAG, uId)
                changedEl = True
                if not eBad:
                    eBad = e1
            else:
                eBad = None
            if filter.isAcceptable(e1) and \
                FlagUtils.getQualityFlag(e1) != FlagUtils.MISSING_FLAG:
                nGood += 1
                xTA.pop(0)
                xTA.append(e1.y)
            # moving average of data values
            moveAve = sum(xTA) / nMoveAve
            if diffFactor:
                # moving average of 1st difference of data values
                xTA1 = xTA[1:nMoveAve]
                xTA0 = xTA[0:nMoveAve - 1]
                moveAveDiff = sum(
                    [abs(xTA1[j] - xTA0[j])
                     for j in range(nMoveAve - 1)]) / (nMoveAve - 1)
        elif ftype == 'M':  # Missing values
            for vA in valArray:
                if nearVal(vA, e1.y):
                    # Special treatment for Missing values that are within the
                    # normal operating range of the parameter; check that the value
                    # before or after is also Missing or not acceptable before
                    # marking this value as Missing
                    if not Special or (Special and \
                     (not filter.isAcceptable(e0) or not filter.isAcceptable(e2)) or \
                     (nearVal(vA, e0.y) or nearVal(vA, e2.y))):
                        FlagUtils.setQualityFlag(e1, FlagUtils.MISSING_FLAG,
                                                 uId)
                        #e1.y=Constants.MISSING_VALUE
                        changedEl = True
        elif ftype == '+':  # re-scale
            if not filter.isAcceptable(e1): continue
            if e1.y >= valArray[0] and e1.y <= valArray[1]:
                e1.y += valArray[2]
                changedEl = True
        elif ftype == '*':  # re-scale
            if not filter.isAcceptable(e1): continue
            if e1.y >= valArray[0] and e1.y <= valArray[1]:
                e1.y *= valArray[2]
                changedEl = True
        if changedEl:
            changedFlag = True
            dataset.putElementAt(i,
                                 e1)  # put the element back into the data set
            logfile.write('\n' + rej_note + e1.getXString() + " : " +
                          e1.getYString())
    # end the for loop
    logfile.close()
    if changedFlag or ResetExist:
        return ds
    else:
        return None
Example #8
import sys
from java.lang import System
try:
    System.loadLibrary("errno")
    System.loadLibrary("posix")
except:
    pass
# check for display
from java.awt import Toolkit

display = 1
try:
    tk = Toolkit.getDefaultToolkit()
except:
    print 'Problem with display'
    display = 0
#
from vista.db.dss import DSSUtil
from vista.app import MainProperties

DSSUtil.setAccessProperties(MainProperties.getProperties())
#
from vdss import *
from vtimeseries import *
from vdisplay import *


#
def exit():
    sys.exit()


#
def flag_data(ftype, dataset, valArray, log = 'flag.log', \
              Special = False, ResetExist = False):
    """