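# Imports assumed by the functions below: numpy, scipy, and the OOI
# ion-functions helpers ('utils' plus the compiled 'stuckvalues' and
# 'spikevalues' routines). The exact module paths are an assumption based on
# that project's layout.
import numpy as np
from scipy.interpolate import LinearNDInterpolator

from ion_functions import utils
from ion_functions.qc.qc_extensions import stuckvalues, spikevalues
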
def dataqc_globalrangetest(dat, datlim, strict_validation=False):
    """
    Description:

        Data quality control algorithm testing if measurements fall into a
        user-defined valid range. Returns 1 for presumably good data and 0 for
        data presumed bad.

    Implemented by:

        2010-07-28: DPS authored by Mathias Lankhorst. Example code provided
        for Matlab.
        2013-04-06: Christopher Wingard. Initial Python implementation.
        2013-05-30: Christopher Mueller. Performance improvements by adding
            strict_validation flag.

    Usage:

        qcflag = dataqc_globalrangetest(dat, datlim, strict_validation)

            where

        qcflag = Boolean, 0 if value is outside range, else 1.

        dat = Input dataset, any scalar or vector. Must be numeric and real.
        datlim = Two-element vector with the minimum and maximum values
            considered to be valid.
        strict_validation = Flag (default is False) to assert testing of input
            types (e.g. isreal, isnumeric)

    References:

        OOI (2012). Data Product Specification for Global Range Test. Document
            Control Number 1341-10004. https://alfresco.oceanobservatories.org
            (See: Company Home >> OOI >> Controlled >> 1000 System Level >>
            1341-10004_Data_Product_SPEC_GLBLRNG_OOI.pdf)
    """
    dat = np.atleast_1d(dat)
    datlim = np.atleast_1d(datlim)

    if strict_validation:
        if not utils.isnumeric(dat).all():
            raise ValueError('\'dat\' must be numeric')

        if not utils.isreal(dat).all():
            raise ValueError('\'dat\' must be real')

        if not utils.isnumeric(datlim).all():
            raise ValueError('\'datlim\' must be numeric')

        if not utils.isreal(datlim).all():
            raise ValueError('\'datlim\' must be real')

        if len(datlim) < 2:  # Must have at least 2 elements
            raise ValueError('\'datlim\' must have at least 2 elements')

    return ((datlim.min() <= dat) & (dat <= datlim.max())).astype('int8')
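def _example_globalrangetest():
    # Minimal usage sketch (a hypothetical helper, not from the DPS): values
    # outside the closed interval [0.0, 20.0] are flagged 0.
    qcflag = dataqc_globalrangetest([3.0, 25.0, -1.0, 17.0], [0.0, 20.0])
    # expected: array([1, 0, 0, 1], dtype=int8)
    return qcflag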
def dataqc_stuckvaluetest(x, reso, num=10, strict_validation=False):
    """
    Description:

        Data quality control algorithm testing a time series for "stuck
        values", i.e. repeated occurences of one value. Returns 1 for
        presumably good data and 0 for data presumed bad.

    Implemented by:

        2012-10-29: DPS authored by Mathias Lankhorst. Example code provided
        for Matlab.
        2013-04-06: Christopher Wingard. Initial Python implementation.

    Usage:

        qcflag = dataqc_stuckvaluetest(x, reso, num)

            where

        qcflag = Boolean output: 0 where stuck values are found, 1 elsewhere.
        x = Input time series (vector, numeric).
        reso = Resolution; repeat values less than reso apart will be
            considered "stuck values".
        num = Minimum number of successive values within reso of each other
            that will trigger the "stuck value". num is optional and defaults
            to 10 if omitted or empty.

    References:

        OOI (2012). Data Product Specification for Stuck Value Test. Document
            Control Number 1341-10008. https://alfresco.oceanobservatories.org/
            (See: Company Home >> OOI >> Controlled >> 1000 System Level >>
            1341-10008_Data_Product_SPEC_STUCKVL_OOI.pdf)
    """
    dat = np.atleast_1d(x)

    if strict_validation:
        if not utils.isnumeric(dat).all():
            raise ValueError('\'x\' must be numeric')

        if not utils.isvector(dat):
            raise ValueError('\'x\' must be a vector')

        if not utils.isreal(dat).all():
            raise ValueError('\'x\' must be real')

        for k, arg in {'reso': reso, 'num': num}.items():
            if not utils.isnumeric(arg).all():
                raise ValueError('\'{0}\' must be numeric'.format(k))

            if not utils.isscalar(arg):
                raise ValueError('\'{0}\' must be a scalar'.format(k))

            if not utils.isreal(arg).all():
                raise ValueError('\'{0}\' must be real'.format(k))

    num = np.abs(num)
    dat = np.asanyarray(dat, dtype=float)
    ll = len(dat)
    if ll < num:
        # Warn - 'num' is greater than len(x), returning zeros
        out = np.zeros(dat.size, dtype='int8')
    else:
        out = stuckvalues(dat, reso, num)

    return out
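def _example_stuckvaluetest():
    # Minimal usage sketch (a hypothetical helper, not from the DPS): with
    # num=3, the run of four identical 3.33 readings should be flagged 0 and
    # the remaining points 1. Exact output comes from the external
    # 'stuckvalues' routine.
    x = [4.83, 1.40, 3.33, 3.33, 3.33, 3.33, 4.09]
    return dataqc_stuckvaluetest(x, reso=0.001, num=3)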
def dataqc_polytrendtest(dat, t, ord_n=1, nstd=3, strict_validation=False):
    """
    Description:

        Data quality control algorithm testing if measurements contain a
        significant portion of a polynomial. Returns 1 if this is not the case,
        else 0.

        The purpose of this test is to check if a significant fraction of the
        variability in a time series can be explained by a drift, possibly
        interpreted as a sensor drift. This drift is assumed to be a polynomial
        of order ORD. Use ORD=1 to consider a linear drift.

        The time series dat is passed to numpy's polyfit routine to obtain a
        polynomial fit PP to dat, and the difference dat-PP is compared to the
        original dat. If the standard deviation of (dat-PP) is less than that
        of dat by a factor of NSTD, the time series is assumed to contain a
        significant trend (output will be 0), else not (output will be 1).

    Implemented by:

        2012-10-29: DPS authored by Mathias Lankhorst. Example code provided
        for Matlab.
        2013-04-06: Christopher Wingard. Initial Python implementation.
        2013-05-30: Christopher Mueller. Performance optimizations.

    Usage:

        qcflag = dataqc_polytrendtest(dat, t, ord_n, nstd, strict_validation)

            where

        qcflag = Boolean, 0 if a trend is detected, 1 otherwise.
        dat = Input dataset, a numeric real vector.
        t = time record associated with dat
        ord_n (optional, defaults to 1) = Polynomial order.
        nstd (optional, defaults to 3) = Factor by how much the standard
            deviation must be reduced before qcflag switches from 1 to 0
        strict_validation (optional, defaults to False) = Flag asserting
            testing of inputs.

    References:

        OOI (2012). Data Product Specification for Trend Test. Document
            Control Number 1341-10007. https://alfresco.oceanobservatories.org/
            (See: Company Home >> OOI >> Controlled >> 1000 System Level >>
            1341-10007_Data_Product_SPEC_TRNDTST_OOI.pdf)
    """
    dat = np.atleast_1d(dat)
    t = np.atleast_1d(t)

    if strict_validation:
        for k, arg in {'dat': dat, 't': t, 'ord_n': ord_n, 'nstd': nstd}.items():
            if not utils.isnumeric(arg).all():
                raise ValueError('\'{0}\' must be numeric'.format(k))

            if not utils.isreal(arg).all():
                raise ValueError('\'{0}\' must be real'.format(k))

        for k, arg in {'dat': dat, 't': t}.items():
            if not utils.isvector(arg):
                raise ValueError('\'{0}\' must be a vector'.format(k))

        for k, arg in {'ord_n': ord_n, 'nstd': nstd}.items():
            if not utils.isscalar(arg):
                raise ValueError('\'{0}\' must be a scalar'.format(k))

    ord_n = int(round(abs(ord_n)))
    nstd = int(abs(nstd))

    pp = np.polyfit(t, dat, ord_n)
    datpp = np.polyval(pp, t)

    # test for a trend
    if np.atleast_1d((np.std(dat - datpp) * nstd) < np.std(dat)).all():
        trndtst = 0
    else:
        trndtst = 1

    # ensure output size equals input, even though test yields a single value.
    qcflag = np.ones(dat.shape).astype('int8') * trndtst
    return qcflag
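def _example_polytrendtest():
    # Minimal usage sketch (a hypothetical helper, not from the DPS): a series
    # that is pure linear drift fails the test, so every flag is 0; a
    # trend-free series would return all ones.
    t = np.arange(10.0)
    qcflag = dataqc_polytrendtest(0.5 * t, t, ord_n=1, nstd=3)
    # expected: array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype=int8)
    return qcflag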
def dataqc_spiketest(dat, acc, N=5, L=5, strict_validation=False):
    """
    Description:

        Data quality control algorithm testing a time series for spikes.
        Returns 1 for presumably good data and 0 for data presumed bad.

        The time series is divided into windows of length L (an odd integer
        number). Then, window by window, each value is compared to its (L-1)
        neighboring values: a range R of these (L-1) values is computed (max.
        minus min.), and replaced with the measurement accuracy ACC if ACC>R. A
        value is presumed to be good, i.e. no spike, if it deviates from the
        mean of the (L-1) peers by less than a multiple of the range,
        N*max(R,ACC).

        Further than (L-1)/2 values from the start or end points, the peer
        values are symmetrically before and after the test value. Within that
        range of the start and end, the peers are the first/last L values
        (without the test value itself).

        The purpose of ACC is to restrict spike detection to deviations
        exceeding a minimum threshold value (N*ACC) even if the data have
        little variability. Use ACC=0 to disable this behavior.

    Implemented by:

        2012-07-28: DPS authored by Mathias Lankhorst. Example code provided
        for Matlab.
        2013-04-06: Christopher Wingard. Initial Python implementation.
        2013-05-30: Christopher Mueller. Performance optimizations.

    Usage:

        qcflag = dataqc_spiketest(dat, acc, N, L)

            where

        qcflag = Boolean, 0 if value is outside range, else 1.

        dat = input data set, a numeric, real vector.
        acc = Accuracy of any input measurement.
        N = (optional, defaults to 5) Range multiplier, cf. above
        L = (optional, defaults to 5) Window length, cf. above

    References:

        OOI (2012). Data Product Specification for Spike Test. Document
            Control Number 1341-10006. https://alfresco.oceanobservatories.org/
            (See: Company Home >> OOI >> Controlled >> 1000 System Level >>
            1341-10006_Data_Product_SPEC_SPKETST_OOI.pdf)
    """
    dat = np.atleast_1d(dat)

    if strict_validation:
        if not utils.isnumeric(dat).all():
            raise ValueError('\'dat\' must be numeric')

        if not utils.isreal(dat).all():
            raise ValueError('\'dat\' must be real')

        if not utils.isvector(dat):
            raise ValueError('\'dat\' must be a vector')

        for k, arg in {'acc': acc, 'N': N, 'L': L}.items():
            if not utils.isnumeric(arg).all():
                raise ValueError('\'{0}\' must be numeric'.format(k))

            if not utils.isreal(arg).all():
                raise ValueError('\'{0}\' must be real'.format(k))
    dat = np.asanyarray(dat, dtype=float)

    out = spikevalues(dat, L, N, acc)
    return out
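def _example_spiketest():
    # Minimal usage sketch (a hypothetical helper, not from the DPS): the
    # isolated 100.0 deviates from its window peers by far more than
    # N*max(R, acc) and should be flagged 0. Exact output comes from the
    # external 'spikevalues' routine.
    dat = [1.0, 2.0, 1.0, 2.0, 100.0, 2.0, 1.0, 2.0, 1.0]
    return dataqc_spiketest(dat, acc=0.1)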
def dataqc_localrangetest(dat, z, datlim, datlimz, strict_validation=False):
    """
    Description:

        Data quality control algorithm testing if measurements fall into a
        user-defined valid range. This range is not constant but varies with
        measurement location. Returns 1 for presumably good data and 0 for data
        presumed bad.

    Implemented by:

        2012-07-17: DPS authored by Mathias Lankhorst. Example code provided
        for Matlab.
        2013-04-06: Christopher Wingard. Initial Python implementation.

    Usage:

        qcflag = dataqc_localrangetest(dat, z, datlim, datlimz)

            where

        qcflag = Boolean, 0 if value is outside range, else 1.

        dat = input data set, a numeric real scalar or column vector.
        z = location of measurement dat. Must have the same # of rows as dat
            and the same # of columns as datlimz.
        datlim = two column array with the minimum (column 1) and maximum
            (column 2) values considered valid.
        datlimz = array with the locations where datlim is given. Must have the
            same # of rows as datlim and the same # of columns as z.

    References:

        OOI (2012). Data Product Specification for Local Range Test. Document
            Control Number 1341-10005. https://alfresco.oceanobservatories.org/
            (See: Company Home >> OOI >> Controlled >> 1000 System Level >>
            1341-10005_Data_Product_SPEC_LOCLRNG_OOI.pdf)
    """
    if strict_validation:
        # check that dat is a vector and datlim is a matrix
        if not utils.isvector(dat):
            raise ValueError('\'dat\' must be a vector')

        if not utils.ismatrix(datlim):
            raise ValueError('\'datlim\' must be a matrix')

        # check if all inputs are numeric and real
        for k, arg in {
                'dat': dat,
                'z': z,
                'datlim': datlim,
                'datlimz': datlimz
        }.items():
            if not utils.isnumeric(arg).all():
                raise ValueError('\'{0}\' must be numeric'.format(k))

            if not utils.isreal(arg).all():
                raise ValueError('\'{0}\' must be real'.format(k))

    # collapse a singleton leading dimension, if present, so the limit arrays
    # are handled as 2-D below
    if len(datlim.shape) == 3 and datlim.shape[0] == 1:
        datlim = datlim.reshape(datlim.shape[1:])

    if len(datlimz.shape) == 3 and datlimz.shape[0] == 1:
        datlimz = datlimz.reshape(datlimz.shape[1:])

    # test size and shape of the input arrays datlimz and datlim, setting test
    # variables.
    array_size = datlimz.shape
    if len(array_size) == 1:
        numlim = array_size[0]
        ndim = 1
    else:
        numlim = array_size[0]
        ndim = array_size[1]

    array_size = datlim.shape
    tmp1 = array_size[0]
    tmp2 = array_size[1]
    if tmp1 != numlim:
        raise ValueError('\'datlim\' and \'datlimz\' must '
                         'have the same number of rows.')

    if tmp2 != 2:
        raise ValueError('\'datlim\' must be structured as 2-D array '
                         'with exactly 2 columns and 1 through N rows.')

    # test the size and shape of the z input array
    array_size = z.shape
    if len(array_size) == 1:
        num = array_size[0]
        tmp2 = 1
    else:
        num = array_size[0]
        tmp2 = array_size[1]

    if tmp2 != ndim:
        raise ValueError('\'z\' must have the same number of columns '
                         'as \'datlimz\'.')

    if num != dat.size:
        raise ValueError('Length of \'dat\' must match the number of '
                         'rows in \'z\'.')

    # test datlim, values in column 2 must be greater than those in column 1
    if not all(datlim[:, 1] > datlim[:, 0]):
        raise ValueError('Second column values of \'datlim\' should be '
                         'greater than first column values.')

    # calculate the upper and lower limits for the data set
    if ndim == 1:
        # determine the lower limits using linear interpolation
        lim1 = np.interp(z, datlimz, datlim[:, 0], left=np.nan, right=np.nan)
        # determine the upper limits using linear interpolation
        lim2 = np.interp(z, datlimz, datlim[:, 1], left=np.nan, right=np.nan)
    else:
        # Compute Delaunay Triangulation and use linear interpolation to
        # determine the N-dimensional lower limits
        F = LinearNDInterpolator(datlimz, datlim[:, 0].reshape(numlim, 1))
        lim1 = F(z).reshape(dat.size)

        # Compute Delaunay Triangulation and use linear interpolation to
        # determine the N-dimensional upper limits
        F = LinearNDInterpolator(datlimz, datlim[:, 1].reshape(numlim, 1))
        lim2 = F(z).reshape(dat.size)

    # where interpolation could not assign limits (NaN outside the domain of
    # datlimz), invert the limits so those points always fail the test
    ff = (np.isnan(lim1)) | (np.isnan(lim2))
    lim1[ff] = np.max(datlim[:, 1])
    lim2[ff] = np.min(datlim[:, 0])

    # compute the qcflags
    qcflag = (dat >= lim1) & (dat <= lim2)
    return qcflag.astype('int8')
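def _example_localrangetest():
    # Minimal usage sketch (a hypothetical helper, not from the DPS): in the
    # 1-D case the valid range is linearly interpolated to each measurement
    # depth.
    dat = np.array([10.0, 24.0, 14.0])
    z = np.array([0.0, 5.0, 10.0])                 # measurement depths
    datlim = np.array([[0.0, 20.0], [5.0, 25.0]])  # min/max limits ...
    datlimz = np.array([0.0, 10.0])                # ... at these two depths
    qcflag = dataqc_localrangetest(dat, z, datlim, datlimz)
    # at z=5.0 the interpolated range is [2.5, 22.5], so 24.0 is flagged 0;
    # expected: array([1, 0, 1], dtype=int8)
    return qcflag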