コード例 #1
0
    def process(self, trace):
        """
        Compute basic amplitude statistics of the trace's samples.

        This may take a while to calculate - use a moderate looping interval.

        :param trace: Trace whose ``data`` array is analysed.
        :return: Dictionary with min, max, average, median and the lower
            and upper quantiles of the amplitude values.

        .. rubric:: Example

        >>> from obspy.core import Trace
        >>> import numpy as np
        >>> tr = Trace(data=np.arange(-5,5))
        >>> result = MinMaxAmplitudeFeature().process(tr)
        >>> result['max']
        4.0
        >>> result['upper_quantile']
        1.75
        """
        samples = trace.data
        # Quantiles use the round-outwards policy of scoreatpercentile.
        return {
            'min': float(samples.min()),
            'max': float(samples.max()),
            'avg': float(samples.mean()),
            'median': float(scoreatpercentile(samples, 50, False)),
            'lower_quantile': float(scoreatpercentile(samples, 25)),
            'upper_quantile': float(scoreatpercentile(samples, 75)),
        }
コード例 #2
0
ファイル: feature.py プロジェクト: CSchwarz1234/obspy
    def process(self, trace):
        """
        Compute basic amplitude statistics of the trace's samples.

        This may take a while to calculate - use a moderate looping interval.

        :param trace: Trace whose ``data`` array is analysed.
        :return: Dictionary with min, max, average, median and the lower
            and upper quantiles of the amplitude values.

        .. rubric:: Example

        >>> from obspy import Trace
        >>> import numpy as np
        >>> tr = Trace(data=np.arange(-5,5))
        >>> result = MinMaxAmplitudeFeature().process(tr)
        >>> result['max']
        4.0
        >>> result['upper_quantile']
        1.75
        """
        samples = trace.data
        stats = {}
        stats['min'] = float(samples.min())
        stats['max'] = float(samples.max())
        stats['avg'] = float(samples.mean())
        # Median and quantiles via the shared percentile helper.
        for key, percentile in [('median', 50), ('lower_quantile', 25),
                                ('upper_quantile', 75)]:
            if key == 'median':
                stats[key] = float(scoreatpercentile(samples, percentile,
                                                     False))
            else:
                stats[key] = float(scoreatpercentile(samples, percentile))
        return stats
コード例 #3
0
ファイル: util.py プロジェクト: gthompson/obspy
def getTimingAndDataQuality(file_or_file_object):
    """
    Count all data quality flags of the given Mini-SEED file and return
    statistics about the timing quality if applicable.

    :type file_or_file_object: basestring or open file-like object.
    :param file_or_file_object: Mini-SEED file name or open file-like object
        containing a Mini-SEED record.

    :return: Dictionary with information about the timing quality and the data
        quality flags.

    .. rubric:: Data quality

    This method counts every set data quality flag bit in the fixed section
    of the data header of each record in a Mini-SEED file and returns the
    total count for each flag type.

    ========  =================================================
    Bit       Description
    ========  =================================================
    [Bit 0]   Amplifier saturation detected (station dependent)
    [Bit 1]   Digitizer clipping detected
    [Bit 2]   Spikes detected
    [Bit 3]   Glitches detected
    [Bit 4]   Missing/padded data present
    [Bit 5]   Telemetry synchronization error
    [Bit 6]   A digital filter may be charging
    [Bit 7]   Time tag is questionable
    ========  =================================================

    .. rubric:: Timing quality

    If the file has a Blockette 1001, statistics about the timing quality
    will also be returned. See the doctests for more information.

    The timing quality in Blockette 1001 is read for every record that
    carries one and the following statistics are returned:
    minimum, maximum, average, median and the upper and lower quantile.
    Quantiles are calculated using an integer round outwards policy: lower
    quantiles are rounded down (probability < 0.5), and upper quantiles
    (probability > 0.5) are rounded up.
    This gives no more than the requested probability in the tails, and at
    least the requested probability in the central area.
    The median is calculated by either taking the middle value or, with an
    even number of values, the average of the two middle values.

    .. rubric:: Example

    >>> from obspy.core.util import getExampleFile
    >>> filename = getExampleFile("qualityflags.mseed")
    >>> tq = getTimingAndDataQuality(filename)
    >>> for k, v in tq.items():
    ...     print(k, v)
    data_quality_flags [9, 8, 7, 6, 5, 4, 3, 2]

    Also works with file pointers and BytesIOs.

    >>> f = open(filename, 'rb')
    >>> tq = getTimingAndDataQuality(f)
    >>> for k, v in tq.items():
    ...     print(k, v)
    data_quality_flags [9, 8, 7, 6, 5, 4, 3, 2]

    >>> from obspy.core import compatibility
    >>> file_object = compatibility.BytesIO(f.read())
    >>> f.close()
    >>> tq = getTimingAndDataQuality(file_object)
    >>> for k, v in tq.items():
    ...     print(k, v)
    data_quality_flags [9, 8, 7, 6, 5, 4, 3, 2]

    If the file pointer or BytesIO position does not correspond to the first
    record the omitted records will be skipped.

    >>> _ = file_object.seek(1024, 1)
    >>> tq = getTimingAndDataQuality(file_object)
    >>> for k, v in tq.items():
    ...     print(k, v)
    data_quality_flags [8, 8, 7, 6, 5, 4, 3, 2]
    >>> file_object.close()

    Reading a file with Blockette 1001 will return timing quality statistics.
    The data quality flags always exist because they are part of the
    fixed Mini-SEED header and therefore need to be in every Mini-SEED file.

    >>> filename = getExampleFile("timingquality.mseed")
    >>> tq = getTimingAndDataQuality(filename)
    >>> for k, v in sorted(tq.items()):
    ...     print(k, v)
    data_quality_flags [0, 0, 0, 0, 0, 0, 0, 0]
    timing_quality_average 50.0
    timing_quality_lower_quantile 25.0
    timing_quality_max 100.0
    timing_quality_median 50.0
    timing_quality_min 0.0
    timing_quality_upper_quantile 75.0

    Also works with file pointers and BytesIOs.

    >>> f = open(filename, 'rb')
    >>> tq = getTimingAndDataQuality(f)
    >>> for k, v in sorted(tq.items()):
    ...     print(k, v)
    data_quality_flags [0, 0, 0, 0, 0, 0, 0, 0]
    timing_quality_average 50.0
    timing_quality_lower_quantile 25.0
    timing_quality_max 100.0
    timing_quality_median 50.0
    timing_quality_min 0.0
    timing_quality_upper_quantile 75.0

    >>> file_object = compatibility.BytesIO(f.read())
    >>> f.close()
    >>> tq = getTimingAndDataQuality(file_object)
    >>> for k, v in sorted(tq.items()):
    ...     print(k, v)
    data_quality_flags [0, 0, 0, 0, 0, 0, 0, 0]
    timing_quality_average 50.0
    timing_quality_lower_quantile 25.0
    timing_quality_max 100.0
    timing_quality_median 50.0
    timing_quality_min 0.0
    timing_quality_upper_quantile 75.0
    >>> file_object.close()
    """
    # Inspect the first record, mainly to learn the total file size.
    first_record = getRecordInformation(file_or_file_object)

    flag_counts = [0] * 8   # one counter per data quality flag bit
    timing_values = []      # timing quality of each record carrying one
    position = 0

    # Walk the file record by record. A valid record needs a record length
    # of at least 256 bytes.
    while position <= first_record['filesize'] - 256:
        record = getRecordInformation(file_or_file_object, position)
        # Collect the timing quality when Blockette 1001 provides it.
        if 'timing_quality' in record:
            timing_values.append(float(record['timing_quality']))
        # Tally every set bit of the record's data quality flags.
        flags = record['data_quality_flags']
        for bit in range(8):
            if flags & (1 << bit):
                flag_counts[bit] += 1
        position += record['record_length']

    result = {'data_quality_flags': flag_counts}

    # Without any timing quality values only the flag counts are returned.
    if not timing_values:
        return result

    # Derive the statistics from the sorted timing quality values.
    timing_values = sorted(timing_values)
    result['timing_quality_min'] = min(timing_values)
    result['timing_quality_max'] = max(timing_values)
    result['timing_quality_average'] = sum(timing_values) / len(timing_values)
    for name, percentile in (('median', 50), ('lower_quantile', 25),
                             ('upper_quantile', 75)):
        result['timing_quality_%s' % name] = \
            scoreatpercentile(timing_values, percentile, issorted=False)
    return result
コード例 #4
0
ファイル: util.py プロジェクト: shineusn/obspy
def getTimingAndDataQuality(file_or_file_object):
    """
    Count all data quality flags of the given Mini-SEED file and return
    statistics about the timing quality if applicable.

    :type file_or_file_object: basestring or open file-like object.
    :param file_or_file_object: Mini-SEED file name or open file-like object
        containing a Mini-SEED record.

    :return: Dictionary with information about the timing quality and the data
        quality flags.

    .. rubric:: Data quality

    This method counts every set data quality flag bit in the fixed section
    of the data header of each record in a Mini-SEED file and returns the
    total count for each flag type.

    ========  =================================================
    Bit       Description
    ========  =================================================
    [Bit 0]   Amplifier saturation detected (station dependent)
    [Bit 1]   Digitizer clipping detected
    [Bit 2]   Spikes detected
    [Bit 3]   Glitches detected
    [Bit 4]   Missing/padded data present
    [Bit 5]   Telemetry synchronization error
    [Bit 6]   A digital filter may be charging
    [Bit 7]   Time tag is questionable
    ========  =================================================

    .. rubric:: Timing quality

    If the file has a Blockette 1001, statistics about the timing quality
    will also be returned. See the doctests for more information.

    The timing quality in Blockette 1001 is read for every record that
    carries one and the following statistics are returned:
    minimum, maximum, average, median and the upper and lower quantile.
    Quantiles are calculated using an integer round outwards policy: lower
    quantiles are rounded down (probability < 0.5), and upper quantiles
    (probability > 0.5) are rounded up.
    This gives no more than the requested probability in the tails, and at
    least the requested probability in the central area.
    The median is calculated by either taking the middle value or, with an
    even number of values, the average of the two middle values.

    .. rubric:: Example

    >>> from obspy.core.util import getExampleFile
    >>> filename = getExampleFile("qualityflags.mseed")
    >>> tq = getTimingAndDataQuality(filename)
    >>> for k, v in tq.items():
    ...     print(k, v)
    data_quality_flags [9, 8, 7, 6, 5, 4, 3, 2]

    Also works with file pointers and BytesIOs.

    >>> f = open(filename, 'rb')
    >>> tq = getTimingAndDataQuality(f)
    >>> for k, v in tq.items():
    ...     print(k, v)
    data_quality_flags [9, 8, 7, 6, 5, 4, 3, 2]

    >>> from obspy.core import compatibility
    >>> file_object = compatibility.BytesIO(f.read())
    >>> f.close()
    >>> tq = getTimingAndDataQuality(file_object)
    >>> for k, v in tq.items():
    ...     print(k, v)
    data_quality_flags [9, 8, 7, 6, 5, 4, 3, 2]

    If the file pointer or BytesIO position does not correspond to the first
    record the omitted records will be skipped.

    >>> _ = file_object.seek(1024, 1)
    >>> tq = getTimingAndDataQuality(file_object)
    >>> for k, v in tq.items():
    ...     print(k, v)
    data_quality_flags [8, 8, 7, 6, 5, 4, 3, 2]
    >>> file_object.close()

    Reading a file with Blockette 1001 will return timing quality statistics.
    The data quality flags always exist because they are part of the
    fixed Mini-SEED header and therefore need to be in every Mini-SEED file.

    >>> filename = getExampleFile("timingquality.mseed")
    >>> tq = getTimingAndDataQuality(filename)
    >>> for k, v in sorted(tq.items()):
    ...     print(k, v)
    data_quality_flags [0, 0, 0, 0, 0, 0, 0, 0]
    timing_quality_average 50.0
    timing_quality_lower_quantile 25.0
    timing_quality_max 100.0
    timing_quality_median 50.0
    timing_quality_min 0.0
    timing_quality_upper_quantile 75.0

    Also works with file pointers and BytesIOs.

    >>> f = open(filename, 'rb')
    >>> tq = getTimingAndDataQuality(f)
    >>> for k, v in sorted(tq.items()):
    ...     print(k, v)
    data_quality_flags [0, 0, 0, 0, 0, 0, 0, 0]
    timing_quality_average 50.0
    timing_quality_lower_quantile 25.0
    timing_quality_max 100.0
    timing_quality_median 50.0
    timing_quality_min 0.0
    timing_quality_upper_quantile 75.0

    >>> file_object = compatibility.BytesIO(f.read())
    >>> f.close()
    >>> tq = getTimingAndDataQuality(file_object)
    >>> for k, v in sorted(tq.items()):
    ...     print(k, v)
    data_quality_flags [0, 0, 0, 0, 0, 0, 0, 0]
    timing_quality_average 50.0
    timing_quality_lower_quantile 25.0
    timing_quality_max 100.0
    timing_quality_median 50.0
    timing_quality_min 0.0
    timing_quality_upper_quantile 75.0
    >>> file_object.close()
    """
    # Inspect the first record, mainly to learn the total file size.
    first_record = getRecordInformation(file_or_file_object)

    flag_counts = [0] * 8   # one counter per data quality flag bit
    timing_values = []      # timing quality of each record carrying one
    position = 0

    # Walk the file record by record. A valid record needs a record length
    # of at least 256 bytes.
    while position <= first_record['filesize'] - 256:
        record = getRecordInformation(file_or_file_object, position)
        # Collect the timing quality when Blockette 1001 provides it.
        if 'timing_quality' in record:
            timing_values.append(float(record['timing_quality']))
        # Tally every set bit of the record's data quality flags.
        flags = record['data_quality_flags']
        for bit in range(8):
            if flags & (1 << bit):
                flag_counts[bit] += 1
        position += record['record_length']

    result = {'data_quality_flags': flag_counts}

    # Without any timing quality values only the flag counts are returned.
    if not timing_values:
        return result

    # Derive the statistics from the sorted timing quality values.
    timing_values = sorted(timing_values)
    result['timing_quality_min'] = min(timing_values)
    result['timing_quality_max'] = max(timing_values)
    result['timing_quality_average'] = sum(timing_values) / len(timing_values)
    for name, percentile in (('median', 50), ('lower_quantile', 25),
                             ('upper_quantile', 75)):
        result['timing_quality_%s' % name] = \
            scoreatpercentile(timing_values, percentile, issorted=False)
    return result