Example #1
0
    def _rtrim(self, endtime, pad=False, nearest_sample=True, fill_value=None):
        """
        Cut current trace to given end time. For more info see
        :meth:`~obspy.core.trace.Trace.trim`.

        :param endtime: New end time, either a UTCDateTime or a number of
            seconds counted backwards from the current end time.
        :param pad: If True, extend the trace with a gap when ``endtime``
            lies after the current end time.
        :param nearest_sample: If True, snap ``endtime`` to the closest
            sample of this trace.
        :param fill_value: Value used to fill a padded gap; ``None`` yields
            a masked gap.
        :raises TypeError: If ``endtime`` is neither a number nor a
            UTCDateTime instance.

        .. rubric:: Example

        >>> tr = Trace(data=np.arange(0, 10))
        >>> tr.stats.delta = 1.0
        >>> tr._rtrim(tr.stats.starttime + 2)  # doctest: +ELLIPSIS
        <...Trace object at 0x...>
        >>> tr.data
        array([0, 1, 2])
        >>> tr.stats.endtime
        UTCDateTime(1970, 1, 1, 0, 0, 2)
        """
        # remember dtype so an emptied trace keeps the original data type
        org_dtype = self.data.dtype
        if isinstance(endtime, float) or isinstance(endtime, int):
            # a plain number means "seconds before the current end time"
            endtime = UTCDateTime(self.stats.endtime) - endtime
        elif not isinstance(endtime, UTCDateTime):
            raise TypeError
        # check if in boundary
        if nearest_sample:
            # delta = signed sample count between the requested end and the
            # current last sample (positive -> extend, negative -> cut)
            delta = round_away((endtime - self.stats.starttime) *
                               self.stats.sampling_rate) - self.stats.npts + 1
            delta = int(delta)
        else:
            # solution for #127, however some tests need to be changed
            # delta = -1*int(math.floor(compatibility.round_away(
            #     (self.stats.endtime - endtime) * \
            #     self.stats.sampling_rate, 7)))
            delta = int(
                math.floor(
                    round((endtime - self.stats.endtime) *
                          self.stats.sampling_rate, 7)))
        if delta == 0 or (delta > 0 and not pad):
            # nothing to do; without pad a later endtime is a no-op
            return self
        if delta > 0 and pad:
            # extend the trace to the right with a (masked/filled) gap
            try:
                gap = createEmptyDataChunk(delta, self.data.dtype, fill_value)
            except ValueError:
                # createEmptyDataChunk returns negative ValueError ?? for
                # too large number of points, e.g. 189336539799
                raise Exception("Time offset between starttime and " +
                                "trace.starttime too large")
            self.data = np.ma.concatenate((self.data, gap))
            return self
        elif endtime < self.stats.starttime:
            # requested end lies before the trace even starts: empty the
            # trace but keep a consistent starttime and dtype
            self.stats.starttime = self.stats.endtime + \
                delta * self.stats.delta
            self.data = np.empty(0, dtype=org_dtype)
            return self
        # cut from right
        delta = abs(delta)
        total = len(self.data) - delta
        if endtime == self.stats.starttime:
            # keep at least the very first sample when trimming exactly to
            # the start time
            total = 1
        self.data = self.data[:total]
        return self
Example #2
0
    def _rtrim(self, endtime, pad=False, nearest_sample=True, fill_value=None):
        """
        Cut current trace to given end time. For more info see
        :meth:`~obspy.core.trace.Trace.trim`.

        :param endtime: New end time, either a UTCDateTime or a number of
            seconds counted backwards from the current end time.
        :param pad: If True, extend the trace with a gap when ``endtime``
            lies after the current end time.
        :param nearest_sample: If True, snap ``endtime`` to the closest
            sample of this trace.
        :param fill_value: Value used to fill a padded gap; ``None`` yields
            a masked gap.
        :raises TypeError: If ``endtime`` is neither a number nor a
            UTCDateTime instance.

        .. rubric:: Example

        >>> tr = Trace(data=np.arange(0, 10))
        >>> tr.stats.delta = 1.0
        >>> tr._rtrim(tr.stats.starttime + 2)  # doctest: +ELLIPSIS
        <...Trace object at 0x...>
        >>> tr.data
        array([0, 1, 2])
        >>> tr.stats.endtime
        UTCDateTime(1970, 1, 1, 0, 0, 2)
        """
        # remember dtype so an emptied trace keeps the original data type
        org_dtype = self.data.dtype
        if isinstance(endtime, (float, int)):
            # a plain number means "seconds before the current end time"
            endtime = UTCDateTime(self.stats.endtime) - endtime
        elif not isinstance(endtime, UTCDateTime):
            raise TypeError
        # check if in boundary
        if nearest_sample:
            # delta = signed sample count between the requested end and the
            # current last sample (positive -> extend, negative -> cut)
            delta = round_away((endtime - self.stats.starttime) *
                               self.stats.sampling_rate) - self.stats.npts + 1
            delta = int(delta)
        else:
            # solution for #127, however some tests need to be changed
            # delta = -1*int(math.floor(compatibility.round_away(
            #     (self.stats.endtime - endtime) * \
            #     self.stats.sampling_rate, 7)))
            delta = int(math.floor(round((endtime - self.stats.endtime) *
                                         self.stats.sampling_rate, 7)))
        if delta == 0 or (delta > 0 and not pad):
            # nothing to do; without pad a later endtime is a no-op
            return self
        if delta > 0 and pad:
            # extend the trace to the right with a (masked/filled) gap
            try:
                gap = createEmptyDataChunk(delta, self.data.dtype, fill_value)
            except ValueError:
                # createEmptyDataChunk raises ValueError for an excessive
                # number of points, e.g. 189336539799.
                # BUG FIX: message previously referred to "starttime and
                # trace.starttime" (copy-pasted from _ltrim) although this
                # path extends towards the END of the trace.
                raise Exception("Time offset between endtime and "
                                "trace.endtime too large")
            self.data = np.ma.concatenate((self.data, gap))
            return self
        elif endtime < self.stats.starttime:
            # requested end lies before the trace even starts: empty the
            # trace but keep a consistent starttime and dtype
            self.stats.starttime = self.stats.endtime + \
                delta * self.stats.delta
            self.data = np.empty(0, dtype=org_dtype)
            return self
        # cut from right
        delta = abs(delta)
        total = len(self.data) - delta
        if endtime == self.stats.starttime:
            # keep at least the very first sample when trimming exactly to
            # the start time
            total = 1
        self.data = self.data[:total]
        return self
Example #3
0
 def trim(self, starttime=None, endtime=None, pad=False,
          nearest_sample=True, fill_value=None):
     """ Cut all traces of this Stream object to given start and end time """
     if not self:
         return
     if nearest_sample:
         # snap the requested window onto the sample grid of the first
         # trace so every trace is cut at identical instants
         ref = self.traces[0]
         rate = ref.stats.sampling_rate
         if starttime:
             shift = round_away((starttime - ref.stats.starttime) * rate)
             starttime = ref.stats.starttime + shift * ref.stats.delta
         if endtime:
             # shift is negative here (endtime precedes the trace end)
             shift = round_away((endtime - ref.stats.endtime) * rate)
             endtime = ref.stats.endtime + shift * ref.stats.delta
     # delegate the actual cutting to each trace
     for tr in self.traces:
         tr.trim(starttime, endtime, pad=pad,
                 nearest_sample=nearest_sample, fill_value=fill_value)
     # drop traces that became empty through trimming
     self.traces = [tr for tr in self.traces if tr.stats.npts]
     return self
Example #4
0
    def _ltrim(self, starttime, pad=False, nearest_sample=True,
               fill_value=None):
        """
        Cut current trace to given start time. For more info see
        :meth:`~obspy.core.trace.Trace.trim`.

        :param starttime: New start time, either a UTCDateTime or a number
            of seconds counted forwards from the current start time.
        :param pad: If True, extend the trace with a gap when ``starttime``
            lies before the current start time.
        :param nearest_sample: If True, snap ``starttime`` to the closest
            sample of this trace.
        :param fill_value: Value used to fill a padded gap; ``None`` yields
            a masked gap.
        :raises TypeError: If ``starttime`` is neither a number nor a
            UTCDateTime instance.

        .. rubric:: Example

        >>> tr = Trace(data=np.arange(0, 10))
        >>> tr.stats.delta = 1.0
        >>> tr._ltrim(tr.stats.starttime + 8)  # doctest: +ELLIPSIS
        <...Trace object at 0x...>
        >>> tr.data
        array([8, 9])
        >>> tr.stats.starttime
        UTCDateTime(1970, 1, 1, 0, 0, 8)
        """
        # remember dtype so an emptied trace keeps the original data type
        org_dtype = self.data.dtype
        if isinstance(starttime, float) or isinstance(starttime, int):
            # a plain number means "seconds after the current start time"
            starttime = UTCDateTime(self.stats.starttime) + starttime
        elif not isinstance(starttime, UTCDateTime):
            raise TypeError
        # check if in boundary
        if nearest_sample:
            # delta = signed sample count from the current first sample to
            # the requested start (negative -> extend, positive -> cut)
            delta = round_away((starttime - self.stats.starttime) * self.stats.sampling_rate)
            # due to rounding and npts starttime must always be right of
            # self.stats.starttime, rtrim relies on it
            if delta < 0 and pad:
                npts = abs(delta) + 10  # use this as a start
                # recompute the shift from an artificially earlier origin to
                # avoid rounding across the current start time
                newstarttime = self.stats.starttime - npts / \
                    float(self.stats.sampling_rate)
                newdelta = round_away((starttime - newstarttime) * self.stats.sampling_rate)
                delta = newdelta - npts
            delta = int(delta)
        else:
            delta = int(math.floor(round((self.stats.starttime - starttime) *
                                   self.stats.sampling_rate, 7))) * -1
        # Adjust starttime only if delta is greater than zero or if the values
        # are padded with masked arrays.
        if delta > 0 or pad:
            self.stats.starttime += delta * self.stats.delta
        if delta == 0 or (delta < 0 and not pad):
            # nothing to do; without pad an earlier starttime is a no-op
            return self
        elif delta < 0 and pad:
            # extend the trace to the left with a (masked/filled) gap
            try:
                gap = createEmptyDataChunk(abs(delta), self.data.dtype,
                                           fill_value)
            except ValueError:
                # createEmptyDataChunk returns negative ValueError ?? for
                # too large number of points, e.g. 189336539799
                raise Exception("Time offset between starttime and "
                                "trace.starttime too large")
            self.data = np.ma.concatenate((gap, self.data))
            return self
        elif starttime > self.stats.endtime:
            # requested start lies after the trace ends: empty the trace
            self.data = np.empty(0, dtype=org_dtype)
            return self
        elif delta > 0:
            # cut from the left
            try:
                self.data = self.data[delta:]
            except IndexError:
                # a huge numbers for delta raises an IndexError
                # here we just create empty array with same dtype
                self.data = np.empty(0, dtype=org_dtype)
        return self
Example #5
0
 def __add__(self, trace, method=1, interpolation_samples=0,
             fill_value="latest", sanity_checks=True):
     """
     Add another Trace object to the current trace.

     :param trace: Trace to merge with the current one.
     :param method: Strategy for conflicting data in overlaps: 0 raises,
         1 interpolates between (or discards) the conflicting samples.
     :param interpolation_samples: Number of samples interpolated across an
         overlap when ``method=1``; ``-1`` uses the whole overlap.
     :param fill_value: Value used to fill gaps. ``"latest"`` repeats the
         last sample of the earlier trace, ``"interpolate"`` interpolates
         linearly across the gap, ``None`` leaves the gap masked.
     :param sanity_checks: If True, verify that both traces share id,
         sampling rate, calibration factor and dtype before merging.
     :return: A new trace (same class as ``self``) spanning both inputs.
     :raises TypeError: If the traces are incompatible.
     """
     if sanity_checks:
         if not isinstance(trace, Trace):
             raise TypeError
         #  check id
         if self.getId() != trace.getId():
             raise TypeError("Trace ID differs")
         #  check sample rate
         if self.stats.sampling_rate != trace.stats.sampling_rate:
             raise TypeError("Sampling rate differs")
         #  check calibration factor
         if self.stats.calib != trace.stats.calib:
             raise TypeError("Calibration factor differs")
         # check data type
         if self.data.dtype != trace.data.dtype:
             raise TypeError("Data type differs")
     # order the traces chronologically: lt starts first
     if self.stats.starttime <= trace.stats.starttime:
         lt = self
         rt = trace
     else:
         rt = self
         lt = trace
     # check whether to use the latest value to fill a gap
     if fill_value == "latest":
         fill_value = lt.data[-1]
     elif fill_value == "interpolate":
         fill_value = (lt.data[-1], rt.data[0])
     sr = self.stats.sampling_rate
     # delta < 0: overlap; delta == 0: exact fit; delta > 0: gap
     delta = (rt.stats.starttime - lt.stats.endtime) * sr
     delta = int(round_away(delta)) - 1
     delta_endtime = lt.stats.endtime - rt.stats.endtime
     # create the returned trace
     out = self.__class__(header=deepcopy(lt.stats))
     # check if overlap or gap
     if delta < 0 and delta_endtime < 0:
         # overlap
         delta = abs(delta)
         if np.all(np.equal(lt.data[-delta:], rt.data[:delta])):
             # overlapping samples agree -> simply drop the duplicates
             data = [lt.data[:-delta], rt.data]
         elif method == 0:
             # BUG FIX: replaced Python 2 ``raise E, msg`` syntax (a
             # SyntaxError on Python 3) and corrected the message: this
             # branch rejects method 0, not method 2.
             raise ValueError("Unknown method 0 in __add__")
         elif method == 1 and interpolation_samples >= -1:
             try:
                 ls = lt.data[-delta - 1]
             except IndexError:
                 # overlap covers the whole left trace; anchor the
                 # interpolation on its first sample instead
                 # (was a bare ``except:`` which hid unrelated errors)
                 ls = lt.data[0]
             if interpolation_samples == -1:
                 interpolation_samples = delta
             elif interpolation_samples > delta:
                 interpolation_samples = delta
             try:
                 rs = rt.data[interpolation_samples]
             except IndexError:
                 # contained trace
                 data = [lt.data]
             else:
                 # include left and right sample (delta + 2)
                 interpolation = np.linspace(ls, rs,
                                             interpolation_samples + 2)
                 # cut ls and rs and ensure correct data type
                 interpolation = np.require(interpolation[1:-1],
                                            lt.data.dtype)
                 data = [lt.data[:-delta], interpolation,
                         rt.data[interpolation_samples:]]
         else:
             raise NotImplementedError
     elif delta < 0 and delta_endtime >= 0:
         # contained trace
         delta = abs(delta)
         lenrt = len(rt)
         t1 = len(lt) - delta
         t2 = t1 + lenrt
         # check if data are the same
         data_equal = (lt.data[t1:t2] == rt.data)
         # force a masked array and fill it for check of equality of valid
         # data points
         if np.all(np.ma.masked_array(data_equal).filled()):
             # if all (unmasked) data are equal,
             if isinstance(data_equal, np.ma.masked_array):
                 x = np.ma.masked_array(lt.data[t1:t2])
                 y = np.ma.masked_array(rt.data)
                 data_same = np.choose(x.mask, [x, y])
                 data = np.choose(x.mask & y.mask, [data_same, np.nan])
                 if np.any(np.isnan(data)):
                     data = np.ma.masked_invalid(data)
                 # convert back to maximum dtype of original data
                 dtype = np.max((x.dtype, y.dtype))
                 data = data.astype(dtype)
                 data = [lt.data[:t1], data, lt.data[t2:]]
             else:
                 data = [lt.data]
         elif method == 1:
             data = [lt.data]
         else:
             raise NotImplementedError
     elif delta == 0:
         # exact fit - merge both traces
         data = [lt.data, rt.data]
     else:
         # gap; use fixed value or interpolate in between
         gap = createEmptyDataChunk(delta, lt.data.dtype, fill_value)
         data = [lt.data, gap, rt.data]
     # merge traces depending on NumPy array type
     if True in [isinstance(_i, np.ma.masked_array) for _i in data]:
         data = np.ma.concatenate(data)
     else:
         data = np.concatenate(data)
         data = np.require(data, dtype=lt.data.dtype)
     # Check if we can downgrade to normal ndarray
     if isinstance(data, np.ma.masked_array) and \
        np.ma.count_masked(data) == 0:
         data = data.compressed()
     out.data = data
     return out
Example #6
0
    def _ltrim(self,
               starttime,
               pad=False,
               nearest_sample=True,
               fill_value=None):
        """
        Cut current trace to given start time. For more info see
        :meth:`~obspy.core.trace.Trace.trim`.

        .. rubric:: Example

        >>> tr = Trace(data=np.arange(0, 10))
        >>> tr.stats.delta = 1.0
        >>> tr._ltrim(tr.stats.starttime + 8)  # doctest: +ELLIPSIS
        <...Trace object at 0x...>
        >>> tr.data
        array([8, 9])
        >>> tr.stats.starttime
        UTCDateTime(1970, 1, 1, 0, 0, 8)
        """
        # keep the dtype around so an emptied trace stays type-consistent
        original_dtype = self.data.dtype
        if isinstance(starttime, (float, int)):
            # a plain number is interpreted relative to the current start
            starttime = UTCDateTime(self.stats.starttime) + starttime
        elif not isinstance(starttime, UTCDateTime):
            raise TypeError
        rate = self.stats.sampling_rate
        if nearest_sample:
            # signed sample count from the first sample to the new start
            shift = round_away((starttime - self.stats.starttime) * rate)
            # due to rounding and npts starttime must always be right of
            # self.stats.starttime, rtrim relies on it
            if shift < 0 and pad:
                # re-derive the shift from an artificially earlier origin so
                # rounding never lands left of the requested start
                probe_npts = abs(shift) + 10  # use this as a start
                probe_origin = self.stats.starttime - probe_npts / \
                    float(rate)
                shift = round_away(
                    (starttime - probe_origin) * rate) - probe_npts
            shift = int(shift)
        else:
            shift = -1 * int(math.floor(
                round((self.stats.starttime - starttime) * rate, 7)))
        # Adjust starttime only when cutting, or when padding with a masked
        # gap (then the header must reflect the new, earlier start).
        if shift > 0 or pad:
            self.stats.starttime += shift * self.stats.delta
        if shift == 0 or (shift < 0 and not pad):
            # nothing to do; without pad an earlier start is a no-op
            return self
        if shift < 0 and pad:
            # grow the trace to the left with a (masked/filled) gap
            try:
                gap = createEmptyDataChunk(abs(shift), self.data.dtype,
                                           fill_value)
            except ValueError:
                # createEmptyDataChunk raises ValueError for an excessive
                # number of points, e.g. 189336539799
                raise Exception("Time offset between starttime and "
                                "trace.starttime too large")
            self.data = np.ma.concatenate((gap, self.data))
        elif starttime > self.stats.endtime:
            # the requested start lies after the trace ends: empty it
            self.data = np.empty(0, dtype=original_dtype)
        elif shift > 0:
            # drop samples from the left
            try:
                self.data = self.data[shift:]
            except IndexError:
                # an absurdly large shift raises IndexError; empty the
                # trace while preserving its dtype
                self.data = np.empty(0, dtype=original_dtype)
        return self
Example #7
0
 def __add__(self,
             trace,
             method=1,
             interpolation_samples=0,
             fill_value="latest",
             sanity_checks=True):
     """
     Add another Trace object to the current trace.

     :param trace: Trace to merge with the current one.
     :param method: Strategy for conflicting data in overlaps: 0 raises,
         1 interpolates between (or discards) the conflicting samples.
     :param interpolation_samples: Number of samples interpolated across an
         overlap when ``method=1``; ``-1`` uses the whole overlap.
     :param fill_value: Value used to fill gaps. ``"latest"`` repeats the
         last sample of the earlier trace, ``"interpolate"`` interpolates
         linearly across the gap, ``None`` leaves the gap masked.
     :param sanity_checks: If True, verify that both traces share id,
         sampling rate, calibration factor and dtype before merging.
     :return: A new trace (same class as ``self``) spanning both inputs.
     :raises TypeError: If the traces are incompatible.
     """
     if sanity_checks:
         if not isinstance(trace, Trace):
             raise TypeError
         #  check id
         if self.getId() != trace.getId():
             raise TypeError("Trace ID differs")
         #  check sample rate
         if self.stats.sampling_rate != trace.stats.sampling_rate:
             raise TypeError("Sampling rate differs")
         #  check calibration factor
         if self.stats.calib != trace.stats.calib:
             raise TypeError("Calibration factor differs")
         # check data type
         if self.data.dtype != trace.data.dtype:
             raise TypeError("Data type differs")
     # order the traces chronologically: lt starts first
     if self.stats.starttime <= trace.stats.starttime:
         lt = self
         rt = trace
     else:
         rt = self
         lt = trace
     # check whether to use the latest value to fill a gap
     if fill_value == "latest":
         fill_value = lt.data[-1]
     elif fill_value == "interpolate":
         fill_value = (lt.data[-1], rt.data[0])
     sr = self.stats.sampling_rate
     # delta < 0: overlap; delta == 0: exact fit; delta > 0: gap
     delta = (rt.stats.starttime - lt.stats.endtime) * sr
     delta = int(round_away(delta)) - 1
     delta_endtime = lt.stats.endtime - rt.stats.endtime
     # create the returned trace
     out = self.__class__(header=deepcopy(lt.stats))
     # check if overlap or gap
     if delta < 0 and delta_endtime < 0:
         # overlap
         delta = abs(delta)
         if np.all(np.equal(lt.data[-delta:], rt.data[:delta])):
             # overlapping samples agree -> simply drop the duplicates
             data = [lt.data[:-delta], rt.data]
         elif method == 0:
             # BUG FIX: replaced Python 2 ``raise E, msg`` syntax (a
             # SyntaxError on Python 3) and corrected the message: this
             # branch rejects method 0, not method 2.
             raise ValueError("Unknown method 0 in __add__")
         elif method == 1 and interpolation_samples >= -1:
             try:
                 ls = lt.data[-delta - 1]
             except IndexError:
                 # overlap covers the whole left trace; anchor the
                 # interpolation on its first sample instead
                 # (was a bare ``except:`` which hid unrelated errors)
                 ls = lt.data[0]
             if interpolation_samples == -1:
                 interpolation_samples = delta
             elif interpolation_samples > delta:
                 interpolation_samples = delta
             try:
                 rs = rt.data[interpolation_samples]
             except IndexError:
                 # contained trace
                 data = [lt.data]
             else:
                 # include left and right sample (delta + 2)
                 interpolation = np.linspace(ls, rs,
                                             interpolation_samples + 2)
                 # cut ls and rs and ensure correct data type
                 interpolation = np.require(interpolation[1:-1],
                                            lt.data.dtype)
                 data = [
                     lt.data[:-delta], interpolation,
                     rt.data[interpolation_samples:]
                 ]
         else:
             raise NotImplementedError
     elif delta < 0 and delta_endtime >= 0:
         # contained trace
         delta = abs(delta)
         lenrt = len(rt)
         t1 = len(lt) - delta
         t2 = t1 + lenrt
         # check if data are the same
         data_equal = (lt.data[t1:t2] == rt.data)
         # force a masked array and fill it for check of equality of valid
         # data points
         if np.all(np.ma.masked_array(data_equal).filled()):
             # if all (unmasked) data are equal,
             if isinstance(data_equal, np.ma.masked_array):
                 x = np.ma.masked_array(lt.data[t1:t2])
                 y = np.ma.masked_array(rt.data)
                 data_same = np.choose(x.mask, [x, y])
                 data = np.choose(x.mask & y.mask, [data_same, np.nan])
                 if np.any(np.isnan(data)):
                     data = np.ma.masked_invalid(data)
                 # convert back to maximum dtype of original data
                 dtype = np.max((x.dtype, y.dtype))
                 data = data.astype(dtype)
                 data = [lt.data[:t1], data, lt.data[t2:]]
             else:
                 data = [lt.data]
         elif method == 1:
             data = [lt.data]
         else:
             raise NotImplementedError
     elif delta == 0:
         # exact fit - merge both traces
         data = [lt.data, rt.data]
     else:
         # gap; use fixed value or interpolate in between
         gap = createEmptyDataChunk(delta, lt.data.dtype, fill_value)
         data = [lt.data, gap, rt.data]
     # merge traces depending on NumPy array type
     if True in [isinstance(_i, np.ma.masked_array) for _i in data]:
         data = np.ma.concatenate(data)
     else:
         data = np.concatenate(data)
         data = np.require(data, dtype=lt.data.dtype)
     # Check if we can downgrade to normal ndarray
     if isinstance(data, np.ma.masked_array) and \
        np.ma.count_masked(data) == 0:
         data = data.compressed()
     out.data = data
     return out