Code example #1
    badObs = []
    for ob in iagaCodes:

        dist_stream = dist_factory.get_timeseries(
            observatory=ob,
            starttime=out_start,
            endtime=out_end,
            channels=['Xdt', 'Ydt', 'Zdt', 'Fdt'])

        sq_stream = sq_factory.get_timeseries(
            observatory=ob,
            starttime=out_start,
            endtime=out_end,
            channels=['Xsq', 'Ysq', 'Zsq', 'Fsq'])

        if dist_stream.count() == 0 or sq_stream.count() == 0:
            print(ob, 'data could not be read; skipping...')
            badObs.append(ob)  # remove bad iagaCodes after for-loop
            continue
        else:
            print(ob, 'data read in successfully')

        dist_X = dist_stream.select(channel="Xdt")[0]
        dist_Y = dist_stream.select(channel="Ydt")[0]
        dist_Z = dist_stream.select(channel="Zdt")[0]

        sq_X = sq_stream.select(channel="Xsq")[0]
        sq_Y = sq_stream.select(channel="Ysq")[0]
        sq_Z = sq_stream.select(channel="Zsq")[0]

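The comment beside badObs.append(ob) defers the actual cleanup to after the loop. A minimal sketch of that step, assuming iagaCodes is a plain list as the loop suggests:

    # Hypothetical post-loop cleanup hinted at by the comment above:
    # drop observatories whose data could not be read.
    for ob in badObs:
        iagaCodes.remove(ob)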
Code example #2
File: template_gen.py  Project: travisalongi/Cascadia
  if os.path.isfile(my_file):
      st += read(my_file)
  my_file = ('/auto/proj/Cascadia/data_nobackup/NC/KCS/KCS.NC.EHZ.%s.%s' % (yr, days))
  if os.path.isfile(my_file):
      st += read(my_file)
  my_file = ('/auto/proj/Cascadia/data_nobackup/NC/KCO/KCO.NC.EHZ.%s.%s' % (yr, days))
  if os.path.isfile(my_file):
      st += read(my_file)
  my_file = ('/auto/proj/Cascadia/data_nobackup/NC/KMR/KMR.NC.HHZ.%s.%s' % (yr, days))
  if os.path.isfile(my_file):
      st += read(my_file)
  my_file = ('/auto/proj/Cascadia/data_nobackup/NC/KPP/KPP.NC.HHZ.%s.%s' % (yr, days))
  if os.path.isfile(my_file):
      st += read(my_file)
 
  if st.count() > 0: # need waveforms to continue
      std = Stream()
      for tr in st:
          num = tr.stats.npts
          samp = tr.stats.sampling_rate             
          if num >= (samp*86400)*.8:
              std.append(tr)
      
      print('number of good waveforms ', std.count())
      if std.count() < 3:  # want 3 or more waveforms for templates
          print('skipping event: not enough good waveforms')

      else:
          std.sort(['starttime'])
          std.merge(fill_value="interpolate")
          st1 = std.copy()
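The five read blocks above differ only in station and channel codes, so they could be collapsed into a loop. A minimal sketch under that assumption; the stations list is taken from the paths above, and yr and days are placeholders for the values defined in the enclosing script:

    import os
    from obspy import read, Stream

    yr, days = 2011, 32  # placeholders for the year and julian day used above
    # (station, channel) pairs taken from the repeated blocks above
    stations = [('KCS', 'EHZ'), ('KCO', 'EHZ'), ('KMR', 'HHZ'), ('KPP', 'HHZ')]

    st = Stream()
    for sta, cha in stations:
        # same path template as the repeated blocks above
        my_file = ('/auto/proj/Cascadia/data_nobackup/NC/%s/%s.NC.%s.%s.%s'
                   % (sta, sta, cha, yr, days))
        if os.path.isfile(my_file):
            st += read(my_file)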
Code example #3
    def read_from_SDS(self,
                      sds_root,
                      net_name,
                      sta_name,
                      comp_name,
                      starttime=None,
                      endtime=None,
                      rmean=False,
                      taper=False,
                      pad_value=None):
        """
        Read waveform data from an SDS structured archive.  Simple overlaps and
        adjacent traces are merged if possible.

        :param sds_root: root of the SDS archive
        :param net_name: network name
        :param sta_name: station name
        :param comp_name: component name
        :param starttime: Start time of data to be read.
        :param endtime: End time of data to be read.
        :param rmean: If ``True`` removes the mean from the data upon reading.
            If data are segmented, the mean will be removed from all segments
            individually.
        :param taper: If ``True`` applies a cosine taper to the data upon
            reading.  If data are segmented, tapers are applied to all segments
            individually.
        :param pad_value: If this parameter is set, points between
            ``starttime`` and the first point in the file, and points between
            the last point in the file and ``endtime``, will be set to
            ``pad_value``.  You may want to also use the ``rmean`` and
            ``taper`` parameters, depending on the nature of the data.

        :type sds_root: string
        :type net_name: string
        :type sta_name: string
        :type comp_name: string
        :type starttime: ``obspy.core.utcdatetime.UTCDateTime`` object,
            optional
        :type endtime: ``obspy.core.utcdatetime.UTCDateTime`` object, optional
        :type rmean: boolean, optional
        :type taper: boolean, optional
        :type pad_value: float, optional

        :raises UserWarning: If there are no data between ``starttime`` and
            ``endtime``

        """

        logging.info("Reading from SDS structure %s %s %s ..." %
                     (net_name, sta_name, comp_name))

        # Get the complete file list. If a directory, get all the filenames.
        filename = os.path.join(sds_root, net_name, sta_name,
                                "%s.D" % comp_name, "*")
        logging.debug("Reading %s between %s and %s" %
                      (filename, starttime.isoformat(), endtime.isoformat()))
        # guard against an empty glob before testing the first match
        glob_results = glob.glob(filename)
        if glob_results and os.path.isdir(glob_results[0]):
            filename = os.path.join(filename, "*")
        file_glob = glob.glob(filename)

        # read header from all files to keep only those within the time limits
        fnames_within_times = []
        for fname in file_glob:
            st_head = stream.read(fname, headonly=True)
            # retrieve first_start and last_end time for the stream
            # without making any assumptions on order of traces
            first_start = st_head[0].stats.starttime
            last_end = st_head[0].stats.endtime
            # find earliest start time and latest end time in stream
            for tr in st_head:
                if tr.stats.starttime < first_start:
                    first_start = tr.stats.starttime
                if tr.stats.endtime > last_end:
                    last_end = tr.stats.endtime
            # add to list if start or end time are within our requested limits
            if (first_start < endtime and last_end > starttime):
                fnames_within_times.append(fname)

        logging.debug("Found %d files to read" % len(fnames_within_times))

        # now read the full data only for the relevant files
        st = Stream()
        for fname in fnames_within_times:
            st_tmp = read(fname, starttime=starttime, endtime=endtime)
            for tr in st_tmp:
                st.append(tr)
        # and merge nicely
        st.merge(method=-1)

        if st.count() > 1:  # There are gaps after sensible cleanup merging
            logging.info("File contains gaps:")
            st.printGaps()

        # apply rmean if requested
        if rmean:
            logging.info("Removing the mean from single traces.")
            st = stream_rmean(st)

        # apply taper if requested
        if taper:
            logging.info("Tapering single traces.")
            st = stream_taper(st)

        if pad_value is not None:
            try:
                first_tr = st.traces[0]
                # save delta (to save typing)
                delta = first_tr.stats.delta
                if (starttime is not None) and \
                   ((first_tr.stats.starttime - starttime) > delta):
                    logging.debug("Padding with value %f from %s to first "
                                  "point in file at %s." %
                                  (pad_value, starttime.isoformat(),
                                   first_tr.stats.starttime.isoformat()))
                    # find the number of points from starttime to
                    # end of the first trace
                    npts_full_trace = \
                        int(np.floor((first_tr.stats.endtime -
                                      starttime) / delta))+1
                    # find the number of points of the padding section
                    n_pad = npts_full_trace - first_tr.stats.npts
                    # fill the full time range with pad value
                    tr_pad = np.zeros(npts_full_trace) + pad_value
                    # substitute in the data
                    tr_pad[n_pad:] = first_tr.data[:]
                    first_tr.data = tr_pad
                    first_tr.stats.starttime = starttime
                    first_tr.stats.npts = npts_full_trace
                    st.traces[0] = first_tr

                last_tr = st.traces[-1]
                # save delta (to save typing)
                delta = last_tr.stats.delta
                if (endtime is not None) and \
                   ((endtime - last_tr.stats.endtime) > delta):
                    logging.debug(
                        "Padding with value %f from last point "
                        "in file at %s to %s." %
                        (pad_value, last_tr.stats.endtime.isoformat(),
                         endtime.isoformat()))
                    # find the number of points from endtime to
                    # start of the last trace
                    npts_full_trace = \
                        int(np.floor((endtime -
                                      last_tr.stats.starttime) / delta))+1
                    # fill the full time range with pad value
                    tr_pad = np.zeros(npts_full_trace) + pad_value
                    # substitute in the data
                    tr_pad[0:last_tr.stats.npts] = last_tr.data[:]
                    last_tr.data = tr_pad
                    last_tr.stats.npts = npts_full_trace
                    st.traces[-1] = last_tr

            except IndexError:
                logging.warning('No data within time limits requested')
                raise UserWarning('No data within time limits requested.')

        try:
            self.stream = st
            self.trace = st.traces[0]
            self.proc = "None"
        except IndexError:
            raise UserWarning('No data within time limits requested.')
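A hedged usage sketch for the method above; the Waveform class name, archive path, and station codes are placeholders, not taken from the source:

    from obspy.core import UTCDateTime

    wf = Waveform()  # hypothetical: whatever class defines read_from_SDS
    wf.read_from_SDS('/data/SDS',          # sds_root (placeholder path)
                     'NC', 'KCS', 'EHZ',   # net, sta, comp (placeholders)
                     starttime=UTCDateTime(2011, 1, 1),
                     endtime=UTCDateTime(2011, 1, 2),
                     rmean=True, taper=True, pad_value=0.0)
    print(wf.stream)  # merged, demeaned, tapered, padded Stream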
Code example #4
                in_factory = EdgeFactory(host=edge_url,
                                         port=edge_port,
                                         interval='minute',
                                         type='adjusted')
                in_stream += in_factory.get_timeseries(starttime=in_start,
                                                       endtime=in_end,
                                                       observatory=ob,
                                                       channels=ch)
                print('Retrieved from Edge: %s-%s' % (ob, ch), end="")
                print(' from', in_start, 'to', in_end)
            else:
                print("Decreasing interval requested (", end="")
                print(out_start, 'to', out_end, ")", end="")
                print(" skipping %s-%s..." % (ob, ch))

        if in_stream.count() != len(channels):
            # if any channel was not read in, STOP PROCESSING
            print("No inputs processed or written...")

        else:
            # channels are processed separately from input retrieval in order to
            # guarantee and maintain synchronization
            chan_dt = []
            chan_sq = []
            chan_sv = []
            chan_sd = []
            for ch in channels:

                # process time series with SqDistAlgorithm
                out_stream += svsqdist[ch].process(