Example #1
# assumed imports; uquake mirrors the obspy API, adjust paths if they differ
import numpy as np
from datetime import datetime, timedelta
from uquake.core import Stream, Trace, UTCDateTime
from uquake.core.trace import Stats


def read_IMS_ASCII(path, net='', **kwargs):
    """
    read an IMS_ASCII seismogram from a single station
    :param path: path to file
    :param net: network code assigned to the traces
    :return: uquake.core.stream.Stream
    """

    data = np.loadtxt(path, delimiter=',', skiprows=1)
    stats = Stats()

    # the first line of the file is a comma-separated header
    with open(path) as fid:
        field = fid.readline().split(',')

    stats.sampling_rate = float(field[1])
    # trigger time: epoch seconds plus a microseconds remainder
    timetmp = datetime.fromtimestamp(float(field[5])) + \
        timedelta(seconds=float(field[6]) / 1e6)

    trgtime_UTC = UTCDateTime(timetmp)
    stats.starttime = trgtime_UTC - float(field[10]) / stats.sampling_rate
    stats.npts = len(data)

    stats.station = field[8]
    stats.network = net

    traces = []
    component = np.array(['X', 'Y', 'Z'])
    std = np.std(data, axis=0)
    mstd = np.max(std)  # computed but unused in this snippet

    # one trace per data column (X, Y, Z component)
    for k, dt in enumerate(data.T):
        stats.channel = component[k]
        traces.append(Trace(data=np.array(dt), header=stats))

    return Stream(traces=traces)
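A hedged usage sketch (the file path and network code are invented; uquake's Stream prints like obspy's):

st = read_IMS_ASCII('/data/event_001.asc', net='XX')
print(st)  # three traces: channels X, Y, Z
for tr in st:
    print(tr.stats.station, tr.stats.channel, tr.stats.starttime)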
Example #2
 def python2obspy(self):
     from obspy.core.trace import Stats, Trace
     from obspy.core.utcdatetime import UTCDateTime
     s = Stats()
     s.network = self.network
     s.station = self.station
     s.location = self.location
     s.channel = self.channel
     s.sampling_rate = self.sampling_rate
     s.starttime = UTCDateTime(self.starttime)
     s.npts = len(self.data)
     # CALIB maps to the dedicated Stats 'calib' attribute, not a misc field
     if 'CALIB' in self.misc_fields:
         s.calib = self.misc_fields.pop('CALIB')
     s.update(self.misc_fields)
     return Trace(self.data[:], header=s)
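The CALIB handling above is the interesting part: Stats treats calib as a first-class attribute, while everything else rides along as extra header keys. A minimal standalone sketch of the same pattern (station values invented):

import numpy as np
from obspy.core.trace import Stats, Trace
from obspy.core.utcdatetime import UTCDateTime

s = Stats()
s.network = 'XX'
s.station = 'STA1'
s.sampling_rate = 100.0
s.starttime = UTCDateTime('2020-01-01T00:00:00')
data = np.zeros(1000, dtype=np.int32)
s.npts = len(data)

misc = {'CALIB': 2.5, 'quality': 'good'}
if 'CALIB' in misc:
    s.calib = misc.pop('CALIB')  # route CALIB to the dedicated attribute
s.update(misc)                   # remaining keys become extra header fields

tr = Trace(data, header=s)
print(tr.stats.calib)    # 2.5
print(tr.stats.quality)  # 'good'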
Example #3
def read_TEXCEL_CSV(filename, **kwargs):
    """
    Reads a texcel csv file and returns a uquake Stream object.

    .. warning::
        This function should NOT be called directly, it registers via the
        uquake :func:`~uquake.core.stream.read` function, call this
        instead.

    :param filename: the path to the file
    :param kwargs: ignored
    :return: ~uquake.core.stream.Stream
    """

    with open(filename) as fle:
        x = []
        y = []
        z = []

        for k, line in enumerate(fle):
            # the first header line determines the column layout
            if k == 0:
                if 'MICROPHONE' in line:
                    offset = 9
                else:
                    offset = 8

            # skip the header
            if k < 2:
                continue

            val = line.strip().split(',')

            # relative time
            if k == 3:
                rt0 = timedelta(seconds=float(val[0]))

            elif k == 6:
                # NB: eval() strips the quoting around the station field, but
                # it runs on file content; only use this reader on trusted files
                station = str(eval(val[offset]))

            elif k == 7:
                date = val[offset]

            elif k == 8:
                date_time = date + " " + val[offset]
                starttime = parse(date_time) + rt0

            elif k == 9:
                site = val[offset]

            elif k == 10:
                location = val[offset]

            # the sensitivity/range/trigger/sr values are parsed but unused below
            elif k == 17:
                sensitivity_x = float(val[offset])
                sensitivity_y = float(val[offset + 1])
                sensitivity_z = float(val[offset + 2])

            elif k == 18:
                range_x = float(val[offset])
                range_y = float(val[offset + 1])
                range_z = float(val[offset + 2])

            elif k == 19:
                trigger_x = float(val[offset])
                trigger_y = float(val[offset + 1])
                trigger_z = float(val[offset + 2])

            elif k == 20:
                si_x = float(val[offset])
                si_y = float(val[offset + 1])
                si_z = float(val[offset + 2])

            elif k == 21:
                sr_x = float(val[offset])
                sr_y = float(val[offset + 1])
                sr_z = float(val[offset + 2])

            x.append(float(val[1]))
            y.append(float(val[2]))
            z.append(float(val[3]))

        x = np.array(x)
        y = np.array(y)
        z = np.array(z)

        stats = Stats()
        stats.network = site
        stats.delta = si_x / 1000.0
        stats.npts = len(x)
        stats.location = location
        stats.station = station
        stats.starttime = UTCDateTime(starttime)

        # Trace copies its header, so the same Stats object can be
        # mutated and reused for each component
        stats.channel = 'radial'
        tr_x = Trace(data=x / 1000.0, header=stats)

        stats.delta = si_y / 1000.0
        stats.channel = 'transverse'
        tr_y = Trace(data=y / 1000.0, header=stats)

        stats.delta = si_z / 1000.0
        stats.channel = 'vertical'
        tr_z = Trace(data=z / 1000.0, header=stats)

    return Stream(traces=[tr_x, tr_y, tr_z])
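The docstring's warning matters in practice: the reader registers as a plugin, so the normal entry point is uquake's read() with an explicit format (file name invented; 'TEXCEL_CSV' as the format key is an assumption based on the function name):

from uquake.core import read

st = read('blast_record.csv', format='TEXCEL_CSV')
for tr in st:
    print(tr.stats.channel, tr.stats.delta, tr.stats.npts)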
Example #4
    def attach_obspy_trace_stats(self, kstnm, kinst, force_without_loc=False):
        '''Attaches attribute: obspy_trace_stats, an obspy.core.trace.Stats instance.

        obspy_trace_stats holds metadata common to both miniSEED and SAC formats.
        obspy_trace_stats.sac holds extra metadata only found in the SAC format.

        Floats are NOT converted to np.float32() in either case.

        NB: the SAC header value shown to the world (e.g., "sac.delta"), and the private SAC header
        written to disk (e.g., "sac._hf[0]"), differ in type.  The relevant float header values that
        actually get written to disk with sac.write are stored in the private "._hf" attribute,
        which is not generated with initialization of the raw Stats() container. Therefore, if
        printing those values to, e.g., a text file, ensure the relevant F (float) fields are cast to
        np.float32 first.

        For example:
        >>> from obspy.core.trace import Trace
        >>> from obspy.io.sac.sactrace import SACTrace
        >>> trace = Trace()
        >>> sac = SACTrace.from_obspy_trace(trace)  # called by sac.write (within stream.write)
        >>> sac.delta = 1/20
        >>> isinstance(sac.delta, float)            # True: the public attr shown to the world
        >>> isinstance(sac.delta, np.float32)       # False
        >>> isinstance(sac._hf[0], float)           # False
        >>> isinstance(sac._hf[0], np.float32)      # True: the private attr written to disk

        For more detail see: http://www.adc1.iris.edu/files/sac-manual/manual/file_format.html

        Update function `events.write_metadata` if the fields in this method are changed.

        '''

        # Fill metadata common to SAC and miniSEED formats
        stats = Stats()
        stats.network = utils.network()
        stats.station = kstnm
        stats.location = "00"
        stats.channel = utils.band_code(self.decimated_fs) + "DH"  # SEED manual Appendix A
        stats.starttime = self.corrected_starttime
        stats.sampling_rate = self.decimated_fs
        stats.npts = len(self.processed_data)

        # Extra metadata, some of which is only written to SAC files
        keys = [
            'stla', 'stlo', 'stel', 'stdp', 'scale', 'cmpaz', 'cmpinc',
            'user0', 'user1', 'user2', 'user3', 'kinst', 'kuser0', 'kuser1',
            'kuser2'
        ]
        def_float = -12345.

        # Default SAC header (we may not fill all of these keys)
        stats.sac = dict.fromkeys(keys, def_float)

        # Fill station-location header fields.
        if not force_without_loc:
            stats.sac["stla"] = self.station_loc.latitude
            stats.sac["stlo"] = self.station_loc.longitude

        # Elevation is 0 (our reference is truly sea level)
        stats.sac["stel"] = 0

        # Add scaling factor to convert digital counts to Pa
        stats.sac["scale"] = utils.sacpz_const()

        # Add dip (CMPINC; "component incidence") in SAC dip convention, using as guide:
        # https://github.com/iris-edu/mseed2sac/blob/master/doc/mseed2sac.md
        #
        # SAC dip convention: "degrees down from vertical up/outward",
        # i.e., BHN, BHE = 90, BHZ = 0
        #
        # SEED dip convention: "degrees down from horizontal"
        # i.e., BHN, BHE = 0, BHZ = -90
        stats.sac["cmpinc"] = 0  # SAC dip

        # Add azimuth: horizontal projection of component vector measured clockwise from north
        # It is 0 for vertical components. Theoretically, BHN, BHZ = 0, BHE = 90
        stats.sac["cmpaz"] = 0

        # NB: I checked how IRIS serves up hydrophone data (in MATLAB):
        # >> s = irisFetch.Stations('channel', '*', '*', '*', '?DH')
        #
        # For all 3233 channels from 2147 stations that were returned:
        # dip = -90, 0, or 90
        # azimuth = 0 or 360
        #
        # For dip = -90, I assume that is the SEED dip convention
        # For dip = +90, I do not know; I thought perhaps it might be some(thing like a?)
        # right-hand-rule convention, but not all +90 dips are associated with 360 azimuth

        # REQ events do not record their depth at the time of acquisition, and because the onboard
        # detection algorithm was not triggered there are no trigger parameters to report
        if not self.is_requested:
            # meters (from external pressure sensor; down is positive)
            stats.sac["stdp"] = self.depth
            stats.sac["user0"] = self.snr
            stats.sac["user1"] = self.criterion
            stats.sac["user2"] = self.trig  # sample index

        # Clock drift correction, which is the 'Time correction' applied in the 48-byte
        # fixed header in utils.set_mseed_time_correction()
        stats.sac["user3"] = self.clockdrift_correction  # = self.mseed_time_correction

        # Generic instrument (e.g., '452.020')
        stats.sac['kinst'] = kinst

        # automaid version number
        stats.sac["kuser0"] = self.__version__

        # String describing detection/request status, and number of wavelet scales transmitted
        # (e.g., 'DET.WLT5')
        reqdet_scales = self.processed_file_name.split('.')[-2:]
        stats.sac['kuser1'] = '.'.join(reqdet_scales)

        # String detailing the type of (i)CDF24 transform: edge correction and
        # normalization
        stats.sac['kuser2'] = 'ec' + self.edges_correction + 'norm' + self.normalized

        # Attach Stats to events object
        self.obspy_trace_stats = stats
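The float32 subtlety from the docstring is easy to verify in isolation; a minimal sketch using only obspy's public SACTrace API:

import numpy as np
from obspy.core.trace import Trace
from obspy.io.sac.sactrace import SACTrace

# build a SACTrace the same way stream.write(..., format='SAC') does internally
trace = Trace(data=np.zeros(100, dtype=np.float32))
sac = SACTrace.from_obspy_trace(trace)
sac.delta = 1 / 20

print(type(sac.delta))   # plain Python float: the public attribute
print(type(sac._hf[0]))  # numpy.float32: the private header written to disk

# so cast before dumping header values to text if bit-exactness matters
print(np.float32(sac.delta))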
Example #5
  def dorange (self):
    # load batches
    print "mkms: loading batches.."
    self.bdatas = []

    for i in self.ids:
      d = Dat ()
      d.read (os.path.join (self.root, str(i) + '.DAT'))

      self.bdatas.append (d.bdata)

    # set up datastream
    print "mkms: setting up stream for %s.." % self.station,
    self.st = Stream ()
    for bd in self.bdatas:
      for b in bd.batches:
        s = Stats ()
        s.sampling_rate = self.sampling_rate
        s.npts = b.length
        s.network = self.network
        s.location = self.location
        s.station = self.station
        s.channel = self.channel
        s.starttime = UTCDateTime ((b.ref / 1000000.0))

        t = Trace (data = numpy.array (b.samples_i, dtype = numpy.int32), header = s)
        self.st.append (t)

    print "done."

    # generate file name
    self.name = self.st[0].id.replace ('.', '_')

    self.start = self.st[0].stats.starttime
    self.name = self.start.strftime ("%Y-%m-%d-%H%M-%S") + '.' + self.name

    if self.optplot:
      self.plot ()

    if not self.optnowrite:
      print "mkms: writing %s.mseed.." % self.name,

      if not os.path.exists (self.destdir):
        os.makedirs (self.destdir)

      self.st.write (os.path.join (self.destdir, self.name + '.mseed'), format = 'MSEED', encoding = 'INT32', byteorder = 1, flush = 1, verbose = 0)

      print "done."

      # write ids and refs
      idsf = open (os.path.join (self.destdir, self.name + '.ids'), 'w')
      refsf = open (os.path.join (self.destdir, self.name + '.refs'), 'w')
      for bd in self.bdatas:
        idsf.write ("%d,%d\n" % (bd.id, 1 if bd.e_sdlag else 0))

        for b in bd.batches:
          refsf.write ("%d,%d,%d,%d,%s,%s,%s,%s,%s,%d\n" % (bd.id, b.no, b.ref, b.status, b.latitude[:-2], b.latitude[-2:], b.longitude[:-2], b.longitude[-2:], b.checksum, 1 if b.checksum_pass else 0))

      idsf.close ()
      refsf.close ()

      return (self.name + '.mseed', idsf, refsf)
    else:
      print "mkms: would write %s.mseed (disabled)." % os.path.join (self.destdir, self.name)
      return None
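The write above packs INT32 samples into big-endian miniSEED; a standalone sketch of the same Stream assembly and write, with invented header values:

import numpy
from obspy import Stream, Trace, UTCDateTime
from obspy.core.trace import Stats

s = Stats()
s.sampling_rate = 50.0
s.npts = 100
s.network = 'GB'
s.station = 'TEST'
s.channel = 'BHZ'
s.starttime = UTCDateTime(1400000000.0)

tr = Trace(data=numpy.zeros(100, dtype=numpy.int32), header=s)
st = Stream(traces=[tr])

# INT32 encoding requires int32 sample data; byteorder=1 selects big-endian
st.write('out.mseed', format='MSEED', encoding='INT32', byteorder=1)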
Example #6
        thetacount += 1
        # store stats
        stationname = sta + '%04d' % i
        channelnameN = cha + 'N'
        channelnameE = cha + 'E'

        # for NS components
        statsN = Stats()
        # sampling_rate_x actually holds the sample interval; Stats links
        # delta and sampling_rate, so the two assignments below stay consistent
        statsN.sampling_rate = 1.0 / sampling_rate_x
        statsN.delta = sampling_rate_x
        statsN.starttime = starttime
        statsN.npts = len(traceN.data)
        statsN.network = net
        statsN.station = stationname
        statsN.location = ''
        statsN.channel = channelnameN
        traceN.stats = statsN
        traceN.stats.sac = obspy.core.AttribDict()
        traceN.stats.sac.back_azimuth = plottheta  # use this as azimuth of station

        #---applying filters---#
        traceN.filter('bandpass', freqmin=freqmin, freqmax=freqmax)
        tN = traceN.stats.starttime
        traceN.trim(starttime=tN, endtime=tN + trim_end_time)
        traceN.taper(0.05, side='right')

        #----------------------#
        stemp += Stream(traceN)
        # for EW components
        statsE = Stats()
        statsE.sampling_rate = 1.0 / sampling_rate_z
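The redundant delta/sampling_rate pair above relies on a Stats behavior worth stating plainly: the two attributes are linked, so setting either one recomputes the other, and the last assignment wins. A quick check:

from obspy.core.trace import Stats

s = Stats()
s.sampling_rate = 100.0
print(s.delta)          # 0.01, derived automatically

s.delta = 0.02          # setting delta updates sampling_rate in turn
print(s.sampling_rate)  # 50.0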
Example #7
    def parseISF(self, isf_data, header_only=None, convert=None):
        """
        Determine starting point of data.
        Header has something like 'CURVE #520000', where ascii '5' is the
        length of the length field '20000'.

        ::

            'CURVE #520000xxxx...'
              ^      ^^    ^
              |      ||    +---- data_loc:    location of first valid byte of data
              |      |+--------- len_loc:     location of first byte of data length field
              |      +---------- len_len_loc: location of length of length
              +----------------- tag_loc:     location of start tag
        """
        start_tag = 'CURVE #'
        tag_loc = isf_data.find(start_tag)  # find() already returns an int
        len_len_loc = tag_loc + len(start_tag)
        len_loc = len_len_loc + 1
        len_len = int(isf_data[len_len_loc]) # e.g. 5
        data_loc = len_loc + len_len
        data_len = int(isf_data[len_loc:len_loc+len_len]) # e.g. 20000

        # Extract and parse header
        header = isf_data[:tag_loc]
        # Reformat the header into a dictionary
        header_dict = {}
        items = header.replace(':WFMPRE:','').replace(':','').split(';') # list of "key value" pairs
        for item in items:
            if item:
                key, value = item.split(' ', 1) # maxsplit 1 to ignore subsequent spaces in value
                value = value.replace('"', '')
                header_dict[key] = value

        if header_only:
            return header_dict

        # Extract data and convert from string to integer value
        data = isf_data[data_loc:]

        stats = Stats()
        stats.npts = int(header_dict['NR_PT'])
        stats.calib = float(header_dict['YMULT'])
        byte_order = header_dict['BYT_OR'] # 'MSB' or 'LSB'

        if byte_order == 'MSB':
            byte_order = '>'
        else:
            byte_order = '<'
        points = []
        for i in range(0, stats.npts*2, 2):
            value = data[i:i+2] # as string
            converted = struct.unpack('%sh' % byte_order, value)[0]
            points.append(converted)

        # Optionally convert points to engineering units
        if convert:
            try:
                points = np.array(points) * stats.calib  # requires numpy
            except NameError:
                # If numpy not available, use list instead.
                p = []
                for point in points:
                    p.append(point * stats.calib)
                points = p
        stats.time_offset = float(header_dict['XZERO'])
        stats.calib_unit = header_dict['YUNIT']
        stats.delta = float(header_dict['XINCR'])
        stats.amp_offset = float(header_dict['YOFF'])
        stats.comments = header_dict['WFID']
        stats.channel = header_dict['WFID'][0:3]

        return stats, points
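A hypothetical usage sketch (`scope` stands in for whatever instrument interface carries parseISF; the file path is invented). Note the method predates Python 3's bytes/str split and expects the raw file contents as one string:

with open('waveform.isf', 'r') as f:  # a binary-safe read would need bytes handling
    isf_data = f.read()

stats, points = scope.parseISF(isf_data, convert=True)
print(stats.channel, stats.npts, stats.delta)
print(points[:10])  # first ten samples, scaled to engineering units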