Example #1
def flex_cut_stream(st, cut_start, cut_end, dynamic_npts=0):
    """
    Flexibly cut a stream, checking that each trace covers the requested time window.

    :param st: input stream
    :param cut_start: cut starttime
    :param cut_end: cut endtime
    :param dynamic_npts: number of extra points kept before cut_start and
        after cut_end
    :return: the cut stream
    """
    if not isinstance(st, Stream):
        raise TypeError("flex_cut_stream only accepts an obspy.Stream as "
                        "the first argument")
    new_st = Stream()
    count = 0
    for tr in st:
        flex_cut_trace(tr, cut_start, cut_end, dynamic_npts=dynamic_npts)
        # throw out small piece of data at this step
        if tr.stats.starttime <= cut_start and tr.stats.endtime >= cut_end:
            new_st.append(tr)
            count += 1
    if count == 0:
        raise ValueError("None of traces in Stream satisfy the "
                         "cut time length")
    return new_st
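A minimal usage sketch for flex_cut_stream; the file name and cut window below are placeholders, and flex_cut_trace is assumed to come from the same module:

# Hedged usage sketch; "waveform.mseed" and the times are hypothetical.
from obspy import read, UTCDateTime

st = read("waveform.mseed")
cut_start = UTCDateTime("2010-01-01T00:01:00")
cut_end = UTCDateTime("2010-01-01T00:05:00")
# keep up to 10 extra samples on either side of the requested window
cut_st = flex_cut_stream(st, cut_start, cut_end, dynamic_npts=10)
print(cut_st)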
Example #2
File: plot.py Project: avuan/obspy
def main():
    parser = OptionParser(__doc__.strip(), version="%prog " + __version__)
    parser.add_option("-f", default=None, type="string",
                      dest="format", help="Waveform format.")
    parser.add_option("-o", "--outfile", default=None, type="string",
                      dest="outfile", help="Output filename.")
    parser.add_option("-n", "--no-automerge", default=True, dest="automerge",
                      action="store_false",
                      help="Disable automatic merging of matching channels.")

    (options, args) = parser.parse_args()

    # Print help and exit if no arguments are given
    if len(args) == 0:
        parser.print_help()
        raise SystemExit()

    if options.outfile is not None:
        import matplotlib
        matplotlib.use("agg")

    st = Stream()
    for arg in args:
        st += read(arg, format=options.format)
    st.plot(outfile=options.outfile, automerge=options.automerge)
Example #3
    def test_casted_stats_nscl_writes_to_mseed(self):
        """
        Ensure a Stream object that has had its nslc types cast to str can
        still be written.
        """
        st = Stream(traces=read()[0])

        # Get a new stats object with just the basic items in it
        stats_items = set(Stats())
        new_stats = Stats()
        new_stats.__dict__.update({x: st[0].stats[x] for x in stats_items})
        with warnings.catch_warnings(record=True):
            new_stats.network = 1
            new_stats.station = 1.1
        new_stats.channel = 'Non'
        st[0].stats = new_stats
        # try writing stream to bytes buffer
        bio = io.BytesIO()
        st.write(bio, 'mseed')
        bio.seek(0)
        # read bytes and compare
        stt = read(bio)
        # remove _mseed so streams can compare equal
        stt[0].stats.pop('mseed')
        del stt[0].stats._format  # format gets added upon writing
        self.assertEqual(st, stt)
Example #4
 def test_writeAndReadDifferentEncodings(self):
     """
     Writes and read a file with different encoding via the obspy.core
     methods.
     """
     npts = 1000
     np.random.seed(815)  # make test reproducible
     data = np.random.randn(npts).astype('float64') * 1e3 + .5
     st = Stream([Trace(data=data)])
     # Loop over all encodings.
     for encoding, value in ENCODINGS.items():
         seed_dtype = value[2]
         with NamedTemporaryFile() as tf:
             tempfile = tf.name
             # Write it once with the encoding key and once with the value.
             st[0].data = data.astype(seed_dtype)
             st.verify()
             st.write(tempfile, format="MSEED", encoding=encoding)
             st2 = read(tempfile)
             del st2[0].stats.mseed
             np.testing.assert_array_equal(st[0].data, st2[0].data)
             del st2
             ms = _MSStruct(tempfile)
             ms.read(-1, 1, 1, 0)
             self.assertEqual(ms.msr.contents.encoding, encoding)
             del ms  # for valgrind
Example #5
def main(argv=None):
    parser = ArgumentParser(prog='obspy-print', description=__doc__.strip())
    parser.add_argument('-V', '--version', action='version',
                        version='%(prog)s ' + __version__)
    parser.add_argument('-f', '--format', choices=ENTRY_POINTS['waveform'],
                        help='Waveform format (slightly faster if specified).')
    parser.add_argument('-n', '--no-merge', action='store_false',
                        dest='merge', help='Switch off cleanup merge.')
    parser.add_argument('-g', '--print-gaps', action='store_true',
                        help='Switch on printing of gap information.')
    parser.add_argument('files', nargs='+',
                        help='Files to process.')

    # Deprecated arguments
    action = _DeprecatedArgumentAction('--nomerge',
                                       '--no-merge',
                                       real_action='store_false')
    parser.add_argument('--nomerge', nargs=0, action=action, dest='merge',
                        help=SUPPRESS)

    args = parser.parse_args(argv)

    st = Stream()
    for f in args.files:
        st += read(f, format=args.format)
    if args.merge:
        st.merge(-1)
    print(st)
    if args.print_gaps:
        print()
        st.printGaps()
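A hedged sketch of driving this entry point from Python rather than the shell; the file names are placeholders:

# Hypothetical invocation; obspy-print is normally run from the command line,
# but main() accepts an argv list, which is convenient for testing.
if __name__ == '__main__':
    main(argv=['--print-gaps', 'file1.mseed', 'file2.mseed'])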
Example #6
def sum_adjoint_with_weighting(adj_stream, meta_info, weight_dict):
    new_stream = Stream()
    new_meta = {}
    done_comps = []
    # sum using components weight
    for comp, comp_weights in weight_dict.items():
        for chan_id, chan_weight in comp_weights.items():
            if comp not in done_comps:
                done_comps.append(comp)
                adj_tr = adj_stream.select(id=chan_id)[0]
                comp_tr = adj_tr.copy()
                comp_tr.data *= chan_weight
                comp_tr.stats.location = ""
                comp_tr.stats.channel = comp
                new_stream.append(comp_tr)
                new_meta[comp_tr.id] = meta_info[adj_tr.id].copy()
                new_meta[comp_tr.id]["misfit"] = \
                    chan_weight * meta_info[adj_tr.id]["misfit"]
            else:
                adj_tr = adj_stream.select(id=chan_id)[0]
                comp_tr = new_stream.select(channel="*%s" % comp)[0]
                comp_tr.data += chan_weight * adj_tr.data
                new_meta[comp_tr.id]["misfit"] += \
                    chan_weight * meta_info[adj_tr.id]["misfit"]
    return new_stream, new_meta
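A hedged usage sketch assuming two adjoint traces on the Z component that differ only in location code; the ids, weights and misfit values are made up:

import numpy as np
from obspy import Stream, Trace

# build two synthetic adjoint traces sharing station and component
tr1 = Trace(data=np.zeros(100))
tr1.stats.network, tr1.stats.station = "II", "AAK"
tr1.stats.location, tr1.stats.channel = "00", "MXZ"
tr2 = tr1.copy()
tr2.stats.location = "10"
adj_stream = Stream([tr1, tr2])
meta_info = {tr1.id: {"misfit": 1.0}, tr2.id: {"misfit": 2.0}}
weight_dict = {"Z": {tr1.id: 0.6, tr2.id: 0.4}}
new_st, new_meta = sum_adjoint_with_weighting(adj_stream, meta_info, weight_dict)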
Example #7
def sum_adjoint_no_weighting(adj_stream, meta_info):
    """
    Add same components in adjoint source together without
    extra weight, i.e., equal weight.

    :param adj_stream: adjoint source stream
    :param meta_info: dict of metadata (e.g. misfit values) keyed by trace id
    :return: summed adjoint source stream and the updated metadata dict
    """
    new_stream = Stream()
    new_meta = {}
    done_comps = []
    for tr in adj_stream:
        comp = tr.stats.channel[-1]
        # print(comp, done_comps)
        if comp not in done_comps:
            done_comps.append(comp)
            comp_tr = tr.copy()
            comp_tr.stats.location = ""
            comp_tr.stats.channel = "MX" + comp
            new_stream.append(comp_tr)
            new_meta[comp_tr.id] = deepcopy(meta_info[tr.id])
        else:
            comp_tr = new_stream.select(component=comp)[0]
            comp_tr.data += tr.data
            new_meta[comp_tr.id]["misfit"] += meta_info[tr.id]["misfit"]

    return new_stream, new_meta
Example #8
 def test_mergePreviews2(self):
     """
     Test case for issue #84.
     """
     # Note: explicitly creating np.ones instead of np.empty in order to
     # prevent NumPy warnings related to max function
     tr1 = Trace(data=np.ones(2880))
     tr1.stats.starttime = UTCDateTime("2010-01-01T00:00:00.670000Z")
     tr1.stats.delta = 30.0
     tr1.stats.preview = True
     tr1.verify()
     tr2 = Trace(data=np.ones(2881))
     tr2.stats.starttime = UTCDateTime("2010-01-01T23:59:30.670000Z")
     tr2.stats.delta = 30.0
     tr2.stats.preview = True
     tr2.verify()
     st1 = Stream([tr1, tr2])
     st1.verify()
     # merge
     st2 = merge_previews(st1)
     st2.verify()
     # check
     self.assertTrue(st2[0].stats.preview)
     self.assertEqual(st2[0].stats.starttime, tr1.stats.starttime)
     self.assertEqual(st2[0].stats.endtime, tr2.stats.endtime)
     self.assertEqual(st2[0].stats.npts, 5760)
     self.assertEqual(len(st2[0]), 5760)
Example #9
 def test_read_and_write_via_obspy(self):
     """
     Read and Write files via L{obspy.Stream}
     """
     # read trace
     tr = read(self.file)[0]
     # write comparison trace
     st2 = Stream()
     st2.traces.append(Trace())
     tr2 = st2[0]
     tr2.data = copy.deepcopy(tr.data)
     tr2.stats = copy.deepcopy(tr.stats)
     with NamedTemporaryFile() as tf:
         tempfile = tf.name
         st2.write(tempfile, format='SAC')
         # read comparison trace
         tr3 = read(tempfile)[0]
     # check if equal
     self.assertEqual(tr3.stats['station'], tr.stats['station'])
     self.assertEqual(tr3.stats.npts, tr.stats.npts)
     self.assertEqual(tr3.stats['sampling_rate'], tr.stats['sampling_rate'])
     self.assertEqual(tr3.stats.get('channel'), tr.stats.get('channel'))
     self.assertEqual(tr3.stats.get('starttime'), tr.stats.get('starttime'))
     self.assertEqual(tr3.stats.sac.get('nvhdr'), tr.stats.sac.get('nvhdr'))
     np.testing.assert_equal(tr.data, tr3.data)
Example #10
def main(argv=None):
    parser = ArgumentParser(prog="obspy-plot", description=__doc__.strip())
    parser.add_argument("-V", "--version", action="version", version="%(prog)s " + __version__)
    parser.add_argument("-f", "--format", choices=ENTRY_POINTS["waveform"], help="Waveform format.")
    parser.add_argument("-o", "--outfile", help="Output filename.")
    parser.add_argument(
        "-n",
        "--no-automerge",
        dest="automerge",
        action="store_false",
        help="Disable automatic merging of matching channels.",
    )
    parser.add_argument(
        "--full",
        dest="full",
        action="store_true",
        help="Disable min/max-plot, i.e. always plot every "
        'single sample (Stream.plot(..., method="full"), '
        "for interactive zooming).",
    )
    parser.add_argument("files", nargs="+", help="Files to plot.")
    args = parser.parse_args(argv)

    if args.outfile is not None:
        MatplotlibBackend.switch_backend("AGG", sloppy=False)

    st = Stream()
    for f in args.files:
        st += read(f, format=args.format)
    kwargs = {"outfile": args.outfile, "automerge": args.automerge}
    if args.full:
        kwargs["method"] = "full"
    st.plot(**kwargs)
Example #11
File: plot.py Project: Brtle/obspy
def main(argv=None):
    parser = ArgumentParser(prog='obspy-plot', description=__doc__.strip())
    parser.add_argument('-V', '--version', action='version',
                        version='%(prog)s ' + __version__)
    parser.add_argument('-f', '--format', choices=ENTRY_POINTS['waveform'],
                        help='Waveform format.')
    parser.add_argument('-o', '--outfile',
                        help='Output filename.')
    parser.add_argument('-n', '--no-automerge', dest='automerge',
                        action='store_false',
                        help='Disable automatic merging of matching channels.')
    parser.add_argument('--full', dest='full', action='store_true',
                        help='Disable min/max-plot, i.e. always plot every '
                             'single sample (Stream.plot(..., method="full"), '
                             'for interactive zooming).')
    parser.add_argument('files', nargs='+',
                        help='Files to plot.')
    args = parser.parse_args(argv)

    if args.outfile is not None:
        MatplotlibBackend.switch_backend("AGG", sloppy=False)

    st = Stream()
    for f in args.files:
        st += read(f, format=args.format)
    kwargs = {"outfile": args.outfile,
              "automerge": args.automerge}
    if args.full:
        kwargs['method'] = "full"
    st.plot(**kwargs)
Example #12
 def test_writeIntegersViaObsPy(self):
     """
     Write file test via L{obspy.Trace}.
     """
     npts = 1000
     # data cloud of integers - float won't work!
     np.random.seed(815)  # make test reproducible
     data = np.random.randint(-1000, 1000, npts).astype("int32")
     stats = {
         "network": "BW",
         "station": "TEST",
         "location": "",
         "channel": "EHE",
         "npts": npts,
         "sampling_rate": 200.0,
     }
     start = UTCDateTime(2000, 1, 1)
     stats["starttime"] = start
     tr = Trace(data=data, header=stats)
     st = Stream([tr])
     st.verify()
     # write
     with NamedTemporaryFile() as tf:
         tempfile = tf.name
         st.write(tempfile, format="GSE2")
         # read again
         stream = read(tempfile)
     stream.verify()
     np.testing.assert_equal(data, stream[0].data)
     # test default attributes
     self.assertEqual("CM6", stream[0].stats.gse2.datatype)
     self.assertEqual(-1, stream[0].stats.gse2.vang)
     self.assertEqual(1.0, stream[0].stats.gse2.calper)
     self.assertEqual(1.0, stream[0].stats.calib)
Example #13
def sum_adj_on_component(adj_stream, weight_dict):
    """
    Sum adjoint source on different channels but same component
    together, like "II.AAK.00.BHZ" and "II.AAK.10.BHZ" to form
    "II.AAK.BHZ"

    :param adj_stream: adjoint source stream
    :param weight_dict: weight dictionary, should be something like
        {"Z":{"II.AAK.00.BHZ": 0.5, "II.AAK.10.BHZ": 0.5},
         "R":{"II.AAK.00.BHR": 0.3, "II.AAK.10.BHR": 0.7},
         "T":{"II.AAK..BHT": 1.0}}
    :return: summed adjoint source stream
    """
    new_stream = Stream()
    done_comps = []
    for comp, comp_weights in weight_dict.items():
        for chan_id, chan_weight in comp_weights.items():
            if comp not in done_comps:
                done_comps.append(comp)
                comp_tr = adj_stream.select(id=chan_id)[0]
                comp_tr.data *= chan_weight
                comp_tr.stats.location = ""
            else:
                comp_tr.data += \
                    chan_weight * adj_stream.select(id=chan_id)[0].data
        new_stream.append(comp_tr)
    return new_stream
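A hedged usage sketch following the weight-dictionary layout shown in the docstring; the trace ids and weights are illustrative:

import numpy as np
from obspy import Stream, Trace

tr_00 = Trace(data=np.ones(50))
tr_00.stats.network, tr_00.stats.station = "II", "AAK"
tr_00.stats.location, tr_00.stats.channel = "00", "BHZ"
tr_10 = tr_00.copy()
tr_10.stats.location = "10"
adj_stream = Stream([tr_00, tr_10])
weights = {"Z": {"II.AAK.00.BHZ": 0.5, "II.AAK.10.BHZ": 0.5}}
# sums the two location codes into a single Z trace
summed = sum_adj_on_component(adj_stream, weights)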
Example #14
    def test_writing_blockette_100(self):
        """
        Tests that blockette 100 is written correctly. It is only used if
        the sampling rate is higher than 32767 Hz or smaller than 1.0 /
        32767.0 Hz.
        """
        # Three traces, only the middle one needs it.
        tr = Trace(data=np.linspace(0, 100, 101))
        st = Stream(traces=[tr.copy(), tr.copy(), tr.copy()])

        st[1].stats.sampling_rate = 60000.0

        with io.BytesIO() as buf:
            st.write(buf, format="mseed")
            buf.seek(0, 0)
            st2 = read(buf)

        self.assertTrue(np.allclose(st[0].stats.sampling_rate, st2[0].stats.sampling_rate))
        self.assertTrue(np.allclose(st[1].stats.sampling_rate, st2[1].stats.sampling_rate))
        self.assertTrue(np.allclose(st[2].stats.sampling_rate, st2[2].stats.sampling_rate))

        st[1].stats.sampling_rate = 1.0 / 60000.0

        with io.BytesIO() as buf:
            st.write(buf, format="mseed")
            buf.seek(0, 0)
            st2 = read(buf)

        self.assertTrue(np.allclose(st[0].stats.sampling_rate, st2[0].stats.sampling_rate))
        self.assertTrue(np.allclose(st[1].stats.sampling_rate, st2[1].stats.sampling_rate))
        self.assertTrue(np.allclose(st[2].stats.sampling_rate, st2[2].stats.sampling_rate))
Example #15
 def test_writeAndReadDifferentRecordLengths(self):
     """
     Tests Mini-SEED writing and record lengths.
     """
     # libmseed instance.
     npts = 6000
     np.random.seed(815)  # make test reproducible
     data = np.random.randint(-1000, 1000, npts).astype('int32')
     st = Stream([Trace(data=data)])
     record_lengths = [256, 512, 1024, 2048, 4096, 8192]
     # Loop over some record lengths.
     for rec_len in record_lengths:
         # Write it.
         with NamedTemporaryFile() as tf:
             tempfile = tf.name
             st.write(tempfile, format="MSEED", reclen=rec_len)
             # Get additional header info
             info = util.getRecordInformation(tempfile)
             # Test reading the two files.
             temp_st = read(tempfile)
         np.testing.assert_array_equal(data, temp_st[0].data)
         del temp_st
         # Check record length.
         self.assertEqual(info['record_length'], rec_len)
         # Check if filesize is a multiple of the record length.
         self.assertEqual(info['filesize'] % rec_len, 0)
Example #16
 def test_writeIntegersViaObsPy(self):
     """
     Write file test via L{obspy.Trace}.
     """
     npts = 1000
     # data cloud of integers - float won't work!
     np.random.seed(815)  # make test reproducible
     data = np.random.randint(-1000, 1000, npts).astype(np.int32)
     stats = {'network': 'BW', 'station': 'TEST', 'location': '',
              'channel': 'EHE', 'npts': npts, 'sampling_rate': 200.0}
     start = UTCDateTime(2000, 1, 1)
     stats['starttime'] = start
     tr = Trace(data=data, header=stats)
     st = Stream([tr])
     st.verify()
     # write
     with NamedTemporaryFile() as tf:
         tempfile = tf.name
         st.write(tempfile, format="GSE2")
         # read again
         stream = read(tempfile)
     stream.verify()
     np.testing.assert_equal(data, stream[0].data)
     # test default attributes
     self.assertEqual('CM6', stream[0].stats.gse2.datatype)
     self.assertEqual(-1, stream[0].stats.gse2.vang)
     self.assertEqual(1.0, stream[0].stats.gse2.calper)
     self.assertEqual(1.0, stream[0].stats.calib)
Example #17
 def test_allDataTypesAndEndiansInSingleFile(self):
     """
     Tests writing all data and endian types into a single file.
     """
     with NamedTemporaryFile() as tf:
         tempfile = tf.name
         st1 = Stream()
         data = np.random.randint(-1000, 1000, 500)
         for dtype in ["i2", "i4", "f4", "f8", "S1"]:
             for enc in ["<", ">", "="]:
                 st1.append(Trace(data=data.astype(np.dtype(enc + dtype))))
         # this will raise a UserWarning - ignoring for test
         with warnings.catch_warnings(record=True):
             warnings.simplefilter('ignore', UserWarning)
             st1.write(tempfile, format="MSEED")
             # read everything back (int16 gets converted into int32)
             st2 = read(tempfile)
             for dtype in ["i4", "i4", "f4", "f8", "S1"]:
                 for enc in ["<", ">", "="]:
                     tr = st2.pop(0).data
                     self.assertEqual(tr.dtype.kind +
                                      str(tr.dtype.itemsize),
                                      dtype)
                     # byte order is always native (=)
                     np.testing.assert_array_equal(tr,
                                                   data.astype("=" + dtype))
Example #18
 def test_Header(self):
     """
     Tests whether the header is correctly written and read.
     """
     np.random.seed(815)  # make test reproducible
     data = np.random.randint(-1000, 1000, 50).astype('int32')
     stats = {'network': 'BW', 'station': 'TEST', 'location': 'A',
              'channel': 'EHE', 'npts': len(data), 'sampling_rate': 200.0,
              'mseed': {'record_length': 512, 'encoding': 'STEIM2',
                        'filesize': 512, 'dataquality': 'D',
                        'number_of_records': 1, 'byteorder': '>'}}
     stats['starttime'] = UTCDateTime(2000, 1, 1)
     st = Stream([Trace(data=data, header=stats)])
     with NamedTemporaryFile() as tf:
         tempfile = tf.name
         # Write it.
         st.write(tempfile, format="MSEED")
         # Read it again and delete the temporary file.
         stream = read(tempfile)
     stream.verify()
     # Loop over the attributes to be able to assert them because a
     # dictionary is not a stats dictionary.
     # This also assures that there are no additional keys.
     for key in stats.keys():
         self.assertEqual(stats[key], stream[0].stats[key])
Example #19
 def test_write_stream_via_obspy(self):
     """
     Write streams, i.e. multiple files via obspy.core.Trace
     """
     testdata = np.array([111, 111, 111, 111, 111, 109, 106, 103, 103,
                          110, 121, 132, 139])
     with NamedTemporaryFile() as fh:
         testfile = fh.name
         self.file = os.path.join(self.path, '3cssan.reg.8.1.RNON.wav')
         tr = read(self.file, format='WAV')[0]
         np.testing.assert_array_equal(tr.data[:13], testdata)
         # write
         st2 = Stream([Trace(), Trace()])
         st2[0].data = tr.data.copy()       # copy the data
         st2[1].data = tr.data.copy() // 2  # be sure data are different
         st2.write(testfile, format='WAV', framerate=7000)
         # read without giving the WAV format option
         base, ext = os.path.splitext(testfile)
         testfile0 = "%s%03d%s" % (base, 0, ext)
         testfile1 = "%s%03d%s" % (base, 1, ext)
         tr30 = read(testfile0)[0]
         tr31 = read(testfile1)[0]
         self.assertEqual(tr30.stats, tr.stats)
         self.assertEqual(tr31.stats, tr.stats)
         np.testing.assert_array_equal(tr30.data[:13], testdata)
         np.testing.assert_array_equal(tr31.data[:13], testdata // 2)
         os.remove(testfile0)
         os.remove(testfile1)
Example #20
 def test_writeAndReadDifferentEncodings(self):
     """
     Writes and read a file with different encoding via the obspy.core
     methods.
     """
     npts = 1000
     np.random.seed(815)  # make test reproducible
     data = np.random.randn(npts).astype('float64') * 1e3 + .5
     st = Stream([Trace(data=data)])
     # Loop over all encodings.
     for encoding, value in ENCODINGS.items():
         seed_dtype = value[2]
         # Special handling for the ASCII dtype. NumPy 1.7 changes the
         # default dtype of numpy.string_ from "|S1" to "|S32". Enforce
         # "|S1|" here to be consistent across NumPy versions.
         if encoding == 0:
             seed_dtype = "|S1"
         with NamedTemporaryFile() as tf:
             tempfile = tf.name
             # Write it once with the encoding key and once with the value.
             st[0].data = data.astype(seed_dtype)
             st.verify()
             st.write(tempfile, format="MSEED", encoding=encoding)
             st2 = read(tempfile)
             del st2[0].stats.mseed
             np.testing.assert_array_equal(st[0].data, st2[0].data,
                 "Arrays are not equal for encoding '%s'" %
                 ENCODINGS[encoding][0])
             del st2
             ms = _MSStruct(tempfile)
             ms.read(-1, 1, 1, 0)
             self.assertEqual(ms.msr.contents.encoding, encoding)
             del ms  # for valgrind
Example #21
def get_waveforms():
    events = get_events()[::-1]
    client = Client('GFZ')
    stream_raw = Stream()
    stream = RFStream()
    coords = inventory.get_coordinates(seedid[:-1] + 'Z')
    for i, event in enumerate(events):
        t = event.preferred_origin().time
        args = seedid.split('.') + [t + 4.9 * 60, t + 14.1 * 60]
        s = client.get_waveforms(*args)
        s.trim(t+5*60, t+14*60)
        s.decimate(int(round(s[0].stats.sampling_rate)) // 5, no_filter=True)
        stream_raw.extend(s)
        if i in (0, 2, 4):
            s = s.copy()
            stats = rfstats(station=coords, event=event, dist_range=(20, 95))
            if stats is None:
                continue
            s.trim(stats.onset - 25, stats.onset + 75)
            stats = obj2stats(station=coords, event=event)
            s = RFStream(s)
            for tr in s:
                tr.stats.update(stats)
            stream.extend(s)
    stream_raw.write(wavname, 'MSEED')
    stream.write(wavname2, 'SAC')
Example #22
 def test_readAndWriteViaObsPy(self):
     """
     Read and Write files via L{obspy.Stream}
     """
     # read trace
     tr = read(self.file)[0]
     # write comparison trace
     st2 = Stream()
     st2.traces.append(Trace())
     tr2 = st2[0]
     tr2.data = copy.deepcopy(tr.data)
     tr2.stats = copy.deepcopy(tr.stats)
     with NamedTemporaryFile() as tf:
         tempfile = tf.name
         st2.write(tempfile, format="SAC")
         # read comparison trace
         tr3 = read(tempfile)[0]
     # check if equal
     self.assertEqual(tr3.stats["station"], tr.stats["station"])
     self.assertEqual(tr3.stats.npts, tr.stats.npts)
     self.assertEqual(tr.stats["sampling_rate"], tr.stats["sampling_rate"])
     self.assertEqual(tr.stats.get("channel"), tr.stats.get("channel"))
     self.assertEqual(tr.stats.get("starttime"), tr.stats.get("starttime"))
     self.assertEqual(tr.stats.sac.get("nvhdr"), tr.stats.sac.get("nvhdr"))
     np.testing.assert_equal(tr.data, tr3.data)
Example #23
    def normalize(self, method='trace_max', **kwargs):
        """
        Normalizes all traces in the stream.
        """
        logging.info("Normalizing {:} traces with method '{:}'...",
                len(self.traces), method)
        if method == 'trace_max':
            ObspyStream.normalize(self, global_max=False)
        elif method == 'global_max':
            ObspyStream.normalize(self, global_max=True)
        elif method == 'onebit':
            for tr in self.traces:
                tr.data = np.sign(tr.data)
        elif method == 'stalta':
            _apply = kwargs.get('apply', True)
            sta = kwargs.get('sta', 3.)
            lta = kwargs.get('lta', 10.)
            trigger_on = kwargs.get('trigger_on', 1.1)
            trigger_off = kwargs.get('trigger_off', 1.0)
            for tr in self.traces:
                df = tr.stats['sampling_rate']
                _sta = int(sta * df)
                _lta = int(lta * df)

                cft = trigger.recSTALTA(tr.data, _sta, _lta)
                tr.trg = trigger.triggerOnset(cft, trigger_on, trigger_off)
                
                if _apply:
                    for on, off in tr.trg:
                        tr.data[on:off] = 0
        else:
            raise ValueError("Unknown method '{:}'".format(method))
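A hedged usage sketch; `cst` stands for an existing instance of the custom Stream subclass that defines this normalize() method, and the parameter values are illustrative:

# Hypothetical calls on an existing instance `cst`.
cst.normalize(method='onebit')
cst.normalize(method='stalta', sta=2.0, lta=8.0,
              trigger_on=1.2, trigger_off=0.9, apply=True)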
Example #24
 def test_readAndWriteViaObsPy(self):
     """
     Read and Write files via L{obspy.Trace}
     """
     gse2file = os.path.join(self.path, "data", "loc_RNON20040609200559.z")
     # read trace
     st1 = read(gse2file)
     st1.verify()
     tr1 = st1[0]
     # write comparison trace
     with NamedTemporaryFile() as tf:
         tempfile = tf.name
         st2 = Stream()
         st2.traces.append(Trace())
         tr2 = st2[0]
         tr2.data = copy.deepcopy(tr1.data)
         tr2.stats = copy.deepcopy(tr1.stats)
         st2.write(tempfile, format="GSE2")
         # read comparison trace
         st3 = read(tempfile)
     st3.verify()
     tr3 = st3[0]
     # check if equal
     self.assertEqual(tr3.stats["station"], tr1.stats["station"])
     self.assertEqual(tr3.stats.npts, tr1.stats.npts)
     self.assertEqual(tr3.stats["sampling_rate"], tr1.stats["sampling_rate"])
     self.assertEqual(tr3.stats.get("channel"), tr1.stats.get("channel"))
     self.assertEqual(tr3.stats.get("starttime"), tr1.stats.get("starttime"))
     self.assertEqual(tr3.stats.get("calib"), tr1.stats.get("calib"))
     self.assertEqual(tr3.stats.gse2.get("vang"), tr1.stats.gse2.get("vang"))
     self.assertEqual(tr3.stats.gse2.get("calper"), tr1.stats.gse2.get("calper"))
     np.testing.assert_equal(tr3.data, tr1.data)
Example #25
 def test_convert_to_sac(self):
     """
     Test that an obspy trace is correctly written to SAC.
     All the header variables which are tagged as required by
     https://ds.iris.edu/files/sac-manual/manual/file_format.html
     are controlled in this test
     """
     # setUp is called before every test, not only once at the
     # beginning, that is we allocate the data just here
     # generate artificial mseed data
     np.random.seed(815)
     head = {'network': 'NL', 'station': 'HGN', 'location': '00',
             'channel': 'BHZ', 'calib': 1.0, 'sampling_rate': 40.0,
             'starttime': UTCDateTime(2003, 5, 29, 2, 13, 22, 43400)}
     data = np.random.randint(0, 5000, 11947).astype(np.int32)
     st = Stream([Trace(header=head, data=data)])
     # write them as SAC
     with NamedTemporaryFile() as tf:
         tmpfile = tf.name
         st.write(tmpfile, format="SAC")
         st2 = read(tmpfile, format="SAC")
     # check all the required entries (see url in docstring)
     self.assertEqual(st2[0].stats.starttime, st[0].stats.starttime)
     self.assertEqual(st2[0].stats.npts, st[0].stats.npts)
     self.assertEqual(st2[0].stats.sac.nvhdr, 6)
     self.assertAlmostEqual(st2[0].stats.sac.b, 0.000400)
     # compare with the correct number of decimal places
     self.assertAlmostEqual((0.0004 + (st[0].stats.npts - 1) *
                            st[0].stats.delta) / st2[0].stats.sac.e, 1.0)
     self.assertEqual(st2[0].stats.sac.iftype, 1)
     self.assertEqual(st2[0].stats.sac.leven, 1)
     self.assertAlmostEqual(st2[0].stats.sampling_rate /
                            st[0].stats.sampling_rate, 1.0)
Example #26
class TemporarySDSDirectory(object):
    """
    Handles creation and deletion of a temporary SDS directory structure.
    To be used with "with" statement.
    """
    sampling_rate = 0.1
    networks = ("AB", "CD")
    stations = ("XYZ", "ZZZ3")
    locations = ("", "00")
    channels = ("HHZ", "HHN", "HHE", "BHZ", "BHN", "BHE")

    def __init__(self, year, doy, time=None):
        """
        Set which day's midnight (00:00 hours) is used as a day break in the
        testing (to split the test data into two files).

        If `time` is specified it overrides `year` and `doy`.
        """
        if time:
            self.time = time
        else:
            self.time = UTCDateTime("%d-%03dT00:00:00" % (year, doy))
        delta = 1.0 / self.sampling_rate

        self.stream = Stream()
        for net in self.networks:
            for sta in self.stations:
                for loc in self.locations:
                    for cha in self.channels:
                        tr = Trace(
                            data=np.arange(100, dtype=np.int32),
                            header=dict(
                                network=net, station=sta, location=loc,
                                channel=cha, sampling_rate=self.sampling_rate,
                                starttime=self.time - 30 * delta))

                        # cut into two seamless traces
                        tr1 = tr.slice(endtime=self.time + 5 * delta)
                        tr2 = tr.slice(starttime=self.time + 6 * delta)
                        self.stream.append(tr1)
                        self.stream.append(tr2)

    def __enter__(self):
        self.old_dir = os.getcwd()
        self.tempdir = tempfile.mkdtemp(prefix='obspy-sdstest-')
        for tr_ in self.stream:
            t_ = tr_.stats.starttime
            full_path = SDS_FMTSTR.format(year=t_.year, doy=t_.julday,
                                          sds_type="D", **tr_.stats)
            full_path = os.path.join(self.tempdir, full_path)
            dirname, filename = os.path.split(full_path)
            if not os.path.isdir(dirname):
                os.makedirs(dirname)
            tr_.write(full_path, format="MSEED")
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):  # @UnusedVariable
        os.chdir(self.old_dir)
        shutil.rmtree(self.tempdir)
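A hedged usage sketch of the context manager defined above; year and day-of-year are arbitrary, and SDS_FMTSTR is assumed to be provided by the surrounding module:

# Creates the temporary SDS tree, then removes it again on exit.
with TemporarySDSDirectory(year=2015, doy=123) as sds:
    print(sds.tempdir)       # root of the temporary SDS structure
    print(len(sds.stream))   # traces that were written as MSEED files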
Example #27
def plot_synth_real(real_template, synthetic, channels=False):
    r"""Plot multiple channels of data for real data and synthetic.

    :type real_template: obspy.Stream
    :param real_template: Stream of the real template
    :type synthetic: obspy.Stream
    :param synthetic: Stream of synthetic template
    :type channels: list of str
    :param channels: List of tuples of (station, channel) to plot, default is\
            False, which plots all.
    """
    from obspy.signal.cross_correlation import xcorr
    from obspy import Stream
    colours = ['k', 'r']
    labels = ['Real', 'Synthetic']
    if channels:
        real = []
        synth = []
        for stachan in channels:
            real.append(real_template.select(station=stachan[0],
                                             channel=stachan[1]))
            synth.append(synthetic.select(station=stachan[0],
                                          channel=stachan[1]))
        real_template = Stream(real)
        synthetic = Stream(synth)

    # Extract the station and channels
    stachans = list(set([(tr.stats.station, tr.stats.channel)
                         for tr in real_template]))
    fig, axes = plt.subplots(len(stachans), 1, sharex=True, figsize=(5, 10))
    axes = axes.ravel()
    for i, stachan in enumerate(stachans):
        real_tr = real_template.select(station=stachan[0],
                                       channel=stachan[1])[0]
        synth_tr = synthetic.select(station=stachan[0],
                                    channel=stachan[1])[0]
        shift, corr = xcorr(real_tr, synth_tr, 2)
        print('Shifting by: '+str(shift)+' samples')
        if corr < 0:
            synth_tr.data = synth_tr.data * -1
            corr = corr * -1
        if shift < 0:
            synth_tr.data = synth_tr.data[abs(shift):]
            real_tr.data = real_tr.data[0:len(synth_tr.data)]
        elif shift > 0:
            real_tr.data = real_tr.data[abs(shift):]
            synth_tr.data = synth_tr.data[0:len(real_tr.data)]
        for j, tr in enumerate([real_tr, synth_tr]):
            y = tr.data
            y = y / float(max(abs(y)))
            x = np.linspace(0, len(y) * tr.stats.delta, len(y))
            axes[i].plot(x, y, colours[j], linewidth=2.0, label=labels[j])
            axes[i].get_yaxis().set_ticks([])
        ylab = stachan[0]+'.'+stachan[1]+' cc='+str(round(corr, 2))
        axes[i].set_ylabel(ylab, rotation=0)
    plt.subplots_adjust(hspace=0)
    # axes[0].legend()
    axes[-1].set_xlabel('Time (s)')
    plt.show()
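A hedged usage sketch; real_st and synth_st stand for pre-loaded Streams containing matching station/channel pairs:

# Hypothetical call; station and channel names are placeholders.
plot_synth_real(real_st, synth_st, channels=[('FOZ', 'HHZ'), ('FOZ', 'HHN')])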
Example #28
    def add(self, stream, verbose=False):
        """
        Process all traces with compatible information and add their spectral
        estimates to the histogram containing the probabilistic psd.
        Also ensures that no piece of data is inserted twice.

        :type stream: :class:`~obspy.core.stream.Stream` or
                :class:`~obspy.core.trace.Trace`
        :param stream: Stream or trace with data that should be added to the
                probabilistic psd histogram.
        :returns: True if appropriate data were found and the ppsd statistics
                were changed, False otherwise.
        """
        self.__check_ppsd_length()
        # return later if any changes were applied to the ppsd statistics
        changed = False
        # prepare the list of traces to go through
        if isinstance(stream, Trace):
            stream = Stream([stream])
        # select appropriate traces
        stream = stream.select(id=self.id,
                               sampling_rate=self.sampling_rate)
        # save information on available data and gaps
        self.__insert_data_times(stream)
        self.__insert_gap_times(stream)
        # merge depending on skip_on_gaps set during __init__
        stream.merge(self.merge_method, fill_value=0)

        for tr in stream:
            # the following check should not be necessary due to the select()..
            if not self.__sanity_check(tr):
                msg = "Skipping incompatible trace."
                warnings.warn(msg)
                continue
            t1 = tr.stats.starttime
            t2 = tr.stats.endtime
            while t1 + self.ppsd_length <= t2:
                if self.__check_time_present(t1):
                    msg = "Already covered time spans detected (e.g. %s), " + \
                          "skipping these slices."
                    msg = msg % t1
                    warnings.warn(msg)
                else:
                    # throw warnings if trace length is different
                    # than ppsd_length..!?!
                    slice = tr.slice(t1, t1 + self.ppsd_length)
                    # XXX not good, should be working in place somehow
                    # XXX how to do it with the padding, though?
                    success = self.__process(slice)
                    if success:
                        self.__insert_used_time(t1)
                        if verbose:
                            print(t1)
                        changed = True
                t1 += (1 - self.overlap) * self.ppsd_length  # advance

            # enforce time limits, pad zeros if gaps
            # tr.trim(t, t+PPSD_LENGTH, pad=True)
        return changed
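A hedged usage sketch; `ppsd` stands for an initialized PPSD-like object whose id and sampling_rate match the data being added, and the file name is a placeholder:

from obspy import read

st = read("one_day_of_data.mseed")   # hypothetical file
if ppsd.add(st):
    print("new spectral estimates were added to the histogram")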
Example #29
    def write_data(self, path):
        """
        Writes median ground velocity values as ObsPy stream files. Timestamps and
        corresponding values are expected to span a full day. The first sample should
        be at 00:00 and the last sample at 00:00 (+1 day) - win_long / 2.

        :type path: string
        :param path: data path where output is written to
    
        :return: None
            Writes data and a file containing potentially incorrect values (due to
            data gaps) to the given path/directory.
        """

        # create empty stream
        mgv = Stream()
        # detect gaps in the data
        # number of traces; starting indices are stored
        ntr = [0]
        for i in range(self.ts.size - 1):
            # gap if time between timestamps is greater (win_long * overlap)
            if self.ts[i+1] - self.ts[i] > (self.win_long * self.overlap):
                # append starting index of new trace
                ntr.append(i+1)
        # get data for single traces
        for n in range(len(ntr)):
            # just one trace
            if len(ntr) == 1:
                data = self.Vs
                time = self.ts
            # last trace
            elif n == len(ntr) - 1:
                data = self.Vs[ntr[n]:]
                time = self.ts[ntr[n]:]
            # every other case
            else:
                data = self.Vs[ntr[n]: ntr[n+1]]
                time = self.ts[ntr[n]: ntr[n+1]]
            # create new trace and add data
            new = Trace(data=data)
            new.stats.starttime = UTCDateTime(time[0])
            new.stats.delta = self.win_long * self.overlap
            mgv += new
        # add stats to traces
        for i, tr in enumerate(mgv):
            tr.stats.network = "4D"
            tr.stats.station = self.stn
            tr.stats.channel = self.chn
        # obtain julian day
        julday = UTCDateTime(self.ts[0]).julday
        # write data
        mgv.write(path + "MGV.%s.%s.%03d_%.1f-%.1fHz.mseed" % (self.stn, self.chn, julday, self.fmin, self.fmax), format="MSEED")
        # write file containing erroneous data points; these values have been computed from data containing gaps
        if self.errors is not None:
            hdr = "Potentially incorrect values (timestamp, MGV value) due to \
                   data gaps in the window associated to the timestamps below"
            np.savetxt(path + "MGV_errval_%s.%s.%03d_%.1f-%.1fHz" % (self.stn, self.chn, julday, self.fmin, self.fmax), self.errors, header=hdr)
Example #30
 def test_SavingSmallASCII(self):
     """
     Tests writing small ASCII strings.
     """
     st = Stream()
     st.append(Trace(data=np.fromstring("A" * 8, "|S1")))
     with NamedTemporaryFile() as tf:
         tempfile = tf.name
         st.write(tempfile, format="MSEED")
Example #31
    def create_trace(self, station_to_cut, mp=False):
        station_to_cut_segments = PH5toMSeed.get_nonrestricted_segments(
            [station_to_cut], self.restricted)
        obspy_stream = Stream()
        for stc in station_to_cut_segments:
            das = self.ph5.query_das_t(stc.das, stc.component,
                                       stc.starttime,
                                       stc.endtime,
                                       stc.sample_rate,
                                       stc.sample_rate_multiplier)

            if not das:
                return

            das = [x for x in das]
            Das_tf = next(iter(das or []), None)
            if Das_tf is None:
                return
            else:
                das_t_start = (float(Das_tf['time/epoch_l']) +
                               float(Das_tf['time/micro_seconds_i']) / 1000000)

            if float(das_t_start) > float(stc.starttime):
                start_time = das_t_start

            else:
                start_time = stc.starttime

            nt = stc.notimecorrect

            if stc.sample_rate > 0:
                actual_sample_rate = float(
                    stc.sample_rate) / float(stc.sample_rate_multiplier)
            else:
                actual_sample_rate = 0

            if stc.sample_rate != 0:
                traces = self.ph5.cut(stc.das, start_time,
                                      stc.endtime,
                                      chan=stc.component,
                                      sample_rate=actual_sample_rate,
                                      apply_time_correction=nt, das_t=das)
            else:
                traces = self.ph5.textural_cut(stc.das,
                                               start_time,
                                               stc.endtime,
                                               chan=stc.component,
                                               das_t=das)
            if not isinstance(traces, list):
                return

            for trace in traces:
                if trace.nsamples == 0:
                    continue
                try:
                    obspy_trace = Trace(data=trace.data)
                except ValueError:
                    continue
                if self.format == "SAC":
                    Receiver_t = \
                        self.ph5.get_receiver_t_by_n_i(stc.receiver_n_i)
                    azimuth = Receiver_t['orientation/azimuth/value_f']
                    dip = Receiver_t['orientation/dip/value_f']
                    obspy_trace.stats.sac = {'kstnm': stc.seed_station,
                                             'kcmpnm': stc.seed_channel,
                                             'knetwk': stc.net_code,
                                             'stla': float(stc.latitude),
                                             'stlo': float(stc.longitude),
                                             'stel': float(stc.elev),
                                             'cmpaz': float(azimuth),
                                             'cmpinc': float(dip)}
                elif self.format == "GEOCSV":
                    Receiver_t = \
                        self.ph5.get_receiver_t_by_n_i(stc.receiver_n_i)
                    azimuth = Receiver_t['orientation/azimuth/value_f']
                    dip = Receiver_t['orientation/dip/value_f']
                    obspy_trace.stats.sensor_type = stc.sensor_type
                    obspy_trace.stats.elevation = float(stc.elev)
                    obspy_trace.stats.dip = float(dip)
                    obspy_trace.stats.depth = 0
                    obspy_trace.stats.back_azimuth = azimuth
                    obspy_trace.stats.experiment_id = stc.experiment_id
                    obspy_trace.stats.component = stc.component
                    obspy_trace.stats.response = self.get_response_obj(stc)
                    obspy_trace.stats.array = stc.array_code
                elif self.format.upper() == "SEGY1" or \
                        self.format.upper() == "SEGY2":
                    # These values are used to create the SEG-Y headers
                    obspy_trace.stats.receiver_id = stc.station
                    obspy_trace.stats.ttype = trace.ttype
                    obspy_trace.stats.byteorder = trace.byteorder
                    obspy_trace.stats.elevation = float(stc.elev)
                    obspy_trace.stats.component = stc.component
                    obspy_trace.stats.response = self.get_response_obj(stc)
                    obspy_trace.stats.array = stc.array_code
                    obspy_trace.stats.das = stc.das
                    obspy_trace.stats.shot_id = stc.shot_id
                    obspy_trace.stats.shot_lat = stc.shot_lat
                    obspy_trace.stats.shot_lng = stc.shot_lng
                    obspy_trace.stats.shot_elevation = stc.shot_elevation
                obspy_trace.stats.sampling_rate = actual_sample_rate
                obspy_trace.stats.location = stc.location
                obspy_trace.stats.station = stc.seed_station
                obspy_trace.stats.coordinates = AttribDict()
                obspy_trace.stats.coordinates.latitude = stc.latitude
                obspy_trace.stats.coordinates.longitude = stc.longitude
                obspy_trace.stats.channel = stc.seed_channel
                obspy_trace.stats.network = stc.net_code
                obspy_trace.stats.starttime = trace.start_time.getFdsnTime()
                if self.decimation:
                    obspy_trace.decimate(int(self.decimation))
                obspy_stream.append(obspy_trace)
        if len(obspy_stream.traces) < 1:
            return

        return obspy_stream
Example #32
def _read_su(filename,
             headonly=False,
             byteorder=None,
             unpack_trace_headers=False,
             **kwargs):  # @UnusedVariable
    """
    Reads a Seismic Unix (SU) file and returns an ObsPy Stream object.

    .. warning::
        This function should NOT be called directly, it registers via the
        ObsPy :func:`~obspy.core.stream.read` function, call this instead.

    :type filename: str
    :param filename: SU file to be read.
    :type headonly: bool, optional
    :param headonly: If set to True, read only the header and omit the waveform
        data.
    :type byteorder: str or ``None``
    :param byteorder: Determines the endianness of the file. Either ``'>'`` for
        big endian or ``'<'`` for little endian. If it is ``None``, it will try
        to autodetect the endianness. The endianness is always valid for the
        whole file. Defaults to ``None``.
    :type unpack_trace_headers: bool, optional
    :param unpack_trace_headers: Determines whether or not all trace header
        values will be unpacked during reading. If ``False`` it will greatly
        enhance performance and especially memory usage with large files. The
        header values can still be accessed and will be calculated on the fly
        but tab completion will no longer work. Look in the headers.py for a
        list of all possible trace header values. Defaults to ``False``.
    :returns: An ObsPy :class:`~obspy.core.stream.Stream` object.

    .. rubric:: Example

    >>> from obspy import read
    >>> st = read("/path/to/1.su_first_trace")
    >>> st #doctest: +ELLIPSIS
    <obspy.core.stream.Stream object at 0x...>
    >>> print(st)  #doctest: +ELLIPSIS
    1 Trace(s) in Stream:
    ... | 2005-12-19T15:07:54.000000Z - ... | 4000.0 Hz, 8000 samples
    """
    # Read file to the internal segy representation.
    su_object = _read_su_file(filename,
                              endian=byteorder,
                              unpack_headers=unpack_trace_headers)

    # Create the stream object.
    stream = Stream()

    # Get the endianness from the first trace.
    endian = su_object.traces[0].endian
    # Loop over all traces.
    for tr in su_object.traces:
        # Create new Trace object for every segy trace and append to the Stream
        # object.
        trace = Trace()
        stream.append(trace)
        # skip data if headonly is set
        if headonly:
            trace.stats.npts = tr.npts
        else:
            trace.data = tr.data
        trace.stats.su = AttribDict()
        # If all values will be unpacked create a normal dictionary.
        if unpack_trace_headers:
            # Add the trace header as a new attrib dictionary.
            header = AttribDict()
            for key, value in tr.header.__dict__.items():
                setattr(header, key, value)
        # Otherwise use the LazyTraceHeaderAttribDict.
        else:
            # Add the trace header as a new lazy attrib dictionary.
            header = LazyTraceHeaderAttribDict(tr.header.unpacked_header,
                                               tr.header.endian)
        trace.stats.su.trace_header = header
        # Also set the endianness.
        trace.stats.su.endian = endian
        # The sampling rate should be set for every trace. It is given as a
        # sample interval in microseconds. The only sanity check is that it
        # should be larger than 0.
        tr_header = trace.stats.su.trace_header
        if tr_header.sample_interval_in_ms_for_this_trace > 0:
            trace.stats.delta = \
                float(tr.header.sample_interval_in_ms_for_this_trace) / \
                1E6
        # If the year is not zero, calculate the start time. The end time is
        # then calculated from the start time and the sampling rate.
        # 99 is often used as a placeholder.
        if tr_header.year_data_recorded > 0:
            year = tr_header.year_data_recorded
            # The SEG Y rev 0 standard specifies the year to be a 4 digit
            # number.  Before that it was unclear if it should be a 2 or 4
            # digit number. Old or wrong software might still write 2 digit
            # years. Every number <30 will be mapped to 2000-2029 and every
            # number between 30 and 99 will be mapped to 1930-1999.
            if year < 100:
                if year < 30:
                    year += 2000
                else:
                    year += 1900
            julday = tr_header.day_of_year
            hour = tr_header.hour_of_day
            minute = tr_header.minute_of_hour
            second = tr_header.second_of_minute
            trace.stats.starttime = UTCDateTime(year=year,
                                                julday=julday,
                                                hour=hour,
                                                minute=minute,
                                                second=second)
    return stream
Example #33
 def test_coincidenceTrigger(self):
     """
     Test network coincidence trigger.
     """
     st = Stream()
     files = ["BW.UH1._.SHZ.D.2010.147.cut.slist.gz",
              "BW.UH2._.SHZ.D.2010.147.cut.slist.gz",
              "BW.UH3._.SHZ.D.2010.147.cut.slist.gz",
              "BW.UH4._.EHZ.D.2010.147.cut.slist.gz"]
     for filename in files:
         filename = os.path.join(self.path, filename)
         st += read(filename)
     # some prefiltering used for UH network
     st.filter('bandpass', freqmin=10, freqmax=20)
     # 1. no weighting, no stations specified, good settings
     # => 3 events, no false triggers
     # for the first test we make some additional tests regarding types
     res = coincidenceTrigger("recstalta", 3.5, 1, st.copy(), 3, sta=0.5,
                              lta=10)
     self.assertTrue(isinstance(res, list))
     self.assertTrue(len(res) == 3)
     expected_keys = ['time', 'coincidence_sum', 'duration', 'stations',
                      'trace_ids']
     expected_types = [UTCDateTime, float, float, list, list]
     for item in res:
         self.assertTrue(isinstance(item, dict))
         for key, _type in zip(expected_keys, expected_types):
             self.assertTrue(key in item)
             self.assertTrue(isinstance(item[key], _type))
     self.assertTrue(res[0]['time'] > UTCDateTime("2010-05-27T16:24:31"))
     self.assertTrue(res[0]['time'] < UTCDateTime("2010-05-27T16:24:35"))
     self.assertTrue(4.2 < res[0]['duration'] < 4.8)
     self.assertTrue(res[0]['stations'] == ['UH3', 'UH2', 'UH1', 'UH4'])
     self.assertTrue(res[0]['coincidence_sum'] == 4)
     self.assertTrue(res[1]['time'] > UTCDateTime("2010-05-27T16:26:59"))
     self.assertTrue(res[1]['time'] < UTCDateTime("2010-05-27T16:27:03"))
     self.assertTrue(3.2 < res[1]['duration'] < 3.7)
     self.assertTrue(res[1]['stations'] == ['UH2', 'UH3', 'UH1'])
     self.assertTrue(res[1]['coincidence_sum'] == 3)
     self.assertTrue(res[2]['time'] > UTCDateTime("2010-05-27T16:27:27"))
     self.assertTrue(res[2]['time'] < UTCDateTime("2010-05-27T16:27:33"))
     self.assertTrue(4.2 < res[2]['duration'] < 4.4)
     self.assertTrue(res[2]['stations'] == ['UH3', 'UH2', 'UH1', 'UH4'])
     self.assertTrue(res[2]['coincidence_sum'] == 4)
     # 2. no weighting, station selection
     # => 2 events, no false triggers
     trace_ids = ['BW.UH1..SHZ', 'BW.UH3..SHZ', 'BW.UH4..EHZ']
     # ignore UserWarnings
     with warnings.catch_warnings(record=True):
         warnings.simplefilter('ignore', UserWarning)
         re = coincidenceTrigger("recstalta", 3.5, 1, st.copy(), 3,
                                 trace_ids=trace_ids, sta=0.5, lta=10)
         self.assertTrue(len(re) == 2)
         self.assertTrue(re[0]['time'] > UTCDateTime("2010-05-27T16:24:31"))
         self.assertTrue(re[0]['time'] < UTCDateTime("2010-05-27T16:24:35"))
         self.assertTrue(4.2 < re[0]['duration'] < 4.8)
         self.assertTrue(re[0]['stations'] == ['UH3', 'UH1', 'UH4'])
         self.assertTrue(re[0]['coincidence_sum'] == 3)
         self.assertTrue(re[1]['time'] > UTCDateTime("2010-05-27T16:27:27"))
         self.assertTrue(re[1]['time'] < UTCDateTime("2010-05-27T16:27:33"))
         self.assertTrue(4.2 < re[1]['duration'] < 4.4)
         self.assertTrue(re[1]['stations'] == ['UH3', 'UH1', 'UH4'])
         self.assertTrue(re[1]['coincidence_sum'] == 3)
     # 3. weighting, station selection
     # => 3 events, no false triggers
     trace_ids = {'BW.UH1..SHZ': 0.4, 'BW.UH2..SHZ': 0.35,
                  'BW.UH3..SHZ': 0.4, 'BW.UH4..EHZ': 0.25}
     res = coincidenceTrigger("recstalta", 3.5, 1, st.copy(), 1.0,
                              trace_ids=trace_ids, sta=0.5, lta=10)
     self.assertTrue(len(res) == 3)
     self.assertTrue(res[0]['time'] > UTCDateTime("2010-05-27T16:24:31"))
     self.assertTrue(res[0]['time'] < UTCDateTime("2010-05-27T16:24:35"))
     self.assertTrue(4.2 < res[0]['duration'] < 4.8)
     self.assertTrue(res[0]['stations'] == ['UH3', 'UH2', 'UH1', 'UH4'])
     self.assertTrue(res[0]['coincidence_sum'] == 1.4)
     self.assertTrue(res[1]['time'] > UTCDateTime("2010-05-27T16:26:59"))
     self.assertTrue(res[1]['time'] < UTCDateTime("2010-05-27T16:27:03"))
     self.assertTrue(3.2 < res[1]['duration'] < 3.7)
     self.assertTrue(res[1]['stations'] == ['UH2', 'UH3', 'UH1'])
     self.assertTrue(res[1]['coincidence_sum'] == 1.15)
     self.assertTrue(res[2]['time'] > UTCDateTime("2010-05-27T16:27:27"))
     self.assertTrue(res[2]['time'] < UTCDateTime("2010-05-27T16:27:33"))
     self.assertTrue(4.2 < res[2]['duration'] < 4.4)
     self.assertTrue(res[2]['stations'] == ['UH3', 'UH2', 'UH1', 'UH4'])
     self.assertTrue(res[2]['coincidence_sum'] == 1.4)
     # 4. weighting, station selection, max_len
     # => 2 events, no false triggers, small event does not overlap anymore
     trace_ids = {'BW.UH1..SHZ': 0.6, 'BW.UH2..SHZ': 0.6}
     # ignore UserWarnings
     with warnings.catch_warnings(record=True):
         warnings.simplefilter('ignore', UserWarning)
         re = coincidenceTrigger("recstalta", 3.5, 1, st.copy(), 1.2,
                                 trace_ids=trace_ids,
                                 max_trigger_length=0.13, sta=0.5, lta=10)
         self.assertTrue(len(re) == 2)
         self.assertTrue(re[0]['time'] > UTCDateTime("2010-05-27T16:24:31"))
         self.assertTrue(re[0]['time'] < UTCDateTime("2010-05-27T16:24:35"))
         self.assertTrue(0.2 < re[0]['duration'] < 0.3)
         self.assertTrue(re[0]['stations'] == ['UH2', 'UH1'])
         self.assertTrue(re[0]['coincidence_sum'] == 1.2)
         self.assertTrue(re[1]['time'] > UTCDateTime("2010-05-27T16:27:27"))
         self.assertTrue(re[1]['time'] < UTCDateTime("2010-05-27T16:27:33"))
         self.assertTrue(0.18 < re[1]['duration'] < 0.2)
         self.assertTrue(re[1]['stations'] == ['UH2', 'UH1'])
         self.assertTrue(re[1]['coincidence_sum'] == 1.2)
     # 5. station selection, extremely sensitive settings
     # => 4 events, 1 false triggers
     res = coincidenceTrigger("recstalta", 2.5, 1, st.copy(), 2,
                              trace_ids=['BW.UH1..SHZ', 'BW.UH3..SHZ'],
                              sta=0.3, lta=5)
     self.assertTrue(len(res) == 5)
     self.assertTrue(res[3]['time'] > UTCDateTime("2010-05-27T16:27:01"))
     self.assertTrue(res[3]['time'] < UTCDateTime("2010-05-27T16:27:02"))
     self.assertTrue(1.5 < res[3]['duration'] < 1.7)
     self.assertTrue(res[3]['stations'] == ['UH3', 'UH1'])
     self.assertTrue(res[3]['coincidence_sum'] == 2.0)
     # 6. same as 5, gappy stream
     # => same as 5 (almost, duration of 1 event changes by 0.02s)
     st2 = st.copy()
     tr1 = st2.pop(0)
     t1 = tr1.stats.starttime
     t2 = tr1.stats.endtime
     td = t2 - t1
     tr1a = tr1.slice(starttime=t1, endtime=t1 + 0.45 * td)
     tr1b = tr1.slice(starttime=t1 + 0.6 * td, endtime=t1 + 0.94 * td)
     st2.insert(1, tr1a)
     st2.insert(3, tr1b)
     res = coincidenceTrigger("recstalta", 2.5, 1, st2, 2,
                              trace_ids=['BW.UH1..SHZ', 'BW.UH3..SHZ'],
                              sta=0.3, lta=5)
     self.assertTrue(len(res) == 5)
     self.assertTrue(res[3]['time'] > UTCDateTime("2010-05-27T16:27:01"))
     self.assertTrue(res[3]['time'] < UTCDateTime("2010-05-27T16:27:02"))
     self.assertTrue(1.5 < res[3]['duration'] < 1.7)
     self.assertTrue(res[3]['stations'] == ['UH3', 'UH1'])
     self.assertTrue(res[3]['coincidence_sum'] == 2.0)
     # 7. same as 3 but modify input trace ids and check output of trace_ids
     # and other additional information with ``details=True``
     st2 = st.copy()
     st2[0].stats.network = "XX"
     st2[1].stats.location = "99"
     st2[1].stats.network = ""
     st2[1].stats.location = "99"
     st2[1].stats.channel = ""
     st2[2].stats.channel = "EHN"
     st2[3].stats.network = ""
     st2[3].stats.channel = ""
     st2[3].stats.station = ""
     trace_ids = {'XX.UH1..SHZ': 0.4, '.UH2.99.': 0.35,
                  'BW.UH3..EHN': 0.4, '...': 0.25}
     res = coincidenceTrigger("recstalta", 3.5, 1, st2, 1.0,
                              trace_ids=trace_ids, details=True,
                              sta=0.5, lta=10)
     self.assertTrue(len(res) == 3)
     self.assertTrue(res[0]['time'] > UTCDateTime("2010-05-27T16:24:31"))
     self.assertTrue(res[0]['time'] < UTCDateTime("2010-05-27T16:24:35"))
     self.assertTrue(4.2 < res[0]['duration'] < 4.8)
     self.assertTrue(res[0]['stations'] == ['UH3', 'UH2', 'UH1', ''])
     self.assertTrue(res[0]['trace_ids'][0] == st2[2].id)
     self.assertTrue(res[0]['trace_ids'][1] == st2[1].id)
     self.assertTrue(res[0]['trace_ids'][2] == st2[0].id)
     self.assertTrue(res[0]['trace_ids'][3] == st2[3].id)
     self.assertTrue(res[0]['coincidence_sum'] == 1.4)
     self.assertTrue(res[1]['time'] > UTCDateTime("2010-05-27T16:26:59"))
     self.assertTrue(res[1]['time'] < UTCDateTime("2010-05-27T16:27:03"))
     self.assertTrue(3.2 < res[1]['duration'] < 3.7)
     self.assertTrue(res[1]['stations'] == ['UH2', 'UH3', 'UH1'])
     self.assertTrue(res[1]['trace_ids'][0] == st2[1].id)
     self.assertTrue(res[1]['trace_ids'][1] == st2[2].id)
     self.assertTrue(res[1]['trace_ids'][2] == st2[0].id)
     self.assertTrue(res[1]['coincidence_sum'] == 1.15)
     self.assertTrue(res[2]['time'] > UTCDateTime("2010-05-27T16:27:27"))
     self.assertTrue(res[2]['time'] < UTCDateTime("2010-05-27T16:27:33"))
     self.assertTrue(4.2 < res[2]['duration'] < 4.4)
     self.assertTrue(res[2]['stations'] == ['UH3', 'UH2', 'UH1', ''])
     self.assertTrue(res[2]['trace_ids'][0] == st2[2].id)
     self.assertTrue(res[2]['trace_ids'][1] == st2[1].id)
     self.assertTrue(res[2]['trace_ids'][2] == st2[0].id)
     self.assertTrue(res[2]['trace_ids'][3] == st2[3].id)
     self.assertTrue(res[2]['coincidence_sum'] == 1.4)
     expected_keys = ['cft_peak_wmean', 'cft_std_wmean', 'cft_peaks',
                      'cft_stds']
     expected_types = [float, float, list, list]
     for item in res:
         for key, _type in zip(expected_keys, expected_types):
             self.assertTrue(key in item)
             self.assertTrue(isinstance(item[key], _type))
     # check some of the detailed info
     ev = res[-1]
     self.assertAlmostEqual(ev['cft_peak_wmean'], 18.101139518271076)
     self.assertAlmostEqual(ev['cft_std_wmean'], 4.800051726246676)
     self.assertAlmostEqual(ev['cft_peaks'][0], 18.985548683223936)
     self.assertAlmostEqual(ev['cft_peaks'][1], 16.852175794415011)
     self.assertAlmostEqual(ev['cft_peaks'][2], 18.64005853900883)
     self.assertAlmostEqual(ev['cft_peaks'][3], 17.572363634564621)
     self.assertAlmostEqual(ev['cft_stds'][0], 4.8909448258821362)
     self.assertAlmostEqual(ev['cft_stds'][1], 4.4446373508521804)
     self.assertAlmostEqual(ev['cft_stds'][2], 5.3499401252675964)
     self.assertAlmostEqual(ev['cft_stds'][3], 4.2723814539487703)
def main(args=None):

    if args is None:
        # Run Input Parser
        args = get_daylong_arguments()

    # Load Database
    # stdb>0.1.3
    try:
        db, stkeys = stdb.io.load_db(fname=args.indb, keys=args.stkeys)

    # stdb=0.1.3
    except:
        db = stdb.io.load_db(fname=args.indb)

        # Construct station key loop
        allkeys = db.keys()
        allkeys = sorted(allkeys)

        # Extract key subset
        if len(args.stkeys) > 0:
            stkeys = []
            for skey in args.stkeys:
                stkeys.extend([s for s in allkeys if skey in s])
        else:
            stkeys = db.keys()
            stkeys = sorted(stkeys)

    # Loop over station keys
    for stkey in list(stkeys):

        # Extract station information from dictionary
        sta = db[stkey]

        # Define path to see if it exists
        datapath = Path('DATA') / Path(stkey)
        if not datapath.is_dir():
            print()
            print("Path to " + str(datapath) + " doesn't exist - creating it")
            datapath.mkdir(parents=True)

        # Establish client
        if len(args.UserAuth) == 0:
            client = Client(args.Server)
        else:
            client = Client(args.Server,
                            user=args.UserAuth[0],
                            password=args.UserAuth[1])

        # Get catalogue search start time
        if args.startT is None:
            tstart = sta.startdate
        else:
            tstart = args.startT

        # Get catalogue search end time
        if args.endT is None:
            tend = sta.enddate
        else:
            tend = args.endT

        if tstart > sta.enddate or tend < sta.startdate:
            continue

        # Temporary print locations
        tlocs = sta.location
        if len(tlocs) == 0:
            tlocs = ['']
        for il in range(0, len(tlocs)):
            if len(tlocs[il]) == 0:
                tlocs[il] = "--"
        sta.location = tlocs

        # Update Display
        print()
        print("|===============================================|")
        print("|===============================================|")
        print("|                   {0:>8s}                    |".format(
            sta.station))
        print("|===============================================|")
        print("|===============================================|")
        print("|  Station: {0:>2s}.{1:5s}                            |".format(
            sta.network, sta.station))
        print("|      Channel: {0:2s}; Locations: {1:15s}  |".format(
            sta.channel, ",".join(tlocs)))
        print("|      Lon: {0:7.2f}; Lat: {1:6.2f}                |".format(
            sta.longitude, sta.latitude))
        print("|      Start time: {0:19s}          |".format(
            sta.startdate.strftime("%Y-%m-%d")))
        print("|      End time:   {0:19s}          |".format(
            sta.enddate.strftime("%Y-%m-%d")))
        print("|-----------------------------------------------|")
        print("| Searching day-long files:                     |")
        print("|   Start: {0:19s}                  |".format(
            tstart.strftime("%Y-%m-%d")))
        print("|   End:   {0:19s}                  |".format(
            tend.strftime("%Y-%m-%d")))

        # Split into 24-hour long segments
        dt = 3600. * 24.

        t1 = tstart
        t2 = tstart + dt

        while t2 <= tend:

            # Time stamp
            tstamp = str(t1.year).zfill(4) + '.' + str(
                t1.julday).zfill(3) + '.'

            print()
            print(
                "***********************************************************")
            print("* Downloading day-long data for key " + stkey +
                  " and day " + str(t1.year) + "." + str(t1.julday))
            print("*")
            print("* Channels selected: " + str(args.channels) +
                  ' and vertical')

            # Define file names (to check if files already exist)
            # Horizontal 1 channel
            file1 = datapath / (tstamp + '.' + sta.channel + '1.SAC')
            # Horizontal 2 channel
            file2 = datapath / (tstamp + '.' + sta.channel + '2.SAC')
            # Vertical channel
            fileZ = datapath / (tstamp + '.' + sta.channel + 'Z.SAC')
            # Pressure channel
            fileP = datapath / (tstamp + '.' + sta.channel + 'H.SAC')

            if "P" not in args.channels:

                # If data files exist, continue
                if fileZ.exists() and file1.exists() and file2.exists():
                    if not args.ovr:
                        print("*   " + tstamp +
                              "*SAC                                 ")
                        print("*   -> Files already exist, " +
                              "continuing            ")
                        t1 += dt
                        t2 += dt
                        continue

                channels = sta.channel.upper()+'1,'+sta.channel.upper() + \
                    '2,'+sta.channel.upper()+'Z'
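                # e.g. channels = "HH1,HH2,HHZ" when sta.channel is "HH"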

                # Get waveforms from client
                try:
                    print("*   " + tstamp +
                          "*SAC                                 ")
                    print("*   -> Downloading Seismic data... ")
                    sth = client.get_waveforms(network=sta.network,
                                               station=sta.station,
                                               location=sta.location[0],
                                               channel=channels,
                                               starttime=t1,
                                               endtime=t2,
                                               attach_response=True)
                    print("*      ...done")

                except:
                    print(" Error: Unable to download ?H? components - " +
                          "continuing")
                    t1 += dt
                    t2 += dt
                    continue

                st = sth

            elif "H" not in args.channels:

                # If data files exist, continue
                if fileZ.exists() and fileP.exists():
                    if not args.ovr:
                        print("*   " + tstamp +
                              "*SAC                                 ")
                        print("*   -> Files already exist, " +
                              "continuing            ")
                        t1 += dt
                        t2 += dt
                        continue

                channels = sta.channel.upper() + 'Z'

                # Get waveforms from client
                try:
                    print("*   " + tstamp +
                          "*SAC                                 ")
                    print("*   -> Downloading Seismic data... ")
                    sth = client.get_waveforms(network=sta.network,
                                               station=sta.station,
                                               location=sta.location[0],
                                               channel=channels,
                                               starttime=t1,
                                               endtime=t2,
                                               attach_response=True)
                    print("*      ...done")

                except:
                    print(" Error: Unable to download ?H? components - " +
                          "continuing")
                    t1 += dt
                    t2 += dt
                    continue
                try:
                    print("*   -> Downloading Pressure data...")
                    stp = client.get_waveforms(network=sta.network,
                                               station=sta.station,
                                               location=sta.location[0],
                                               channel='?DH',
                                               starttime=t1,
                                               endtime=t2,
                                               attach_response=True)
                    print("*      ...done")
                    if len(stp) > 1:
                        print("WARNING: There are more than one ?DH trace")
                        print("*   -> Keeping the highest sampling rate")
                        if stp[0].stats.sampling_rate > \
                                stp[1].stats.sampling_rate:
                            stp = Stream(traces=stp[0])
                        else:
                            stp = Stream(traces=stp[1])

                except:
                    print(" Error: Unable to download ?DH component - " +
                          "continuing")
                    t1 += dt
                    t2 += dt
                    continue

                st = sth + stp

            else:

                # If data files exist, continue
                if (fileZ.exists() and file1.exists() and file2.exists()
                        and fileP.exists()):
                    if not args.ovr:
                        print("*   " + tstamp +
                              "*SAC                                 ")
                        print("*   -> Files already exist, " +
                              "continuing            ")
                        t1 += dt
                        t2 += dt
                        continue

                channels = sta.channel.upper()+'1,'+sta.channel.upper() + \
                    '2,'+sta.channel.upper()+'Z'

                # Get waveforms from client
                try:
                    print("*   " + tstamp +
                          "*SAC                                 ")
                    print("*   -> Downloading Seismic data... ")
                    sth = client.get_waveforms(network=sta.network,
                                               station=sta.station,
                                               location=sta.location[0],
                                               channel=channels,
                                               starttime=t1,
                                               endtime=t2,
                                               attach_response=True)
                    print("*      ...done")

                except:
                    print(" Error: Unable to download ?H? components - " +
                          "continuing")
                    t1 += dt
                    t2 += dt
                    continue
                try:
                    print("*   -> Downloading Pressure data...")
                    stp = client.get_waveforms(network=sta.network,
                                               station=sta.station,
                                               location=sta.location[0],
                                               channel='?DH',
                                               starttime=t1,
                                               endtime=t2,
                                               attach_response=True)
                    print("*      ...done")
                    if len(stp) > 1:
                        print("WARNING: There are more than one ?DH trace")
                        print("*   -> Keeping the highest sampling rate")
                        if stp[0].stats.sampling_rate > \
                                stp[1].stats.sampling_rate:
                            stp = Stream(traces=stp[0])
                        else:
                            stp = Stream(traces=stp[1])

                except:
                    print(" Error: Unable to download ?DH component - " +
                          "continuing")
                    t1 += dt
                    t2 += dt
                    continue

                st = sth + stp

            # Detrend, filter
            st.detrend('demean')
            st.detrend('linear')
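            # Low-pass at half the new sampling rate (the new Nyquist
            # frequency) to avoid aliasing before resampling.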
            st.filter('lowpass',
                      freq=0.5 * args.new_sampling_rate,
                      corners=2,
                      zerophase=True)
            st.resample(args.new_sampling_rate)

            # Check streams
            is_ok, st = utils.QC_streams(t1, t2, st)
            if not is_ok:
                continue

            sth = st.select(component='1') + st.select(component='2') + \
                st.select(component='Z')

            # Remove responses
            print("*   -> Removing responses - Seismic data")
            sth.remove_response(pre_filt=args.pre_filt, output=args.units)

            # Extract traces - Z
            trZ = sth.select(component='Z')[0]
            trZ = utils.update_stats(trZ, sta.latitude, sta.longitude,
                                     sta.elevation, 'Z')
            trZ.write(str(fileZ), format='SAC')

            # Extract traces - H
            if "H" in args.channels:
                tr1 = sth.select(component='1')[0]
                tr2 = sth.select(component='2')[0]
                tr1 = utils.update_stats(tr1, sta.latitude, sta.longitude,
                                         sta.elevation, '1')
                tr2 = utils.update_stats(tr2, sta.latitude, sta.longitude,
                                         sta.elevation, '2')
                tr1.write(str(file1), format='SAC')
                tr2.write(str(file2), format='SAC')

            # Extract traces - P
            if "P" in args.channels:
                stp = st.select(component='H')
                print("*   -> Removing responses - Pressure data")
                stp.remove_response(pre_filt=args.pre_filt)
                trP = stp[0]
                trP = utils.update_stats(trP, sta.latitude, sta.longitude,
                                         sta.elevation, 'P')
                trP.write(str(fileP), format='SAC')

            t1 += dt
            t2 += dt
def multiprocess_fft(inlist):
    tr1 = inlist[0]
    tr2 = inlist[1]
    window_length = inlist[2]
    overlap = inlist[3]
    try:
        return noise.noisecorr(tr1, tr2, window_length, overlap)
    except:
        return None
        
##############################################
ref_curve = np.loadtxt("Average_phase_velocity_love")
no_cores = 4  # multiprocessing might cause problems on Windows machines because of the missing "if __name__ == '__main__':" guard
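# A minimal sketch (not in the original script) of how the joblist assembled
# below could be dispatched to the worker function under the guard mentioned above:
#     if __name__ == '__main__':
#         from multiprocessing import Pool
#         with Pool(no_cores) as pool:
#             results = pool.map(multiprocess_fft, joblist)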
joblist = []
for jday in np.arange(365):       
    stream1 = Stream()
    stream2 = Stream()
    
    try:
        stream1 += read("preprocessed_data/SULZ*.%d.*" %jday)
        stream2 += read("preprocessed_data/VDL*.%d.*" %jday)
    except:
        continue
        
    dist,az,baz = gps2dist_azimuth(stream1[0].stats.sac.stla,stream1[0].stats.sac.stlo,
                                  stream2[0].stats.sac.stla,stream2[0].stats.sac.stlo)

    try:
        stream1,stream2 = noise.adapt_timespan(stream1,stream2)
    except:
        continue
Example #36
0
from obspy import read, Stream
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np

def FileToList(fileName):
    with open(fileName, "r", encoding=None) as f:
        fdata = f.readlines()
    data=[]
    for itr in fdata:
        #print(itr)
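        # keep only lines beginning with "20" (presumably 20xx timestamp records)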
        if(itr[:2]=='20'):
            data.append([ii.strip() for ii in itr.split() if(len(ii)>0)])
    return data

stream = read("after/SC.XJI.2008133160000.D.00.BHN.sac")
stream += read("after/SC.XJI.2008133160000.D.00.BHZ.sac")
stream += read("after/SC.XJI.2008133160001.D.00.BHE.sac")
stream.detrend()
ptd=[]
"""
for ii in range(1000):
        ptd.append(stream[0].data[ii:ii+1000])
plt.matshow(ptd)
plt.show()
"""

nzy1,nzd1,nzs1=(stream[0].stats.sac.nzyear,
                stream[0].stats.sac.nzjday,
                stream[2].stats.sac.nzhour*3600+
Example #37
0
def main():

    # Run Input Parser
    args = arguments.get_lkss_arguments()

    # Load Database
    db = stdb.io.load_db(fname=args.indb)

    # Construct station key loop
    allkeys = db.keys()

    # Extract key subset
    if len(args.stkeys) > 0:
        stkeys = []
        for skey in args.stkeys:
            stkeys.extend([s for s in allkeys if skey in s])
    else:
        stkeys = db.keys()

    # Loop over station keys
    for stkey in list(stkeys):

        # Extract station information from dictionary
        sta = db[stkey]

        # Define path to see if it exists
        datapath = 'P_DATA/' + stkey
        if not os.path.isdir(datapath):
            print("Path to " + datapath + " doesn't exist - continuing")
            continue

        # Temporary print locations
        tlocs = sta.location
        if len(tlocs) == 0:
            tlocs = ['']
        for il in range(0, len(tlocs)):
            if len(tlocs[il]) == 0:
                tlocs[il] = "--"
        sta.location = tlocs

        # Update Display
        print(" ")
        print(" ")
        print("|===============================================|")
        print("|                   {0:>8s}                    |".format(
            sta.station))
        print("|===============================================|")
        print("|  Station: {0:>2s}.{1:5s}                            |".format(
            sta.network, sta.station))
        print("|      Channel: {0:2s}; Locations: {1:15s}  |".format(
            sta.channel, ",".join(tlocs)))
        print("|      Lon: {0:7.2f}; Lat: {1:6.2f}                |".format(
            sta.longitude, sta.latitude))
        print("|-----------------------------------------------|")

        rfRstream = Stream()
        rfTstream = Stream()

        for folder in os.listdir(datapath):

            filename = datapath + "/" + folder + "/RF_Data.pkl"
            if os.path.isfile(filename):
                file = open(filename, "rb")
                rfdata = pickle.load(file)
                if rfdata[0].stats.snr > args.snr and \
                        rfdata[0].stats.cc > args.cc:

                    rfRstream.append(rfdata[1])
                    rfTstream.append(rfdata[2])
                file.close()

        if len(rfRstream) == 0:
            continue

        if args.no_outl:
            # Remove outliers wrt variance
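            # Robust z-score: 1.4826 * MAD approximates the standard deviation
            # for normally distributed data; traces whose robust z-score
            # exceeds 2.5 are removed from both the R and T streams.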
            varR = np.array([np.var(tr.data) for tr in rfRstream])
            medvarR = np.median(varR)
            madvarR = 1.4826 * np.median(np.abs(varR - medvarR))
            robustR = np.abs((varR - medvarR) / madvarR)
            outliersR = np.arange(len(rfRstream))[robustR > 2.5]
            for i in outliersR[::-1]:
                rfRstream.remove(rfRstream[i])
                rfTstream.remove(rfTstream[i])

            # Do the same for transverse
            varT = np.array([np.var(tr.data) for tr in rfTstream])
            medvarT = np.median(varT)
            madvarT = 1.4826 * np.median(np.abs(varT - medvarT))
            robustT = np.abs((varT - medvarT) / madvarT)
            outliersT = np.arange(len(rfTstream))[robustT > 2.5]
            for i in outliersT[::-1]:
                rfRstream.remove(rfRstream[i])
                rfTstream.remove(rfTstream[i])

        if args.bp:
            # Filter
            rfRstream.filter('bandpass',
                             freqmin=args.bp[0],
                             freqmax=args.bp[1],
                             corners=2,
                             zerophase=True)
            rfTstream.filter('bandpass',
                             freqmin=args.bp[0],
                             freqmax=args.bp[1],
                             corners=2,
                             zerophase=True)

        # Binning
        rf_tmp = binning.bin(rfRstream,
                             rfTstream,
                             typ='baz',
                             nbin=args.nbaz + 1,
                             pws=args.pws)

        azcorr, *_ = utils.decompose(rf_tmp[0],
                                     rf_tmp[1],
                                     args.trange[0],
                                     args.trange[1],
                                     plot_f=args.plot_f,
                                     plot_comps=args.plot_comps)
        print("Best fit azcorr: " + "{0:5.1f}".format(azcorr))

        # Bootstrap statistics?
        if args.boot:
            azcorr, err_azcorr = utils.get_bootstrap(rf_tmp[0],
                                                     rf_tmp[1],
                                                     args.trange[0],
                                                     args.trange[1],
                                                     plot_hist=True)
            print("Bootstrap azcorr and uncertainty: " +
                  "{0:5.1f}, {1:5.1f}".format(azcorr, err_azcorr))
Example #38
0
#%%

# STATION, CHANNEL (DDF --> 400 Hz), NETWORK AND LOCATION CODES
sta = 'LB01'  # STATION
cha = 'HDF'  # CHANNEL
net = 'Z4'  #
loc = ''  # location, it depends mostly on which network you are in.
client = Client('138.253.113.19',
                16022)  # ip, port - ip's 138.253.113.19 or 138.253.112.23

for x in range(0, 20):

    t1 = UTCDateTime(
        secondary[x][1])  #the format is year:day_of_the_year:month
    t2 = t1 + 6
    st = Stream()
    st = client.get_waveforms(net, sta, '', cha, t1 - 20, t2 + 60)
    #print(st)

    st.detrend(type='linear')
    st.detrend(type='demean')
    st.filter(type='bandpass', freqmin=0.2, freqmax=2)
    st.plot(color='b', starttime=t1 - 15, endtime=t2 + 30)

#%%

count = 0

#for x in range(0,100):
for x in range(0, len(secondary)):
    primary = secondary[x][0]
Example #39
0
def get_daily_data(st, year, mo, day, sr):
    dayst = Stream()
    tr = Stream()
    # set client
    client = Client("IRIS")
    t1 = UTCDateTime(year, mo, day)
    t2 = t1 + timedelta(days=1)
    for ii in range(len(st)):
        net = st[ii].stats.network
        sta = st[ii].stats.station
        comp = st[ii].stats.channel
        try:
            tr = client.get_waveforms(net, sta, "*", comp, t1 - 2, t2 + 2)
        except:
            print("No data for " + net + " " + sta + " " + comp + " " +
                  str(t1) + " " + str(t2))
        else:
            print("Data available for " + net + " " + sta + " " + comp + " " +
                  str(t1) + " " + str(t2))
            tr.detrend()
            tr.merge()
            #print(tr)
            if isinstance(tr[0].data, np.ma.masked_array):
                tr[0].data = tr[0].data.filled()
            tr.filter("bandpass", freqmin=2, freqmax=7)
            tr.trim(starttime=t1 - 2,
                    endtime=t2 + 2,
                    nearest_sample=1,
                    pad=1,
                    fill_value=0)
            tr.interpolate(sampling_rate=sr, starttime=t1)
            tr.trim(starttime=t1,
                    endtime=t2,
                    nearest_sample=1,
                    pad=1,
                    fill_value=0)
            dayst += tr
    return dayst
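
# Hypothetical usage sketch (not part of the original example): fetch one day of
# filtered, gap-filled data for the channels of a template Stream. obspy's bundled
# example Stream is used here only to supply network/station/channel codes.
if __name__ == '__main__':
    from obspy import read
    template_st = read()  # obspy example data (network BW, station RJOB)
    daily_st = get_daily_data(template_st, 2019, 7, 4, sr=20.0)
    print(daily_st)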
Example #40
0
def make_template(df, sr):
    client = Client("IRIS")
    # make templates
    regional = df['Regional']
    eventid = regional + str(df['ID'])
    detail = get_event_by_id(eventid, includesuperseded=True)
    phases = get_phase_dataframe(detail, catalog=regional)
    phases = phases[phases['Status'] == 'manual']
    print(phases)
    phases = phases[~phases.
                    duplicated(keep='first', subset=['Channel', 'Phase'])]
    print(phases)
    st = Stream()
    tr = Stream()
    print(phases)
    for ii in range(len(phases)):
        net = phases.iloc[ii]['Channel'].split('.')[0]
        sta = phases.iloc[ii]['Channel'].split('.')[1]
        comp = phases.iloc[ii]['Channel'].split('.')[2]
        #phase=phases.iloc[ii]['Phase']
        arr = UTCDateTime(phases.iloc[ii]['Arrival Time'])
        #print(int(np.round(arr.microsecond/(1/sr*10**6))*1/sr*10**6)==1000000)
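        # Snap the pick time to the nearest sample of the target rate sr; if
        # the rounding lands on a full second (1e6 microseconds), carry it
        # over into the seconds field instead.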
        if int(np.round(arr.microsecond / (1 / sr * 10**6)) * 1 / sr *
               10**6) == 1000000:
            arr.microsecond = 0
            arr.second = arr.second + 1
        else:
            arr.microsecond = int(
                np.round(arr.microsecond / (1 / sr * 10**6)) * 1 / sr * 10**6)
        t1 = arr - 1
        t2 = arr + 9
        try:
            tr = client.get_waveforms(net, sta, "*", comp, t1 - 2, t2 + 2)
        except:
            print("No data for " + net + " " + sta + " " + comp + " " +
                  str(t1) + " " + str(t2))
        else:
            print("Data available for " + net + " " + sta + " " + comp + " " +
                  str(t1) + " " + str(t2))
            tr.detrend()
            tr.trim(starttime=t1 - 2,
                    endtime=t2 + 2,
                    nearest_sample=1,
                    pad=1,
                    fill_value=0)
            tr.filter("bandpass", freqmin=2, freqmax=7)
            tr.interpolate(sampling_rate=sr, starttime=t1)
            tr.trim(starttime=t1,
                    endtime=t2,
                    nearest_sample=1,
                    pad=1,
                    fill_value=0)
            st += tr
    return st
Example #41
0
class SEG2(object):
    """
    Class to read SEG 2 formatted files. The main reason this is
    realized as a class is for the ease of passing the various parameters from
    one function to the next.

    Do not change the file_pointer attribute while using this class. It will
    be used to keep track of which parts have already been read and which
    have not.
    """
    def __init__(self):
        pass

    def readFile(self, file_object):
        """
        Reads the given file and returns a Stream object. If
        file_object is a string it will be treated as a filename, otherwise it
        will be expected to be a file like object with read(), seek() and
        tell() methods.

        If it is a file_like object, file.seek(0, 0) is expected to be the
        beginning of the SEG-2 file.
        """
        # Read the file if it is a filename.
        if isinstance(file_object, basestring):
            self.file_pointer = open(file_object, 'rb')
        else:
            self.file_pointer = file_object
            self.file_pointer.seek(0, 0)

        self.stream = Stream()

        # Read the file descriptor block. This will also determine the
        # endianness.
        self.readFileDescriptorBlock()

        # Loop over every trace, read it and append it to the Stream.
        for tr_pointer in self.trace_pointers:
            self.file_pointer.seek(tr_pointer, 0)
            self.stream.append(self.parseNextTrace())

        return self.stream

    def readFileDescriptorBlock(self):
        """
        Handles the reading of the file descriptor block and the free form
        section following it.
        """
        file_descriptor_block = self.file_pointer.read(32)

        # Determine the endianness and check if the block id is valid.
        if (unpack('B', file_descriptor_block[0])[0] == 0x55) and \
           (unpack('B', file_descriptor_block[1])[0] == 0x3a):
            self.endian = '<'
        elif (unpack('B', file_descriptor_block[0])[0] == 0x3a) and \
            (unpack('B', file_descriptor_block[1])[0] == 0x55):
            self.endian = '>'
        else:
            msg = 'Wrong File Descriptor Block ID'
            raise SEG2InvalidFileError(msg)

        # Check the revision number.
        revision_number = unpack('%sH' % self.endian,
                                file_descriptor_block[2:4])[0]
        if revision_number != 1:
            msg = '\nOnly SEG 2 revision 1 is officially supported. This file '
            msg += 'has revision %i. Reading it might fail.' % revision_number
            msg += '\nPlease contact the ObsPy developers with a sample file.'
            warnings.warn(msg)
        size_of_trace_pointer_sub_block = unpack('%sH' % self.endian,
                                       file_descriptor_block[4:6])[0]
        number_of_traces = unpack('%sH' % self.endian,
                                  file_descriptor_block[6:8])[0]

        # Define the string and line terminators.
        size_of_string_terminator = unpack('B', file_descriptor_block[8])[0]
        first_string_terminator_char = unpack('c', file_descriptor_block[9])[0]
        second_string_terminator_char = unpack('c',
                                               file_descriptor_block[10])[0]
        size_of_line_terminator = unpack('B', file_descriptor_block[11])[0]
        first_line_terminator_char = unpack('c', file_descriptor_block[12])[0]
        second_line_terminator_char = unpack('c', file_descriptor_block[13])[0]

        # Assemble the string terminator.
        if size_of_string_terminator == 1:
            self.string_terminator = first_string_terminator_char
        elif size_of_string_terminator == 2:
            self.string_terminator = first_string_terminator_char + \
                                     second_string_terminator_char
        else:
            msg = 'Wrong size of string terminator.'
            raise SEG2InvalidFileError(msg)
        # Assemble the line terminator.
        if size_of_line_terminator == 1:
            self.line_terminator = first_line_terminator_char
        elif size_of_line_terminator == 2:
            self.line_terminator = first_line_terminator_char + \
                                     second_line_terminator_char
        else:
            msg = 'Wrong size of line terminator.'
            raise SEG2InvalidFileError(msg)

        # Read the trace pointer sub-block and retrieve all the pointers.
        trace_pointer_sub_block = \
                self.file_pointer.read(size_of_trace_pointer_sub_block)
        self.trace_pointers = []
        for _i in xrange(number_of_traces):
            index = _i * 4
            self.trace_pointers.append(
                unpack('%sL' % self.endian,
                       trace_pointer_sub_block[index:index + 4])[0])

        # The rest of the header up to where the first trace pointer points is
        # a free form section.
        self.stream.stats = AttribDict()
        self.stream.stats.seg2 = AttribDict()
        self.parseFreeForm(self.file_pointer.read(
                           self.trace_pointers[0] - self.file_pointer.tell()),
                           self.stream.stats.seg2)

        # Get the time information from the file header.
        # XXX: Need some more generic date/time parsers.
        time = self.stream.stats.seg2.ACQUISITION_TIME
        date = self.stream.stats.seg2.ACQUISITION_DATE
        time = time.strip().split(':')
        date = date.strip().split('/')
        hour, minute, second = int(time[0]), int(time[1]), float(time[2])
        day, month, year = int(date[0]), MONTHS[date[1].lower()], int(date[2])
        self.starttime = UTCDateTime(year, month, day, hour, minute, second)

    def parseNextTrace(self):
        """
        Parse the next trace in the trace pointer list and return a Trace
        object.
        """
        trace_descriptor_block = self.file_pointer.read(32)
        # Check if the trace descriptor block id is valid.
        if unpack('%sH' % self.endian, trace_descriptor_block[0:2])[0] != \
           0x4422:
            msg = 'Invalid trace descriptor block id.'
            raise SEG2InvalidFileError(msg)
        size_of_this_block = unpack('%sH' % self.endian,
                                    trace_descriptor_block[2:4])[0]
        number_of_samples_in_data_block = \
                unpack('%sL' % self.endian, trace_descriptor_block[8:12])[0]
        data_format_code = unpack('B', trace_descriptor_block[12])[0]

        # Parse the data format code.
        if data_format_code == 4:
            dtype = 'float32'
            sample_size = 4
        elif data_format_code == 5:
            dtype = 'float64'
            sample_size = 8
        elif data_format_code == 1:
            dtype = 'int16'
            sample_size = 2
        elif data_format_code == 2:
            dtype = 'int32'
            sample_size = 4
        elif data_format_code == 3:
            msg = ('\nData format code 3 (20-bit SEG-D floating point) not '
                   'supported yet.\nPlease contact the ObsPy developers with '
                   'a sample file.')
            raise NotImplementedError(msg)
        else:
            msg = 'Unrecognized data format code'
            raise SEG2InvalidFileError(msg)

        # The rest of the trace block is free form.
        header = {}
        header['seg2'] = AttribDict()
        self.parseFreeForm(
            self.file_pointer.read(size_of_this_block - 32),
            header['seg2'])
        header['delta'] = float(header['seg2']['SAMPLE_INTERVAL'])
        # Set to the file's starttime.
        header['starttime'] = deepcopy(self.starttime)
        if 'DELAY' in header['seg2']:
            if float(header['seg2']['DELAY']) != 0:
                msg = "Non-zero value found in Trace's 'DELAY' field. " + \
                      "This is not supported/tested yet and might lead " + \
                      "to a wrong starttime of the Trace. Please contact " + \
                      "the ObsPy developers with a sample file."
                warnings.warn(msg)
        header['calib'] = float(header['seg2']['DESCALING_FACTOR'])
        # Unpack the data.
        data = np.fromstring(self.file_pointer.read(
                number_of_samples_in_data_block * sample_size), dtype=dtype)
        # Integrate SEG2 file header into each trace header
        tmp = self.stream.stats.seg2.copy()
        tmp.update(header['seg2'])
        header['seg2'] = tmp
        return Trace(data=data, header=header)

    def parseFreeForm(self, free_form_str, attrib_dict):
        """
        Parse the free form section stored in free_form_str and save it in
        attrib_dict.
        """
        # Separate the strings.
        strings = free_form_str.split(self.string_terminator)
        # This is not fully according to the SEG-2 format specification (or
        # rather the specification only speaks about an offset of 2 bytes
        # between strings and a string_terminator between two free form
        # strings). The files at hand show the following separation between
        # two strings: 'random offset byte', 'string_terminator',
        # 'random offset byte'.
        # Therefore every string has to be at least 3 bytes wide to be
        # acceptable after being split at the string terminator.
        strings = [_i for _i in strings if len(_i) >= 3]
        # Every string has the structure OPTION<SPACE>VALUE. Write to
        # stream.stats attribute.
        for string in strings:
            string = string.strip()
            string = string.split(' ')
            key = string[0].strip()
            value = ' '.join(string[1:]).strip()
            setattr(attrib_dict, key, value)
        # Parse the notes string again.
        if hasattr(attrib_dict, 'NOTE'):
            notes = attrib_dict.NOTE.split(self.line_terminator)
            attrib_dict.NOTE = AttribDict()
            for note in notes:
                note = note.strip()
                note = note.split(' ')
                key = note[0].strip()
                value = ' '.join(note[1:]).strip()
                setattr(attrib_dict.NOTE, key, value)
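
# Hypothetical usage sketch (not part of the original module; "shot_1.dat" is a
# placeholder file name). The class above is the low-level reader; in practice a
# SEG-2 file is usually read through obspy's generic plugin interface:
#     st = SEG2().readFile("shot_1.dat")
#     from obspy import read
#     st = read("shot_1.dat", format="SEG2")   # equivalent, via the SEG2 plugin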
Example #42
0
def rot_syn_RT(event_no, start_buff, end_buff, taper, taper_frac,
            plot_scale_fac, filt, freq_min, freq_max, min_dist, max_dist,
            vmodel, basin_width, basin):

    from obspy import UTCDateTime
    from obspy import Stream, Trace
    from obspy import read
    from obspy.geodetics import gps2dist_azimuth
    import numpy as np
    import os
    import matplotlib.pyplot as plt
    import time

    import sys # don't show any warnings
    import warnings

    if not sys.warnoptions:
        warnings.simplefilter("ignore")

    show_data = 0
    start_time_wc = time.time()
    print('This is event number: ' + str(event_no) + ' vmod is ' + vmodel)

    #%% find event details for origin time, lat, lon
    ev_file = '/Users/vidale/Documents/PyCode/LAB/ricardo/event_list.txt'
    file_ev = open(ev_file, 'r')
    for line in file_ev:           # pull numbers off the rest of the lines
        split_line = line.split()
        event = split_line[0]
        if event == event_no:
            ev_lat       = float(split_line[3])
            ev_lon       = float(split_line[2])
            t1           = UTCDateTime(split_line[5])
            date_label  = split_line[5][0:10]
            year1        = split_line[5][0:4]
    print(event_no + str(t1) + ' ' + date_label + ' ' + year1 + '  ' + str(ev_lat) + ' ' + str(ev_lon))

    #%% find event details for origin time, lat, lon
    badt_file = '/Users/vidale/Documents/PyCode/LAB/ricardo/Mseed_data/bad_trace.txt'
    file_badt = open(badt_file, 'r')
    badt_lines = file_badt.readlines()
    badt_event   = []
    badt_station = []
    badt_compo   = []

    for line in badt_lines:           # pull numbers off all the lines
        split_line = line.split()
        badt_event.append(  split_line[0])
        badt_station.append(split_line[1])
        badt_compo.append(  split_line[2])
    print(str(len(badt_event)) + ' bad traces in list')

    #%% Open station location file
    sta_file = '/Users/vidale/Documents/PyCode/LAB/ricardo/ricardo_stations.txt'
    file_st = open(sta_file, 'r')
    line = file_st.readline()      # read first line to skip header information
    lines = file_st.readlines()
    print(str(len(lines)) + ' stations read from ' + sta_file)

    # Load station coords into arrays, many more stations than used
    station_index = range(len(lines))
    st_num   = []
    st_netw  = []
    st_name  = []
    st_dist  = []
    st_az    = []
    st_baz   = []
    st_lat   = []
    st_lon   = []
    for line in lines:
        split_line = line.split()
        st_num.append( split_line[0])
        st_netw.append(split_line[2])
        st_name.append(split_line[3])
        st_lat.append( split_line[4])
        st_lon.append( split_line[5])
        distance = gps2dist_azimuth( ev_lat, ev_lon, float(split_line[4]), float(split_line[5])) # Get distance and azimuth
        st_dist.append(distance[0]/1000.) # distance in km
        st_az.append(distance[1]) # azimuth
        st_baz.append(distance[2]) # back-azimuth
    print('number of stations in list is ' + str(len(st_num)) + ' or ' + str(station_index))

    #%% Load data and synthetic waveforms
    st_dat = Stream()
    st_synE = Stream()
    st_synN = Stream()
    st_synZ = Stream()
    if vmodel == 'cvmhy':
        fname_dat     = '/Users/vidale/Documents/PyCode/LAB/ricardo/Mseed_data/vel/ve_' + event_no + '.mseed'
        fname_synE     = '/Users/vidale/Documents/PyCode/LAB/ricardo/Mseed_syn/vel/cvmhy/ve_'  + event_no + '_cvmhy.mseed'
        fname_synN     = '/Users/vidale/Documents/PyCode/LAB/ricardo/Mseed_syn/vel/cvmhy/vn_'  + event_no + '_cvmhy.mseed'
        fname_synZ     = '/Users/vidale/Documents/PyCode/LAB/ricardo/Mseed_syn/vel/cvmhy/vz_'  + event_no + '_cvmhy.mseed'
    elif vmodel == 'cvms426-223':
        fname_dat     = '/Users/vidale/Documents/PyCode/LAB/ricardo/Mseed_data/vel/ve_' + event_no + '.mseed'
        fname_synE     = '/Users/vidale/Documents/PyCode/LAB/ricardo/Mseed_syn/vel/cvms426-223/ve_'  + event_no + '_cvms426-223.mseed'
        fname_synN     = '/Users/vidale/Documents/PyCode/LAB/ricardo/Mseed_syn/vel/cvms426-223/vn_'  + event_no + '_cvms426-223.mseed'
        fname_synZ     = '/Users/vidale/Documents/PyCode/LAB/ricardo/Mseed_syn/vel/cvms426-223/vz_'  + event_no + '_cvms426-223.mseed'
    elif vmodel == 'cvmhn':
        fname_dat     = '/Users/vidale/Documents/PyCode/LAB/ricardo/Mseed_data/vel/ve_' + event_no + '.mseed'
        fname_synE     = '/Users/vidale/Documents/PyCode/LAB/ricardo/Mseed_syn/vel/cvmhn/ve_'  + event_no + '_cvmhn.mseed'
        fname_synN     = '/Users/vidale/Documents/PyCode/LAB/ricardo/Mseed_syn/vel/cvmhn/vn_'  + event_no + '_cvmhn.mseed'
        fname_synZ     = '/Users/vidale/Documents/PyCode/LAB/ricardo/Mseed_syn/vel/cvmhn/vz_'  + event_no + '_cvmhn.mseed'
    elif vmodel == 'cvms400-100':
        fname_dat     = '/Users/vidale/Documents/PyCode/LAB/ricardo/Mseed_data/vel/ve_' + event_no + '.mseed'
        fname_synE     = '/Users/vidale/Documents/PyCode/LAB/ricardo/Mseed_syn/vel/cvms400-100/ve_'  + event_no + '_cvms400-100.mseed'
        fname_synN     = '/Users/vidale/Documents/PyCode/LAB/ricardo/Mseed_syn/vel/cvms400-100/vn_'  + event_no + '_cvms400-100.mseed'
        fname_synZ     = '/Users/vidale/Documents/PyCode/LAB/ricardo/Mseed_syn/vel/cvms400-100/vz_'  + event_no + '_cvms400-100.mseed'
    st_dat=read(fname_dat)
    st_synE=read(fname_synE)
    st_synN=read(fname_synN)
    st_synZ=read(fname_synZ)
    print('In sgram file ' + str(st_synZ[609].data[0]) + '  ' + st_synZ[609].stats.station + '  ' + str(len(st_synZ)))
    print('In arrays st_name[609] ' + st_name[609] + ' st_name[610] ' + st_name[610])
    print('In arrays st_name[0] ' + st_name[0] + ' st_name[1] ' + st_name[1])

    print('1st data trace has : ' + str(len(st_synE[0].data)) + ' time pts ')
    print('synE has ' + str(len(st_synE)) + ' traces')
    print('synN has ' + str(len(st_synN)) + ' traces')
    print('synZ has ' + str(len(st_synZ)) + ' traces')

    #%% select data by distance (and azimuth?), and cull synthetics to match data
    st_dat_select = Stream()
    st_synE_select = Stream()
    st_synN_select = Stream()
    st_synZ_select = Stream()
    for tr in st_dat: # examine traces one by one
        for ii in range(len(st_name)):  # find matching entry in station roster
            if (tr.stats.network == st_netw[ii] and tr.stats.station == st_name[ii]): # find station in inventory
                if (st_dist[ii] < max_dist) and (st_dist[ii] > min_dist): # exclude stations too close or too far
                    # basin is roughly within 15 km of these 3 stations
                    distance = gps2dist_azimuth( 33.99053, -118.36171, float(st_lat[ii]), float(st_lon[ii])) # station BHP
                    dist1 = distance[0]/1000  # convert m to km
                    distance = gps2dist_azimuth( 33.88110, -118.17568, float(st_lat[ii]), float(st_lon[ii])) # station LTP
                    dist2 = distance[0]/1000
                    distance = gps2dist_azimuth( 33.80776, -117.98116, float(st_lat[ii]), float(st_lon[ii])) # station BRE
                    dist3 = distance[0]/1000
#                    print(tr.stats.station + ' ' + str(dist1) + ' ' + str(dist2) + ' ' + str(dist3))
                    if basin == False or (dist1 < basin_width) or (dist2 < basin_width) or (dist3 < basin_width):  # keep stations only within X km of basin axis
#                        print('selected: ' + tr.stats.station)
#                        tr.stats.distance = st_dist[ii] # add distance to trace metadata
#                        st_datE_select += tr
                        tr.stats.distance = st_dist[ii] # add distance to trace metadata
                        st_synE[ii].stats.distance = st_dist[ii]
                        st_synN[ii].stats.distance = st_dist[ii]
                        st_synZ[ii].stats.distance = st_dist[ii]
                        st_dat_select += tr
                        st_synE_select += st_synE[ii]
                        st_synN_select += st_synN[ii]
                        st_synZ_select += st_synZ[ii]
#                        print(tr.stats.station + ' counter ' + str(ii) + ' ' + 'num ' + str(int(st_num[ii])))
                    else:
                        print('Not in basin: '  + tr.stats.station)
                else:
                    print('Too far: '  + tr.stats.station)
    print('now data has ' + str(len(st_dat_select)) + ' traces')
    print('now synE has ' + str(len(st_synE_select)) + ' traces')
    print('now synN has ' + str(len(st_synN_select)) + ' traces')
    print('now synZ has ' + str(len(st_synZ_select)) + ' traces')

    #%%  reject data and synthetics on bad trace list, either individual components or A for all components
    st_dat_good = Stream()
    for tr in st_dat_select: # examine traces one by one
        do_write = 1
        for ii in range(len(badt_event)):
            if event_no == badt_event[ii] and tr.stats.station == badt_station[ii]: # find station in inventory
                if badt_compo[ii] == 'A' or badt_compo[ii] == 'E' or badt_compo[ii] == 'N' or badt_compo[ii] == 'Z':
                    do_write = 0
        if do_write == 1:
            st_dat_good += tr
    print('After rejecting labeled bad traces, dat has ' + str(len(st_dat_good)) + ' traces')

    st_synE_good = Stream()
    for tr in st_synE_select: # examine traces one by one
        do_write = 1
        for ii in range(len(badt_event)):
            if event_no == badt_event[ii] and tr.stats.station == badt_station[ii]: # find station in inventory
                if badt_compo[ii] == 'E' or badt_compo[ii] == 'N' or badt_compo[ii] == 'A':
                    do_write = 0
        if do_write == 1:
            st_synE_good += tr
    print('After rejecting labeled bad traces, synE has ' + str(len(st_synE_good)) + ' traces')

    st_synN_good = Stream()
    for tr in st_synN_select: # examine traces one by one
        do_write = 1
        for ii in range(len(badt_event)):
            if event_no == badt_event[ii] and tr.stats.station == badt_station[ii]: # find station in inventory
                if badt_compo[ii] == 'E' or badt_compo[ii] == 'N' or badt_compo[ii] == 'A':
                    do_write = 0
        if do_write == 1:
            st_synN_good += tr
    print('After rejecting labeled bad traces, synN has ' + str(len(st_synN_good)) + ' traces')

    st_synZ_good = Stream()
    for tr in st_synZ_select: # examine traces one by one
        do_write = 1
        for ii in range(len(badt_event)):
            if event_no == badt_event[ii] and tr.stats.station == badt_station[ii]: # find station in inventory
                if badt_compo[ii] == 'Z' or badt_compo[ii] == 'A':
                    do_write = 0
        if do_write == 1:
            st_synZ_good += tr
    print('After rejecting labeled bad traces, synZ has ' + str(len(st_synZ_good)) + ' traces')

#%%  rotate horizontals, so N becomes radial, E becomes transverse
    trN     = Trace()
    trE     = Trace()
    ntrace = len(st_synE_good)
    for iii in range(ntrace):  # loop over all good stations
#        print('In rotate loop, ntrace is ' + str(ntrace))
        for ii in range(len(st_name)):  # find matching entry in station roster
            if (st_synE_good[iii].stats.network == st_netw[ii] and st_synE_good[iii].stats.station == st_name[ii]): # find station in inventory
                    distance = gps2dist_azimuth( ev_lat, ev_lon, float(st_lat[ii]), float(st_lon[ii])) # station 14825
                    baz = distance[2]  # back-azimuth
        ba = np.radians(baz)
        trN = st_synN_good[iii].copy()
        trE = st_synE_good[iii].copy()
#        print(st_synE_good[iii].stats.station + ' ' + str(baz))
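        # Standard NE->RT rotation (same convention as obspy's rotate_ne_rt):
        #   R = -E*sin(baz) - N*cos(baz),  T = -E*cos(baz) + N*sin(baz)
        # The radial trace overwrites the "N" stream and the transverse trace
        # overwrites the "E" stream below.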
        st_synN_good[iii].data = - (trE.data * np.sin(ba)) - (trN.data * np.cos(ba))
        st_synE_good[iii].data = - (trE.data * np.cos(ba)) + (trN.data * np.sin(ba))

    pathRT = '/Users/vidale/Documents/PyCode/LAB/ricardo/Mseed_syn/rot/' + vmodel + '/v'
    st_synE_good.write(pathRT + 't_' + event_no + '_' + vmodel + '.mseed', format='MSEED')
    st_synN_good.write(pathRT + 'r_' + event_no + '_' + vmodel + '.mseed', format='MSEED')
    st_synZ_good.write(pathRT + 'z_' + event_no + '_' + vmodel + '.mseed', format='MSEED')

    #%%  detrend, taper, filter
    if taper:
        st_dat_good.detrend( type='simple')
        st_synE_good.detrend(type='simple')
        st_synN_good.detrend(type='simple')
        st_synZ_good.detrend(type='simple')
    if filt:
        st_dat_good.filter( 'bandpass', freqmin=freq_min, freqmax=freq_max, corners=4, zerophase=True)
        st_synE_good.filter('bandpass', freqmin=freq_min, freqmax=freq_max, corners=4, zerophase=True)
        st_synN_good.filter('bandpass', freqmin=freq_min, freqmax=freq_max, corners=4, zerophase=True)
        st_synZ_good.filter('bandpass', freqmin=freq_min, freqmax=freq_max, corners=4, zerophase=True)
    if taper:
        st_dat_good.detrend( type='simple')
        st_synE_good.detrend(type='simple')
        st_synN_good.detrend(type='simple')
        st_synZ_good.detrend(type='simple')

    #%%
    # plot traces
    if vmodel == 'cvms426-223':
        fig_index = 10
    elif vmodel == 'cvmhy':
        fig_index = 11
    elif vmodel == 'cvms400-100':
        fig_index = 12
    elif vmodel == 'cvmhn':
        fig_index = 13
    plt.close(fig_index)
    plt.figure(fig_index,figsize=(10,8))
    plt.xlim(-start_buff,end_buff)
    plt.ylim(min_dist,max_dist)

    # find max
    maxE = 0
    for tr in st_synE_good:
        tr_max = max(abs(tr.data))*tr.stats.distance
        if tr_max > maxE:
            maxE = tr_max
    maxN = 0
    for tr in st_synN_good:
        tr_max = max(abs(tr.data))*tr.stats.distance
        if tr_max > maxN:
            maxN = tr_max
    maxZ = 0
    for tr in st_synZ_good:
        tr_max = max(abs(tr.data))*tr.stats.distance
        if tr_max > maxZ:
            maxZ = tr_max
    print('Max E, N, and Z synthetic are ' + str(maxE) + '  ' + str(maxN) + '  ' + str(maxZ))

    max_all = max(maxZ, maxN, maxE)
    plot_fac = plot_scale_fac * (max_dist - min_dist) / max_all

    if show_data:
        for tr in st_dat_good:
            dist_offset = tr.stats.distance # km
            ttt = np.arange(len(tr.data)) * tr.stats.delta + (tr.stats.starttime - t1)
    #        if red_plot == 1:
    #            shift = red_time + (dist_offset - red_dist) * red_slow
    #            ttt = ttt - shift
    #        plt.plot(ttt, (tr.data - np.median(tr.data))*plot_scale_fac /(tr.data.max()
    #            - tr.data.min()) + dist_offset, color = 'green')
            plt.plot(ttt, (tr.data - np.median(tr.data))*plot_scale_fac /(tr.data.max()
                - tr.data.min()) + dist_offset, color = 'black')
            print(str(tr.stats.distance) + ' distance ' + tr.stats.station + ' station')    #plt.title(fname1)

    #print labels whether or not data is shown
    for tr in st_dat_good:
        dist_offset = tr.stats.distance # km
        plt.text(s = tr.stats.network + ' ' + tr.stats.station ,x = end_buff*0.95,y = dist_offset
                + max_dist*0.015, color = 'black')  #label traces
#
    for tr in st_synE_good:
        dist_offset = tr.stats.distance
        ttt = np.arange(len(tr.data)) * tr.stats.delta + (tr.stats.starttime - t1)#    These lines used to cause a crash in Spyder
        plt.plot(ttt, (tr.data * plot_fac * tr.stats.distance) + dist_offset, color = 'black')

    for tr in st_dat_good:
        dist_offset = tr.stats.distance # km
        plt.text(s = tr.stats.network + ' ' + tr.stats.station ,x = end_buff*0.95,y = dist_offset
                + max_dist*0.015, color = 'black')  #label traces

    for tr in st_synN_good:
        dist_offset = tr.stats.distance
        ttt = np.arange(len(tr.data)) * tr.stats.delta + (tr.stats.starttime - t1)#    These lines used to cause a crash in Spyder
        plt.plot(ttt, (tr.data * plot_fac * tr.stats.distance) + dist_offset, color = 'green')

    for tr in st_synZ_good:
        dist_offset = tr.stats.distance
        ttt = np.arange(len(tr.data)) * tr.stats.delta + (tr.stats.starttime - t1)#    These lines used to cause a crash in Spyder
        plt.plot(ttt, (tr.data * plot_fac * tr.stats.distance) + dist_offset, color = 'red')

    plt.xlabel('Time (s)')
    plt.ylabel('Epicentral distance from event (km)')
#    plt.title(fname1[8:18] + ' vs ' + fname2[8:18])
    plt.title(date_label + ' ' + event_no + ' black E green N red Z of vmodel ' + vmodel)
    plt.show()

    #  Save processed files
#    fname1 = 'Pro_Files/HD' + date_label1 + 'sel.mseed'
#    fname2 = 'Pro_Files/HD' + date_label2 + 'sel.mseed'
#    st1good.write(fname1,format = 'MSEED')
#    st2good.write(fname2,format = 'MSEED')

    elapsed_time_wc = time.time() - start_time_wc
    print('This job took ' + str(elapsed_time_wc) + ' seconds')
    os.system('say "Done"')
Example #43
0
def _subspace_process(streams,
                      lowcut,
                      highcut,
                      filt_order,
                      sampling_rate,
                      multiplex,
                      align,
                      shift_len,
                      reject,
                      no_missed=True,
                      stachans=None,
                      parallel=False,
                      plot=False,
                      cores=1):
    """
    Process stream data, internal function.

    :type streams: list
    :param streams: List of obspy.core.stream.Stream to be used to \
        generate the subspace detector.  These should be pre-clustered \
        and aligned.
    :type lowcut: float
    :param lowcut: Lowcut in Hz, can be None to not apply filter
    :type highcut: float
    :param highcut: Highcut in Hz, can be None to not apply filter
    :type filt_order: int
    :param filt_order: Number of corners for filter.
    :type sampling_rate: float
    :param sampling_rate: Desired sampling rate in Hz
    :type multiplex: bool
    :param multiplex: Whether to multiplex the data or not.  Data are \
        multiplexed according to the method of Harris, see the multi \
        function for details.
    :type stachans: list of tuple
    :param stachans: list of tuples of (station, channel) to use.
    :type align: bool
    :param align: Whether to align the data or not - needs to be done \
        at some point
    :type shift_len: float
    :param shift_len: Maximum shift allowed for alignment in seconds.
    :type reject: float
    :param reject: Minimum correlation for traces, only used if align=True.
    :type no_missed: bool
    :param no_missed: Reject streams with missing traces, defaults to True. \
        A missing trace from lots of events will reduce the quality of the \
        subspace detector if multiplexed.  Only used when multiplex is True.
    :type plot: bool
    :param plot: Passed down to align traces - used to check alignment process.

    :return: Processed streams
    :rtype: list
    :return: Station, channel pairs in order
    :rtype: list of tuple
    :return: List of delays
    :rtype: list
    """
    from multiprocessing import Pool, cpu_count
    processed_streams = []
    if not stachans:
        input_stachans = list(
            set([(tr.stats.station, tr.stats.channel) for st in streams
                 for tr in st.sort()]))
    else:
        input_stachans = stachans
    input_stachans.sort()  # Make sure stations and channels are in order
    # Check that all channels are the same length in seconds
    first_length = len(streams[0][0].data) /\
        streams[0][0].stats.sampling_rate
    for st in streams:
        for tr in st:
            if not len(tr) / tr.stats.sampling_rate == first_length:
                raise IOError(
                    'All channels of all streams must be the same length')
    for st in streams:
        if not parallel:
            processed_stream = Stream()
            for stachan in input_stachans:
                dummy, tr = _internal_process(st=st,
                                              lowcut=lowcut,
                                              highcut=highcut,
                                              filt_order=filt_order,
                                              sampling_rate=sampling_rate,
                                              first_length=first_length,
                                              stachan=stachan)
                processed_stream += tr
            processed_streams.append(processed_stream)
        else:
            pool = Pool(processes=min(cores, cpu_count()))
            results = [
                pool.apply_async(
                    _internal_process, (st, ), {
                        'lowcut': lowcut,
                        'highcut': highcut,
                        'filt_order': filt_order,
                        'sampling_rate': sampling_rate,
                        'first_length': first_length,
                        'stachan': stachan,
                        'i': i
                    }) for i, stachan in enumerate(input_stachans)
            ]
            pool.close()
            try:
                processed_stream = [p.get() for p in results]
            except KeyboardInterrupt as e:  # pragma: no cover
                pool.terminate()
                raise e
            pool.join()
            processed_stream.sort(key=lambda tup: tup[0])
            processed_stream = Stream([p[1] for p in processed_stream])
            processed_streams.append(processed_stream)
        if no_missed and multiplex:
            for tr in processed_stream:
                if np.count_nonzero(tr.data) == 0:
                    processed_streams.remove(processed_stream)
                    Logger.info('Removed stream with empty trace')
                    break
    if align:
        processed_streams = align_design(design_set=processed_streams,
                                         shift_len=shift_len,
                                         reject=reject,
                                         multiplex=multiplex,
                                         plot=plot,
                                         no_missed=no_missed)
    output_streams = []
    for processed_stream in processed_streams:
        if len(processed_stream) == 0:
            # If we have removed all of the traces then onwards!
            continue
        # Need to order the stream according to input_stachans
        _st = Stream()
        for stachan in input_stachans:
            tr = processed_stream.select(station=stachan[0],
                                         channel=stachan[1])
            if len(tr) >= 1:
                _st += tr[0]
            elif multiplex and len(tr) == 0:
                raise IndexError('Missing data for %s.%s' %
                                 (stachan[0], stachan[1]))
        if multiplex:
            st = multi(stream=_st)
            st = Stream(Trace(st))
            st[0].stats.station = 'Multi'
            st[0].stats.sampling_rate = sampling_rate
        else:
            st = _st
        for tr in st:
            # Normalize the data
            norm = np.linalg.norm(tr.data)
            if not norm == 0:
                tr.data /= norm
        output_streams.append(st)
    return output_streams, input_stachans
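
# Hypothetical usage sketch (editor's addition, not from the source): assumes
# the module defining _subspace_process and its helpers (_internal_process,
# multi, align_design) is importable. The stream list and processing
# parameters below are placeholders only.
from obspy import read

design_streams = [read() for _ in range(3)]  # stand-ins for pre-clustered, aligned Streams
processed, stachans = _subspace_process(
    design_streams, lowcut=2.0, highcut=9.0, filt_order=4,
    sampling_rate=20.0, multiplex=False, align=False, shift_len=0.2,
    reject=0.6)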
Example #44
0
def read_traces(tracefile, **kwargs):
    """
    Reads the traces produced by Raysum and stores them into a list
    of Stream objects

    Args:
        tracefile (str):
            Name of file containing traces
        dt (float):
            Sample distance in seconds
        geom (array):
            Array of [baz, slow] values
        rot (int):
            ID for rotation: 0 is NEZ, 1 is RTZ, 2 is PVH
        shift (float):
            Time shift in seconds

    Returns:
        (list): streamlist: List of Stream objects

    """

    # Unpack the arguments
    args = AttribDict({**kwargs})

    kwlist = ['tracefile', 'dt', 'geom', 'rot', 'shift']

    for k in args:
        if k not in kwlist:
            raise Exception('Incorrect kwarg: {}'.format(k))

    def _make_stats(net=None,
                    sta=None,
                    stime=None,
                    dt=None,
                    slow=None,
                    baz=None,
                    wvtype=None,
                    channel=None,
                    taxis=None):
        """
        Builds the ``stats`` dictionary for an obspy ``Trace`` object.

        Args:
            net (str): Network name
            sta (str): Station name
            stime (:class:`~obspy.core.UTCDateTime`): Start time of trace
            dt (float): Sampling distance in seconds
            slow (float): Slowness value (s/km)
            baz (float): Back-azimuth value (degrees)
            wvtype (str): Wave type ('P', 'SV', or 'SH')
            channel (str): Channel name
            taxis (:class:`~numpy.ndarray`): Time axis in seconds

        Returns:
            (:class:`~obspy.core.util.attribdict.AttribDict`):
                stats: Header dictionary for the trace
        """

        stats = AttribDict()
        stats.baz = baz
        stats.slow = slow
        stats.station = sta
        stats.network = net
        stats.starttime = stime
        stats.delta = dt
        stats.channel = channel
        stats.wvtype = wvtype
        stats.taxis = taxis

        return stats

    # Read traces from file
    try:
        df = pd.read_csv(tracefile)
    except Exception:
        raise Exception("Can't read " + str(tracefile))

    # Component names
    if args.rot == 0:
        component = ['N', 'E', 'Z']
    elif args.rot == 1:
        component = ['R', 'T', 'Z']
    elif args.rot == 2:
        component = ['P', 'V', 'H']
    else:
        raise (Exception('invalid "rot" value: not in 0, 1, 2'))

    # Number of "event" traces produced
    ntr = np.max(df.itr)

    # Time axis
    npts = len(df[df.itr == 0].trace1.values)
    taxis = np.arange(npts) * args.dt - args.shift

    streams = []

    for itr in range(ntr):

        # Split by trace ID
        ddf = df[df.itr == itr]

        # Store into trace by channel with stats information
        # Channel 1

        stats = _make_stats(net='',
                            sta='synt',
                            stime=UTCDateTime(),
                            dt=args.dt,
                            slow=args.geom[itr][1],
                            baz=args.geom[itr][0],
                            channel='BH' + component[0],
                            taxis=taxis)
        tr1 = Trace(data=ddf.trace1.values, header=stats)

        # Channel 2
        stats = _make_stats(net='',
                            sta='synt',
                            stime=UTCDateTime(),
                            dt=args.dt,
                            slow=args.geom[itr][1],
                            baz=args.geom[itr][0],
                            channel='BH' + component[1],
                            taxis=taxis)
        tr2 = Trace(data=ddf.trace2.values, header=stats)

        # Channel 3
        stats = _make_stats(net='',
                            sta='synt',
                            stime=UTCDateTime(),
                            dt=args.dt,
                            slow=args.geom[itr][1],
                            baz=args.geom[itr][0],
                            channel='BH' + component[2],
                            taxis=taxis)
        tr3 = Trace(data=ddf.trace3.values, header=stats)

        # Store into Stream object and append to list
        stream = Stream(traces=[tr1, tr2, tr3])
        streams.append(stream)

    return streams
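
# Hypothetical usage sketch (editor's addition, not from the source): the file
# name and the dt/geom/rot/shift values are placeholders that follow the
# keyword list checked above.
geom = [[30.0, 0.06], [60.0, 0.06]]  # [baz (deg), slowness (s/km)] per trace set
streams = read_traces('sample.tr', dt=0.025, geom=geom, rot=1, shift=5.0)
print(len(streams), 'three-component streams read')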
Example #45
0
def party_relative_mags(party, self_files, shift_len, align_len, svd_len,
                        reject, sac_dir, min_amps, calibrate=False,
                        method='PCA'):
    """
    Calculate the relative moments for detections in a Family using
    mag_calc.svd_moments()

    :param party: Party of detections
    :param self_files: List of csv files listing the self-detection names
    :param shift_len: Maximum shift length used in waveform alignment
    :param align_len: Length of waveform used for correlation in alignment
    :param svd_len: Length of waveform used in relative amplitude calc
    :param reject: Min cc threshold for accepted measurement
    :param sac_dir: Root directory of waveforms
    :param min_amps: Minimum number of relative measurements per pair
    :param calibrate: Flag for calibration to a priori Ml's
    :param method: 'PCA' or 'LSQR'
    :return: Party with updated magnitudes and a list of cc coherence values
    """

    # First read-in self detection names
    selfs = []
    for self_file in self_files:
        with open(self_file, 'r') as f:
            rdr = csv.reader(f)
            for row in rdr:
                selfs.append(str(row[0]))
    for fam in party.families:
        print('Starting work on family %s' % fam.template.name)
        if len(fam) == 1:
            print('Only self-detection. Moving on.')
            continue
        temp = fam.template
        prepick = temp.prepick
        events = [det.event for det in fam.detections]
        # Here we'll read in the waveforms and trim from stefan's directory
        # of SAC files so as not to duplicate data
        ev_dirs = ['%s%s' % (sac_dir, str(ev.resource_id).split('/')[-1])
                   for ev in events]
        streams = []
        if len([i for i, ev_dir in enumerate(ev_dirs)
                    if ev_dir.split('/')[-1] in selfs]) == 0:
            print('Family %s has no self detection. Investigate'
                  % fam.template.name)
            continue
        self_ind = [i for i, ev_dir in enumerate(ev_dirs)
                    if ev_dir.split('/')[-1] in selfs][0]
        # Read in Z components of events which we wrote for stefan
        # Many of these ev_dirs will not exist!
        for i, ev_dir in enumerate(ev_dirs):
            raw_st = Stream()
            print('Reading %s' % ev_dir)
            for wav_file in glob('%s/*Z.sac' % ev_dir):
                print('...file %s' % wav_file)
                raw_tr = read(wav_file)[0]
                start = raw_tr.stats.starttime + raw_tr.stats.sac['a'] - 3.
                end = start + 10
                raw_tr.trim(starttime=start, endtime=end)
                raw_st.traces.append(raw_tr)
            streams.append(raw_st)
        print('Moved self detection to top of list')
        # Move the self detection to the first element
        streams.insert(0, streams.pop(self_ind))
        print('Template Stream: %s' % str(streams[0]))
        if len(streams[0]) == 0:
            print('Template %s waveforms did not get written to SAC.' %
                  temp.name)
            continue
        # Front/back clip hardcoded relative to wavs starting 3 s before pick
        front_clip = 3.0 - shift_len - 0.05 - prepick
        back_clip = front_clip + align_len + (2 * shift_len) + 0.05
        wrk_streams = [] # For aligning
        # Process streams then copy to both ccc_streams and svd_streams
        bad_streams = []
        for i, st in enumerate(list(streams)):
            try:
                shortproc(st=streams[i], lowcut=temp.lowcut,
                          highcut=temp.highcut, filt_order=temp.filt_order,
                          samp_rate=temp.samp_rate)
                wrk_streams.append(st.copy())
            except ValueError as e:
                print('ValueError reads:')
                print(str(e))
                print('Attempting to remove bad trace at {}'.format(
                    str(e).split(' ')[-1]))
                bad_tr = str(e).split(' ')[-1][:-1] # Eliminate trailing "'"
                print('Sta and chan names: {}'.format(bad_tr.split('.')))
                try:
                    tr = streams[i].select(station=bad_tr.split('.')[0],
                                           channel=bad_tr.split('.')[1])[0]
                    streams[i].traces.remove(tr)
                    shortproc(st=streams[i], lowcut=temp.lowcut,
                              highcut=temp.highcut,
                              filt_order=temp.filt_order,
                              samp_rate=temp.samp_rate)
                    wrk_streams.append(st.copy())
                except IndexError as e:
                    print(str(e))
                    print('Funkyness. Removing entire stream')
                    bad_streams.append(st)
        if len(bad_streams) > 0:
            for bst in bad_streams:
                streams.remove(bst)
        svd_streams = copy.deepcopy(streams) # For svd
        ccc_streams = copy.deepcopy(streams)
        # work out cccoh for each event with template
        cccohs = cc_coh_dets(streams=ccc_streams, shift=shift_len,
                             length=svd_len, wav_prepick=3.,
                             corr_prepick=0.05)
        for st in wrk_streams:
            for tr in st:
                tr.trim(starttime=tr.stats.starttime + front_clip,
                        endtime=tr.stats.starttime + back_clip)
        st_chans = list(set([(tr.stats.station, tr.stats.channel)
                             for st in wrk_streams for tr in st]))
        st_chans.sort()
        # Align streams with just P arrivals, then use longer st for svd
        print('Now aligning svd_streams')
        shift_inds = int(shift_len * fam.template.samp_rate)
        for st_chan in st_chans:
            trs = []
            for i, st in enumerate(wrk_streams):
                if len(st.select(station=st_chan[0], channel=st_chan[-1])) > 0:
                    trs.append((i, st.select(station=st_chan[0],
                                             channel=st_chan[-1])[0]))
            inds, traces = zip(*trs)
            shifts, ccs = stacking.align_traces(trace_list=list(traces),
                                                shift_len=shift_inds,
                                                positive=True,
                                                master=traces[0].copy())
            # We now have shifts based on P correlation, shift and trim
            # larger wavs for svd
            for j, shift in enumerate(shifts):
                st = svd_streams[inds[j]]
                if ccs[j] < reject:
                    svd_streams[inds[j]].remove(st.select(
                        station=st_chan[0], channel=st_chan[-1])[0])
                    print('Removing trace due to low cc value: %s' % ccs[j])
                    continue
                strt_tr = st.select(
                    station=st_chan[0], channel=st_chan[-1])[0].stats.starttime
                strt_tr += (3.0 - prepick - shift)
                st.select(station=st_chan[0],
                          channel=st_chan[-1])[0].trim(strt_tr, strt_tr + svd_len)
        if method == 'LSQR':
            print('Using least-squares method')
            event_list = []
            for stachan in st_chans:
                st_list = []
                for i, st in enumerate(svd_streams):
                    if len(st.select(station=stachan[0],
                                     channel=stachan[-1])) > 0:
                        st_list.append(i)
                event_list.append(st_list)
            # event_list = np.asarray(event_list).tolist()
            u, sigma, v, sta_chans = svd(stream_list=svd_streams, full=True)
            try:
                M, events_out = svd_moments(u, sigma, v, sta_chans, event_list)
            except IOError as e:
                print('Family %s raised error %s' % (fam.template.name, e))
                continue
        elif method == 'PCA':
            print('Using principal component method')
            # Now loop over all detections and do svd for each matching
            # chan with temp
            events_out = []
            template = svd_streams[0]
            M = []
            for i, st in enumerate(svd_streams):
                if len(st) == 0:
                    print('Event not located, skipping')
                    continue
                ev_r_amps = []
                # For each pair of template:detection (including temp:temp)
                for tr in template:
                    if len(st.select(station=tr.stats.station,
                                     channel=tr.stats.channel)) > 0:
                        det_tr = st.select(station=tr.stats.station,
                                           channel=tr.stats.channel)[0]
                        # Convoluted way of getting two 'vert' vectors
                        data_mat = np.vstack((tr.data, det_tr.data)).T
                        U, sig, Vt = scipy.linalg.svd(data_mat,
                                                      full_matrices=True)
                        # Vt is 2x2 for two events
                        # Per Shelly et al., 2016 eq. 4
                        ev_r_amps.append(Vt[0][1] / Vt[0][0])
                if len(ev_r_amps) < min_amps:
                    print('Fewer than %d amplitude picks, skipping.' % min_amps)
                    continue
                M.append(np.median(ev_r_amps))
                events_out.append(i)
        # If we have a Mag for template, calibrate moments
        if calibrate and len(fam.template.event.magnitudes) > 0:
            # Convert the template magnitude to seismic moment
            temp_mag = fam.template.event.magnitudes[-1].mag
            temp_mo = local_to_moment(temp_mag)
            # Extrapolate from the template moment vs. relative moment relationship
            # to get the moment for relative moment = 1.0
            norm_mo = temp_mo / M[0]
            # Template is the last event in the list
            # Now these are weights which we can multiple the moments by
            moments = np.multiply(M, norm_mo)
            # Now convert to Mw
            Mw = [2.0 / 3.0 * (np.log10(m) - 9.0) for m in moments]
            Mw2, evs2 = remove_outliers(Mw, events_out)
            # Convert to local
            Ml = [0.88 * m + 0.73 for m in Mw2]
            #Normalize moments to template mag
            # Add calibrated mags to detection events
            for i, eind in enumerate(evs2):
                fam.detections[eind-1].event.magnitudes = [
                    Magnitude(mag=Mw2[i], magnitude_type='Mw')]
                fam.detections[eind-1].event.comments.append(
                    Comment(text=str(cccohs[eind-1])))
                fam.detections[eind-1].event.magnitudes.append(
                    Magnitude(mag=Ml[i], magnitude_type='ML'))
            fam.catalog = Catalog(events=[det.event for det in fam.detections])
    return party, cccohs
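
# Hypothetical usage sketch (editor's addition, not from the source): `party`
# stands in for an existing Party of detections; the file names, directory and
# window lengths are placeholders.
party, coherences = party_relative_mags(
    party, self_files=['self_detections.csv'], shift_len=0.25, align_len=0.5,
    svd_len=2.0, reject=0.7, sac_dir='/data/sac_events/', min_amps=4,
    calibrate=True, method='PCA')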
Example #46
0
def rotate_stream(st,
                  event_latitude,
                  event_longitude,
                  inventory=None,
                  mode="ALL->RT"):
    """
    Rotate a stream to radial and transverse components based on the
    station information and event information

    :param st: input stream
    :type st: obspy.Stream
    :param event_latitude: event latitude
    :type event_latitude: float
    :param event_longitude: event longitude
    :type event_longitude: float
    :param inventory: station inventory information. If you want to rotate
        "12" components, you need to provide the inventory, since the station
        latitude and longitude alone are not enough.
    :type inventory: obspy.Inventory
    :param mode: rotation mode, could be one of:
        1) "NE->RT": rotate only North and East channel to RT
        2) "12->RT": rotate only 1 and 2 channel, like "BH1" and "BH2" to RT
        3) "ALL->RT": rotate all components to RT
        4) "RT->NE": rotate RT to NE
    :return: rotated stream(obspy.Stream)
    """

    rotated_stream = Stream()

    mode = mode.upper()
    mode_options = ["NE->RT", "ALL->RT", "12->RT", "RT->NE"]
    if mode not in mode_options:
        raise ValueError("rotate_stream mode(%s) should be within %s" %
                         (mode, mode_options))

    if mode in ["12->RT", "ALL->RT"] and inventory is None:
        raise ValueError("Mode %s required inventory(stationxml) "
                         "information provided(to rotate '12')" % mode)

    sorted_st_dict = sort_stream_by_station(st)

    if len(sorted_st_dict) == 1:
        # if there is only one station
        rotate_one_station_stream(st,
                                  event_latitude,
                                  event_longitude,
                                  inventory=inventory,
                                  mode=mode)
    else:
        # if there are multiple stations
        for sta_stream in sorted_st_dict.values():
            nw = sta_stream[0].stats.network
            station = sta_stream[0].stats.station
            loc = sta_stream[0].stats.location

            if loc == "S3":
                # SPECFEM TUNE
                station_inv = inventory.select(network=nw, station=station)
            else:
                station_inv = inventory.select(network=nw,
                                               station=station,
                                               location=loc)

            rotate_one_station_stream(sta_stream,
                                      event_latitude,
                                      event_longitude,
                                      inventory=station_inv,
                                      mode=mode)
            rotated_stream += sta_stream
        st = rotated_stream

    return st
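
# Hypothetical usage sketch (editor's addition, not from the source): assumes
# the module defining rotate_stream and its helper functions is importable.
# The example stream, inventory and event coordinates are placeholders.
from obspy import read, read_inventory

st = read()                 # placeholder waveforms
inv = read_inventory()      # placeholder StationXML inventory
st_rt = rotate_stream(st, event_latitude=47.8, event_longitude=12.9,
                      inventory=inv, mode="ALL->RT")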
Example #47
0
def run_tutorial(plot=False, process_len=3600, num_cores=cpu_count()):
    """Main function to run the tutorial dataset."""
    # First we want to load our templates
    template_names = glob.glob('tutorial_template_*.ms')

    if len(template_names) == 0:
        raise IOError('Template files not found, have you run the template ' +
                      'creation tutorial?')

    templates = [read(template_name) for template_name in template_names]

    # Work out what stations we have and get the data for them
    stations = []
    for template in templates:
        for tr in template:
            stations.append((tr.stats.station, tr.stats.channel))
    # Get a unique list of stations
    stations = list(set(stations))

    # We will loop through the data in chunks; these chunks can be any size.
    # In general we have used 1 day as our standard, but this can be as short
    # as five minutes (for MAD thresholds) or shorter for other threshold
    # metrics. However, the chunk size should be the same as your template
    # process_len.

    # You should test different parameters!!!
    start_time = UTCDateTime(2016, 1, 4)
    end_time = UTCDateTime(2016, 1, 5)
    chunks = []
    chunk_start = start_time
    while chunk_start < end_time:
        chunk_end = chunk_start + process_len
        if chunk_end > end_time:
            chunk_end = end_time
        chunks.append((chunk_start, chunk_end))
        chunk_start += process_len

    unique_detections = []

    # Set up a client to access the GeoNet database
    client = Client("GEONET")

    # Note that these chunks do not rely on each other, and could be run in
    # parallel on multiple nodes of a distributed cluster; see the SLURM
    # tutorial for an example of this.
    for t1, t2 in chunks:
        # Generate the bulk information to query the GeoNet database
        bulk_info = []
        for station in stations:
            bulk_info.append(('NZ', station[0], '*',
                              station[1][0] + 'H' + station[1][-1], t1, t2))

        # Note this will take a little while.
        print('Downloading seismic data, this may take a while')
        st = client.get_waveforms_bulk(bulk_info)
        # Merge the stream, it will be downloaded in chunks
        st.merge(fill_value='interpolate')

        # Pre-process the data to set frequency band and sampling rate
        # Note that this is, and MUST BE the same as the parameters used for
        # the template creation.
        print('Processing the seismic data')
        st = pre_processing.shortproc(
            st, lowcut=2.0, highcut=9.0, filt_order=4, samp_rate=20.0,
            debug=0, num_cores=num_cores, starttime=t1, endtime=t2)
        # Convert from list to stream
        st = Stream(st)

        # Now we can conduct the matched-filter detection
        detections = match_filter.match_filter(
            template_names=template_names, template_list=templates,
            st=st, threshold=8.0, threshold_type='MAD', trig_int=6.0,
            plotvar=plot, plotdir='.', cores=num_cores, debug=0,
            plot_format='png')

        # Now let's try to work out how many unique events we have, just to
        # compare with the GeoNet catalog of 20 events on this day in this
        # sequence
        for master in detections:
            keep = True
            for slave in detections:
                if not master == slave and abs(master.detect_time -
                                               slave.detect_time) <= 1.0:
                    # If the events are within 1s of each other then test which
                    # was the 'best' match, strongest detection
                    if not master.detect_val > slave.detect_val:
                        keep = False
                        print('Removed detection at %s with cccsum %s'
                              % (master.detect_time, master.detect_val))
                        print('Keeping detection at %s with cccsum %s'
                              % (slave.detect_time, slave.detect_val))
                        break
            if keep:
                unique_detections.append(master)
                print('Detection at :' + str(master.detect_time) +
                      ' for template ' + master.template_name +
                      ' with a cross-correlation sum of: ' +
                      str(master.detect_val))
                # We can plot these too
                if plot:
                    stplot = st.copy()
                    template = templates[template_names.index(
                        master.template_name)]
                    lags = sorted([tr.stats.starttime for tr in template])
                    maxlag = lags[-1] - lags[0]
                    stplot.trim(starttime=master.detect_time - 10,
                                endtime=master.detect_time + maxlag + 10)
                    plotting.detection_multiplot(
                        stplot, template, [master.detect_time.datetime])
    print('We made a total of ' + str(len(unique_detections)) + ' detections')
    return unique_detections
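
# Hypothetical usage sketch (editor's addition, not from the source): runs the
# tutorial over the hard-coded day of data in hour-long chunks.
if __name__ == '__main__':
    detections = run_tutorial(plot=False, process_len=3600)
    print('Got %d unique detections' % len(detections))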
Example #48
0
def sum_adj_on_component(adj_stream, meta_info, weight_flag=False,
                         weight_dict=None):
    """
    Sum adjoint source on different channels but same component
    together, like "II.AAK.00.BHZ" and "II.AAK.10.BHZ" to form
    "II.AAK.MXZ". Also, misfit values will be added accordingly.
    Please remember that after summing, the channel will be renamed to
    "MX" by default if weight_flag is False. If weight_flag is True,
    then the channel name depends on the dict key.

    :param adj_stream: adjoint source stream
    :param meta_info: dict of misfit information, keyed by trace id
    :param weight_flag: whether to apply the weights in weight_dict
    :param weight_dict: weight dictionary, should be something like
        {"MXZ":{"II.AAK.00.BHZ": 0.5, "II.AAK.10.BHZ": 0.5},
         "MXR":{"II.AAK.00.BHR": 0.3, "II.AAK.10.BHR": 0.7},
         "MXT":{"II.AAK..BHT": 1.0}}
    :return: summed adjoint source stream
    """
    if weight_flag and weight_dict is None:
        raise ValueError("weight_dict should be assigned if you want")

    new_stream = Stream()
    new_meta = {}
    done_comps = []

    if not weight_flag:
        # just add same components together without weight
        for tr in adj_stream:
            comp = tr.stats.channel[-1]
            if comp not in done_comps:
                done_comps.append(comp)
                comp_tr = tr.copy()
                comp_tr.stats.location = ""
                comp_tr.stats.channel = "MX" + comp
                new_stream.append(comp_tr)
                new_meta[comp_tr.id] = meta_info[tr.id].copy()
            else:
                comp_tr = new_stream.select(channel="*%s" % comp)[0]
                comp_tr.data += tr.data
                new_meta[comp_tr.id]["misfit"] += meta_info[tr.id]["misfit"]
    else:
        # sum using components weight
        for comp, comp_weights in weight_dict.items():
            for chan_id, chan_weight in comp_weights.items():
                if comp not in done_comps:
                    done_comps.append(comp)
                    adj_tr = adj_stream.select(id=chan_id)[0]
                    comp_tr = adj_tr.copy()
                    comp_tr.data *= chan_weight
                    comp_tr.stats.location = ""
                    comp_tr.stats.channel = comp
                    new_stream.append(comp_tr)
                    new_meta[comp_tr.id] = meta_info[adj_tr.id].copy()
                    new_meta[comp_tr.id]["misfit"] = \
                        chan_weight * meta_info[adj_tr.id]["misfit"]
                else:
                    adj_tr = adj_stream.select(id=chan_id)[0]
                    comp_tr = new_stream.select(channel="*%s" % comp)[0]
                    comp_tr.data += chan_weight * adj_tr.data
                    new_meta[comp_tr.id]["misfit"] += \
                        chan_weight * meta_info[adj_tr.id]["misfit"]

    return new_stream, new_meta
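
# Hypothetical usage sketch (editor's addition, not from the source):
# `adj_stream` stands in for an adjoint-source Stream whose trace ids match
# the keys below; the weights follow the weight_dict format shown in the
# docstring above, and the misfit values are placeholders.
weights = {"MXZ": {"II.AAK.00.BHZ": 0.5, "II.AAK.10.BHZ": 0.5},
           "MXR": {"II.AAK.00.BHR": 0.3, "II.AAK.10.BHR": 0.7},
           "MXT": {"II.AAK..BHT": 1.0}}
meta = {chan_id: {"misfit": 1.0}
        for comp in weights.values() for chan_id in comp}
summed_st, summed_meta = sum_adj_on_component(adj_stream, meta,
                                              weight_flag=True,
                                              weight_dict=weights)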
Example #49
0
# Imports needed by this excerpt; the variables t, data, station, t1,
# time_epi, tprior, tcut and fout are assumed to be defined earlier in
# the original script.
from numpy import arange, mean
from scipy.interpolate import interp1d
from obspy import Stream, Trace
from obspy.signal.filter import highpass
import matplotlib.pyplot as plt

#i=where(abs(data)>spike_threshold)[0]
#data[i]=data[i-1]

#Interpolate to regular interval
t_in=arange(t[0],t[-1],60)
f=interp1d(t,data)
data_in=f(t_in)
#Processing
data_in=data_in-mean(data_in)
dt=t_in[2]-t_in[1]
#filter
tcorner=3*3600
data_in=highpass(data_in,1./tcorner,1./dt,2)

#Put in sac file
st=Stream(Trace())
st[0].stats.station=station
st[0].stats.delta=dt
st[0].stats.starttime=t1
st[0].data=data_in
st[0].trim(starttime=time_epi-tprior,endtime=time_epi+tcut)
st[0].data=st[0].data-mean(st[0].data[0:20])
st.write(fout,format='SAC')

#Make plot and save
#Plot processed
#plt.close("all")
plt.figure(figsize=(10,3))
plt.plot(st[0].times()/60,st[0].data)
plt.grid()
plt.ylim([-0.3,0.3])
Example #50
0
def stochastic_simulation(home,
                          project_name,
                          rupture_name,
                          GF_list,
                          time_epi,
                          model_name,
                          rise_time_depths,
                          moho_depth_in_km,
                          total_duration=100,
                          hf_dt=0.01,
                          stress_parameter=50,
                          kappa=0.04,
                          Qexp=0.6,
                          component='N',
                          Pwave=False,
                          high_stress_depth=1e4):
    '''
    Run stochastic HF sims
    
    stress parameter is in bars
    '''

    from numpy import genfromtxt, pi, logspace, log10, mean, where, exp, arange, zeros, argmin, rad2deg, arctan2, real
    from pyproj import Geod
    from obspy.geodetics import kilometer2degrees
    from obspy.taup import TauPyModel
    from mudpy.forward import get_mu
    from obspy import Stream, Trace
    from sys import stdout
    import warnings

    print('stress is ' + str(stress_parameter))

    #I don't condone it but this cleans up the warnings
    warnings.filterwarnings("ignore")

    #initalize  output object
    st = Stream()

    #Load the source
    fault = genfromtxt(home + project_name + '/output/ruptures/' +
                       rupture_name)

    #Onset times for each subfault
    onset_times = fault[:, 12]

    #Load stations
    sta = genfromtxt(home + project_name + '/data/station_info/' + GF_list,
                     usecols=[0],
                     dtype='S')
    lonlat = genfromtxt(home + project_name + '/data/station_info/' + GF_list,
                        usecols=[1, 2])

    #load velocity structure
    structure = genfromtxt(home + project_name + '/structure/' + model_name)

    #Frequencies vector
    f = logspace(log10(hf_dt), log10(1 / (2 * hf_dt)) + 0.01, 50)
    omega = 2 * pi * f

    #Output time vector (0 is origin time)
    t = arange(0, total_duration, hf_dt)

    #Projection object for distance calculations
    g = Geod(ellps='WGS84')

    #Create taup velocity model object, paste on top of iaspei91
    #taup_create.build_taup_model(home+project_name+'/structure/bbp_norcal.tvel',output_folder=home+project_name+'/structure/')
    velmod = TauPyModel(model=home + project_name + '/structure/maule',
                        verbose=True)

    #Moments
    slip = (fault[:, 8]**2 + fault[:, 9]**2)**0.5
    subfault_M0 = slip * fault[:, 10] * fault[:, 11] * fault[:, 13]
    subfault_M0 = subfault_M0 * 1e7  #to dyne-cm
    M0 = subfault_M0.sum()
    relative_subfault_M0 = subfault_M0 / M0

    #Corner frequency scaling
    i = where(slip > 0)[0]  #Non-zero faults
    N = len(i)  #number of subfaults
    dl = mean((fault[:, 10] + fault[:, 11]) / 2)  #predominant length scale
    dl = dl / 1000  # to km

    #Tau=p perturbation
    tau_perturb = 0.1

    #Deep faults receive a higher stress
    stress_multiplier = 3

    #Loop over stations
    for ksta in range(len(lonlat)):

        print('... working on ' + component +
              ' component semistochastic waveform for station ' + sta[ksta])

        #initalize output seismogram
        tr = Trace()
        tr.stats.station = sta[ksta]
        tr.stats.delta = hf_dt
        tr.stats.starttime = time_epi
        hf = zeros(len(t))

        #Loop over subfaults
        for kfault in range(len(fault)):

            #Print status to screen
            if kfault % 150 == 0:
                if kfault == 0:
                    stdout.write('      [')
                    stdout.flush()
                stdout.write('.')
                stdout.flush()
            if kfault == len(fault) - 1:
                stdout.write(']\n')
                stdout.flush()

            #Include only subfaults with non-zero slip
            if subfault_M0[kfault] > 0:

                #Get subfault to station distance
                lon_source = fault[kfault, 1]
                lat_source = fault[kfault, 2]
                azimuth, baz, dist = g.inv(lon_source, lat_source,
                                           lonlat[ksta, 0], lonlat[ksta, 1])
                dist_in_degs = kilometer2degrees(dist / 1000.)

                #Source depth?
                z_source = fault[kfault, 3]

                #No change
                stress = stress_parameter

                #Is subfault in an SMGA?
                #radius_in_km=15.0
                #smga_center_lon=-69.709200
                #smga_center_lat=-19.683600
                #in_smga=is_subfault_in_smga(lon_source,lat_source,smga_center_lon,smga_center_lat,radius_in_km)
                #
                ###Apply multiplier?
                #if in_smga==True:
                #    stress=stress_parameter*stress_multiplier
                #    print "%.4f,%.4f is in SMGA, stress is %d" % (lon_source,lat_source,stress)
                #else:
                #    stress=stress_parameter

                #Apply multiplier?
                #if slip[kfault]>7.5:
                #    stress=stress_parameter*stress_multiplier
                ##elif lon_source>-72.057 and lon_source<-71.2 and lat_source>-30.28:
                ##    stress=stress_parameter*stress_multiplier
                #else:
                #    stress=stress_parameter

                #Apply multiplier?
                #if z_source>high_stress_depth:
                #    stress=stress_parameter*stress_multiplier
                #else:
                #    stress=stress_parameter

                # Frankel 95 scaling of corner frequency #verified this looks the same in GP
                # Right now this applies the same factor to all faults
                fc_scale = (M0) / (N * stress * dl**3 * 1e21)  #Frankel scaling
                small_event_M0 = stress * dl**3 * 1e21

                #Get rho, alpha, beta at subfault depth
                zs = fault[kfault, 3]
                mu, alpha, beta = get_mu(structure, zs, return_speeds=True)
                rho = mu / beta**2

                #Get radiation scale factor
                Spartition = 1 / 2**0.5
                if component == 'N':
                    component_angle = 0
                elif component == 'E':
                    component_angle = 90

                rho = rho / 1000  #to g/cm**3
                beta = (beta / 1000) * 1e5  #to cm/s
                alpha = (alpha / 1000) * 1e5

                #Verified this produces same value as in GP
                CS = (2 * Spartition) / (4 * pi * (rho) * (beta**3))
                CP = 2 / (4 * pi * (rho) * (alpha**3))

                #Get local subfault rupture speed
                beta = beta / 100  #to m/s
                vr = get_local_rupture_speed(zs, beta, rise_time_depths)
                vr = vr / 1000  #to km/s
                dip_factor = get_dip_factor(fault[kfault, 5], fault[kfault, 8],
                                            fault[kfault, 9])

                #Subfault corner frequency
                c0 = 2.0  #GP2015 value
                fc_subfault = (c0 * vr) / (dip_factor * pi * dl)

                #get subfault source spectrum
                #S=((relative_subfault_M0[kfault]*M0/N)*f**2)/(1+fc_scale*(f/fc_subfault)**2)
                S = small_event_M0 * (omega**2 / (1 + (f / fc_subfault)**2))
                frankel_conv_operator = fc_scale * (
                    (fc_subfault**2 + f**2) /
                    (fc_subfault**2 + fc_scale * f**2))
                S = S * frankel_conv_operator

                #get high frequency decay
                P = exp(-pi * kappa * f)

                #get quarter-wavelength amplification factors
                # pass rho in kg/m^3 (this units nightmare is what I get for following Graves' code)
                I = get_amplification_factors(f, structure, zs, beta,
                                              rho * 1000)

                #Get other geometric parameters necessary for radiation pattern
                strike = fault[kfault, 4]
                dip = fault[kfault, 5]
                ss = fault[kfault, 8]
                ds = fault[kfault, 9]
                rake = rad2deg(arctan2(ds, ss))

                #Get ray paths for all direct P arrivals
                Ppaths = velmod.get_ray_paths(zs,
                                              dist_in_degs,
                                              phase_list=['P', 'p'])

                #Get ray paths for all direct S arrivals
                try:
                    Spaths = velmod.get_ray_paths(zs,
                                                  dist_in_degs,
                                                  phase_list=['S', 's'])
                except:
                    Spaths = velmod.get_ray_paths(zs + tau_perturb,
                                                  dist_in_degs,
                                                  phase_list=['S', 's'])

                #sometimes there's no S, weird I know. Check twice.
                if len(Spaths) == 0:
                    Spaths = velmod.get_ray_paths(zs + tau_perturb,
                                                  dist_in_degs,
                                                  phase_list=['S', 's'])
                if len(Spaths) == 0:
                    Spaths = velmod.get_ray_paths(zs + 5 * tau_perturb,
                                                  dist_in_degs,
                                                  phase_list=['S', 's'])
                if len(Spaths) == 0:
                    Spaths = velmod.get_ray_paths(zs - 5 * tau_perturb,
                                                  dist_in_degs,
                                                  phase_list=['S', 's'])
                if len(Spaths) == 0:
                    Spaths = velmod.get_ray_paths(zs + 5 * tau_perturb,
                                                  dist_in_degs,
                                                  phase_list=['S', 's'])
                if len(Spaths) == 0:
                    Spaths = velmod.get_ray_paths(zs - 10 * tau_perturb,
                                                  dist_in_degs,
                                                  phase_list=['S', 's'])
                if len(Spaths) == 0:
                    Spaths = velmod.get_ray_paths(zs + 10 * tau_perturb,
                                                  dist_in_degs,
                                                  phase_list=['S', 's'])
                if len(Spaths) == 0:
                    Spaths = velmod.get_ray_paths(zs - 50 * tau_perturb,
                                                  dist_in_degs,
                                                  phase_list=['S', 's'])
                if len(Spaths) == 0:
                    Spaths = velmod.get_ray_paths(zs + 50 * tau_perturb,
                                                  dist_in_degs,
                                                  phase_list=['S', 's'])
                if len(Spaths) == 0:
                    Spaths = velmod.get_ray_paths(zs - 75 * tau_perturb,
                                                  dist_in_degs,
                                                  phase_list=['S', 's'])
                if len(Spaths) == 0:
                    Spaths = velmod.get_ray_paths(zs + 75 * tau_perturb,
                                                  dist_in_degs,
                                                  phase_list=['S', 's'])
                if len(Spaths) == 0:
                    print('ERROR: I give up, no direct S in spite of multiple '
                          'attempts at subfault ' + str(kfault))

                #Get direct s path and moho reflection
                mohoS = None
                directS = Spaths[0]
                directP = Ppaths[0]
                #print len(Spaths)
                if len(Spaths) == 1:  #only direct S
                    pass
                else:
                    #turn_depth=zeros(len(Spaths)-1) #turning depth of other non-direct rays
                    #for k in range(1,len(Spaths)):
                    #    turn_depth[k-1]=Spaths[k].path['depth'].max()
                    ##If there's a ray that turns within 2km of Moho, callt hat guy the Moho reflection
                    #deltaz=abs(turn_depth-moho_depth_in_km)
                    #i=argmin(deltaz)
                    #if deltaz[i]<2: #Yes, this is a moho reflection
                    #    mohoS=Spaths[i+1]
                    #else:
                    #    mohoS=None
                    mohoS = Spaths[-1]

                #######         Build Direct P ray           ######
                if Pwave == True:
                    take_off_angle_P = directP.takeoff_angle

                    #Get attenuation due to geometrical spreading (from the path length)
                    path_length_P = get_path_length(directP, zs, dist_in_degs)
                    path_length_P = path_length_P * 100  #to cm

                    #Get effect of intrinsic attenuation for that ray (path integrated)
                    Q_P = get_attenuation(f,
                                          structure,
                                          directS,
                                          Qexp,
                                          Qtype='P')

                    #Build the entire path term
                    G_P = (I * Q_P) / path_length_P

                    #Get conically averaged radiation pattern terms
                    RP = conically_avg_P_radiation_pattern(
                        strike, dip, rake, azimuth, take_off_angle_P)
                    RP = abs(RP)

                    #Get partition of Pwave into Z and N,E components
                    incidence_angle = directP.incident_angle
                    Npartition, Epartition, Zpartition = get_P_wave_partition(
                        incidence_angle, azimuth)
                    if component == 'Z':
                        Ppartition = Zpartition
                    elif component == 'N':
                        Ppartition = Npartition
                    else:
                        Ppartition = Epartition

                    #And finally multiply everything together to get the subfault amplitude spectrum
                    AP = CP * S * G_P * P * RP * Ppartition

                    #Generate windowed time series
                    duration = 1. / fc_subfault + 0.09 * (dist / 1000)
                    w = windowed_gaussian(duration,
                                          hf_dt,
                                          window_type='saragoni_hart')

                    #Go to frequency domain, apply amplitude spectrum and ifft for final time series
                    hf_seis_P = apply_spectrum(w, AP, f, hf_dt)

                    #What time after OT should this time series start at?
                    time_insert = directP.path['time'][-1] + onset_times[kfault]
                    i = argmin(abs(t - time_insert))
                    j = i + len(hf_seis_P)

                    #Check seismogram doesn't go past last sample
                    if i < len(hf) - 1:  #if i (the beginning of the seismogram) is less than the length
                        if j > len(hf):  #seismogram goes past total_duration length, trim it
                            len_paste = len(hf) - i
                            j = len(hf)
                            #Add seismogram
                            hf[i:j] = hf[i:j] + real(hf_seis_P[0:len_paste])
                        else:  #Lengths are fine
                            hf[i:j] = hf[i:j] + real(hf_seis_P)
                    else:  #Seismogram starts after end of available space
                        pass

                #######         Build Direct S ray           ######
                take_off_angle_S = directS.takeoff_angle

                #Get attenuation due to geometrical spreading (from the path length)
                path_length_S = get_path_length(directS, zs, dist_in_degs)
                path_length_S = path_length_S * 100  #to cm

                #Get effect of intrinsic attenuation for that ray (path integrated)
                Q_S = get_attenuation(f, structure, directS, Qexp)

                #Build the entire path term
                G_S = (I * Q_S) / path_length_S

                #Get conically averaged radiation pattern terms
                if component == 'Z':
                    RP_vert = conically_avg_vert_radiation_pattern(
                        strike, dip, rake, azimuth, take_off_angle_S)
                    #And finally multiply everything together to get the subfault amplitude spectrum
                    AS = CS * S * G_S * P * RP_vert
                else:
                    RP = conically_avg_radiation_pattern(
                        strike, dip, rake, azimuth, take_off_angle_S,
                        component_angle)
                    RP = abs(RP)
                    #And finally multiply everything together to get the subfault amplitude spectrum
                    AS = CS * S * G_S * P * RP

                #Generate windowed time series
                duration = 1. / fc_subfault + 0.063 * (dist / 1000)
                w = windowed_gaussian(duration,
                                      hf_dt,
                                      window_type='saragoni_hart')
                #w=windowed_gaussian(3*duration,hf_dt,window_type='cua',ptime=Ppaths[0].path['time'][-1],stime=Spaths[0].path['time'][-1])

                #Go to frequency domain, apply amplitude spectrum and ifft for final time series
                hf_seis_S = apply_spectrum(w, AS, f, hf_dt)

                #What time after OT should this time series start at?
                time_insert = directS.path['time'][-1] + onset_times[kfault]
                #print 'ts = '+str(time_insert)+' , Td = '+str(duration)
                #time_insert=Ppaths[0].path['time'][-1]
                i = argmin(abs(t - time_insert))
                j = i + len(hf_seis_S)

                #Check seismogram doesn't go past last sample
                if i < len(hf) - 1:  #if i (the beginning of the seismogram) is less than the length
                    if j > len(hf):  #seismogram goes past total_duration length, trim it
                        len_paste = len(hf) - i
                        j = len(hf)
                        #Add seismogram
                        hf[i:j] = hf[i:j] + real(hf_seis_S[0:len_paste])
                    else:  #Lengths are fine
                        hf[i:j] = hf[i:j] + real(hf_seis_S)
                else:  #Beginning of seismogram is past end of available space
                    pass

                #######         Build Moho reflected S ray           ######
    #            if mohoS==None:
    #                pass
    #            else:
    #                if kfault%100==0:
    #                    print '... ... building Moho reflected S wave'
    #                take_off_angle_mS=mohoS.takeoff_angle
    #
    #                #Get attenuation due to geometrical spreading (from the path length)
    #                path_length_mS=get_path_length(mohoS,zs,dist_in_degs)
    #                path_length_mS=path_length_mS*100 #to cm
    #
    #                #Get effect of intrinsic aptimeenuation for that ray (path integrated)
    #                Q_mS=get_attenuation(f,structure,mohoS,Qexp)
    #
    #                #Build the entire path term
    #                G_mS=(I*Q_mS)/path_length_mS
    #
    #                #Get conically averaged radiation pattern terms
    #                if component=='Z':
    #                    RP_vert=conically_avg_vert_radiation_pattern(strike,dip,rake,azimuth,take_off_angle_mS)
    #                    #And finally multiply everything together to get the subfault amplitude spectrum
    #                    A=C*S*G_mS*P*RP_vert
    #                else:
    #                    RP=conically_avg_radiation_pattern(strike,dip,rake,azimuth,take_off_angle_mS,component_angle)
    #                    RP=abs(RP)
    #                    #And finally multiply everything together to get the subfault amplitude spectrum
    #                    A=C*S*G_mS*P*RP
    #
    #                #Generate windowed time series
    #                duration=1./fc_subfault+0.063*(dist/1000)
    #                w=windowed_gaussian(duration,hf_dt,window_type='saragoni_hart')
    #                #w=windowed_gaussian(3*duration,hf_dt,window_type='cua',ptime=Ppaths[0].path['time'][-1],stime=Spaths[0].path['time'][-1])
    #
    #                #Go to frequency domain, apply amplitude spectrum and ifft for final time series
    #                hf_seis=apply_spectrum(w,A,f,hf_dt)
    #
    #                #What time after OT should this time series start at?
    #                time_insert=mohoS.path['time'][-1]+onset_times[kfault]
    #                #print 'ts = '+str(time_insert)+' , Td = '+str(duration)
    #                #time_insert=Ppaths[0].path['time'][-1]
    #                i=argmin(abs(t-time_insert))
    #                j=i+len(hf_seis)
    #
    #                #Add seismogram
    #                hf[i:j]=hf[i:j]+hf_seis
    #
    #                #Done, reset
    #                mohoS=None

        #Done, add to trace and stream
        tr.data = hf / 100  #convert to m/s**2
        st += tr

    return st
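
# Hypothetical usage sketch (editor's addition, not from the source): all
# paths, file names and source parameters below are placeholders, and the
# call assumes the mudpy project layout (ruptures, station_info, structure)
# referenced inside the function.
from obspy import UTCDateTime

st_hf = stochastic_simulation(
    home='/projects/', project_name='maule', rupture_name='maule.rupt',
    GF_list='gflist.txt', time_epi=UTCDateTime(2010, 2, 27, 6, 34, 14),
    model_name='maule.mod', rise_time_depths=[10, 15], moho_depth_in_km=35.0,
    total_duration=250, hf_dt=0.01, stress_parameter=50, kappa=0.04,
    Qexp=0.6, component='N', Pwave=True)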
Example #51
0
def _read_css(filename, **kwargs):
    """
    Reads a CSS waveform file and returns a Stream object.

    .. warning::
        This function should NOT be called directly, it registers via the
        ObsPy :func:`~obspy.core.stream.read` function, call this instead.

    :type filename: str
    :param filename: CSS file to be read.
    :rtype: :class:`~obspy.core.stream.Stream`
    :returns: Stream with Traces specified by given file.
    """
    # read metafile with info on single traces
    with open(filename, "rb") as fh:
        lines = fh.readlines()
    basedir = os.path.dirname(filename)
    traces = []
    # read single traces
    for line in lines:
        npts = int(line[79:87])
        dirname = line[148:212].strip().decode()
        wfdisc_dfile = line[213:245].strip().decode()
        dfilename = os.path.join(basedir, dirname, wfdisc_dfile)
        offset = int(line[246:256])
        dtype = DTYPE[line[143:145]]
        if isinstance(dtype, tuple):
            read_fmt = np.dtype(dtype[0])
            fmt = dtype[1]
        else:
            read_fmt = np.dtype(dtype)
            fmt = read_fmt

        try:
            # assumed that the waveform file is not compressed
            fh = open(dfilename, "rb")
        except FileNotFoundError as e:
            # If it does not find the waveform file referenced in the wfdisc,
            # it will try to open a compressed .gz suffix file instead.
            try:
                fh = gzip.open(dfilename + '.gz', "rb")
            except FileNotFoundError:
                raise e

        # Read one segment of binary data
        fh.seek(offset)
        data = fh.read(read_fmt.itemsize * npts)
        fh.close()
        data = from_buffer(data, dtype=read_fmt)
        data = np.require(data, dtype=fmt)

        header = {}
        header['station'] = line[0:6].strip().decode()
        header['channel'] = line[7:15].strip().decode()
        header['starttime'] = UTCDateTime(float(line[16:33]))
        header['sampling_rate'] = float(line[88:99])
        header['calib'] = float(line[100:116])
        header['calper'] = float(line[117:133])
        tr = Trace(data, header=header)
        traces.append(tr)
    return Stream(traces=traces)
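
The reader above is registered as an ObsPy plugin, so in practice it is driven through obspy.read(). A minimal usage sketch (the wfdisc filename is a placeholder, not a file shipped with ObsPy):

# Hedged usage sketch for the CSS reader registered above.
# "example.wfdisc" is a placeholder path.
from obspy import read

st = read("example.wfdisc", format="CSS")
print(st)  # one Trace per wfdisc line, with station/channel/calib filled in
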
Example #52
0
    def calculate_rfs(self):
        """
        Method to generate receiver functions from displacement traces.

        The radial and transverse receiver functions are computed for each
        displacement stream and stored as a list of Streams in the
        ``self.rfs`` attribute; the method itself returns ``None``.

        """

        if self.args.rot == 0:
            msg = "Receiver functions cannot be calculated with 'rot == 0'\n"
            raise Exception(msg)

        if self.args.rot == 1:
            cmpts = ['R', 'T', 'Z']
        elif self.args.rot == 2:
            cmpts = ['V', 'H', 'P']
        else:
            raise Exception('rotation ID invalid: ' + str(self.args.rot))

        rflist = []

        # Cycle through list of displacement streams
        for stream in self.streams:

            # Calculate time axis
            npts = stream[0].stats.npts
            taxis = np.arange(-npts / 2., npts / 2.) * stream[0].stats.delta

            # Extract 3-component traces from stream
            rtr = stream.select(component=cmpts[0])[0]
            ttr = stream.select(component=cmpts[1])[0]
            ztr = stream.select(component=cmpts[2])[0]

            # Deep copy and re-initialize data to 0.
            rfr = rtr.copy()
            rfr.data = np.zeros(len(rfr.data))
            rft = ttr.copy()
            rft.data = np.zeros(len(rft.data))

            # Fourier transform
            ft_rfr = fft(rtr.data)
            ft_rft = fft(ttr.data)
            ft_ztr = fft(ztr.data)

            # Spectral division to calculate receiver functions
            if self.args.wvtype == 'P':
                rfr.data = fftshift(np.real(ifft(np.divide(ft_rfr, ft_ztr))))
                rft.data = fftshift(np.real(ifft(np.divide(ft_rft, ft_ztr))))
            elif self.args.wvtype == 'SV':
                rfr.data = fftshift(np.real(ifft(np.divide(-ft_ztr, ft_rfr))))
            elif self.args.wvtype == 'SH':
                rft.data = fftshift(np.real(ifft(np.divide(-ft_ztr, ft_rft))))

            # Update stats
            rfr.stats.channel = 'RF' + cmpts[0]
            rft.stats.channel = 'RF' + cmpts[1]
            rfr.stats.taxis = taxis
            rft.stats.taxis = taxis

            # Store in Stream
            rfstream = Stream(traces=[rfr, rft])

            # Append to list
            rflist.append(rfstream)

        self.rfs = rflist

        return
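
The heart of the method above is the spectral division followed by an inverse FFT and fftshift. A self-contained numpy sketch of that step on synthetic arrays follows; the small water-level term is an added assumption for numerical stability and is not part of the method above, which divides directly.

import numpy as np

# Synthetic radial and vertical components (stand-ins for rtr.data / ztr.data)
npts, delta = 1024, 0.05
rng = np.random.default_rng(0)
radial = rng.standard_normal(npts)
vertical = rng.standard_normal(npts)

ft_r = np.fft.fft(radial)
ft_z = np.fft.fft(vertical)

# P-wave case: deconvolve the vertical from the radial by spectral division.
# The water level below is an assumption added to avoid division by zero.
water = 1e-6 * np.abs(ft_z).max()
rf = np.fft.fftshift(np.real(np.fft.ifft(ft_r / (ft_z + water))))

# Time axis centered on zero lag, as in calculate_rfs
taxis = np.arange(-npts / 2., npts / 2.) * delta
print(rf.shape, taxis.shape)
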
Example #53
0
    def test_coincidenceTriggerWithSimilarityChecking(self):
        """
        Test network coincidence trigger with cross correlation similarity
        checking of given event templates.
        """
        st = Stream()
        files = ["BW.UH1._.SHZ.D.2010.147.cut.slist.gz",
                 "BW.UH2._.SHZ.D.2010.147.cut.slist.gz",
                 "BW.UH3._.SHZ.D.2010.147.cut.slist.gz",
                 "BW.UH3._.SHN.D.2010.147.cut.slist.gz",
                 "BW.UH3._.SHE.D.2010.147.cut.slist.gz",
                 "BW.UH4._.EHZ.D.2010.147.cut.slist.gz"]
        for filename in files:
            filename = os.path.join(self.path, filename)
            st += read(filename)
        # some prefiltering used for UH network
        st.filter('bandpass', freqmin=10, freqmax=20)
        # set up template event streams
        times = ["2010-05-27T16:24:33.095000", "2010-05-27T16:27:30.370000"]
        templ = {}
        for t in times:
            t = UTCDateTime(t)
            st_ = st.select(station="UH3").slice(t, t + 2.5).copy()
            templ.setdefault("UH3", []).append(st_)
        times = ["2010-05-27T16:27:30.574999"]
        for t in times:
            t = UTCDateTime(t)
            st_ = st.select(station="UH1").slice(t, t + 2.5).copy()
            templ.setdefault("UH1", []).append(st_)
        trace_ids = {"BW.UH1..SHZ": 1,
                     "BW.UH2..SHZ": 1,
                     "BW.UH3..SHZ": 1,
                     "BW.UH4..EHZ": 1}
        similarity_thresholds = {"UH1": 0.8, "UH3": 0.7}
        with warnings.catch_warnings(record=True) as w:
            # avoid getting influenced by the warning filters getting set up
            # differently in obspy-runtests.
            # (e.g. depending on options "-v" and "-q")
            warnings.resetwarnings()
            trig = coincidenceTrigger(
                "classicstalta", 5, 1, st.copy(), 4, sta=0.5, lta=10,
                trace_ids=trace_ids, event_templates=templ,
                similarity_threshold=similarity_thresholds)
            # two warnings get raised
            self.assertEqual(len(w), 2)
        # check floats in resulting dictionary separately
        self.assertAlmostEqual(trig[0].pop('duration'), 3.9600000381469727)
        self.assertAlmostEqual(trig[1].pop('duration'), 1.9900000095367432)
        self.assertAlmostEqual(trig[2].pop('duration'), 1.9200000762939453)
        self.assertAlmostEqual(trig[3].pop('duration'), 3.9200000762939453)
        self.assertAlmostEqual(trig[0]['similarity'].pop('UH1'), 0.94149447384)
        self.assertAlmostEqual(trig[0]['similarity'].pop('UH3'), 1)
        self.assertAlmostEqual(trig[1]['similarity'].pop('UH1'), 0.65228204570)
        self.assertAlmostEqual(trig[1]['similarity'].pop('UH3'), 0.72679293429)
        self.assertAlmostEqual(trig[2]['similarity'].pop('UH1'), 0.89404458774)
        self.assertAlmostEqual(trig[2]['similarity'].pop('UH3'), 0.74581409371)
        self.assertAlmostEqual(trig[3]['similarity'].pop('UH1'), 1)
        self.assertAlmostEqual(trig[3]['similarity'].pop('UH3'), 1)
        remaining_results = \
            [{'coincidence_sum': 4.0,
              'similarity': {},
              'stations': ['UH3', 'UH2', 'UH1', 'UH4'],
              'time': UTCDateTime(2010, 5, 27, 16, 24, 33, 210000),
              'trace_ids': ['BW.UH3..SHZ', 'BW.UH2..SHZ', 'BW.UH1..SHZ',
                            'BW.UH4..EHZ']},
             {'coincidence_sum': 3.0,
              'similarity': {},
              'stations': ['UH3', 'UH1', 'UH2'],
              'time': UTCDateTime(2010, 5, 27, 16, 25, 26, 710000),
              'trace_ids': ['BW.UH3..SHZ', 'BW.UH1..SHZ', 'BW.UH2..SHZ']},
             {'coincidence_sum': 3.0,
              'similarity': {},
              'stations': ['UH2', 'UH1', 'UH3'],
              'time': UTCDateTime(2010, 5, 27, 16, 27, 2, 260000),
              'trace_ids': ['BW.UH2..SHZ', 'BW.UH1..SHZ', 'BW.UH3..SHZ']},
             {'coincidence_sum': 4.0,
              'similarity': {},
              'stations': ['UH3', 'UH2', 'UH1', 'UH4'],
              'time': UTCDateTime(2010, 5, 27, 16, 27, 30, 510000),
              'trace_ids': ['BW.UH3..SHZ', 'BW.UH2..SHZ', 'BW.UH1..SHZ',
                            'BW.UH4..EHZ']}]
        self.assertTrue(trig == remaining_results)
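
Outside the test harness, the same kind of network coincidence trigger can be run in a few lines. A minimal sketch follows; the MiniSEED paths and trigger parameters are placeholders, and it uses the snake_case coincidence_trigger name exposed by newer ObsPy versions (the test above uses the older camelCase alias).

# Minimal sketch of a network coincidence trigger without event templates.
# File paths and trigger parameters are placeholder values.
from obspy import read
from obspy.signal.trigger import coincidence_trigger

st = read("BW.UH1..SHZ.mseed") + read("BW.UH2..SHZ.mseed")
st.filter('bandpass', freqmin=10, freqmax=20)
triggers = coincidence_trigger("classicstalta", 5, 1, st, 2, sta=0.5, lta=10)
for trig in triggers:
    print(trig['time'], trig['coincidence_sum'], trig['stations'])
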
Example #54
0
def main():
    parser = ArgumentParser(prog='seedlink_plotter',
                            description='Plot a realtime seismogram of a station',
                            formatter_class=ArgumentDefaultsHelpFormatter)
    parser.add_argument(
        '-s', '--seedlink_streams', type=str, required=True,
        help='The seedlink stream selector string. It has the format '
             '"stream1[:selectors1],stream2[:selectors2],...", with "stream" '
             'in "NETWORK"_"STATION" format and "selector" a space separated '
             'list of "LOCATION""CHANNEL", e.g. '
             '"IU_KONO:BHE BHN,MN_AQU:HH?.D".')
    parser.add_argument(
        '--scale', type=int, help='the scale to apply on data, e.g. 50000', required=False)

    # Real-time parameters
    parser.add_argument('--seedlink_server', type=str,
                        help='the seedlink server to connect to with port, '
                             'e.g. rtserver.ipgp.fr:18000', required=True)
    parser.add_argument(
        '--x_scale', type=_parse_time_with_suffix_to_minutes,
        help='the number of minutes to plot per line.'
             ' The following suffixes can be used as well: "s" for seconds, '
             '"m" for minutes, "h" for hours and "d" for days.',
        default=60)
    parser.add_argument('-b', '--backtrace_time',
                        help='the number of seconds to plot (3600=1h,86400=24h). The '
                        'following suffixes can be used as well: "m" for minutes, '
                        '"h" for hours and "d" for days.', required=True,
                        type=_parse_time_with_suffix_to_seconds)
    parser.add_argument('--x_position', type=int,
                        help='the x position of the graph', required=False, default=0)
    parser.add_argument('--y_position', type=int,
                        help='the y position of the graph', required=False, default=0)
    parser.add_argument(
        '--x_size', type=int, help='the x size of the graph', required=False, default=800)
    parser.add_argument(
        '--y_size', type=int, help='the y size of the graph', required=False, default=600)
    parser.add_argument(
        '--title_size', type=int, help='the title size of each station in multichannel', required=False, default=10)
    parser.add_argument(
        '--time_legend_size', type=int, help='the size of time legend in multichannel', required=False, default=10)
    parser.add_argument(
        '--tick_format', type=str, help='the tick format of time legend ', required=False, default=None)
    parser.add_argument(
        '--time_tick_nb', type=int, help='the number of time ticks', required=False)
    parser.add_argument(
        '--without-decoration', required=False, action='store_true',
        help=('the graph window will have no decorations. That means the '
              'window is not controlled by the window manager and can only '
              'be closed by killing the respective process.'))
    parser.add_argument(
        '--line_plot', help='regular real time plot for single station', required=False, action='store_true')
    parser.add_argument(
        '--rainbow', help='', required=False, action='store_true')
    parser.add_argument(
        '--nb_rainbow_colors', help='the number of colors for rainbow mode', required=False, default=10)
    parser.add_argument(
        '--update_time',
        help='time in seconds between each graphic update.'
        ' The following suffixes can be used as well: "s" for seconds, '
        '"m" for minutes, "h" for hours and "d" for days.',
        required=False, default=10,
        type=_parse_time_with_suffix_to_seconds)
    parser.add_argument('--events', required=False, default=None, type=float,
                        help='plot events using obspy.neries, specify minimum magnitude')
    parser.add_argument(
        '--events_update_time', required=False, default=10,
        help='time in minutes between each event data update. '
             ' The following suffixes can be used as well: "s" for seconds, '
             '"m" for minutes, "h" for hours and "d" for days.',
        type=_parse_time_with_suffix_to_minutes)
    parser.add_argument('-f', '--fullscreen', default=False,
                        action="store_true",
                        help='set to full screen on startup')
    parser.add_argument('-v', '--verbose', default=False,
                        action="store_true", dest="verbose",
                        help='show verbose debugging output')
    parser.add_argument('--force', default=False, action="store_true",
                        help='skip warning message and confirmation prompt '
                             'when opening a window without decoration')
    # parse the arguments
    args = parser.parse_args()

    if args.verbose:
        loglevel = logging.DEBUG
    else:
        loglevel = logging.CRITICAL
    logging.basicConfig(level=loglevel)

    # before anything else: warn user about window without decoration
    if args.without_decoration and not args.force:
        warning_ = ("Warning: You are about to open a window without "
                    "decoration that is not controlled via your Window "
                    "Manager. You can exit with <Ctrl>-C (as long as you do "
                    "not switch to another window with e.g. <Alt>-<Tab>)."
                    "\n\nType 'y' to continue.. ")
        if input_func(warning_) != "y":
            print("Aborting.")
            sys.exit()

    now = UTCDateTime()
    stream = Stream()
    events = Catalog()
    lock = threading.Lock()

    # set up the seedlink client
    seedlink_client = SeedlinkUpdater(stream, myargs=args, lock=lock)
    if OBSPY_VERSION < [1, 0]:
        seedlink_client.slconn.setSLAddress(args.seedlink_server)
    else:
        seedlink_client.slconn.set_sl_address(args.seedlink_server)
    seedlink_client.multiselect = args.seedlink_streams

    # test if drum plot or line plot
    if any([x in args.seedlink_streams for x in ", ?*"]) or args.line_plot:
        drum_plot = False
        if args.time_tick_nb is None:
            args.time_tick_nb = 5
        if args.tick_format is None:
            args.tick_format = '%H:%M:%S'
        round_start = UTCDateTime(now.year, now.month, now.day, now.hour, 0, 0)
        round_start = round_start + 3600 - args.backtrace_time
        seedlink_client.begin_time = (round_start).format_seedlink()

    else:
        drum_plot = True
        if args.time_tick_nb is None:
            args.time_tick_nb = 13
        if args.tick_format is None:
            args.tick_format = '%d/%m/%y %Hh'
    seedlink_client.begin_time = (now - args.backtrace_time).format_seedlink()

    seedlink_client.initialize()
    ids = seedlink_client.getTraceIDs()
    # start the seedlink client in a thread
    thread = threading.Thread(target=seedlink_client.run)
    thread.setDaemon(True)
    thread.start()

    # start another thread for event updating if requested
    if args.events is not None:
        event_updater = EventUpdater(
            stream=stream, events=events, myargs=args, lock=lock)
        thread = threading.Thread(target=event_updater.run)
        thread.setDaemon(True)
        thread.start()

    # Wait a few seconds to get data for the first plot
    time.sleep(2)

    master = SeedlinkPlotter(stream=stream, events=events, myargs=args,
                             lock=lock, drum_plot=drum_plot,
                             trace_ids=ids)
    master.mainloop()
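
For context, the parser above would accept an invocation along the lines of the argv list below. This is a hedged sketch only: the server address, stream selector and window sizes are placeholder values, and it assumes it is executed in the same module so that main() and its helpers are importable.

# Hedged invocation sketch: fake the command line and call main().
# All argument values below are placeholders.
import sys

sys.argv = ["seedlink-plotter",
            "-s", "IU_KONO:BHZ",
            "--seedlink_server", "rtserver.ipgp.fr:18000",
            "-b", "24h",          # backtrace_time, suffix parsed to seconds
            "--x_scale", "60m"]   # minutes per plotted line
main()
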
Example #55
0
        f1 = open('tmp1', 'r')
        f2 = open('tmp2', 'w')
        for line in f1:
            f2.write(line.replace('x', ''))
        f1.close()
        f2.close()

        #Now read the data
        gps = genfromtxt('tmp2')

        #Delete temporary files
        remove('tmp1')
        remove('tmp2')

        #Initialize obspy stream object
        n = Stream(Trace())
        e = Stream(Trace())
        u = Stream(Trace())

        #Fill gaps with zeros
        t = gps[:, 0]
        print 'dt=' + str(dt)
        gap_positions = where(diff(t) > dt)[0] + 1
        print str(len(gap_positions) + 1) + ' segments (' + str(
            len(gap_positions)) + ' gaps) found'
        if len(gap_positions) > 0:  #There are gaps
            for i in range(len(gap_positions)):
                if i == 0:
                    #Fill with data (first trace)
                    n[0].data = gps[0:gap_positions[0],
                                    2] / 100  #It's in cm, convert to m
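
The fragment above locates data gaps by looking for jumps in the sample-time column. A self-contained sketch of that detection step on a synthetic time vector (dt is assumed to be 1.0 here):

import numpy as np

dt = 1.0
# Synthetic sample times with two gaps (after t=4 and after t=13)
t = np.array([0., 1., 2., 3., 4., 10., 11., 12., 13., 20.])
gap_positions = np.where(np.diff(t) > dt)[0] + 1
print(str(len(gap_positions) + 1) + ' segments (' +
      str(len(gap_positions)) + ' gaps) found')
print('segment start indices:', np.r_[0, gap_positions])
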
Example #56
0
    def getWaveformNSCL(self, seedname, starttime, duration):
        """
        Gets a regular expression of channels from a start time for a duration
        in seconds. The regular expression must represent all characters of
        the 12-character NNSSSSSCCCLL pattern e.g. "US.....[BSHE]HZ.." is
        valid, but "US.....[BSHE]H" is not. Complex regular expressions are
        permitted "US.....BHZ..|CU.....[BH]HZ.."

        .. rubric:: Notes

        For detailed information regarding the usage of regular expressions
        in the query, see also the documentation for CWBQuery ("CWBQuery.doc")
        available at ftp://hazards.cr.usgs.gov/CWBQuery/.
        Using ".*" regular expression might or might not work. If the 12
        character seed name regular expression is less than 12 characters it
        might get padded with spaces on the server side.

        :type seedname: str
        :param seedname: The 12 character seedname or 12 character regexp
            matching channels
        :type starttime: :class:`~obspy.core.utcdatetime.UTCDateTime`
        :param starttime: The starting date/time to get
        :type duration: float
        :param duration: The duration in seconds to get
        :rtype: :class:`~obspy.core.stream.Stream`
        :returns: Stream object with requested data

        .. rubric:: Example

        >>> from obspy.neic import Client
        >>> from obspy import UTCDateTime
        >>> client = Client()
        >>> t = UTCDateTime() - 5 * 3600  # 5 hours before now
        >>> st = client.getWaveformNSCL("IUANMO BH.00", t, 10)
        >>> print st  # doctest: +ELLIPSIS
        3 Trace(s) in Stream:
        IU.ANMO.00.BH... | 20.0 Hz, 201 samples
        IU.ANMO.00.BH... | 20.0 Hz, 201 samples
        IU.ANMO.00.BH... | 20.0 Hz, 201 samples
        """
        start = str(UTCDateTime(starttime)).replace("T", " ").replace("Z", "")
        line = "'-dbg' '-s' '%s' '-b' '%s' '-d' '%s'\t" % \
            (seedname, start, duration)
        if self.debug:
            print ascdate() + " " + asctime() + " line=" + line
        success = False
        while not success:
            try:
                s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                with NamedTemporaryFile() as tf:
                    if self.debug:
                        print ascdate(), asctime(), "connecting temp file", \
                            tf.name
                    s.connect((self.host, self.port))
                    s.setblocking(0)
                    s.send(line)
                    if self.debug:
                        print ascdate(), asctime(), "Connected - start reads"
                    slept = 0
                    maxslept = self.timeout / 0.05
                    totlen = 0
                    while True:
                        try:
                            data = s.recv(102400)
                            if self.debug:
                                print ascdate(), asctime(), "read len", \
                                    str(len(data)), " total", str(totlen)
                            if data.find("EOR") >= 0:
                                if self.debug:
                                    print ascdate(), asctime(), "<EOR> seen"
                                tf.write(data[0:data.find("<EOR>")])
                                totlen += len(data[0:data.find("<EOR>")])
                                tf.seek(0)
                                try:
                                    st = read(tf.name, 'MSEED')
                                except Exception, e:
                                    st = Stream()
                                st.trim(starttime, starttime + duration)
                                s.close()
                                success = True
                                break
                            else:
                                totlen += len(data)
                                tf.write(data)
                                slept = 0
                        except socket.error as e:
                            if slept > maxslept:
                                print ascdate(), asctime(), \
                                    "Timeout on connection", \
                                    "- try to reconnect"
                                slept = 0
                                s.close()
                            sleep(0.05)
                            slept += 1
Example #57
0
def _read_segy(filename,
               headonly=False,
               byteorder=None,
               textual_header_encoding=None,
               unpack_trace_headers=False,
               **kwargs):  # @UnusedVariable
    """
    Reads a SEG Y file and returns an ObsPy Stream object.

    .. warning::
        This function should NOT be called directly, it registers via the
        ObsPy :func:`~obspy.core.stream.read` function, call this instead.

    :type filename: str
    :param filename: SEG Y rev1 file to be read.
    :type headonly: bool, optional
    :param headonly: If set to True, read only the header and omit the waveform
        data.
    :type byteorder: str or ``None``
    :param byteorder: Determines the endianness of the file. Either ``'>'`` for
        big endian or ``'<'`` for little endian. If it is ``None``, it will try
        to autodetect the endianness. The endianness is always valid for the
        whole file. Defaults to ``None``.
    :type textual_header_encoding: str or ``None``
    :param textual_header_encoding: The encoding of the textual header. Can be
        ``'EBCDIC'``, ``'ASCII'`` or ``None``. If it is ``None``, autodetection
        will be attempted. Defaults to ``None``.
    :type unpack_trace_headers: bool, optional
    :param unpack_trace_headers: Determines whether or not all trace header
        values will be unpacked during reading. If ``False`` it will greatly
        enhance performance and especially memory usage with large files. The
        header values can still be accessed and will be calculated on the fly
        but tab completion will no longer work. Look in the headers.py for a
        list of all possible trace header values. Defaults to ``False``.
    :returns: A ObsPy :class:`~obspy.core.stream.Stream` object.

    .. rubric:: Example

    >>> from obspy import read
    >>> st = read("/path/to/00001034.sgy_first_trace")
    >>> st  # doctest: +ELLIPSIS
    <obspy.core.stream.Stream object at 0x...>
    >>> print(st)  # doctest: +ELLIPSIS
    1 Trace(s) in Stream:
    Seq. No. in line:    1 | 2009-06-22T14:47:37.000000Z - ... 2001 samples
    """
    # Read file to the internal segy representation.
    segy_object = _read_segyrev1(
        filename,
        endian=byteorder,
        textual_header_encoding=textual_header_encoding,
        unpack_headers=unpack_trace_headers)
    # Create the stream object.
    stream = Stream()
    # SEGY has several file headers that apply to all traces. They will be
    # stored in Stream.stats.
    stream.stats = AttribDict()
    # Get the textual file header.
    textual_file_header = segy_object.textual_file_header
    # The binary file header will be a new AttribDict
    binary_file_header = AttribDict()
    for key, value in segy_object.binary_file_header.__dict__.items():
        setattr(binary_file_header, key, value)
    # Get the data encoding and the endianness from the first trace.
    data_encoding = segy_object.traces[0].data_encoding
    endian = segy_object.traces[0].endian
    textual_file_header_encoding = segy_object.textual_header_encoding.upper()
    # Add the file wide headers.
    stream.stats.textual_file_header = textual_file_header
    stream.stats.binary_file_header = binary_file_header
    # Also set the data encoding, endianness and the encoding of the
    # textual_file_header.
    stream.stats.data_encoding = data_encoding
    stream.stats.endian = endian
    stream.stats.textual_file_header_encoding = \
        textual_file_header_encoding

    # Convert traces to ObsPy Trace objects.
    for tr in segy_object.traces:
        stream.append(
            tr.to_obspy_trace(headonly=headonly,
                              unpack_trace_headers=unpack_trace_headers))

    return stream
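
Beyond the doctest in the docstring, the file-wide headers that the function attaches to Stream.stats can be inspected directly. A short sketch using the same demo file referenced above:

# Sketch of accessing the SEG Y file-wide headers set on Stream.stats above.
# The path is the same demo file used in the docstring's doctest.
from obspy import read

st = read("/path/to/00001034.sgy_first_trace", format="SEGY",
          unpack_trace_headers=True)
print(st.stats.endian, st.stats.data_encoding)
print(st.stats.textual_file_header_encoding)
print(st.stats.binary_file_header)  # AttribDict built from the binary header
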
Example #58
0
def backproject(home,
                project_name,
                run_name,
                model_name,
                fault_name,
                stations_list,
                traveltimes_name,
                time_epi,
                Tmax,
                stack_order=4):
    '''
    Form the waveform stacks and back project to each source as a function of
    time. The output will be a stream object with one trace per source point
    containing the stack.
    '''

    from numpy import genfromtxt, load
    from obspy import read, Stream, Trace
    from mudpy.strong_motion import Nstack

    #load npz file with travel times
    t = load(home + project_name + '/travel_times/' + traveltimes_name +
             '.npz')
    delay_times = t[t.files[0]]

    #load sources
    sources = genfromtxt(home + project_name + '/data/model_info/' +
                         fault_name)

    #get station paths
    station_paths = genfromtxt(home + project_name + '/data/station_info/' +
                               stations_list,
                               usecols=3,
                               dtype='S')

    #read data
    for ksite in range(len(station_paths)):

        if ksite == 0:
            st = read(station_paths[0])
        else:
            st += read(station_paths[ksite])

        #trim and remove baseline
        st[ksite].trim(starttime=time_epi,
                       endtime=time_epi + Tmax,
                       pad=True,
                       fill_value=0)

    #Form stack for each source point
    stack = Stream(Trace())

    for ksource in range(len(sources)):

        #This gets reinitialized for each new source
        st_for_stack = st.copy()

        print('... working on stack %d of %d ' % (ksource, len(sources)))

        for ksite in range(len(st)):

            #get current delay time
            dt = delay_times[ksource, ksite]

            #delay waveform by cropping the right amount of samples
            N_crop_samples = int(dt / st[ksite].stats.delta)
            data = st[ksite].data
            data = data[N_crop_samples:-1]

            #place back in a trace object
            st_for_stack[ksite].data = data

            #Trim to pad ends with zeros
            st_for_stack[ksite].trim(endtime=st[0].stats.endtime,
                                     pad=True,
                                     fill_value=0)

        #Form the stack
        if ksource == 0:
            stack[0] = Nstack(st_for_stack, N=stack_order, normalize=True)
        else:
            stack += Nstack(st_for_stack, N=stack_order, normalize=True)

    stack.write(home + project_name + '/output/models/' + run_name +
                '.stacks.mseed',
                format='MSEED')
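
backproject delegates the actual stacking to mudpy.strong_motion.Nstack. As a point of reference, a generic N-th root stack of equally long traces can be sketched in plain numpy as follows; this illustrates the technique only and is not mudpy's implementation.

import numpy as np

def nth_root_stack(data, n=4, normalize=True):
    """Generic N-th root stack of a 2-D array (rows are traces).

    Illustrative only; mudpy.strong_motion.Nstack may differ in detail.
    """
    data = np.asarray(data, dtype=float)
    if normalize:
        peaks = np.abs(data).max(axis=1, keepdims=True)
        peaks[peaks == 0] = 1.0
        data = data / peaks
    # signed N-th root of each sample, averaged over traces, raised back to N
    rooted = np.sign(data) * np.abs(data) ** (1.0 / n)
    mean = rooted.mean(axis=0)
    return np.sign(mean) * np.abs(mean) ** n

# Example: three noisy copies of the same pulse stack coherently
rng = np.random.default_rng(1)
pulse = np.exp(-0.5 * ((np.arange(200) - 100) / 5.0) ** 2)
traces = np.array([pulse + 0.3 * rng.standard_normal(200) for _ in range(3)])
stacked = nth_root_stack(traces, n=4)
print(stacked.shape)
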
Example #59
0
def _read_ah2(filename):
    """
    Reads an AH v2 waveform file and returns a Stream object.

    :type filename: str
    :param filename: AH v2 file to be read.
    :rtype: :class:`~obspy.core.stream.Stream`
    :returns: Stream with Traces specified by given file.
    """
    def _unpack_trace(data):
        ah_stats = AttribDict({
            'version': '2.0',
            'event': AttribDict(),
            'station': AttribDict(),
            'record': AttribDict(),
            'extras': []
        })

        # station info
        data.unpack_int()  # undocumented extra int?
        ah_stats.station.code = _unpack_string(data)
        data.unpack_int()  # here too?
        ah_stats.station.channel = _unpack_string(data)
        data.unpack_int()  # and again?
        ah_stats.station.type = _unpack_string(data)
        ah_stats.station.recorder = _unpack_string(data)
        ah_stats.station.sensor = _unpack_string(data)
        ah_stats.station.azimuth = data.unpack_float()  # degrees E from N
        ah_stats.station.dip = data.unpack_float()  # up = -90, down = +90
        ah_stats.station.latitude = data.unpack_double()
        ah_stats.station.longitude = data.unpack_double()
        ah_stats.station.elevation = data.unpack_float()
        ah_stats.station.gain = data.unpack_float()
        ah_stats.station.normalization = data.unpack_float()  # A0

        npoles = data.unpack_int()
        ah_stats.station.poles = []
        for _i in range(npoles):
            r = data.unpack_float()
            i = data.unpack_float()
            ah_stats.station.poles.append(complex(r, i))

        nzeros = data.unpack_int()
        ah_stats.station.zeros = []
        for _i in range(nzeros):
            r = data.unpack_float()
            i = data.unpack_float()
            ah_stats.station.zeros.append(complex(r, i))
        ah_stats.station.comment = _unpack_string(data)

        # event info
        ah_stats.event.latitude = data.unpack_double()
        ah_stats.event.longitude = data.unpack_double()
        ah_stats.event.depth = data.unpack_float()
        ot_year = data.unpack_int()
        ot_mon = data.unpack_int()
        ot_day = data.unpack_int()
        ot_hour = data.unpack_int()
        ot_min = data.unpack_int()
        ot_sec = data.unpack_float()
        try:
            ot = UTCDateTime(ot_year, ot_mon, ot_day, ot_hour, ot_min, ot_sec)
        except Exception:
            ot = None
        ah_stats.event.origin_time = ot
        data.unpack_int()  # and again?
        ah_stats.event.comment = _unpack_string(data)

        # record info
        ah_stats.record.type = dtype = data.unpack_int()  # data type
        ah_stats.record.ndata = ndata = data.unpack_uint()  # number of samples
        ah_stats.record.delta = data.unpack_float()  # sampling interval
        ah_stats.record.max_amplitude = data.unpack_float()
        at_year = data.unpack_int()
        at_mon = data.unpack_int()
        at_day = data.unpack_int()
        at_hour = data.unpack_int()
        at_min = data.unpack_int()
        at_sec = data.unpack_float()
        at = UTCDateTime(at_year, at_mon, at_day, at_hour, at_min, at_sec)
        ah_stats.record.start_time = at
        ah_stats.record.units = _unpack_string(data)
        ah_stats.record.inunits = _unpack_string(data)
        ah_stats.record.outunits = _unpack_string(data)
        data.unpack_int()  # and again?
        ah_stats.record.comment = _unpack_string(data)
        data.unpack_int()  # and again?
        ah_stats.record.log = _unpack_string(data)

        # user attributes
        nusrattr = data.unpack_int()
        ah_stats.usrattr = {}
        for _i in range(nusrattr):
            key = _unpack_string(data)
            value = _unpack_string(data)
            ah_stats.usrattr[key] = value

        # unpack data using dtype from record info
        if dtype == 1:
            # float
            temp = data.unpack_farray(ndata, data.unpack_float)
        elif dtype == 6:
            # double
            temp = data.unpack_farray(ndata, data.unpack_double)
        else:
            # e.g. 3 (vector), 2 (complex), 4 (tensor)
            msg = 'Unsupported AH v2 record type %d'
            raise NotImplementedError(msg % (dtype))

        tr = Trace(np.array(temp))
        tr.stats.ah = ah_stats
        tr.stats.delta = ah_stats.record.delta
        tr.stats.starttime = ah_stats.record.start_time
        tr.stats.station = ah_stats.station.code
        tr.stats.channel = ah_stats.station.channel
        return tr

    st = Stream()
    with open(filename, "rb") as fh:
        # loop as long we can read records
        while True:
            try:
                # read first 8 bytes with XDR library
                data = xdrlib.Unpacker(fh.read(8))
                # check magic version number
                magic = data.unpack_int()
            except EOFError:
                break
            if magic != 1100:
                raise Exception('Not an AH v2 file')
            try:
                # get record length
                length = data.unpack_uint()
                # read rest of record into XDR unpacker
                data = xdrlib.Unpacker(fh.read(length))
                tr = _unpack_trace(data)
                st.append(tr)
            except EOFError:
                break
        return st
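
Like the CSS and SEG Y readers above, this function is reached through obspy.read(). A usage sketch (the filename is a placeholder) showing where the per-record metadata ends up:

# Hedged usage sketch for the AH reader; "example.ah" is a placeholder path.
from obspy import read

st = read("example.ah", format="AH")
for tr in st:
    print(tr.id, tr.stats.ah.station.code, tr.stats.ah.record.ndata)
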
Example #60
0
def _read_ah1(filename):
    """
    Reads an AH v1 waveform file and returns a Stream object.

    :type filename: str
    :param filename: AH v1 file to be read.
    :rtype: :class:`~obspy.core.stream.Stream`
    :returns: Stream with Traces specified by given file.
    """
    def _unpack_trace(data):
        ah_stats = AttribDict({
            'version': '1.0',
            'event': AttribDict(),
            'station': AttribDict(),
            'record': AttribDict(),
            'extras': []
        })

        # station info
        ah_stats.station.code = _unpack_string(data)
        ah_stats.station.channel = _unpack_string(data)
        ah_stats.station.type = _unpack_string(data)
        ah_stats.station.latitude = data.unpack_float()
        ah_stats.station.longitude = data.unpack_float()
        ah_stats.station.elevation = data.unpack_float()
        ah_stats.station.gain = data.unpack_float()
        ah_stats.station.normalization = data.unpack_float()  # A0
        poles = []
        zeros = []
        for _i in range(0, 30):
            r = data.unpack_float()
            i = data.unpack_float()
            poles.append(complex(r, i))
            r = data.unpack_float()
            i = data.unpack_float()
            zeros.append(complex(r, i))
        # first value describes number of poles/zeros
        npoles = int(poles[0].real) + 1
        nzeros = int(zeros[0].real) + 1
        ah_stats.station.poles = poles[1:npoles]
        ah_stats.station.zeros = zeros[1:nzeros]

        # event info
        ah_stats.event.latitude = data.unpack_float()
        ah_stats.event.longitude = data.unpack_float()
        ah_stats.event.depth = data.unpack_float()
        ot_year = data.unpack_int()
        ot_mon = data.unpack_int()
        ot_day = data.unpack_int()
        ot_hour = data.unpack_int()
        ot_min = data.unpack_int()
        ot_sec = data.unpack_float()
        try:
            ot = UTCDateTime(ot_year, ot_mon, ot_day, ot_hour, ot_min, ot_sec)
        except Exception:
            ot = None
        ah_stats.event.origin_time = ot
        ah_stats.event.comment = _unpack_string(data)

        # record info
        ah_stats.record.type = dtype = data.unpack_int()  # data type
        ah_stats.record.ndata = ndata = data.unpack_uint()  # number of samples
        ah_stats.record.delta = data.unpack_float()  # sampling interval
        ah_stats.record.max_amplitude = data.unpack_float()
        at_year = data.unpack_int()
        at_mon = data.unpack_int()
        at_day = data.unpack_int()
        at_hour = data.unpack_int()
        at_min = data.unpack_int()
        at_sec = data.unpack_float()
        at = UTCDateTime(at_year, at_mon, at_day, at_hour, at_min, at_sec)
        ah_stats.record.start_time = at
        ah_stats.record.abscissa_min = data.unpack_float()
        ah_stats.record.comment = _unpack_string(data)
        ah_stats.record.log = _unpack_string(data)

        # extras
        ah_stats.extras = data.unpack_array(data.unpack_float)

        # unpack data using dtype from record info
        if dtype == 1:
            # float
            temp = data.unpack_farray(ndata, data.unpack_float)
        elif dtype == 6:
            # double
            temp = data.unpack_farray(ndata, data.unpack_double)
        else:
            # e.g. 3 (vector), 2 (complex), 4 (tensor)
            msg = 'Unsupported AH v1 record type %d'
            raise NotImplementedError(msg % (dtype))
        tr = Trace(np.array(temp))
        tr.stats.ah = ah_stats
        tr.stats.delta = ah_stats.record.delta
        tr.stats.starttime = ah_stats.record.start_time
        tr.stats.station = ah_stats.station.code
        tr.stats.channel = ah_stats.station.channel
        return tr

    st = Stream()
    with open(filename, "rb") as fh:
        # read with XDR library
        data = xdrlib.Unpacker(fh.read())
        # loop as long we can read records
        while True:
            try:
                tr = _unpack_trace(data)
                st.append(tr)
            except EOFError:
                break
        return st
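
A detail worth noting in the AH v1 reader is the fixed-length pole/zero block: 30 complex pairs are always stored, and the real part of the first entry encodes how many are meaningful, which is what the slicing above implements. A small sketch of that convention on dummy values:

# Sketch of the AH v1 pole/zero convention used above: 30 pairs are always
# stored; the real part of the first entry gives the number of actual poles.
poles_raw = [complex(2, 0), complex(-0.1, 0.1), complex(-0.1, -0.1)] + [0j] * 27
npoles = int(poles_raw[0].real) + 1   # slice end, as in the reader above
poles = poles_raw[1:npoles]
print(len(poles), poles)              # -> 2 [(-0.1+0.1j), (-0.1-0.1j)]
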