def test_info_empty():
    # Seismogram and TimeSeries
    seis = Seismogram()
    seis.set_live()
    assert len(seis.elog.get_error_log()) == 0
    logging_helper.info(seis, 'dummy_func', '1')
    assert len(seis.elog.get_error_log()) == 1
def test_ExtractComponent():
    seis = Seismogram()
    seis.live = 1
    seis.data = dmatrix(np.random.rand(3, 6))
    seis.npts = 6
    ts = []
    for i in range(3):
        ts.append(ExtractComponent(seis, i))
    for i in range(3):
        assert (ts[i].data == seis.data[i]).all()
def makeseis():
    """
    Builds the Seismogram object used in this tutorial.  Component
    amplitudes are scaled by 1, 1/2, and 1/3 for components 0, 1, and 2
    respectively.
    """
    d = Seismogram()
    setbasics(d, 1000)
    y = rickerwave(2.0, 0.005)
    ny = len(y)
    # This algorithm only works because the call to set_npts in setbasics
    # initializes the data array to all zeros.  Output will not look right
    # if ny > 1000, but the parameters above assure that isn't so.  Just be
    # careful editing the parameters to rickerwave.
    for k in range(3):
        for i in range(min(ny, 1000)):
            d.data[k, i] = y[i] / float(k + 1)
    return d
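# Illustrative usage sketch (added; not part of the original tutorial code).
# It relies only on makeseis as defined above and checks the 1/(k+1)
# component scaling the docstring describes.
def example_makeseis_scaling():
    d = makeseis()
    for k in range(3):
        # each component is the same wavelet divided by k+1
        assert np.isclose(d.data[k, 0] * float(k + 1), d.data[0, 0])
    return d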
def test_clear_aliases(self):
    ss = Seismogram()
    ss.erase("starttime")
    assert not ss.is_defined("starttime")
    ss["t0"] = 0
    self.mdschema.Seismogram.clear_aliases(ss)
    assert not ss.is_defined("t0")
    assert ss.is_defined("starttime")
def test_windowdata():
    npts = 1000
    ts = TimeSeries()
    setbasics(ts, npts)
    for i in range(npts):
        ts.data[i] = float(i)
    t3c = Seismogram()
    setbasics(t3c, npts)
    for k in range(3):
        for i in range(npts):
            t3c.data[k, i] = 100 * (k + 1) + float(i)
    win = TimeWindow(2, 3)
    d = WindowData(ts, win)
    print('t y')
    for j in range(d.npts):
        print(d.time(j), d.data[j])
    assert len(d.data) == 101
    assert d.t0 == 2.0
    assert d.endtime() == 3.0
    d = WindowData(t3c, win)
    print('t x0 x1 x2')
    for j in range(d.npts):
        print(d.time(j), d.data[0, j], d.data[1, j], d.data[2, j])
    assert d.data.columns() == 101
    assert d.t0 == 2.0
    assert d.endtime() == 3.0
    print('testing error handling')
    t3c.kill()
    d = WindowData(t3c, win)
    assert d.npts == 1000 and (not d.live)
    d = WindowData(ts, win, preserve_history=True)
    print('Error message posted')
    print(d.elog.get_error_log())
    assert d.elog.size() == 1
    # this still throws an error but the message will be different
    d = WindowData(ts, win, preserve_history=True, instance='0')
    print('Error message posted')
    print(d.elog.get_error_log())
    assert d.elog.size() == 1
def makeseisens(d, n=20, moveout=True, moveout_dt=0.05):
    """
    Makes a Seismogram ensemble as copies of d.  If moveout is True,
    applies a linear moveout to members using moveout_dt times the count
    of the member in the ensemble.
    """
    result = SeismogramEnsemble()
    for i in range(n):
        y = Seismogram(d)  # this makes a required deep copy
        if moveout:
            y.t0 += float(i) * moveout_dt
        result.member.append(y)
    return result
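# Illustrative usage sketch (added).  Builds a small ensemble from the
# tutorial Seismogram and checks the linear moveout applied above.
def example_makeseisens_moveout():
    d = makeseis()
    ens = makeseisens(d, n=10, moveout_dt=0.05)
    assert len(ens.member) == 10
    # member i starts moveout_dt * i later than the original
    assert np.isclose(ens.member[3].t0 - d.t0, 3 * 0.05)
    return ens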
def convolve_wavelet(d, w):
    """
    Convolves wavelet w with the 3C data stored in Seismogram object d to
    create simulated data d*w.  Returns a copy of d with the data matrix
    replaced by the convolved data.
    """
    dsim = Seismogram(d)
    # We use the scipy convolution routine, which requires we copy the
    # data out of the d.data container one channel at a time.
    wavelet = []
    n = w.npts
    for i in range(n):
        wavelet.append(w.data[i])
    for k in range(3):
        work = []
        n = d.npts
        for i in range(n):
            work.append(d.data[k, i])
        # Warning:  it is important to put work first and wavelet second
        # in this call or the timing will be foobarred
        work = signal.convolve(work, wavelet)
        for i in range(n):
            dsim.data[k, i] = work[i]
    return dsim
def addnoise(d, nscale=1.0, padlength=1024, npoles=3, corners=[0.1, 1.0]):
    """
    Helper function to add noise to Seismogram d.  The approach is a
    little weird in that we shift the data to the right by padlength,
    adding filtered random data to the front of the signal.  The padding
    is compensated by changes to t0 to preserve relative time 0.0.  The
    purpose of this is to allow adding colored noise to a simulated 3C
    seismogram.

    :param d: Seismogram data to which noise is to be added and padded
    :param nscale: noise scale for gaussian normal noise
    :param padlength: data padded on front by this many samples of noise
    :param npoles: number of poles for the Butterworth filter
    :param corners: 2 component array with corner frequencies for the
      Butterworth bandpass
    """
    nd = d.npts
    n = nd + padlength
    result = Seismogram(d)
    result.set_npts(n)
    newt0 = d.t0 - d.dt * padlength
    result.set_t0(newt0)
    # At the time this code was written we had a hole in the ccore api
    # wherein operator+ and operator+= were not defined.  Hence in this
    # loop we stack the noise and signal inline.
    for k in range(3):
        dnoise = nscale * randn(n)
        sos = signal.butter(npoles, corners, btype='bandpass',
                            output='sos', fs=20.0)
        nfilt = signal.sosfilt(sos, dnoise)
        for i in range(n):
            result.data[k, i] = nfilt[i]
        for i in range(nd):
            t = d.time(i)
            ii = result.sample_number(t)
            # We don't test the range here because we know we won't go
            # outside bounds because of the logic of this function
            result.data[k, ii] += d.data[k, i]
    return result
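# End-to-end sketch (added) chaining the helpers above:  simulate a source
# wavelet, convolve it with the tutorial data, then add colored noise.
# Assumes rickerwave and setbasics, which makeseis already requires, are
# importable in this module.
def example_simulation_pipeline():
    dsig = makeseis()
    w = TimeSeries()
    setbasics(w, 200)
    wy = rickerwave(4.0, 0.005)
    # copy the wavelet samples into the TimeSeries container
    for i in range(min(len(wy), w.npts)):
        w.data[i] = wy[i]
    dsim = convolve_wavelet(dsig, w)
    return addnoise(dsim, nscale=0.1)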
def _deepcopy(self, d):
    """
    Private helper method for immediately above.  Necessary because
    copy.deepcopy doesn't work with our pybind11 wrappers.  There may be a
    fix, but for now we have to use copy constructors specific to each
    object type.
    """
    if isinstance(d, TimeSeries):
        return TimeSeries(d)
    elif isinstance(d, Seismogram):
        return Seismogram(d)
    elif isinstance(d, TimeSeriesEnsemble):
        return TimeSeriesEnsemble(d)
    elif isinstance(d, SeismogramEnsemble):
        return SeismogramEnsemble(d)
    else:
        raise RuntimeError(
            "SeismicPlotter._deepcopy:  received an unsupported data type={}".format(
                type(d)))
def test_Ensemble(Ensemble):
    md = Metadata()
    md["double"] = 3.14
    md["bool"] = True
    md["long"] = 7
    es = Ensemble(md, 3)
    if isinstance(es, TimeSeriesEnsemble):
        d = TimeSeries(10)
        d = make_constant_data_ts(d)
        es.member.append(d)
        es.member.append(d)
        es.member.append(d)
    else:
        d = Seismogram(10)
        d = make_constant_data_seis(d)
        es.member.append(d)
        es.member.append(d)
        es.member.append(d)
    # new method for LoggingEnsemble needed because the default is dead
    es.set_live()
    es.sync_metadata(["double", "long"])
    assert es.member[0].is_defined("bool")
    assert es.member[0]["bool"] == True
    assert not es.member[0].is_defined("double")
    assert not es.member[0].is_defined("long")
    es.sync_metadata()
    assert es.member[1].is_defined("double")
    assert es.member[1].is_defined("long")
    assert es.member[1]["double"] == 3.14
    assert es.member[1]["long"] == 7
    es.update_metadata(Metadata({"k": "v"}))
    assert es["k"] == "v"
    # From here on we test features not in CoreEnsemble but only in
    # LoggingEnsemble.  Note that we use pybind11 aliasing to define
    # TimeSeriesEnsemble == LoggingEnsemble<TimeSeries> and
    # SeismogramEnsemble == LoggingEnsemble<Seismogram>.
    # Should be initially marked live
    assert es.live()
    es.elog.log_error("test_ensemble", "test complaint",
                      ErrorSeverity.Complaint)
    es.elog.log_error("test_ensemble", "test invalid", ErrorSeverity.Invalid)
    assert es.elog.size() == 2
    assert es.live()
    es.kill()
    assert es.dead()
    # resurrect es
    es.set_live()
    assert es.live()
    # validate checks for any live members - this tests that feature
    assert es.validate()
    # need this temporary copy for the next test
    if isinstance(es, TimeSeriesEnsemble):
        escopy = TimeSeriesEnsemble(es)
    else:
        escopy = SeismogramEnsemble(es)
    for d in escopy.member:
        d.kill()
    assert not escopy.validate()
    # Reuse escopy for the pickle test
    escopy = pickle.loads(pickle.dumps(es))
    assert escopy.is_defined("bool")
    assert escopy["bool"] == True
    assert escopy.is_defined("double")
    assert escopy.is_defined("long")
    assert escopy["double"] == 3.14
    assert escopy["long"] == 7
    assert escopy.live()
    assert escopy.elog.size() == 2
    assert escopy.member[0].is_defined("bool")
    assert escopy.member[0]["bool"] == True
    assert escopy.member[0].is_defined("double")
    assert escopy.member[0].is_defined("long")
    assert es.member[1].is_defined("double")
    assert es.member[1].is_defined("long")
    assert es.member[1]["double"] == 3.14
    assert es.member[1]["long"] == 7
    if isinstance(es, TimeSeriesEnsemble):
        assert es.member[1].data == escopy.member[1].data
    else:
        assert (es.member[1].data[:] == escopy.member[1].data[:]).all()
def test_operators():
    d = _CoreTimeSeries(10)
    d1 = make_constant_data_ts(d, nsamp=10)
    dsave = _CoreTimeSeries(d1)
    d = _CoreTimeSeries(6)
    d2 = make_constant_data_ts(d, t0=-0.2, nsamp=6, val=2.0)
    dsave = _CoreTimeSeries(d1)
    d1 += d2
    assert np.allclose(d1.data, [3, 3, 3, 3, 1, 1, 1, 1, 1, 1])
    d1 = _CoreTimeSeries(dsave)
    d = d1 + d2
    assert np.allclose(d.data, [3, 3, 3, 3, 1, 1, 1, 1, 1, 1])
    d1 = _CoreTimeSeries(dsave)
    d1 *= 2.5
    assert np.allclose(d1.data,
                       [2.5, 2.5, 2.5, 2.5, 2.5, 2.5, 2.5, 2.5, 2.5, 2.5])
    d3 = TimeSeries(10)
    d4 = TimeSeries(6)
    d3 = make_constant_data_ts(d3, nsamp=10)
    d4 = make_constant_data_ts(d4, t0=-0.2, nsamp=6, val=2.0)
    dsave = _CoreTimeSeries(d3)
    d3 = TimeSeries(dsave)
    d3 += d4
    assert np.allclose(d3.data, [3, 3, 3, 3, 1, 1, 1, 1, 1, 1])
    d3 = TimeSeries(dsave)
    d = d3 + d4
    assert np.allclose(d.data, [3, 3, 3, 3, 1, 1, 1, 1, 1, 1])
    d1 = _CoreTimeSeries(dsave)
    d3 = TimeSeries(dsave)
    d3 *= 2.5
    assert np.allclose(d3.data,
                       [2.5, 2.5, 2.5, 2.5, 2.5, 2.5, 2.5, 2.5, 2.5, 2.5])
    x = np.linspace(-0.7, 1.2, 20)
    for t in x:
        d3 = TimeSeries(dsave)
        d4.t0 = t
        d3 += d4
    # These are selected asserts of the incremental test above.  Visually
    # d4 moves through d3 as the t0 values advance.  The asserts test the
    # end members:  skewed left, inside, and skewed right.
    d3 = TimeSeries(dsave)
    d4.t0 = -0.7  # no overlap test
    d3 += d4
    assert np.allclose(d3.data, [1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
    d3 = TimeSeries(dsave)
    d4.t0 = -0.3  # overlap left
    d3 += d4
    assert np.allclose(d3.data, [3, 3, 3, 1, 1, 1, 1, 1, 1, 1])
    d3 = TimeSeries(dsave)
    d4.t0 = 0.3  # d4 inside d3 test
    d3 += d4
    assert np.allclose(d3.data, [1, 1, 1, 3, 3, 3, 3, 3, 3, 1])
    d3 = TimeSeries(dsave)
    d4.t0 = 0.7  # partial overlap right
    d3 += d4
    assert np.allclose(d3.data, [1, 1, 1, 1, 1, 1, 1, 3, 3, 3])
    d3 = TimeSeries(dsave)
    d4.t0 = 1.0  # no overlap test right
    d3 += d4
    assert np.allclose(d3.data, [1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
    # Repeat the same test for Seismogram objects.  This section is an
    # edited cut-paste of the above.  Intentionally do not test
    # _CoreSeismogram directly because currently if it works for
    # Seismogram it will for _CoreSeismogram.
    d = _CoreSeismogram(10)
    d1 = make_constant_data_seis(d, nsamp=10)
    dsave = _CoreSeismogram(d1)
    d = _CoreSeismogram(6)
    d2 = make_constant_data_seis(d, t0=-0.2, nsamp=6, val=2.0)
    dsave = _CoreSeismogram(d1)
    d1 += d2
    assert np.allclose(
        d1.data,
        np.array([
            [3.0, 3.0, 3.0, 3.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
            [3.0, 3.0, 3.0, 3.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
            [3.0, 3.0, 3.0, 3.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
        ]),
    )
    d1 = _CoreSeismogram(dsave)
    d = d1 + d2
    assert np.allclose(
        d.data,
        np.array([
            [3.0, 3.0, 3.0, 3.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
            [3.0, 3.0, 3.0, 3.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
            [3.0, 3.0, 3.0, 3.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
        ]),
    )
    d1 = _CoreSeismogram(dsave)
    d1 *= 2.5
    assert np.allclose(
        d1.data,
        np.array([
            [2.5, 2.5, 2.5, 2.5, 2.5, 2.5, 2.5, 2.5, 2.5, 2.5],
            [2.5, 2.5, 2.5, 2.5, 2.5, 2.5, 2.5, 2.5, 2.5, 2.5],
            [2.5, 2.5, 2.5, 2.5, 2.5, 2.5, 2.5, 2.5, 2.5, 2.5],
        ]),
    )
    d3 = Seismogram(10)
    d4 = Seismogram(6)
    d3 = make_constant_data_seis(d3, nsamp=10)
    d4 = make_constant_data_seis(d4, t0=-0.2, nsamp=6, val=2.0)
    dsave = Seismogram(d3)
    d3 += d4
    assert np.allclose(
        d3.data,
        np.array([
            [3.0, 3.0, 3.0, 3.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
            [3.0, 3.0, 3.0, 3.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
            [3.0, 3.0, 3.0, 3.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
        ]),
    )
    d3 = Seismogram(dsave)
    d = d3 + d4
    assert np.allclose(
        d.data,
        np.array([
            [3.0, 3.0, 3.0, 3.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
            [3.0, 3.0, 3.0, 3.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
            [3.0, 3.0, 3.0, 3.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
        ]),
    )
    d3 = Seismogram(dsave)
    d3 *= 2.5
    assert np.allclose(
        d3.data,
        np.array([
            [2.5, 2.5, 2.5, 2.5, 2.5, 2.5, 2.5, 2.5, 2.5, 2.5],
            [2.5, 2.5, 2.5, 2.5, 2.5, 2.5, 2.5, 2.5, 2.5, 2.5],
            [2.5, 2.5, 2.5, 2.5, 2.5, 2.5, 2.5, 2.5, 2.5, 2.5],
        ]),
    )
    x = np.linspace(-0.7, 1.2, 20)
    for t in x:
        d3 = Seismogram(dsave)
        d4.t0 = t
        d3 += d4
    # These are selected asserts of the incremental test above.  Visually
    # d4 moves through d3 as the t0 values advance.  The asserts test the
    # end members:  skewed left, inside, and skewed right.
    d3 = Seismogram(dsave)
    d4.t0 = -0.7  # no overlap test
    d3 += d4
    assert np.allclose(
        d3.data,
        np.array([
            [1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
            [1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
            [1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
        ]),
    )
    d3 = Seismogram(dsave)
    d4.t0 = -0.3  # overlap left
    d3 += d4
    assert np.allclose(
        d3.data,
        np.array([
            [3, 3, 3, 1, 1, 1, 1, 1, 1, 1],
            [3, 3, 3, 1, 1, 1, 1, 1, 1, 1],
            [3, 3, 3, 1, 1, 1, 1, 1, 1, 1],
        ]),
    )
    d3 = Seismogram(dsave)
    d4.t0 = 0.3  # d4 inside d3 test
    d3 += d4
    assert np.allclose(
        d3.data,
        np.array([
            [1, 1, 1, 3, 3, 3, 3, 3, 3, 1],
            [1, 1, 1, 3, 3, 3, 3, 3, 3, 1],
            [1, 1, 1, 3, 3, 3, 3, 3, 3, 1],
        ]),
    )
    d3 = Seismogram(dsave)
    d4.t0 = 0.7  # partial overlap right
    d3 += d4
    assert np.allclose(
        d3.data,
        np.array([
            [1, 1, 1, 1, 1, 1, 1, 3, 3, 3],
            [1, 1, 1, 1, 1, 1, 1, 3, 3, 3],
            [1, 1, 1, 1, 1, 1, 1, 3, 3, 3],
        ]),
    )
    d3 = Seismogram(dsave)
    d4.t0 = 1.0  # no overlap test right
    d3 += d4
    assert np.allclose(
        d3.data,
        np.array([
            [1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
            [1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
            [1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
        ]),
    )
    # Repeat exactly for the - test, but with different numeric results;
    # just omit the *= tests.
    d = _CoreTimeSeries(10)
    d1 = make_constant_data_ts(d, nsamp=10)
    dsave = _CoreTimeSeries(d1)
    d = _CoreTimeSeries(6)
    d2 = make_constant_data_ts(d, t0=-0.2, nsamp=6, val=2.0)
    dsave = _CoreTimeSeries(d1)
    d1 -= d2
    assert np.allclose(d1.data, [-1, -1, -1, -1, 1, 1, 1, 1, 1, 1])
    d1 = _CoreTimeSeries(dsave)
    d = d1 - d2
    assert np.allclose(d.data, [-1, -1, -1, -1, 1, 1, 1, 1, 1, 1])
    d3 = TimeSeries(10)
    d4 = TimeSeries(6)
    d3 = make_constant_data_ts(d3, nsamp=10)
    d4 = make_constant_data_ts(d4, t0=-0.2, nsamp=6, val=2.0)
    dsave = _CoreTimeSeries(d3)
    d3 = TimeSeries(dsave)
    d3 -= d4
    assert np.allclose(d3.data, [-1, -1, -1, -1, 1, 1, 1, 1, 1, 1])
    d3 = TimeSeries(dsave)
    d = d3 - d4
    assert np.allclose(d.data, [-1, -1, -1, -1, 1, 1, 1, 1, 1, 1])
    x = np.linspace(-0.7, 1.2, 20)
    for t in x:
        d3 = TimeSeries(dsave)
        d4.t0 = t
        d3 -= d4
    # Selected asserts of the incremental test above, with the same
    # end-member logic as the += section.
    d3 = TimeSeries(dsave)
    d4.t0 = -0.7  # no overlap test
    d3 -= d4
    assert np.allclose(d3.data, [1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
    d3 = TimeSeries(dsave)
    d4.t0 = -0.3  # overlap left
    d3 -= d4
    assert np.allclose(d3.data, [-1, -1, -1, 1, 1, 1, 1, 1, 1, 1])
    d3 = TimeSeries(dsave)
    d4.t0 = 0.3  # d4 inside d3 test
    d3 -= d4
    assert np.allclose(d3.data, [1, 1, 1, -1, -1, -1, -1, -1, -1, 1])
    d3 = TimeSeries(dsave)
    d4.t0 = 0.7  # partial overlap right
    d3 -= d4
    assert np.allclose(d3.data, [1, 1, 1, 1, 1, 1, 1, -1, -1, -1])
    d3 = TimeSeries(dsave)
    d4.t0 = 1.0  # no overlap test right
    d3 -= d4
    assert np.allclose(d3.data, [1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
    # Repeat the same test for Seismogram objects.  This section is an
    # edited cut-paste of the above.  Intentionally do not test
    # _CoreSeismogram directly because currently if it works for
    # Seismogram it will for _CoreSeismogram.
    d = _CoreSeismogram(10)
    d1 = make_constant_data_seis(d, nsamp=10)
    dsave = _CoreSeismogram(d1)
    d = _CoreSeismogram(6)
    d2 = make_constant_data_seis(d, t0=-0.2, nsamp=6, val=2.0)
    dsave = _CoreSeismogram(d1)
    d1 -= d2
    assert np.allclose(
        d1.data,
        np.array([
            [-1.0, -1.0, -1.0, -1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
            [-1.0, -1.0, -1.0, -1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
            [-1.0, -1.0, -1.0, -1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
        ]),
    )
    d1 = _CoreSeismogram(dsave)
    d = d1 - d2
    assert np.allclose(
        d.data,
        np.array([
            [-1.0, -1.0, -1.0, -1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
            [-1.0, -1.0, -1.0, -1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
            [-1.0, -1.0, -1.0, -1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
        ]),
    )
    d3 = Seismogram(10)
    d4 = Seismogram(6)
    d3 = make_constant_data_seis(d3, nsamp=10)
    d4 = make_constant_data_seis(d4, t0=-0.2, nsamp=6, val=2.0)
    dsave = Seismogram(d3)
    d3 -= d4
    assert np.allclose(
        d3.data,
        np.array([
            [-1.0, -1.0, -1.0, -1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
            [-1.0, -1.0, -1.0, -1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
            [-1.0, -1.0, -1.0, -1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
        ]),
    )
    d3 = Seismogram(dsave)
    d = d3 - d4
    assert np.allclose(
        d.data,
        np.array([
            [-1.0, -1.0, -1.0, -1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
            [-1.0, -1.0, -1.0, -1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
            [-1.0, -1.0, -1.0, -1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
        ]),
    )
    x = np.linspace(-0.7, 1.2, 20)
    for t in x:
        d3 = Seismogram(dsave)
        d4.t0 = t
        d3 -= d4
    # Selected asserts of the incremental test above, with the same
    # end-member logic as the += section.
    d3 = Seismogram(dsave)
    d4.t0 = -0.7  # no overlap test
    d3 -= d4
    assert np.allclose(
        d3.data,
        np.array([
            [1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
            [1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
            [1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
        ]),
    )
    d3 = Seismogram(dsave)
    d4.t0 = -0.3  # overlap left
    d3 -= d4
    assert np.allclose(
        d3.data,
        np.array([
            [-1, -1, -1, 1, 1, 1, 1, 1, 1, 1],
            [-1, -1, -1, 1, 1, 1, 1, 1, 1, 1],
            [-1, -1, -1, 1, 1, 1, 1, 1, 1, 1],
        ]),
    )
    d3 = Seismogram(dsave)
    d4.t0 = 0.3  # d4 inside d3 test
    d3 -= d4
    assert np.allclose(
        d3.data,
        np.array([
            [1, 1, 1, -1, -1, -1, -1, -1, -1, 1],
            [1, 1, 1, -1, -1, -1, -1, -1, -1, 1],
            [1, 1, 1, -1, -1, -1, -1, -1, -1, 1],
        ]),
    )
    d3 = Seismogram(dsave)
    d4.t0 = 0.7  # partial overlap right
    d3 -= d4
    assert np.allclose(
        d3.data,
        np.array([
            [1, 1, 1, 1, 1, 1, 1, -1, -1, -1],
            [1, 1, 1, 1, 1, 1, 1, -1, -1, -1],
            [1, 1, 1, 1, 1, 1, 1, -1, -1, -1],
        ]),
    )
    d3 = Seismogram(dsave)
    d4.t0 = 1.0  # no overlap test right
    d3 -= d4
    assert np.allclose(
        d3.data,
        np.array([
            [1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
            [1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
            [1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
        ]),
    )
def test_transform():
    seis = Seismogram()
    seis.npts = 100
    seis.t0 = 0.0
    seis.dt = 0.001
    seis.live = 1
    for i in range(3):
        for j in range(100):
            if i == 0:
                seis.data[i, j] = 1.0
            else:
                seis.data[i, j] = 0.0
    seis.data[0, 1] = 1.0
    seis.data[0, 2] = 1.0
    seis.data[0, 3] = 0.0
    seis.data[1, 1] = 1.0
    seis.data[1, 2] = 1.0
    seis.data[1, 3] = 0.0
    seis.data[2, 1] = 1.0
    seis.data[2, 2] = 0.0
    seis.data[2, 3] = 1.0
    a = np.zeros((3, 3))
    a[0][0] = 1.0
    a[0][1] = 1.0
    a[0][2] = 1.0
    a[1][0] = -1.0
    a[1][1] = 1.0
    a[1][2] = 1.0
    a[2][0] = 0.0
    a[2][1] = -1.0
    a[2][2] = 0.0
    seis1 = transform(seis, a)
    assert all(np.isclose(seis1.data[:, 0], [1, -1, 0]))
    assert all(np.isclose(seis1.data[:, 1], [3, 1, -1]))
    assert all(np.isclose(seis1.data[:, 2], [2, 0, -1]))
    assert all(np.isclose(seis1.data[:, 3], [1, 1, 0]))
    seis2 = rotate_to_standard(seis1)
    assert all(np.isclose(seis2.data[:, 0], [1, 0, 0]))
    assert all(np.isclose(seis2.data[:, 1], [1, 1, 1]))
    assert all(np.isclose(seis2.data[:, 2], [1, 1, 0]))
    assert all(np.isclose(seis2.data[:, 3], [0, 0, 1]))
    uvec = SlownessVector()
    uvec.ux = 0.17085  # cos(-20deg)/5.5
    uvec.uy = -0.062185  # sin(-20deg)/5.5
    seis3 = free_surface_transformation(seis2, uvec, 5.0, 3.5)
    assert (np.isclose(
        seis3.tmatrix,
        np.array([
            [-0.171012, -0.469846, 0],
            [0.115793, -0.0421458, 0.445447],
            [-0.597975, 0.217647, 0.228152],
        ]),
    )).all()
    # test with an invalid uvec, but in-place return
    seis4 = free_surface_transformation(seis2,
                                        SlownessVector(1.0, 1.0, 0.0),
                                        5.0, 3.5)
    assert seis4
def test_Seismogram():
    seis = Seismogram()
    seis.npts = 100
    assert seis.data.rows() == 3
    assert seis.data.columns() == 100
    seis.t0 = 0.0
    seis.dt = 0.001
    seis.live = 1
    seis.tref = TimeReferenceType.Relative
    seis.data = dmatrix(np.random.rand(3, 6))
    assert seis.npts != 6
    seis.sync_npts()
    assert seis.npts == 6
    assert seis.npts == seis["npts"]
    seis.npts = 4
    assert seis.data.columns() == 4
    seis.npts = 10
    assert (seis.data[0:3, 4:10] == 0).all()
    seis.data = dmatrix(np.random.rand(3, 100))
    seis.sync_npts()
    seis_copy = pickle.loads(pickle.dumps(seis))
    assert seis_copy.t0 == seis.t0
    assert seis_copy.dt == seis.dt
    assert seis_copy.live == seis.live
    assert seis_copy.tref == seis.tref
    assert (seis_copy.data[:] == seis.data[:]).all()
    # test the += operator
    seis1 = Seismogram(seis)
    seis2 = Seismogram(seis)
    seis1 += seis2
    assert (np.isclose(seis1.data[:], seis.data + seis.data)).all()
    seis.npts = 0
    assert seis.data.rows() == 0
    seis.npts = 100
    for i in range(3):
        for j in range(100):
            if i == 0:
                seis.data[i, j] = 1.0
            else:
                seis.data[i, j] = 0.0
    seis.data[0, 1] = 1.0
    seis.data[0, 2] = 1.0
    seis.data[0, 3] = 0.0
    seis.data[1, 1] = 1.0
    seis.data[1, 2] = 1.0
    seis.data[1, 3] = 0.0
    seis.data[2, 1] = 1.0
    seis.data[2, 2] = 0.0
    seis.data[2, 3] = 1.0
    sc = SphericalCoordinate()
    sc.phi = 0.0
    sc.theta = np.pi / 4
    seis.rotate(sc)
    assert all(np.isclose(seis.data[:, 3], [0, -0.707107, 0.707107]))
    seis.rotate_to_standard()
    assert all(seis.data[:, 3] == [0, 0, 1])
    sc.phi = -np.pi / 4
    seis.data[:, 3] = sc.unit_vector
    seis.rotate(sc)
    assert all(seis.data[:, 3] == [0, 0, 1])
    seis.rotate_to_standard()
    assert all(np.isclose(seis.data[:, 3], [0.5, -0.5, 0.707107]))
    seis.data[:, 3] = [0, 0, 1]
    nu = [np.sqrt(3.0) / 3.0, np.sqrt(3.0) / 3.0, np.sqrt(3.0) / 3.0]
    seis.rotate(nu)
    assert (np.isclose(
        seis.tmatrix,
        np.array([
            [0.70710678, -0.70710678, 0.0],
            [0.40824829, 0.40824829, -0.81649658],
            [0.57735027, 0.57735027, 0.57735027],
        ]),
    )).all()
    assert all(np.isclose(seis.data[:, 0], [0.707107, 0.408248, 0.57735]))
    assert all(np.isclose(seis.data[:, 1], [0, 0, 1.73205]))
    seis.rotate_to_standard()
    assert all(np.isclose(seis.data[:, 0], [1, 0, 0]))
    assert all(np.isclose(seis.data[:, 1], [1, 1, 1]))
    nu = [np.sqrt(3.0) / 3.0, np.sqrt(3.0) / 3.0, np.sqrt(3.0) / 3.0]
    seis.rotate(SphericalCoordinate(nu))
    assert (np.isclose(
        seis.tmatrix,
        np.array([
            [0.70710678, -0.70710678, 0.0],
            [0.40824829, 0.40824829, -0.81649658],
            [0.57735027, 0.57735027, 0.57735027],
        ]),
    )).all()
    assert all(np.isclose(seis.data[:, 0], [0.707107, 0.408248, 0.57735]))
    assert all(np.isclose(seis.data[:, 1], [0, 0, 1.73205]))
    seis.rotate_to_standard()
    assert all(np.isclose(seis.data[:, 0], [1, 0, 0]))
    assert all(np.isclose(seis.data[:, 1], [1, 1, 1]))
    sc.phi = np.pi / 4
    sc.theta = 0.0
    seis.rotate(sc)
    assert (np.isclose(
        seis.tmatrix,
        np.array([
            [0.70710678, -0.70710678, 0.0],
            [0.70710678, 0.70710678, 0.0],
            [0.0, 0.0, 1.0],
        ]),
    )).all()
    assert all(np.isclose(seis.data[:, 0], [0.707107, 0.707107, 0]))
    assert all(np.isclose(seis.data[:, 1], [0, 1.41421, 1]))
    assert all(np.isclose(seis.data[:, 2], [0, 1.41421, 0]))
    assert all(np.isclose(seis.data[:, 3], [0, 0, 1]))
    seis.rotate_to_standard()
    # test for serialization of SphericalCoordinate
    sc_copy = pickle.loads(pickle.dumps(sc))
    assert sc_copy.radius == sc.radius
    assert sc_copy.theta == sc.theta
    assert sc_copy.phi == sc.phi
    a = np.zeros((3, 3))
    a[0][0] = 1.0
    a[0][1] = 1.0
    a[0][2] = 1.0
    a[1][0] = -1.0
    a[1][1] = 1.0
    a[1][2] = 1.0
    a[2][0] = 0.0
    a[2][1] = -1.0
    a[2][2] = 0.0
    seis.transform(a)
    assert all(np.isclose(seis.data[:, 0], [1, -1, 0]))
    assert all(np.isclose(seis.data[:, 1], [3, 1, -1]))
    assert all(np.isclose(seis.data[:, 2], [2, 0, -1]))
    assert all(np.isclose(seis.data[:, 3], [1, 1, 0]))
    seis_copy = pickle.loads(pickle.dumps(seis))
    seis_copy.rotate_to_standard()
    assert all(np.isclose(seis_copy.data[:, 0], [1, 0, 0]))
    assert all(np.isclose(seis_copy.data[:, 1], [1, 1, 1]))
    assert all(np.isclose(seis_copy.data[:, 2], [1, 1, 0]))
    assert all(np.isclose(seis_copy.data[:, 3], [0, 0, 1]))
    seis.rotate_to_standard()
    seis.rotate(np.pi / 4)
    seis.transform(a)
    assert (np.isclose(
        seis.tmatrix,
        np.array([
            [1.41421, 0.0, 1],
            [0.0, 1.41421, 1],
            [-0.707107, -0.707107, 0],
        ]),
    )).all()
    assert all(np.isclose(seis.data[:, 0], [1.41421, 0, -0.707107]))
    assert all(np.isclose(seis.data[:, 1], [2.41421, 2.41421, -1.41421]))
    assert all(np.isclose(seis.data[:, 2], [1.41421, 1.41421, -1.41421]))
    assert all(np.isclose(seis.data[:, 3], [1, 1, 0]))
    seis.rotate_to_standard()
    assert all(np.isclose(seis.data[:, 0], [1, 0, 0]))
    assert all(np.isclose(seis.data[:, 1], [1, 1, 1]))
    assert all(np.isclose(seis.data[:, 2], [1, 1, 0]))
    assert all(np.isclose(seis.data[:, 3], [0, 0, 1]))
    uvec = SlownessVector()
    uvec.ux = 0.17085  # cos(-20deg)/5.5
    uvec.uy = -0.062185  # sin(-20deg)/5.5
    seis.free_surface_transformation(uvec, 5.0, 3.5)
    assert (np.isclose(
        seis.tmatrix,
        np.array([
            [-0.171012, -0.469846, 0],
            [0.115793, -0.0421458, 0.445447],
            [-0.597975, 0.217647, 0.228152],
        ]),
    )).all()
    seis.tmatrix = a
    assert (seis.tmatrix == a).all()
    # test for serialization of SlownessVector
    uvec_copy = pickle.loads(pickle.dumps(uvec))
    assert uvec_copy.ux == uvec.ux
    assert uvec_copy.uy == uvec.uy
    assert uvec_copy.azimuth() == uvec.azimuth()
def setup_function(function):
    ts_size = 255
    sampling_rate = 20.0
    function.dict1 = {
        "network": "IU",
        "station": "ANMO",
        "starttime": obspy.UTCDateTime(2019, 12, 31, 23, 59, 59, 915000),
        "npts": ts_size,
        "sampling_rate": sampling_rate,
        "channel": "BHE",
        "live": True,
        "_id": bson.objectid.ObjectId(),
        "jdate": obspy.UTCDateTime(2019, 12, 31, 23, 59, 59, 915000),
        "date_str": obspy.UTCDateTime(2019, 12, 31, 23, 59, 59, 915000),
        "not_defined_date": obspy.UTCDateTime(2019, 12, 31, 23, 59, 59, 915000),
    }
    function.dict2 = {
        "network": "IU",
        "station": "ANMO",
        "starttime": obspy.UTCDateTime(2019, 12, 31, 23, 59, 59, 915000),
        "npts": ts_size,
        "sampling_rate": sampling_rate,
        "channel": "BHN",
    }
    function.dict3 = {
        "network": "IU",
        "station": "ANMO",
        "starttime": obspy.UTCDateTime(2019, 12, 31, 23, 59, 59, 915000),
        "npts": ts_size,
        "sampling_rate": sampling_rate,
        "channel": "BHZ",
    }
    function.tr1 = obspy.Trace(data=np.random.randint(0, 1000, ts_size),
                               header=function.dict1)
    function.tr2 = obspy.Trace(data=np.random.randint(0, 1000, ts_size),
                               header=function.dict2)
    function.tr3 = obspy.Trace(data=np.random.randint(0, 1000, ts_size),
                               header=function.dict3)
    function.stream = obspy.Stream(
        traces=[function.tr1, function.tr2, function.tr3])
    function.md1 = Metadata()
    function.md1.put("network", "IU")
    function.md1.put("npts", ts_size)
    function.md1.put("sampling_rate", sampling_rate)
    function.md1.put("live", True)
    function.ts1 = TimeSeries()
    function.ts1.data = DoubleVector(np.random.rand(ts_size))
    function.ts1.live = True
    function.ts1.dt = 1 / sampling_rate
    function.ts1.t0 = 0
    function.ts1.npts = ts_size
    # TODO: need to bind the constructor that can do TimeSeries(md1)
    function.ts1.put("net", "IU")
    function.ts1.put("npts", ts_size)
    function.ts1.put("sampling_rate", sampling_rate)
    function.seismogram = Seismogram()
    # TODO: the default of seismogram.tref is UTC, which is inconsistent
    # with the default for TimeSeries()
    # TODO: It would be nice to have dmatrix support numpy.ndarray as input
    function.seismogram.data = dmatrix(3, ts_size)
    for i in range(3):
        for j in range(ts_size):
            function.seismogram.data[i, j] = np.random.rand()
    function.seismogram.live = True
    function.seismogram.dt = 1 / sampling_rate
    function.seismogram.t0 = 0
    function.seismogram.npts = ts_size
    # FIXME: if the following key is network, Seismogram2Stream will error
    # out when calling TimeSeries2Trace internally due to the issue where
    # mdef.is_defined(k) returns True but k is an alias, so the
    # mdef.type(k) call will error out.
    function.seismogram.put("net", "IU")
    function.seismogram.put("npts", ts_size)
    function.seismogram.put("sampling_rate", sampling_rate)
def test_apply_aliases(self):
    ss = Seismogram()
    alias_dic = {"delta": "d", "npts": "n", "starttime": "s"}
    self.mdschema.Seismogram.apply_aliases(ss, alias_dic)
    assert not ss.is_defined("delta")
    assert not ss.is_defined("npts")
    assert not ss.is_defined("starttime")
    assert ss.is_defined("d")
    assert ss.is_defined("n")
    assert ss.is_defined("s")
    assert self.mdschema.Seismogram.unique_name("d") == "delta"
    assert self.mdschema.Seismogram.unique_name("n") == "npts"
    assert self.mdschema.Seismogram.unique_name("s") == "starttime"
    self.mdschema.Seismogram.clear_aliases(ss)
    assert ss.is_defined("delta")
    assert ss.is_defined("npts")
    assert ss.is_defined("starttime")
    self.mdschema.Seismogram.apply_aliases(ss, "python/tests/data/alias.yaml")
    assert not ss.is_defined("delta")
    assert not ss.is_defined("npts")
    assert not ss.is_defined("starttime")
    assert ss.is_defined("dd")
    assert ss.is_defined("nn")
    assert ss.is_defined("ss")
    with pytest.raises(MsPASSError, match="is not recognized"):
        self.mdschema.Seismogram.apply_aliases(ss, 123)
def test_scale():
    dts = _CoreTimeSeries(9)
    setbasics(dts, 9)
    d3c = _CoreSeismogram(5)
    setbasics(d3c, 5)
    dts.data[0] = 3.0
    dts.data[1] = 2.0
    dts.data[2] = -4.0
    dts.data[3] = 1.0
    dts.data[4] = -100.0
    dts.data[5] = -1.0
    dts.data[6] = 5.0
    dts.data[7] = 1.0
    dts.data[8] = -6.0
    # MAD of the above should be 2
    # perf of 0.8 should be 4
    # rms should be just over 10=10.010993957
    print('Starting tests for time series data of amplitude functions')
    ampmad = MADAmplitude(dts)
    print('MAD amplitude estimate=', ampmad)
    assert ampmad == 3.0
    amprms = RMSAmplitude(dts)
    print('RMS amplitude estimate=', amprms)
    assert round(amprms, 2) == 100.46
    amppeak = PeakAmplitude(dts)
    ampperf80 = PerfAmplitude(dts, 0.8)
    print('Peak amplitude=', amppeak)
    print('80% clip level amplitude=', ampperf80)
    assert amppeak == 100.0
    assert ampperf80 == 6.0
    print('Starting comparable tests for 3c data')
    d3c.data[0, 0] = 3.0
    d3c.data[0, 1] = 2.0
    d3c.data[1, 2] = -4.0
    d3c.data[2, 3] = 1.0
    d3c.data[0, 4] = np.sqrt(2) * (100.0)
    d3c.data[1, 4] = -np.sqrt(2) * (100.0)
    ampmad = MADAmplitude(d3c)
    print('MAD amplitude estimate=', ampmad)
    amprms = RMSAmplitude(d3c)
    print('RMS amplitude estimate=', amprms)
    amppeak = PeakAmplitude(d3c)
    ampperf60 = PerfAmplitude(d3c, 0.6)
    print('Peak amplitude=', amppeak)
    print('60% clip level amplitude=', ampperf60)
    assert amppeak == 200.0
    assert ampperf60 == 4.0
    assert ampmad == 3.0
    amptest = round(amprms, 2)
    assert amptest == 89.48
    print('Trying scaling functions for TimeSeries')
    # we need a deep copy here since scaling changes the data
    d2 = TimeSeries(dts)
    amp = _scale(d2, ScalingMethod.Peak, 1.0)
    print('Computed peak amplitude=', amp)
    print(d2.data)
    d2 = TimeSeries(dts)
    amp = _scale(d2, ScalingMethod.Peak, 10.0)
    print('Computed peak amplitude with peak set to 10=', amp)
    print(d2.data)
    assert amp == 100.0
    assert d2.data[4] == -10.0
    print('verifying scale has modified and set calib correctly')
    calib = d2.get_double('calib')
    assert calib == 10.0
    d2 = TimeSeries(dts)
    d2.put('calib', 6.0)
    print('test 2 with MAD metric and initial calib of 6')
    amp = _scale(d2, ScalingMethod.MAD, 1.0)
    calib = d2.get_double('calib')
    print('New calib value set=', calib)
    assert calib == 18.0
    print('Testing 3C scale functions')
    d = Seismogram(d3c)
    amp = _scale(d, ScalingMethod.Peak, 1.0)
    print('Peak amplitude returned by scale function=', amp)
    calib = d.get_double('calib')
    print('Calib value retrieved (assumed initial 1.0)=', calib)
    print('Testing python scale function wrapper - first on a TimeSeries with defaults')
    d2 = TimeSeries(dts)
    amp = scale(d2)
    print('peak amplitude returned =', amp[0])
    assert amp[0] == 100.0
    d = Seismogram(d3c)
    amp = scale(d)
    print('peak amplitude returned test Seismogram=', amp[0])
    assert amp[0] == 200.0
    print('starting tests of scale on ensembles')
    print('first test TimeSeriesEnsemble with 5 scaled copies of the same vector used earlier in this test')
    ens = TimeSeriesEnsemble()
    scls = [2.0, 4.0, 1.0, 10.0, 5.0]  # note 4 is the median of this vector
    npts = dts.npts
    for i in range(5):
        d = TimeSeries(dts)
        for k in range(npts):
            d.data[k] *= scls[i]
        d.put('calib', 1.0)
        ens.member.append(d)
    # work on a copy because scaling alters the data in place
    enscpy = TimeSeriesEnsemble(ens)
    amps = scale(enscpy)
    print('returned amplitudes for members scaled individually')
    for i in range(5):
        print(amps[i])
        assert amps[i] == 100.0 * scls[i]
    enscpy = TimeSeriesEnsemble(ens)
    amp = scale(enscpy, scale_by_section=True)
    print('average amplitude=', amp[0])
    # assert(amp[0]==4.0)
    avgamp = amp[0]
    for i in range(5):
        calib = enscpy.member[i].get_double("calib")
        print('member number ', i, ' calib is ', calib)
        assert round(calib) == 400.0
        # print(enscpy.member[i].data)
    # similar test for SeismogramEnsemble
    npts = d3c.npts
    ens = SeismogramEnsemble()
    for i in range(5):
        d = Seismogram(d3c)
        for k in range(3):
            for j in range(npts):
                d.data[k, j] *= scls[i]
        d.put('calib', 1.0)
        ens.member.append(d)
    print('Running comparable tests on SeismogramEnsemble')
    enscpy = SeismogramEnsemble(ens)
    amps = scale(enscpy)
    print('returned amplitudes for members scaled individually')
    for i in range(5):
        print(amps[i])
        assert round(amps[i]) == round(200.0 * scls[i])
    print('Trying section scaling of same data')
    enscpy = SeismogramEnsemble(ens)
    amp = scale(enscpy, scale_by_section=True)
    print('average amplitude=', amp[0])
    assert round(amp[0]) == 800.0
    avgamp = amp[0]
    for i in range(5):
        calib = enscpy.member[i].get_double("calib")
        print('member number ', i, ' calib is ', calib)
        assert round(calib) == 800.0
def make_impulse_data(n=1024, dt=0.05, t0=-5.0):
    # Compute lag for a spike at time=0
    lag0 = int(-t0 / dt)
    z = make_impulse_vector([lag0], [150.0], n)
    rf_lags = (lag0, lag0 + 50, lag0 + 60, lag0 + 150, lag0 + 180)
    amps1 = (10.0, 20.0, -60.0, -3.0, 2.0)
    amps2 = (-15.0, 30.0, 10.0, -20.0, 15.0)
    ns = make_impulse_vector(rf_lags, amps1, n)
    ew = make_impulse_vector(rf_lags, amps2, n)
    d = Seismogram(n)
    d.set_t0(t0)
    d.set_dt(dt)
    d.set_live()
    d.tref = TimeReferenceType.Relative
    for i in range(n):
        d.data[0, i] = ew[i]
        d.data[1, i] = ns[i]
        d.data[2, i] = z[i]
    return d
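# Usage sketch (added).  With the defaults above, relative time 0 falls at
# sample -t0/dt = 100; the check below uses only methods already used in
# this file.
def example_make_impulse_data():
    d = make_impulse_data()
    # sample 100 should be at relative time 0.0
    assert np.isclose(d.time(100), 0.0)
    return d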
def get_live_seismogram(ts_size=255, sampling_rate=20.0):
    seis = Seismogram()
    seis.set_live()
    seis.set_as_origin("test", "0", "0", AtomicType.SEISMOGRAM)
    seis.dt = 1 / sampling_rate
    seis.npts = ts_size
    # seis.put('net', 'IU')
    seis.put("npts", ts_size)
    seis.put("sampling_rate", sampling_rate)
    seis.tref = TimeReferenceType.UTC
    seis.t0 = 0
    seis["delta"] = 0.1
    seis["calib"] = 0.1
    seis["site_id"] = bson.objectid.ObjectId()
    seis["source_id"] = bson.objectid.ObjectId()
    seis.data = dmatrix(3, ts_size)
    for i in range(3):
        for j in range(ts_size):
            seis.data[i, j] = np.random.rand()
    return seis
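# Usage sketch (added) for the fixture builder above; typical tests only
# need a live object with a fully populated 3 x ts_size data matrix.
def example_fixture_use():
    seis = get_live_seismogram(ts_size=64, sampling_rate=20.0)
    assert seis.live
    assert seis.data.rows() == 3
    assert seis.data.columns() == 64
    return seis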
# Variant used in test modules that define ts_size and sampling_rate at
# file scope; unlike the parameterized version above it also sets
# channel_id.
def get_live_seismogram():
    seis = Seismogram()
    seis.set_live()
    seis.set_as_origin('test', '0', '0', AtomicType.SEISMOGRAM)
    seis.dt = 1 / sampling_rate
    seis.npts = ts_size
    # seis.put('net', 'IU')
    seis.put('npts', ts_size)
    seis.put('sampling_rate', sampling_rate)
    seis.tref = TimeReferenceType.UTC
    seis.t0 = 0
    seis['delta'] = 0.1
    seis['calib'] = 0.1
    seis['site_id'] = bson.objectid.ObjectId()
    seis['channel_id'] = [bson.objectid.ObjectId()]
    seis['source_id'] = bson.objectid.ObjectId()
    seis.data = dmatrix(3, ts_size)
    for i in range(3):
        for j in range(ts_size):
            seis.data[i, j] = np.random.rand()
    return seis
def test_rotate():
    seis = Seismogram()
    seis.npts = 100
    seis.t0 = 0.0
    seis.dt = 0.001
    seis.live = 1
    for i in range(3):
        for j in range(100):
            if i == 0:
                seis.data[i, j] = 1.0
            else:
                seis.data[i, j] = 0.0
    seis.data[0, 1] = 1.0
    seis.data[0, 2] = 1.0
    seis.data[0, 3] = 0.0
    seis.data[1, 1] = 1.0
    seis.data[1, 2] = 1.0
    seis.data[1, 3] = 0.0
    seis.data[2, 1] = 1.0
    seis.data[2, 2] = 0.0
    seis.data[2, 3] = 1.0
    sc = SphericalCoordinate()
    sc.phi = 0.0
    sc.theta = np.pi / 4
    seis2 = rotate(seis, sc)
    assert all(np.isclose(seis2.data[:, 3], [0, -0.707107, 0.707107]))
    seis3 = rotate_to_standard(seis2)
    assert all(np.isclose(seis3.data[:, 3], [0, 0, 1]))
def Stream2Seismogram(st, master=0, cardinal=False, azimuth="azimuth", dip="dip"):
    """
    Convert an obspy Stream to a Seismogram.

    Convert an obspy Stream object with 3 components to a
    mspass::Seismogram (three-component data) object.  This implementation
    actually converts each component first to a TimeSeries and then calls
    a C++ function to assemble the complete Seismogram.  This has some
    inefficiencies, but the assumption is this function is called early in
    a processing chain to build a raw data set.

    :param st: input obspy Stream object.  The object MUST have exactly 3
      components or the function will throw an AssertionError exception.
      The program is less dogmatic about start times and number of
      samples, as these are handled by the C++ function this python script
      calls.  Be warned, however, that the C++ function can throw a
      MsPASSError exception that should be handled separately.
    :param master: a Seismogram is an assembly of three channels created
      from three TimeSeries/Trace objects.  Each component may have
      different metadata (e.g. orientation data) and common metadata (e.g.
      station coordinates).  To assemble a Seismogram a decision has to be
      made on which component has the definitive common metadata.  We use
      a simple algorithm and clone the data from the one component defined
      by this index.  Must be 0, 1, or 2 or the function will throw a
      RuntimeError.  Default is 0.
    :param cardinal: boolean used to define one of two algorithms used to
      assemble the bundle.  When True the three input components are
      assumed to be in cardinal directions (x1=positive east, x2=positive
      north, and x3=positive up) AND in a fixed order of E,N,Z.  Otherwise
      the Metadata fetched with the azimuth and dip keys are used for
      orientation.
    :param azimuth: defines the Metadata key used to fetch the azimuth
      angle used to define the orientation of each component Trace object.
      Default is 'azimuth' used by obspy.  Note azimuth=hang in css3.0.
      Cannot be aliased - must be present in obspy Stats unless cardinal
      is True.
    :param dip: defines the Metadata key used to fetch the vertical angle
      orientation of each data component.  Vertical angle (vang in css3.0)
      is exactly the same as theta in spherical coordinates.  Default is
      the obspy 'dip' key.  Cannot be aliased - must be defined in obspy
      Stats unless cardinal is True.

    :raise: Can throw either an AssertionError or MsPASSError (currently
      defaulted to pybind11's default RuntimeError; the error message can
      be obtained by calling the what method of RuntimeError).
    """
    # First make sure we have exactly 3 components
    assert len(st) == 3, "Stream length must be EXACTLY 3 for 3-components"
    assert 0 <= master < 3, "master argument must be 0, 1, or 2"
    # If all traces in a stream are dead, it should be converted to a dead
    # Seismogram
    try:
        size = len(st)
        for i in range(len(st)):
            if st[i].dead_mspass:
                size -= 1
        if size == 0:
            res = Seismogram()
            res.live = False
            return res
    except AttributeError:
        pass
    # Complicated logic here, but the point is to make sure the azimuth
    # attribute is set.  The cardinal part is to override the test if we
    # can assume the components are ENZ.
    if not cardinal:
        if (azimuth not in st[0].stats or azimuth not in st[1].stats
                or azimuth not in st[2].stats):
            raise RuntimeError("Stream2Seismogram: Required attribute " +
                               azimuth + " must be in mdother list")
    if not cardinal:
        if (dip not in st[0].stats or dip not in st[1].stats
                or dip not in st[2].stats):
            raise RuntimeError("Stream2Seismogram: Required attribute " +
                               dip + " must be in mdother list")
    # Outer exception handler to handle a range of possible errors in
    # converting each component.  Note we pass an empty list for mdother
    # and aliases except the master.
    bundle = []
    for i in range(3):
        bundle.append(Trace2TimeSeries(st[i]))
    # The constructor we use below has frozen names:  hang for azimuth and
    # vang for what obspy calls dip.  Copy to those names - this should
    # work even if hang and vang are the names, although with some
    # inefficiency.  We assume that would not be normal, so we avoid
    # unnecessary code.
    if cardinal:
        bundle[0].put(Keywords.channel_hang, 90.0)
        bundle[1].put(Keywords.channel_hang, 0.0)
        bundle[2].put(Keywords.channel_hang, 0.0)
        bundle[0].put(Keywords.channel_vang, 90.0)
        bundle[1].put(Keywords.channel_vang, 90.0)
        bundle[2].put(Keywords.channel_vang, 0.0)
    else:
        for i in range(3):
            hang = bundle[i].get_double(azimuth)
            bundle[i].put(Keywords.channel_hang, hang)
            vang = bundle[i].get_double(dip)
            bundle[i].put(Keywords.channel_vang, vang)
    # Assume now bundle contains all the pieces we need.  This constructor
    # for _CoreSeismogram should then do the job.  This may throw an
    # exception, but we require the caller to handle it.  All errors
    # returned by this constructor currently leave the data INVALID, so
    # the handler should discard anything with an error.
    dout = _CoreSeismogram(bundle, master)
    res = Seismogram(dout, "INVALID")
    res.live = True
    return res
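# Usage sketch (added).  st here is a hypothetical 3-component obspy
# Stream; with cardinal=True it must be ordered E,N,Z, otherwise each
# trace's stats must carry the azimuth and dip keys described above.
def example_stream_conversion(st):
    return Stream2Seismogram(st, master=0, cardinal=True)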