def test_get_travel_time_df():
    datapath = os.path.join("data", "testdata", "travel_times")
    datadir = pkg_resources.resource_filename("gmprocess", datapath)

    sc1 = StreamCollection.from_directory(os.path.join(datadir, "ci37218996"))
    sc2 = StreamCollection.from_directory(os.path.join(datadir, "ci38461735"))
    scs = [sc1, sc2]

    df1, catalog = create_travel_time_dataframe(
        sc1, os.path.join(datadir, "catalog_test_traveltimes.csv"), 5, 0.1, "iasp91"
    )
    df2, catalog = create_travel_time_dataframe(
        sc2, os.path.join(datadir, "catalog_test_traveltimes.csv"), 5, 0.1, "iasp91"
    )

    model = TauPyModel("iasp91")
    for dfidx, df in enumerate([df1, df2]):
        for staidx, sta in enumerate(df):
            for eqidx, time in enumerate(df[sta]):
                sta_coords = scs[dfidx][staidx][0].stats.coordinates
                event = catalog[eqidx]
                dist = locations2degrees(
                    sta_coords["latitude"],
                    sta_coords["longitude"],
                    event.latitude,
                    event.longitude,
                )
                if event.depth_km < 0:
                    depth = 0
                else:
                    depth = event.depth_km
                travel_time = model.get_travel_times(
                    depth, dist, ["p", "P", "Pn"]
                )[0].time
                abs_time = event.time + travel_time
                np.testing.assert_almost_equal(abs_time, time, decimal=1)
def test_process_streams():
    # Loma Prieta test station (nc216859)
    data_files, origin = read_data_dir("geonet", "us1000778i", "*.V1A")
    streams = []
    for f in data_files:
        streams += read_data(f)

    sc = StreamCollection(streams)
    sc.describe()

    config = update_config(os.path.join(datadir, "config_min_freq_0p2.yml"))

    test = process_streams(sc, origin, config=config)

    logging.info(f"Testing trace: {test[0][1]}")

    assert len(test) == 3
    assert len(test[0]) == 3
    assert len(test[1]) == 3
    assert len(test[2]) == 3

    # Apparently the traces end up in a different order on the Travis Linux
    # container than on my local mac, so tests of individual traces must not
    # depend on trace order.
    trace_maxes = np.sort(
        [np.max(np.abs(t.data)) for t in test.select(station="HSES")[0]]
    )
    np.testing.assert_allclose(
        trace_maxes, np.array([157.812449, 240.379521, 263.601519]), rtol=1e-5
    )
def test_process_streams():
    # Loma Prieta test station (nc216859)
    data_files, origin = read_data_dir('geonet', 'us1000778i', '*.V1A')
    streams = []
    for f in data_files:
        streams += read_data(f)

    sc = StreamCollection(streams)
    sc.describe()

    config = update_config(os.path.join(datadir, 'config_min_freq_0p2.yml'))

    test = process_streams(sc, origin, config=config)

    logging.info('Testing trace: %s' % test[0][1])

    assert len(test) == 3
    assert len(test[0]) == 3
    assert len(test[1]) == 3
    assert len(test[2]) == 3

    # Apparently the traces end up in a different order on the Travis Linux
    # container than on my local mac, so tests of individual traces must not
    # depend on trace order.
    trace_maxes = np.sort(
        [np.max(np.abs(t.data)) for t in test.select(station='HSES')[0]])
    np.testing.assert_allclose(
        trace_maxes,
        np.array([157.81975508, 240.33718094, 263.67804256]),
        rtol=1e-5)
def test():
    # Test for channel grouping with three unique channels
    streams = []
    # datadir = os.path.join(homedir, '..', 'data', 'knet', 'us2000cnnl')
    datafiles, origin = read_data_dir("knet", "us2000cnnl", "AOM0031801241951*")
    for datafile in datafiles:
        streams += read_knet(datafile)
    grouped_streams = StreamCollection(streams)
    assert len(grouped_streams) == 1
    assert grouped_streams[0].count() == 3

    # Test for channel grouping with more file types
    datafiles, origin = read_data_dir(
        "geonet", "us1000778i", "20161113_110313_THZ_20.V2A"
    )
    datafile = datafiles[0]
    streams += read_geonet(datafile)
    grouped_streams = StreamCollection(streams)
    assert len(grouped_streams) == 2
    assert grouped_streams[0].count() == 3
    assert grouped_streams[1].count() == 3

    # Test for warning for one channel streams
    datafiles, origin = read_data_dir("knet", "us2000cnnl", "AOM0071801241951.UD")
    datafile = datafiles[0]
    streams += read_knet(datafile)
    grouped_streams = StreamCollection(streams)
    # assert "One channel stream:" in logstream.getvalue()

    assert len(grouped_streams) == 3
    assert grouped_streams[0].count() == 3
    assert grouped_streams[1].count() == 3
    assert grouped_streams[2].count() == 1
def test_get_travel_time_df():
    datapath = os.path.join('data', 'testdata', 'travel_times')
    datadir = pkg_resources.resource_filename('gmprocess', datapath)

    sc1 = StreamCollection.from_directory(os.path.join(datadir, 'ci37218996'))
    sc2 = StreamCollection.from_directory(os.path.join(datadir, 'ci38461735'))
    scs = [sc1, sc2]

    df1, catalog = create_travel_time_dataframe(
        sc1, os.path.join(datadir, 'catalog_test_traveltimes.csv'), 5, 0.1,
        'iasp91')
    df2, catalog = create_travel_time_dataframe(
        sc2, os.path.join(datadir, 'catalog_test_traveltimes.csv'), 5, 0.1,
        'iasp91')

    model = TauPyModel('iasp91')
    for dfidx, df in enumerate([df1, df2]):
        for staidx, sta in enumerate(df):
            for eqidx, time in enumerate(df[sta]):
                sta_coords = scs[dfidx][staidx][0].stats.coordinates
                event = catalog[eqidx]
                dist = locations2degrees(sta_coords['latitude'],
                                         sta_coords['longitude'],
                                         event.latitude,
                                         event.longitude)
                if event.depth_km < 0:
                    depth = 0
                else:
                    depth = event.depth_km
                travel_time = model.get_travel_times(
                    depth, dist, ['p', 'P', 'Pn'])[0].time
                abs_time = event.time + travel_time
                np.testing.assert_almost_equal(abs_time, time, decimal=1)
def test_colocated():
    datapath = os.path.join("data", "testdata", "colocated_instruments")
    datadir = pkg_resources.resource_filename("gmprocess", datapath)
    sc = StreamCollection.from_directory(datadir)
    sc.select_colocated()
    assert sc.n_passed == 7
    assert sc.n_failed == 4

    # What if no preference is matched?
    sc = StreamCollection.from_directory(datadir)
    sc.select_colocated(preference=["XX"])
    assert sc.n_passed == 3
    assert sc.n_failed == 8
def test_correct_baseline():
    data_files, origin = read_data_dir("geonet", "us1000778i", "*.V1A")
    data_files.sort()
    streams = []
    for f in data_files:
        streams += read_data(f)
    sc = StreamCollection(streams)

    final_acc = []
    config = get_config()
    config["integration"]["frequency"] = True

    for st in sc:
        for tr in st:
            tmp_tr = correct_baseline(tr, config=config)
            final_acc.append(tmp_tr.data[-1])

    target_final_acc = np.array([
        0.599829,
        0.717284,
        -1.548017,
        0.377616,
        -0.685688,
        0.112147,
        0.024594,
        0.004697,
        -0.013296,
    ])
    np.testing.assert_allclose(final_acc, target_final_acc, atol=1e-6)
def test_all_num_outliers():
    data_files, _ = read_data_dir("clipping_samples", "hv70907436", "*.mseed")
    data_files.sort()
    streams = []
    for f in data_files:
        streams += read_data(f)
    sc = StreamCollection(streams)

    num_outliers = []
    for st in sc:
        std_dev_method = Std_Dev(st, test_all=True)
        num_outliers.append(std_dev_method.num_outliers)

    np.testing.assert_equal(
        num_outliers,
        np.array([
            [0, 0, 0],
            [0, 1086, 23],
            [131, 252, 4482],
            [1018, 76, 0],
            [60, 1314, 1511],
            [0, 0, 4862],
        ]),
    )
def test_all_num_outliers():
    data_files, _ = read_data_dir("clipping_samples", "hv70907436", "*.mseed")
    data_files.sort()
    streams = []
    for f in data_files:
        streams += read_data(f)
    sc = StreamCollection(streams)

    num_outliers = []
    for st in sc:
        ping_method = Ping(st, test_all=True)
        num_outliers.append(ping_method.num_outliers)

    np.testing.assert_equal(
        num_outliers,
        np.array([
            [239, 0, 22],
            [26, 199, 30],
            [0, 0, 0],
            [6, 8, 2],
            [133, 341, 22],
            [145, 264, 29],
        ]),
    )
def test_all_num_outliers():
    data_files, _ = read_data_dir("clipping_samples", "hv70907436", "*.mseed")
    data_files.sort()
    streams = []
    for f in data_files:
        streams += read_data(f)
    sc = StreamCollection(streams)

    num_outliers = []
    for st in sc:
        jerk_method = Jerk(st, test_all=True)
        num_outliers.append(jerk_method.num_outliers)

    np.testing.assert_equal(
        num_outliers,
        np.array([
            [1145, 1137, 1158],
            [1227, 878, 1290],
            [872, 923, 1158],
            [860, 1111, 1381],
            [926, 1025, 954],
            [1205, 1356, 1600],
        ]),
    )
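# The three clipping detectors exercised above (Std_Dev, Ping, Jerk) share the
# same calling convention, so they can be driven generically. A minimal sketch,
# assuming the detector classes are importable exactly as used in the tests;
# this helper is illustrative only and is not part of gmprocess itself:
def _count_outliers_per_method(st):
    """Return per-method outlier counts for a single stream (illustrative)."""
    return {
        method.__name__: method(st, test_all=True).num_outliers
        for method in (Std_Dev, Ping, Jerk)
    }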
def test_fit_spectra():
    config = get_config()
    datapath = os.path.join('data', 'testdata', 'demo', 'ci38457511', 'raw')
    datadir = pkg_resources.resource_filename('gmprocess', datapath)
    event = get_event_object('ci38457511')
    sc = StreamCollection.from_directory(datadir)
    for st in sc:
        st = signal_split(st, event)
        end_conf = config['windows']['signal_end']
        st = signal_end(st,
                        event_time=event.time,
                        event_lon=event.longitude,
                        event_lat=event.latitude,
                        event_mag=event.magnitude,
                        **end_conf)
        st = compute_snr(st, 30)
        st = get_corner_frequencies(st,
                                    method='constant',
                                    constant={
                                        'highpass': 0.08,
                                        'lowpass': 20.0
                                    })

    for st in sc:
        spectrum.fit_spectra(st, event)
def test_allow_nans():
    dpath = os.path.join("data", "testdata", "fdsn", "uu60363602")
    datadir = pkg_resources.resource_filename("gmprocess", dpath)
    sc = StreamCollection.from_directory(datadir)
    origin = read_event_json_files([os.path.join(datadir, "event.json")])[0]
    psc = process_streams(sc, origin)
    st = psc[0]

    ss = StationSummary.from_stream(
        st,
        components=["quadratic_mean"],
        imts=["FAS(4.0)"],
        bandwidth=300,
        allow_nans=True,
    )
    assert np.isnan(ss.pgms.Result).all()

    ss = StationSummary.from_stream(
        st,
        components=["quadratic_mean"],
        imts=["FAS(4.0)"],
        bandwidth=189,
        allow_nans=False,
    )
    assert ~np.isnan(ss.pgms.Result).all()
def test_all_max_calc():
    data_files, _ = read_data_dir("clipping_samples", "hv70907436", "*.mseed")
    data_files.sort()
    streams = []
    for f in data_files:
        streams += read_data(f)
    sc = StreamCollection(streams)

    st_max_amps = []
    for st in sc:
        max_amp_method = Max_Amp(st, test_all=True)
        st_max_amps.append(max_amp_method.max_amp)

    np.testing.assert_allclose(
        st_max_amps,
        np.array([
            [8553230.5231931563, 5621557.4998055659, 8344327.3850897169],
            [8379389.0031664912, 10090978.868285095, 8463705.7919004504],
            [8122003.3022054331, 8148959.0193878114, 8989844.6071329378],
            [8698976.5524693076, 8435914.830898283, 8204508.3222043216],
            [8509963.5836342424, 10646801.251152713, 8805642.5964668635],
            [8766397.4644186441, 8496598.1711016055, 11525175.173268152],
        ]),
        rtol=1e-5,
    )
def test_max_calc():
    data_files, _ = read_data_dir("clipping_samples", "hv70907436", "*.mseed")
    data_files.sort()
    streams = []
    for f in data_files:
        streams += read_data(f)
    sc = StreamCollection(streams)

    st_max_amps = []
    for st in sc:
        max_amp_method = Max_Amp(st)
        st_max_amps.append(max_amp_method.max_amp)

    np.testing.assert_allclose(
        st_max_amps,
        np.array([
            8553230.5231931563,
            8379389.0031664912,
            8122003.3022054331,
            8698976.5524693076,
            8509963.5836342424,
            8766397.4644186441,
        ]),
        rtol=1e-5,
    )
def test_integrate_taper():
    data_files, origin = read_data_dir("geonet", "us1000778i", "*.V1A")
    data_files.sort()
    streams = []
    for f in data_files:
        streams += read_data(f)
    sc = StreamCollection(streams)

    config = get_config()
    config["integration"]["taper"]["taper"] = True

    final_vel = []
    for st in sc:
        for tr in st:
            tmp_tr = tr.integrate(config=config)
            final_vel.append(tmp_tr.data[-1])

    target_final_vel = np.array([
        3.896186e00,
        -4.901823e00,
        -5.722080e-01,
        1.621672e-01,
        -1.654317e-01,
        -8.242356e-04,
        -1.482590e-02,
        1.504334e-01,
        1.021050e-01,
    ])
    np.testing.assert_allclose(final_vel, target_final_vel, atol=1e-6)
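# The integration tests in this section each toggle a subset of the
# "integration" config keys. A minimal sketch collecting the knobs exercised
# above and below in one place; key names mirror the tests, and this helper is
# illustrative only, not part of gmprocess:
def _example_integration_config():
    """Illustrative only: the integration options exercised by these tests."""
    config = get_config()
    config["integration"]["frequency"] = True       # frequency-domain integration
    config["integration"]["taper"]["taper"] = True  # taper before integrating
    config["integration"]["initial"] = 0.0          # time-domain initial condition
    config["integration"]["demean"] = True          # demean before integrating
    return config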
def _test_colocated():
    eventid = 'ci38445975'
    datafiles, event = read_data_dir('fdsn', eventid, '*')
    datadir = os.path.split(datafiles[0])[0]
    raw_streams = StreamCollection.from_directory(datadir)
    config_file = os.path.join(datadir, 'test_config.yml')
    with open(config_file, 'r', encoding='utf-8') as f:
        config = yaml.load(f, Loader=yaml.FullLoader)
    processed_streams = process_streams(raw_streams, event, config=config)

    tdir = tempfile.mkdtemp()
    try:
        tfile = os.path.join(tdir, 'test.hdf')
        ws = StreamWorkspace(tfile)
        ws.addEvent(event)
        ws.addStreams(event, raw_streams, label='raw')
        ws.addStreams(event, processed_streams, label='processed')
        ws.calcMetrics(eventid, labels=['processed'], config=config)
        stasum = ws.getStreamMetrics(eventid, 'CI', 'MIKB', 'processed')
        np.testing.assert_allclose(
            stasum.get_pgm('duration', 'geometric_mean'), 38.94480068)
        ws.close()
    except Exception:
        raise
    finally:
        shutil.rmtree(tdir)
def test_zero_crossings():
    datapath = os.path.join("data", "testdata", "zero_crossings")
    datadir = pkg_resources.resource_filename("gmprocess", datapath)
    sc = StreamCollection.from_directory(datadir)
    sc.describe()

    conf = get_config()
    update = {
        "processing": [
            {"detrend": {"detrending_method": "demean"}},
            {"check_zero_crossings": {"min_crossings": 1}},
        ]
    }
    update_dict(conf, update)

    edict = {
        "id": "ak20419010",
        "time": UTCDateTime("2018-11-30T17:29:29"),
        "lat": 61.346,
        "lon": -149.955,
        "depth": 46.7,
        "magnitude": 7.1,
    }
    event = get_event_object(edict)
    test = process_streams(sc, event, conf)

    for st in test:
        for tr in st:
            assert tr.hasParameter("ZeroCrossingRate")
    np.testing.assert_allclose(
        test[0][0].getParameter("ZeroCrossingRate")["crossing_rate"],
        0.008888888888888889,
        atol=1e-5,
    )
def test_get_status():
    dpath = os.path.join('data', 'testdata', 'status')
    directory = pkg_resources.resource_filename('gmprocess', dpath)
    sc = StreamCollection.from_directory(directory)

    # Manually fail some of the streams
    sc.select(station='BSAP')[0][0].fail('Failure 0')
    sc.select(station='CPE')[0][0].fail('Failure 1')
    sc.select(station='MIKB', instrument='HN')[0][0].fail('Failure 2')
    sc.select(network='PG', station='PSD')[0][0].fail('Failure 3')

    # Test results from 'short', 'long', and 'net'
    short = sc.get_status('short')
    assert (short == 1).all()

    long = sc.get_status('long')
    assert long.at['AZ.BSAP.HN'] == 'Failure 0'
    assert long.at['AZ.BZN.HN'] == ''
    assert long.at['AZ.CPE.HN'] == 'Failure 1'
    assert long.at['CI.MIKB.BN'] == ''
    assert long.at['CI.MIKB.HN'] == 'Failure 2'
    assert long.at['CI.PSD.HN'] == ''
    assert long.at['PG.PSD.HN'] == 'Failure 3'

    net = sc.get_status('net')
    assert net.at['AZ', 'Number Passed'] == 1
    assert net.at['AZ', 'Number Failed'] == 2
    assert net.at['CI', 'Number Passed'] == 2
    assert net.at['CI', 'Number Failed'] == 1
    assert net.at['PG', 'Number Passed'] == 0
    assert net.at['PG', 'Number Failed'] == 1
def test_signal_split2():
    datafiles, origin = read_data_dir("knet", "us2000cnnl", "AOM0011801241951*")
    streams = []
    for datafile in datafiles:
        streams += read_data(datafile)
    streams = StreamCollection(streams)
    stream = streams[0]
    signal_split(stream, origin)

    cmpdict = {
        "split_time": UTCDateTime(2018, 1, 24, 10, 51, 39, 841483),
        "method": "p_arrival",
        "picker_type": "travel_time",
    }

    pdict = stream[0].getParameter("signal_split")
    for key, value in cmpdict.items():
        v1 = pdict[key]
        # Because I can't figure out how to get the UTCDateTime __eq__
        # operator to behave as expected with the currently installed version
        # of obspy, we're going to pedantically compare the components of
        # these objects...
        if isinstance(value, UTCDateTime):
            # value.__precision = 4
            # v1.__precision = 4
            assert value.year == v1.year
            assert value.month == v1.month
            assert value.day == v1.day
            assert value.hour == v1.hour
            assert value.minute == v1.minute
            assert value.second == v1.second
        else:
            assert v1 == value
def test_fit_spectra():
    config = get_config()
    datapath = os.path.join("data", "testdata", "demo", "ci38457511", "raw")
    datadir = pkg_resources.resource_filename("gmprocess", datapath)
    event = get_event_object("ci38457511")
    sc = StreamCollection.from_directory(datadir)
    for st in sc:
        st = signal_split(st, event)
        end_conf = config["windows"]["signal_end"]
        st = signal_end(st,
                        event_time=event.time,
                        event_lon=event.longitude,
                        event_lat=event.latitude,
                        event_mag=event.magnitude,
                        **end_conf)
        st = compute_snr(st, 30)
        st = get_corner_frequencies(st,
                                    event,
                                    method="constant",
                                    constant={
                                        "highpass": 0.08,
                                        "lowpass": 20.0
                                    })

    for st in sc:
        spectrum.fit_spectra(st, event)
def test_get_status():
    dpath = os.path.join("data", "testdata", "status")
    directory = pkg_resources.resource_filename("gmprocess", dpath)
    sc = StreamCollection.from_directory(directory)

    # Manually fail some of the streams
    sc.select(station="BSAP")[0][0].fail("Failure 0")
    sc.select(station="CPE")[0][0].fail("Failure 1")
    sc.select(station="MIKB", instrument="HN")[0][0].fail("Failure 2")
    sc.select(network="PG", station="PSD")[0][0].fail("Failure 3")

    # Test results from 'short', 'long', and 'net'
    short = sc.get_status("short")
    assert (short == 1).all()

    long = sc.get_status("long")
    assert long.at["AZ.BSAP.HN"] == "Failure 0"
    assert long.at["AZ.BZN.HN"] == ""
    assert long.at["AZ.CPE.HN"] == "Failure 1"
    assert long.at["CI.MIKB.BN"] == ""
    assert long.at["CI.MIKB.HN"] == "Failure 2"
    assert long.at["CI.PSD.HN"] == ""
    assert long.at["PG.PSD.HN"] == "Failure 3"

    net = sc.get_status("net")
    assert net.at["AZ", "Number Passed"] == 1
    assert net.at["AZ", "Number Failed"] == 2
    assert net.at["CI", "Number Passed"] == 2
    assert net.at["CI", "Number Failed"] == 1
    assert net.at["PG", "Number Passed"] == 0
    assert net.at["PG", "Number Failed"] == 1
def _test_colocated():
    eventid = "ci38445975"
    datafiles, event = read_data_dir("fdsn", eventid, "*")
    datadir = os.path.split(datafiles[0])[0]
    raw_streams = StreamCollection.from_directory(datadir)
    config_file = os.path.join(datadir, "test_config.yml")
    with open(config_file, "r", encoding="utf-8") as f:
        yaml = YAML()
        yaml.preserve_quotes = True
        config = yaml.load(f)
    processed_streams = process_streams(raw_streams, event, config=config)

    tdir = tempfile.mkdtemp()
    try:
        tfile = os.path.join(tdir, "test.hdf")
        ws = StreamWorkspace(tfile)
        ws.addEvent(event)
        ws.addStreams(event, raw_streams, label="raw")
        ws.addStreams(event, processed_streams, label="processed")
        ws.calcMetrics(eventid, labels=["processed"], config=config)
        stasum = ws.getStreamMetrics(eventid, "CI", "MIKB", "processed")
        np.testing.assert_allclose(
            stasum.get_pgm("duration", "geometric_mean"), 38.94480068)
        ws.close()
    except Exception:
        raise
    finally:
        shutil.rmtree(tdir)
def test_get_vel():
    data_files, origin = read_data_dir("geonet", "us1000778i", "*.V1A")
    data_files.sort()
    streams = []
    for f in data_files:
        streams += read_data(f)
    sc = StreamCollection(streams)

    config = get_config()
    config["integration"]["frequency"] = True

    final_vel = []
    for st in sc:
        for tr in st:
            tmp_tr = get_vel(tr, config=config)
            final_vel.append(tmp_tr.data[-1])

    target_final_vel = np.array([
        -2.182293e-03,
        -1.417545e-03,
        2.111492e-03,
        -9.395322e-04,
        1.662219e-03,
        -2.690978e-04,
        1.376186e-04,
        -7.358185e-05,
        1.741465e-05,
    ])
    np.testing.assert_allclose(final_vel, target_final_vel, atol=1e-6)
def test_check_instrument():
    data_files, origin = read_data_dir('fdsn', 'nc51194936', '*.mseed')
    streams = []
    for f in data_files:
        streams += read_data(f)

    sc = StreamCollection(streams)
    sc.describe()

    config = update_config(
        os.path.join(datadir, 'config_test_check_instr.yml'))
    test = process_streams(sc, origin, config=config)

    for sta, expected in [('CVS', True), ('GASB', True), ('SBT', False)]:
        st = test.select(station=sta)[0]
        logging.info('Testing stream: %s' % st)
        assert st.passed == expected
def test_check_instrument():
    data_files, origin = read_data_dir("fdsn", "nc51194936", "*.mseed")
    streams = []
    for f in data_files:
        streams += read_data(f)

    sc = StreamCollection(streams)
    sc.describe()

    config = update_config(os.path.join(datadir, "config_test_check_instr.yml"))
    test = process_streams(sc, origin, config=config)

    for sta, expected in [("CVS", True), ("GASB", True), ("SBT", False)]:
        st = test.select(station=sta)[0]
        logging.info(f"Testing stream: {st}")
        assert st.passed == expected
def test_weird_sensitivity():
    datafiles, origin = read_data_dir("fdsn", "us70008dx7", "SL.KOGS*.mseed")
    streams = []
    for datafile in datafiles:
        streams += read_obspy(datafile)
    sc = StreamCollection(streams)
    psc = process_streams(sc, origin)
    channel = psc[0].select(component="E")[0]
    assert_almost_equal(channel.data.max(), 62900.197618074293)
def retrieveData(self, event_dict):
    """Retrieve data from NSMN, turn into StreamCollection.

    Args:
        event_dict (dict):
            Best dictionary matching input event, fields as above in
            return of getMatchingEvents().

    Returns:
        StreamCollection: StreamCollection object.
    """
    rawdir = self.rawdir
    if self.rawdir is None:
        rawdir = tempfile.mkdtemp()
    else:
        if not os.path.isdir(rawdir):
            os.makedirs(rawdir)

    urlparts = urlparse(SEARCH_URL)
    req = requests.get(event_dict['url'])
    data = req.text
    soup = BeautifulSoup(data, features="lxml")
    table = soup.find_all('table', 'tableType_01')[1]
    datafiles = []
    for row in table.find_all('tr'):
        if 'class' in row.attrs:
            continue
        col = row.find_all('td', 'coltype01')[0]
        href = col.contents[0].attrs['href']
        station_id = col.contents[0].contents[0]
        station_url = urljoin('http://' + urlparts.netloc, href)
        req2 = requests.get(station_url)
        data2 = req2.text
        soup2 = BeautifulSoup(data2, features="lxml")
        center = soup2.find_all('center')[0]
        anchor = center.find_all('a')[0]
        href2 = anchor.attrs['href']
        data_url = urljoin('http://' + urlparts.netloc, href2)
        req3 = requests.get(data_url)
        data = req3.text
        localfile = os.path.join(rawdir, '%s.txt' % station_id)
        logging.info('Downloading Turkish data file %s...' % station_id)
        with open(localfile, 'wt') as f:
            f.write(data)
        datafiles.append(localfile)

    streams = []
    for dfile in datafiles:
        logging.info('Reading datafile %s...' % dfile)
        streams += read_nsmn(dfile)

    if self.rawdir is None:
        shutil.rmtree(rawdir)

    stream_collection = StreamCollection(streams=streams,
                                         drop_non_free=self.drop_non_free)
    return stream_collection
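# Hedged usage sketch for the fetcher method above. The class name, module
# path, and constructor arguments below are assumptions (none of them are
# defined in this file), so this is illustrative only:
#
#   from gmprocess.io.nsmn.turkey_fetcher import TurkeyFetcher  # assumed path
#
#   fetcher = TurkeyFetcher(time, lat, lon, depth, magnitude, rawdir="/tmp/nsmn")
#   events = fetcher.getMatchingEvents(solve=False)
#   sc = fetcher.retrieveData(events[0])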
def test():
    # Read in data with only one stationxml entry
    data_files, origin = read_data_dir(
        "station_xml_epochs", "nc73631381", "*.mseed")
    test_root = os.path.normpath(os.path.join(data_files[0], os.pardir))
    sc = StreamCollection.from_directory(test_root)
    psc = process_streams(sc, origin)

    # Read in data with all dates in stationxml
    data_files, origin = read_data_dir(
        "station_xml_epochs", "nc73631381_ad", "*.mseed")
    test_root = os.path.normpath(os.path.join(data_files[0], os.pardir))
    sc_ad = StreamCollection.from_directory(test_root)
    psc_ad = process_streams(sc_ad, origin)

    single_maxes = np.sort([np.max(tr.data) for tr in psc[0]])
    alldates_maxes = np.sort([np.max(tr.data) for tr in psc_ad[0]])
    assert_allclose(single_maxes, alldates_maxes)
def test_weird_sensitivity():
    datafiles, origin = read_data_dir('fdsn', 'us70008dx7', 'SL.KOGS*.mseed')
    streams = []
    for datafile in datafiles:
        streams += read_obspy(datafile)
    sc = StreamCollection(streams)
    psc = process_streams(sc, origin)
    channel = psc[0].select(component='E')[0]
    assert_almost_equal(channel.data.max(), 62900.191900393373)
def test_get_disp():
    data_files, origin = read_data_dir("geonet", "us1000778i", "*.V1A")
    data_files.sort()
    streams = []
    for f in data_files:
        streams += read_data(f)
    sc = StreamCollection(streams)

    config = get_config()
    config["integration"]["frequency"] = True

    final_disp = []
    for st in sc:
        for tr in st:
            tmp_tr = get_disp(tr, config=config)
            final_disp.append(tmp_tr.data[-1])

    target_final_disp = np.array([
        -0.07689,
        0.082552,
        -0.024509,
        -0.00047,
        -0.000257,
        -0.000152,
        -0.003425,
        0.000671,
        0.000178,
    ])
    np.testing.assert_allclose(final_disp, target_final_disp, atol=1e-6)

    config["integration"]["frequency"] = False
    config["integration"]["initial"] = 0.0
    config["integration"]["demean"] = True

    final_disp = []
    for st in sc:
        for tr in st:
            tmp_tr = get_disp(tr, config=config)
            final_disp.append(tmp_tr.data[-1])

    target_final_disp = np.array([
        -0.076882,
        0.082549,
        -0.024512,
        -0.000469,
        -0.000259,
        -0.000152,
        -0.003425,
        0.000672,
        0.000178,
    ])
    np.testing.assert_allclose(final_disp, target_final_disp, atol=1e-6)
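# Optional direct-run hook for debugging outside pytest. A minimal sketch
# following the pattern gmprocess test modules commonly end with; the
# environment variable below is an assumption carried over from that pattern:
if __name__ == "__main__":
    os.environ["CALLED_FROM_PYTEST"] = "True"
    test_get_disp()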