def main(argv=None):
    """
    Command line entry point of obspy-plot.

    Reads all given waveform files into one Stream and plots it, either in
    an interactive window or -- when ``--outfile`` is given -- to a file.

    :param argv: command line arguments to parse (``None`` uses
        ``sys.argv`` via argparse).
    """
    parser = ArgumentParser(prog='obspy-plot', description=__doc__.strip())
    parser.add_argument('-V', '--version', action='version',
                        version='%(prog)s ' + __version__)
    parser.add_argument('-f', '--format', choices=ENTRY_POINTS['waveform'],
                        help='Waveform format.')
    parser.add_argument('-o', '--outfile', help='Output filename.')
    parser.add_argument('-n', '--no-automerge', dest='automerge',
                        action='store_false',
                        help='Disable automatic merging of matching channels.')
    parser.add_argument('--full', dest='full', action='store_true',
                        help='Disable min/max-plot, i.e. always plot every '
                             'single sample (Stream.plot(..., method="full"), '
                             'for interactive zooming).')
    parser.add_argument('files', nargs='+', help='Files to plot.')
    options = parser.parse_args(argv)
    # Rendering straight to a file needs no display, so force the
    # non-interactive Agg backend.
    if options.outfile is not None:
        MatplotlibBackend.switch_backend("AGG", sloppy=False)
    stream = Stream()
    for filename in options.files:
        stream += read(filename, format=options.format)
    plot_kwargs = {"outfile": options.outfile, "automerge": options.automerge}
    if options.full:
        plot_kwargs['method'] = "full"
    stream.plot(**plot_kwargs)
def main(argv=None):
    """
    Entry point for the ``obspy-plot`` script.

    Every input file is read and appended to a single Stream which is then
    plotted; with ``--outfile`` the plot is written to disk instead of
    being shown interactively.

    :param argv: command line arguments to parse (``None`` uses
        ``sys.argv`` via argparse).
    """
    parser = ArgumentParser(prog="obspy-plot", description=__doc__.strip())
    parser.add_argument("-V", "--version", action="version",
                        version="%(prog)s " + __version__)
    parser.add_argument("-f", "--format", choices=ENTRY_POINTS["waveform"],
                        help="Waveform format.")
    parser.add_argument("-o", "--outfile", help="Output filename.")
    parser.add_argument("-n", "--no-automerge", dest="automerge",
                        action="store_false",
                        help="Disable automatic merging of matching channels.")
    parser.add_argument("--full", dest="full", action="store_true",
                        help="Disable min/max-plot, i.e. always plot every "
                             'single sample (Stream.plot(..., method="full"), '
                             "for interactive zooming.)"[:-2] + ").")
    parser.add_argument("files", nargs="+", help="Files to plot.")
    parsed = parser.parse_args(argv)
    if parsed.outfile is not None:
        # No GUI needed when writing to a file: use the headless backend.
        MatplotlibBackend.switch_backend("AGG", sloppy=False)
    combined = Stream()
    for path in parsed.files:
        combined += read(path, format=parsed.format)
    plot_args = dict(outfile=parsed.outfile, automerge=parsed.automerge)
    if parsed.full:
        plot_args["method"] = "full"
    combined.plot(**plot_args)
def main(argv=None):
    """
    Command line entry point of obspy-plot: read all given waveform files
    into a single Stream and plot them together.

    :param argv: command line arguments to parse (``None`` uses
        ``sys.argv`` via argparse).
    """
    parser = ArgumentParser(prog='obspy-plot', description=__doc__.strip())
    parser.add_argument('-V', '--version', action='version',
                        version='%(prog)s ' + __version__)
    parser.add_argument('-f', '--format', choices=ENTRY_POINTS['waveform'],
                        help='Waveform format.')
    parser.add_argument('-o', '--outfile', help='Output filename.')
    parser.add_argument('-n', '--no-automerge', dest='automerge',
                        action='store_false',
                        help='Disable automatic merging of matching channels.')
    parser.add_argument('files', nargs='+', help='Files to plot.')
    opts = parser.parse_args(argv)
    # Writing to a file requires no GUI, so switch to the Agg backend.
    if opts.outfile is not None:
        MatplotlibBackend.switch_backend("AGG", sloppy=False)
    stream = Stream()
    for fname in opts.files:
        stream += read(fname, format=opts.format)
    stream.plot(outfile=opts.outfile, automerge=opts.automerge)
def __enter__(self):
    """
    Set matplotlib defaults.

    Switches to the non-interactive Agg backend, forces a US-English
    locale (so date formatting in tests is reproducible), resets
    matplotlib rcParams to builtin defaults and pins a known default font,
    so image comparison tests render identically across machines.
    Returns ``self`` so the object can be used as a context manager.
    """
    MatplotlibBackend.switch_backend("AGG", sloppy=False)
    from matplotlib import font_manager, rcParams, rcdefaults
    import locale
    # Try the POSIX locale name first, then the Windows spelling; warn if
    # neither is available since date-related tests may then fail.
    try:
        locale.setlocale(locale.LC_ALL, native_str('en_US.UTF-8'))
    except Exception:
        try:
            locale.setlocale(locale.LC_ALL,
                             native_str('English_United States.1252'))
        except Exception:
            msg = "Could not set locale to English/United States. " + \
                  "Some date-related tests may fail"
            warnings.warn(msg)
    # set matplotlib builtin default settings for testing
    rcdefaults()
    # NOTE(review): self.style is presumably a matplotlib style context
    # manager set up elsewhere on this object -- entered manually here and
    # likely exited in __exit__; confirm against the enclosing class.
    if self.style is not None:
        self.style.__enter__()
    # The default sans-serif font shipped with matplotlib changed in 2.0.
    if MATPLOTLIB_VERSION >= [2, 0, 0]:
        default_font = 'DejaVu Sans'
    else:
        default_font = 'Bitstream Vera Sans'
    rcParams['font.family'] = default_font
    # findfont() emits a warning when it falls back to another font; record
    # warnings to detect that case and surface it to the test author.
    with warnings.catch_warnings(record=True) as w:
        warnings.filterwarnings('always', 'findfont:.*')
        font_manager.findfont(default_font)
        if w:
            warnings.warn('Unable to find the ' + default_font + ' font. '
                          'Plotting tests will likely fail.')
    # These rcParams keys may not exist in all matplotlib versions.
    try:
        rcParams['text.hinting'] = False
    except KeyError:
        warnings.warn("could not set rcParams['text.hinting']")
    try:
        rcParams['text.hinting_factor'] = 8
    except KeyError:
        warnings.warn("could not set rcParams['text.hinting_factor']")
    # Optionally close any figures left open by earlier tests.
    if self.plt_close_all_enter:
        import matplotlib.pyplot as plt
        try:
            plt.close("all")
        except Exception:
            pass
    return self
def __enter__(self):
    """
    Set matplotlib defaults.

    Switches to the non-interactive Agg backend, forces a US-English
    locale (so date formatting in tests is reproducible), resets
    matplotlib rcParams to builtin defaults and pins the Bitstream Vera
    Sans font used by the image comparison tests.
    Returns ``self`` so the object can be used as a context manager.
    """
    MatplotlibBackend.switch_backend("AGG", sloppy=False)
    from matplotlib import font_manager, rcParams, rcdefaults
    import locale
    # Try the POSIX locale name first, then the Windows spelling; warn if
    # neither is available since date-related tests may then fail.
    # Fix: use "except Exception" instead of a bare "except:", which would
    # also swallow SystemExit/KeyboardInterrupt.
    try:
        locale.setlocale(locale.LC_ALL, native_str('en_US.UTF-8'))
    except Exception:
        try:
            locale.setlocale(locale.LC_ALL,
                             native_str('English_United States.1252'))
        except Exception:
            msg = "Could not set locale to English/United States. " + \
                  "Some date-related tests may fail"
            warnings.warn(msg)
    # set matplotlib builtin default settings for testing
    rcdefaults()
    rcParams['font.family'] = 'Bitstream Vera Sans'
    # findfont() emits a warning when it falls back to another font; record
    # warnings to detect that case and surface it to the test author.
    with warnings.catch_warnings(record=True) as w:
        warnings.filterwarnings('always', 'findfont:.*')
        font_manager.findfont('Bitstream Vera Sans')
        if w:
            warnings.warn('Unable to find the Bitstream Vera Sans font. '
                          'Plotting tests will likely fail.')
    # These rcParams keys may not exist in all matplotlib versions.
    try:
        rcParams['text.hinting'] = False
    except KeyError:
        warnings.warn("could not set rcParams['text.hinting']")
    try:
        rcParams['text.hinting_factor'] = 8
    except KeyError:
        warnings.warn("could not set rcParams['text.hinting_factor']")
    return self
def test_plot_catalog_before_1900(self):
    """
    Tests plotting events with origin times before 1900
    """
    cat = read_events()
    # force an origin time far before 1900 (year 813) to exercise the
    # pre-1900 date handling in the plotting code
    cat[1].origins[0].time = UTCDateTime(813, 2, 4, 14, 13)
    # just checking this runs without error is fine, no need to check
    # content
    with MatplotlibBackend("AGG", sloppy=True):
        cat.plot(outfile=io.BytesIO(), method='basemap')
        # also test with just a single event
        cat.events = [cat[1]]
        cat.plot(outfile=io.BytesIO(), method='basemap')
def run(argv=None, interactive=True):
    """
    Entry point for obspy-runtests: parse command line options, configure
    warning/verbosity behavior and environment flags, then hand off to
    the actual test runner.

    :param argv: command line arguments to parse (``None`` uses
        ``sys.argv`` via argparse).
    :param interactive: whether the user may be prompted (e.g. about
        submitting a test report); forced off by ``--dontask``/``--quiet``.
    :returns: whatever ``run_tests`` returns.
    """
    # tests must run without a display, so force the Agg backend up front
    MatplotlibBackend.switch_backend("AGG", sloppy=False)
    parser = ArgumentParser(prog='obspy-runtests',
                            description='A command-line program that runs all '
                                        'ObsPy tests.')
    parser.add_argument('-V', '--version', action='version',
                        version='%(prog)s ' + get_git_version())
    parser.add_argument('-v', '--verbose', action='store_true',
                        help='verbose mode')
    parser.add_argument('-q', '--quiet', action='store_true',
                        help='quiet mode')
    parser.add_argument('--raise-all-warnings', action='store_true',
                        help='All warnings are raised as exceptions when this '
                             'flag is set. Only for debugging purposes.')
    # filter options
    filter = parser.add_argument_group(
        'Module Filter', 'Providing no modules will test all '
                         'ObsPy modules which do not require an '
                         'active network connection.')
    filter.add_argument('-a', '--all', action='store_true',
                        dest='test_all_modules',
                        help='test all modules (including network modules)')
    filter.add_argument('-x', '--exclude', action='append',
                        help='exclude given module from test')
    filter.add_argument('tests', nargs='*', help='test modules to run')
    # timing / profile options
    timing = parser.add_argument_group('Timing/Profile Options')
    timing.add_argument('-t', '--timeit', action='store_true',
                        help='shows accumulated run times of each module')
    timing.add_argument('-s', '--slowest', default=0, type=int, dest='n',
                        help='lists n slowest test cases')
    timing.add_argument('-p', '--profile', action='store_true',
                        help='uses cProfile, saves the results to file ' +
                             'obspy.pstats and prints some profiling numbers')
    # reporting options
    report = parser.add_argument_group('Reporting Options')
    report.add_argument('-r', '--report', action='store_true',
                        help='automatically submit a test report')
    report.add_argument('-d', '--dontask', action='store_true',
                        help="don't explicitly ask for submitting a test "
                             "report")
    report.add_argument('-u', '--server', default='tests.obspy.org',
                        help='report server (default is tests.obspy.org)')
    report.add_argument('-n', '--node', dest='hostname', default=HOSTNAME,
                        help='nodename visible at the report server')
    report.add_argument('-l', '--log', default=None,
                        help='append log file to test report')
    report.add_argument('--ci-url', default=None, dest="ci_url",
                        help='URL to Continuous Integration job page.')
    report.add_argument('--pr-url', default=None, dest="pr_url",
                        help='Github (Pull Request) URL.')
    # other options
    others = parser.add_argument_group('Additional Options')
    others.add_argument('--tutorial', action='store_true',
                        help='add doctests in tutorial')
    others.add_argument('--no-flake8', action='store_true',
                        help='skip code formatting test')
    others.add_argument('--keep-images', action='store_true',
                        help='store images created during image comparison '
                             'tests in subfolders of baseline images')
    others.add_argument('--keep-only-failed-images', action='store_true',
                        help='when storing images created during testing, '
                             'only store failed images and the corresponding '
                             'diff images (but not images that passed the '
                             'corresponding test).')
    args = parser.parse_args(argv)
    # set correct verbosity level
    if args.verbose:
        verbosity = 2
        # raise all NumPy warnings
        np.seterr(all='warn')
    elif args.quiet:
        verbosity = 0
        # ignore user and deprecation warnings
        warnings.simplefilter("ignore", DeprecationWarning)
        warnings.simplefilter("ignore", UserWarning)
        # don't ask to send a report
        args.dontask = True
    else:
        verbosity = 1
        # show all NumPy warnings
        np.seterr(all='print')
        # ignore user warnings
        warnings.simplefilter("ignore", UserWarning)
    # whether to raise any warning that's appearing
    if args.raise_all_warnings:
        # raise all NumPy warnings
        np.seterr(all='raise')
        # raise user and deprecation warnings
        warnings.simplefilter("error", UserWarning)
    # ignore specific warnings
    msg = ('Matplotlib is currently using agg, which is a non-GUI backend, '
           'so cannot show the figure.')
    warnings.filterwarnings("ignore", message=msg)
    # check for send report option or environmental settings
    # NOTE: rebinding "report" here shadows the argument group above, which
    # is no longer needed at this point.
    if args.report or 'OBSPY_REPORT' in os.environ.keys():
        report = True
    else:
        report = False
    if 'OBSPY_REPORT_SERVER' in os.environ.keys():
        args.server = os.environ['OBSPY_REPORT_SERVER']
    # check interactivity settings
    if interactive and args.dontask:
        interactive = False
    # communicate image-keeping / flake8 choices to the test machinery via
    # environment variables (presence of the key is the flag, value unused)
    if args.keep_images:
        os.environ['OBSPY_KEEP_IMAGES'] = ""
    if args.keep_only_failed_images:
        os.environ['OBSPY_KEEP_ONLY_FAILED_IMAGES'] = ""
    if args.no_flake8:
        os.environ['OBSPY_NO_FLAKE8'] = ""
    # All arguments are used by the test runner and should not interfere
    # with any other module that might also parse them, e.g. flake8.
    sys.argv = sys.argv[:1]
    return run_tests(verbosity, args.tests, report, args.log, args.server,
                     args.test_all_modules, args.timeit, interactive, args.n,
                     exclude=args.exclude, tutorial=args.tutorial,
                     hostname=args.hostname, ci_url=args.ci_url,
                     pr_url=args.pr_url)
# -*- coding: utf-8 -*- from __future__ import (absolute_import, division, print_function, unicode_literals) from future.builtins import * # NOQA import unittest from obspy.core.util import add_doctests, add_unittests from obspy.core.util.misc import MatplotlibBackend # this code is needed to run the tests without any X11 or any other # display, e.g. via a SSH connection. Import it only once, else a nasty # warning occurs. # see also: http://matplotlib.org/faq/howto_faq.html MatplotlibBackend("AGG", sloppy=False) MODULE_NAME = "obspy.imaging" def suite(): suite = unittest.TestSuite() add_doctests(suite, MODULE_NAME) add_unittests(suite, MODULE_NAME) return suite if __name__ == '__main__': unittest.main(defaultTest='suite')
def main(argv=None):
    """
    Entry point of the obspy-scan command line script.

    Scans waveform files (optionally recursively), collects per-channel
    start/end times and sampling intervals, and renders a data-availability
    plot with optional gap highlighting and gap listings.

    :param argv: command line arguments to parse (``None`` uses
        ``sys.argv`` via argparse).
    """
    parser = ArgumentParser(prog='obspy-scan', description=__doc__.strip(),
                            formatter_class=RawDescriptionHelpFormatter)
    parser.add_argument('-V', '--version', action='version',
                        version='%(prog)s ' + __version__)
    parser.add_argument('-f', '--format', choices=ENTRY_POINTS['waveform'],
                        help='Optional, the file format.\n' +
                             ' '.join(__doc__.split('\n')[-4:]))
    parser.add_argument('-v', '--verbose', action='store_true',
                        help='Optional. Verbose output.')
    parser.add_argument('-q', '--quiet', action='store_true',
                        help='Optional. Be quiet. Overwritten by --verbose '
                             'flag.')
    parser.add_argument('-n', '--non-recursive', action='store_false',
                        dest='recursive',
                        help='Optional. Do not descend into directories.')
    parser.add_argument('-i', '--ignore-links', action='store_true',
                        help='Optional. Do not follow symbolic links.')
    parser.add_argument('--start-time', default=None, type=UTCDateTime,
                        help='Optional, a UTCDateTime compatible string. ' +
                             'Only visualize data after this time and set ' +
                             'time-axis axis accordingly.')
    parser.add_argument('--end-time', default=None, type=UTCDateTime,
                        help='Optional, a UTCDateTime compatible string. ' +
                             'Only visualize data before this time and set ' +
                             'time-axis axis accordingly.')
    parser.add_argument('--id', action='append',
                        help='Optional, a SEED channel identifier '
                             "(e.g. 'GR.FUR..HHZ'). You may provide this " +
                             'option multiple times. Only these ' +
                             'channels will be plotted.')
    parser.add_argument('-t', '--event-time', default=None, type=UTCDateTime,
                        action='append',
                        help='Optional, a UTCDateTime compatible string ' +
                             "(e.g. '2010-01-01T12:00:00'). You may provide " +
                             'this option multiple times. These times get ' +
                             'marked by vertical lines in the plot. ' +
                             'Useful e.g. to mark event origin times.')
    parser.add_argument('-w', '--write', default=None,
                        help='Optional, npz file for writing data '
                             'after scanning waveform files')
    parser.add_argument('-l', '--load', default=None,
                        help='Optional, npz file for loading data '
                             'before scanning waveform files')
    parser.add_argument('--no-x', action='store_true',
                        help='Optional, Do not plot crosses.')
    parser.add_argument('--no-gaps', action='store_true',
                        help='Optional, Do not plot gaps.')
    parser.add_argument('-o', '--output', default=None,
                        help='Save plot to image file (e.g. out.pdf, ' +
                             'out.png) instead of opening a window.')
    parser.add_argument('--print-gaps', action='store_true',
                        help='Optional, prints a list of gaps at the end.')
    parser.add_argument('paths', nargs='*',
                        help='Files or directories to scan.')
    args = parser.parse_args(argv)
    # when writing to a file no display is needed
    if args.output is not None:
        MatplotlibBackend.switch_backend("AGG", sloppy=False)
    # Print help and exit if no arguments are given
    if len(args.paths) == 0 and args.load is None:
        parser.error('No paths specified.')
    # Use recursively parsing function?
    if args.recursive:
        parse_func = recursive_parse
    else:
        parse_func = parse_file_to_dict
    # matplotlib imports are deferred until after the backend switch above
    from matplotlib.dates import date2num, num2date
    from matplotlib.ticker import FuncFormatter
    from matplotlib.patches import Rectangle
    from matplotlib.collections import PatchCollection
    import matplotlib.pyplot as plt
    fig = plt.figure()
    ax = fig.add_subplot(111)
    # Plot vertical lines if option 'event_time' was specified
    if args.event_time:
        times = [date2num(t.datetime) for t in args.event_time]
        for time in times:
            ax.axvline(time, color='k')
    # convert start/end times to matplotlib date numbers once, up front;
    # note args.start_time/args.end_time are rebound to floats here
    if args.start_time:
        args.start_time = date2num(args.start_time.datetime)
    if args.end_time:
        args.end_time = date2num(args.end_time.datetime)
    # Generate dictionary containing nested lists of start and end times per
    # station
    data = {}
    samp_int = {}
    counter = 1
    if args.load:
        load_npz(args.load, data, samp_int)
    for path in args.paths:
        counter = parse_func(data, samp_int, path, counter, args.format,
                             verbose=args.verbose, quiet=args.quiet,
                             ignore_links=args.ignore_links)
    if not data:
        if args.verbose or not args.quiet:
            print("No waveform data found.")
        return
    if args.write:
        write_npz(args.write, data, samp_int)
    # either use ids specified by user or use ids based on what data we have
    # parsed
    ids = args.id or list(data.keys())
    ids = sorted(ids)[::-1]
    labels = [""] * len(ids)
    if args.verbose or not args.quiet:
        print('\n')
    for _i, _id in enumerate(ids):
        labels[_i] = ids[_i]
        # sort data list and sampling rate list
        if _id in data:
            startend = np.array(data[_id])
            _samp_int = np.array(samp_int[_id])
            indices = np.lexsort((startend[:, 1], startend[:, 0]))
            startend = startend[indices]
            _samp_int = _samp_int[indices]
        else:
            startend = np.array([])
            _samp_int = np.array([])
        if len(startend) == 0:
            # no data for this id: if a full time range was requested, show
            # it as one big gap, otherwise skip the id entirely
            if not (args.start_time and args.end_time):
                continue
            if not args.no_gaps:
                rects = [
                    Rectangle((args.start_time, _i - 0.4),
                              args.end_time - args.start_time, 0.8)
                ]
                ax.add_collection(PatchCollection(rects, color="r"))
            if args.print_gaps and (args.verbose or not args.quiet):
                print("%s %s %s %.3f" % (
                    _id, args.start_time, args.end_time,
                    args.end_time - args.start_time))
            continue
        # restrict plotting of results to given start/end time
        if args.start_time:
            indices = startend[:, 1] > args.start_time
            startend = startend[indices]
            _samp_int = _samp_int[indices]
        if len(startend) == 0:
            continue
        if args.end_time:
            indices = startend[:, 0] < args.end_time
            startend = startend[indices]
            _samp_int = _samp_int[indices]
        if len(startend) == 0:
            continue
        data_start = startend[:, 0].min()
        data_end = startend[:, 1].max()
        timerange_start = args.start_time or data_start
        timerange_end = args.end_time or data_end
        timerange = timerange_end - timerange_start
        if timerange == 0.0:
            warnings.warn('Zero sample long data for _id=%s, skipping' % _id)
            continue
        startend_compressed = compress_start_end(startend, 1000)
        offset = np.ones(len(startend)) * _i  # generate list of y values
        if not args.no_x:
            ax.plot(startend[:, 0], offset, 'x', linewidth=2)
        ax.hlines(offset[:len(startend_compressed)],
                  startend_compressed[:, 0], startend_compressed[:, 1],
                  'b', linewidth=2, zorder=3)
        # find the gaps
        diffs = startend[1:, 0] - startend[:-1, 1]  # currend.start - last.end
        gapsum = diffs[diffs > 0].sum()
        # if start- and/or endtime is specified, add missing data at start/end
        # to gap sum
        has_gap = False
        gap_at_start = (
            args.start_time and
            data_start > args.start_time and
            data_start - args.start_time)
        gap_at_end = (
            args.end_time and
            args.end_time > data_end and
            args.end_time - data_end)
        if args.start_time and gap_at_start:
            gapsum += gap_at_start
            has_gap = True
        if args.end_time and gap_at_end:
            gapsum += gap_at_end
            has_gap = True
        # percentage of the requested/covered time range that holds data,
        # appended to the y-axis label of this id
        perc = (timerange - gapsum) / timerange
        labels[_i] = labels[_i] + "\n%.1f%%" % (perc * 100)
        # a gap is any inter-segment spacing larger than 1.8x the sampling
        # interval of the preceding segment
        gap_indices = diffs > 1.8 * _samp_int[:-1]
        gap_indices = np.append(gap_indices, False)
        has_gap |= any(gap_indices)
        if has_gap:
            # don't handle last end time as start of gap
            gaps_start = startend[gap_indices, 1]
            gaps_end = startend[np.roll(gap_indices, 1), 0]
            if args.start_time and gap_at_start:
                gaps_start = np.append(gaps_start, args.start_time)
                gaps_end = np.append(gaps_end, data_start)
            if args.end_time and gap_at_end:
                gaps_start = np.append(gaps_start, data_end)
                gaps_end = np.append(gaps_end, args.end_time)
            if not args.no_gaps:
                rects = [
                    Rectangle((start_, offset[0] - 0.4), end_ - start_, 0.8)
                    for start_, end_ in zip(gaps_start, gaps_end)
                ]
                ax.add_collection(PatchCollection(rects, color="r"))
            if args.print_gaps:
                for start_, end_ in zip(gaps_start, gaps_end):
                    start_, end_ = num2date((start_, end_))
                    start_ = UTCDateTime(start_.isoformat())
                    end_ = UTCDateTime(end_.isoformat())
                    if args.verbose or not args.quiet:
                        print("%s %s %s %.3f" % (_id, start_, end_,
                                                 end_ - start_))
    # Pretty format the plot
    ax.set_ylim(0 - 0.5, len(ids) - 0.5)
    ax.set_yticks(np.arange(len(ids)))
    ax.set_yticklabels(labels, family="monospace", ha="right")
    fig.autofmt_xdate()  # rotate date
    ax.xaxis_date()
    # set custom formatters to always show date in first tick
    formatter = ObsPyAutoDateFormatter(ax.xaxis.get_major_locator())
    formatter.scaled[1 / 24.] = \
        FuncFormatter(decimal_seconds_format_date_first_tick)
    formatter.scaled.pop(1 / (24. * 60.))
    ax.xaxis.set_major_formatter(formatter)
    plt.subplots_adjust(left=0.2)
    # set x-axis limits according to given start/end time
    if args.start_time and args.end_time:
        ax.set_xlim(left=args.start_time, right=args.end_time)
    elif args.start_time:
        ax.set_xlim(left=args.start_time, auto=None)
    elif args.end_time:
        ax.set_xlim(right=args.end_time, auto=None)
    else:
        left, right = ax.xaxis.get_data_interval()
        x_axis_range = right - left
        ax.set_xlim(left - 0.05 * x_axis_range,
                    right + 0.05 * x_axis_range)
    if args.output is None:
        plt.show()
    else:
        # scale the output figure with the number of channels and the
        # covered time span, within sane minimum/maximum bounds
        fig.set_dpi(72)
        height = len(ids) * 0.5
        height = max(4, height)
        fig.set_figheight(height)
        plt.tight_layout()
        if not args.start_time or not args.end_time:
            days = ax.get_xlim()
            days = days[1] - days[0]
        else:
            days = args.end_time - args.start_time
        width = max(6, days / 30.)
        width = min(width, height * 4)
        fig.set_figwidth(width)
        plt.subplots_adjust(top=1, bottom=0, left=0, right=1)
        plt.tight_layout()
        fig.savefig(args.output)
    if args.verbose and not args.quiet:
        sys.stdout.write('\n')
def run(argv=None, interactive=True):
    """
    Entry point for obspy-runtests: parse command line options, configure
    warning/verbosity behavior and environment flags, then hand off to
    the actual test runner.

    :param argv: command line arguments to parse (``None`` uses
        ``sys.argv`` via argparse).
    :param interactive: whether the user may be prompted (e.g. about
        submitting a test report); forced off by ``--dontask``/``--quiet``.
    :returns: whatever ``run_tests`` returns.
    """
    # tests must run without a display, so force the Agg backend up front
    MatplotlibBackend.switch_backend("AGG", sloppy=False)
    parser = ArgumentParser(prog="obspy-runtests",
                            description="A command-line program that runs all "
                                        "ObsPy tests.")
    parser.add_argument("-V", "--version", action="version",
                        version="%(prog)s " + get_git_version())
    parser.add_argument("-v", "--verbose", action="store_true",
                        help="verbose mode")
    parser.add_argument("-q", "--quiet", action="store_true",
                        help="quiet mode")
    # filter options
    filter = parser.add_argument_group(
        "Module Filter",
        "Providing no modules will test all "
        "ObsPy modules which do not require an "
        "active network connection.",
    )
    filter.add_argument("-a", "--all", action="store_true",
                        help="test all modules (including network modules)")
    filter.add_argument("-x", "--exclude", action="append",
                        help="exclude given module from test")
    filter.add_argument("tests", nargs="*", help="test modules to run")
    # timing / profile options
    timing = parser.add_argument_group("Timing/Profile Options")
    timing.add_argument("-t", "--timeit", action="store_true",
                        help="shows accumulated run times of each module")
    timing.add_argument("-s", "--slowest", default=0, type=int, dest="n",
                        help="lists n slowest test cases")
    timing.add_argument(
        "-p", "--profile", action="store_true",
        help="uses cProfile, saves the results to file " +
             "obspy.pstats and prints some profiling numbers",
    )
    # reporting options
    report = parser.add_argument_group("Reporting Options")
    report.add_argument("-r", "--report", action="store_true",
                        help="automatically submit a test report")
    report.add_argument(
        "-d", "--dontask", action="store_true",
        help="don't explicitly ask for submitting a test " "report",
    )
    report.add_argument("-u", "--server", default="tests.obspy.org",
                        help="report server (default is tests.obspy.org)")
    report.add_argument("-n", "--node", dest="hostname", default=HOSTNAME,
                        help="nodename visible at the report server")
    report.add_argument("-l", "--log", default=None,
                        help="append log file to test report")
    report.add_argument("--ci-url", default=None, dest="ci_url",
                        help="URL to Continuous Integration job page.")
    report.add_argument("--pr-url", default=None, dest="pr_url",
                        help="Github (Pull Request) URL.")
    # other options
    others = parser.add_argument_group("Additional Options")
    others.add_argument("--tutorial", action="store_true",
                        help="add doctests in tutorial")
    others.add_argument("--no-flake8", action="store_true",
                        help="skip code formatting test")
    others.add_argument(
        "--keep-images", action="store_true",
        help="store images created during image comparison "
             "tests in subfolders of baseline images",
    )
    others.add_argument(
        "--keep-only-failed-images", action="store_true",
        help="when storing images created during testing, "
             "only store failed images and the corresponding "
             "diff images (but not images that passed the "
             "corresponding test).",
    )
    args = parser.parse_args(argv)
    # set correct verbosity level
    if args.verbose:
        verbosity = 2
        # raise all NumPy warnings
        np.seterr(all="raise")
        # raise user and deprecation warnings
        warnings.simplefilter("error", UserWarning)
    elif args.quiet:
        verbosity = 0
        # ignore user and deprecation warnings
        warnings.simplefilter("ignore", DeprecationWarning)
        warnings.simplefilter("ignore", UserWarning)
        # don't ask to send a report
        args.dontask = True
    else:
        verbosity = 1
        # show all NumPy warnings
        np.seterr(all="print")
        # ignore user warnings
        warnings.simplefilter("ignore", UserWarning)
    # check for send report option or environmental settings
    # NOTE: rebinding "report" here shadows the argument group above, which
    # is no longer needed at this point.
    if args.report or "OBSPY_REPORT" in os.environ.keys():
        report = True
    else:
        report = False
    if "OBSPY_REPORT_SERVER" in os.environ.keys():
        args.server = os.environ["OBSPY_REPORT_SERVER"]
    # check interactivity settings
    if interactive and args.dontask:
        interactive = False
    # communicate image-keeping / flake8 choices to the test machinery via
    # environment variables (presence of the key is the flag, value unused)
    if args.keep_images:
        os.environ["OBSPY_KEEP_IMAGES"] = ""
    if args.keep_only_failed_images:
        os.environ["OBSPY_KEEP_ONLY_FAILED_IMAGES"] = ""
    if args.no_flake8:
        os.environ["OBSPY_NO_FLAKE8"] = ""
    return run_tests(
        verbosity,
        args.tests,
        report,
        args.log,
        args.server,
        args.all,
        args.timeit,
        interactive,
        args.n,
        exclude=args.exclude,
        tutorial=args.tutorial,
        hostname=args.hostname,
        ci_url=args.ci_url,
        pr_url=args.pr_url,
    )
def main(argv=None):
    """
    Entry point of the obspy-scan command line script.

    Scans waveform files (optionally recursively), collects per-channel
    start/end times and sampling intervals, and renders a data-availability
    plot with optional gap highlighting and gap listings.

    :param argv: command line arguments to parse (``None`` uses
        ``sys.argv`` via argparse).
    """
    parser = ArgumentParser(prog='obspy-scan', description=__doc__.strip(),
                            formatter_class=RawDescriptionHelpFormatter)
    parser.add_argument('-V', '--version', action='version',
                        version='%(prog)s ' + __version__)
    parser.add_argument('-f', '--format', choices=ENTRY_POINTS['waveform'],
                        help='Optional, the file format.\n' +
                             ' '.join(__doc__.split('\n')[-4:]))
    parser.add_argument('-v', '--verbose', action='store_true',
                        help='Optional. Verbose output.')
    parser.add_argument('-q', '--quiet', action='store_true',
                        help='Optional. Be quiet. Overwritten by --verbose '
                             'flag.')
    parser.add_argument('-n', '--non-recursive', action='store_false',
                        dest='recursive',
                        help='Optional. Do not descend into directories.')
    parser.add_argument('-i', '--ignore-links', action='store_true',
                        help='Optional. Do not follow symbolic links.')
    parser.add_argument('--start-time', default=None, type=UTCDateTime,
                        help='Optional, a UTCDateTime compatible string. ' +
                             'Only visualize data after this time and set ' +
                             'time-axis axis accordingly.')
    parser.add_argument('--end-time', default=None, type=UTCDateTime,
                        help='Optional, a UTCDateTime compatible string. ' +
                             'Only visualize data before this time and set ' +
                             'time-axis axis accordingly.')
    parser.add_argument('--id', action='append',
                        help='Optional, a SEED channel identifier '
                             "(e.g. 'GR.FUR..HHZ'). You may provide this " +
                             'option multiple times. Only these ' +
                             'channels will be plotted.')
    parser.add_argument('-t', '--event-time', default=None, type=UTCDateTime,
                        action='append',
                        help='Optional, a UTCDateTime compatible string ' +
                             "(e.g. '2010-01-01T12:00:00'). You may provide " +
                             'this option multiple times. These times get ' +
                             'marked by vertical lines in the plot. ' +
                             'Useful e.g. to mark event origin times.')
    parser.add_argument('-w', '--write', default=None,
                        help='Optional, npz file for writing data '
                             'after scanning waveform files')
    parser.add_argument('-l', '--load', default=None,
                        help='Optional, npz file for loading data '
                             'before scanning waveform files')
    parser.add_argument('--no-x', action='store_true',
                        help='Optional, Do not plot crosses.')
    parser.add_argument('--no-gaps', action='store_true',
                        help='Optional, Do not plot gaps.')
    parser.add_argument('-o', '--output', default=None,
                        help='Save plot to image file (e.g. out.pdf, ' +
                             'out.png) instead of opening a window.')
    parser.add_argument('--print-gaps', action='store_true',
                        help='Optional, prints a list of gaps at the end.')
    parser.add_argument('paths', nargs='*',
                        help='Files or directories to scan.')
    args = parser.parse_args(argv)
    # when writing to a file no display is needed
    if args.output is not None:
        MatplotlibBackend.switch_backend("AGG", sloppy=False)
    # Print help and exit if no arguments are given
    if len(args.paths) == 0 and args.load is None:
        parser.error('No paths specified.')
    # Use recursively parsing function?
    if args.recursive:
        parse_func = recursive_parse
    else:
        parse_func = parse_file_to_dict
    # matplotlib imports are deferred until after the backend switch above
    from matplotlib.dates import date2num, num2date
    from matplotlib.ticker import FuncFormatter
    from matplotlib.patches import Rectangle
    from matplotlib.collections import PatchCollection
    import matplotlib.pyplot as plt
    fig = plt.figure()
    ax = fig.add_subplot(111)
    # Plot vertical lines if option 'event_time' was specified
    if args.event_time:
        times = [date2num(t.datetime) for t in args.event_time]
        for time in times:
            ax.axvline(time, color='k')
    # convert start/end times to matplotlib date numbers once, up front;
    # note args.start_time/args.end_time are rebound to floats here
    if args.start_time:
        args.start_time = date2num(args.start_time.datetime)
    if args.end_time:
        args.end_time = date2num(args.end_time.datetime)
    # Generate dictionary containing nested lists of start and end times per
    # station
    data = {}
    samp_int = {}
    counter = 1
    if args.load:
        load_npz(args.load, data, samp_int)
    for path in args.paths:
        counter = parse_func(data, samp_int, path, counter, args.format,
                             verbose=args.verbose, quiet=args.quiet,
                             ignore_links=args.ignore_links)
    if not data:
        if args.verbose or not args.quiet:
            print("No waveform data found.")
        return
    if args.write:
        write_npz(args.write, data, samp_int)
    # either use ids specified by user or use ids based on what data we have
    # parsed
    ids = args.id or list(data.keys())
    ids = sorted(ids)[::-1]
    labels = [""] * len(ids)
    if args.verbose or not args.quiet:
        print('\n')
    for _i, _id in enumerate(ids):
        labels[_i] = ids[_i]
        # sort data list and sampling rate list
        if _id in data:
            startend = np.array(data[_id])
            _samp_int = np.array(samp_int[_id])
            indices = np.lexsort((startend[:, 1], startend[:, 0]))
            startend = startend[indices]
            _samp_int = _samp_int[indices]
        else:
            startend = np.array([])
            _samp_int = np.array([])
        if len(startend) == 0:
            # no data for this id: if a full time range was requested, show
            # it as one big gap, otherwise skip the id entirely
            if not (args.start_time and args.end_time):
                continue
            if not args.no_gaps:
                rects = [Rectangle((args.start_time, _i - 0.4),
                                   args.end_time - args.start_time, 0.8)]
                ax.add_collection(PatchCollection(rects, color="r"))
            if args.print_gaps and (args.verbose or not args.quiet):
                print("%s %s %s %.3f" % (
                    _id, args.start_time, args.end_time,
                    args.end_time - args.start_time))
            continue
        # restrict plotting of results to given start/end time
        if args.start_time:
            indices = startend[:, 1] > args.start_time
            startend = startend[indices]
            _samp_int = _samp_int[indices]
        if len(startend) == 0:
            continue
        if args.end_time:
            indices = startend[:, 0] < args.end_time
            startend = startend[indices]
            _samp_int = _samp_int[indices]
        if len(startend) == 0:
            continue
        data_start = startend[:, 0].min()
        data_end = startend[:, 1].max()
        timerange_start = args.start_time or data_start
        timerange_end = args.end_time or data_end
        timerange = timerange_end - timerange_start
        if timerange == 0.0:
            warnings.warn('Zero sample long data for _id=%s, skipping' % _id)
            continue
        startend_compressed = compress_start_end(startend, 1000)
        offset = np.ones(len(startend)) * _i  # generate list of y values
        if not args.no_x:
            ax.plot(startend[:, 0], offset, 'x', linewidth=2)
        ax.hlines(offset[:len(startend_compressed)],
                  startend_compressed[:, 0], startend_compressed[:, 1],
                  'b', linewidth=2, zorder=3)
        # find the gaps
        diffs = startend[1:, 0] - startend[:-1, 1]  # currend.start - last.end
        gapsum = diffs[diffs > 0].sum()
        # if start- and/or endtime is specified, add missing data at start/end
        # to gap sum
        has_gap = False
        gap_at_start = (
            args.start_time and
            data_start > args.start_time and
            data_start - args.start_time)
        gap_at_end = (
            args.end_time and
            args.end_time > data_end and
            args.end_time - data_end)
        if args.start_time and gap_at_start:
            gapsum += gap_at_start
            has_gap = True
        if args.end_time and gap_at_end:
            gapsum += gap_at_end
            has_gap = True
        # percentage of the requested/covered time range that holds data,
        # appended to the y-axis label of this id
        perc = (timerange - gapsum) / timerange
        labels[_i] = labels[_i] + "\n%.1f%%" % (perc * 100)
        # a gap is any inter-segment spacing larger than 1.8x the sampling
        # interval of the preceding segment
        gap_indices = diffs > 1.8 * _samp_int[:-1]
        gap_indices = np.append(gap_indices, False)
        has_gap |= any(gap_indices)
        if has_gap:
            # don't handle last end time as start of gap
            gaps_start = startend[gap_indices, 1]
            gaps_end = startend[np.roll(gap_indices, 1), 0]
            if args.start_time and gap_at_start:
                gaps_start = np.append(gaps_start, args.start_time)
                gaps_end = np.append(gaps_end, data_start)
            if args.end_time and gap_at_end:
                gaps_start = np.append(gaps_start, data_end)
                gaps_end = np.append(gaps_end, args.end_time)
            if not args.no_gaps:
                rects = [Rectangle((start_, offset[0] - 0.4), end_ - start_,
                                   0.8)
                         for start_, end_ in zip(gaps_start, gaps_end)]
                ax.add_collection(PatchCollection(rects, color="r"))
            if args.print_gaps:
                for start_, end_ in zip(gaps_start, gaps_end):
                    start_, end_ = num2date((start_, end_))
                    start_ = UTCDateTime(start_.isoformat())
                    end_ = UTCDateTime(end_.isoformat())
                    if args.verbose or not args.quiet:
                        print("%s %s %s %.3f" % (_id, start_, end_,
                                                 end_ - start_))
    # Pretty format the plot
    ax.set_ylim(0 - 0.5, len(ids) - 0.5)
    ax.set_yticks(np.arange(len(ids)))
    ax.set_yticklabels(labels, family="monospace", ha="right")
    fig.autofmt_xdate()  # rotate date
    ax.xaxis_date()
    # set custom formatters to always show date in first tick
    formatter = ObsPyAutoDateFormatter(ax.xaxis.get_major_locator())
    formatter.scaled[1 / 24.] = \
        FuncFormatter(decimal_seconds_format_date_first_tick)
    formatter.scaled.pop(1/(24.*60.))
    ax.xaxis.set_major_formatter(formatter)
    plt.subplots_adjust(left=0.2)
    # set x-axis limits according to given start/end time
    if args.start_time and args.end_time:
        ax.set_xlim(left=args.start_time, right=args.end_time)
    elif args.start_time:
        ax.set_xlim(left=args.start_time, auto=None)
    elif args.end_time:
        ax.set_xlim(right=args.end_time, auto=None)
    else:
        left, right = ax.xaxis.get_data_interval()
        x_axis_range = right - left
        ax.set_xlim(left - 0.05 * x_axis_range,
                    right + 0.05 * x_axis_range)
    if args.output is None:
        plt.show()
    else:
        # scale the output figure with the number of channels and the
        # covered time span, within sane minimum/maximum bounds
        fig.set_dpi(72)
        height = len(ids) * 0.5
        height = max(4, height)
        fig.set_figheight(height)
        plt.tight_layout()
        if not args.start_time or not args.end_time:
            days = ax.get_xlim()
            days = days[1] - days[0]
        else:
            days = args.end_time - args.start_time
        width = max(6, days / 30.)
        width = min(width, height * 4)
        fig.set_figwidth(width)
        plt.subplots_adjust(top=1, bottom=0, left=0, right=1)
        plt.tight_layout()
        fig.savefig(args.output)
    if args.verbose and not args.quiet:
        sys.stdout.write('\n')
def scan(paths, format=None, verbose=False, recursive=True, ignore_links=False,
         starttime=None, endtime=None, seed_ids=None, event_times=None,
         npz_output=None, npz_input=None, plot_x=True, plot_gaps=True,
         print_gaps=False, plot=False):
    """
    Scan waveform files and analyze (and optionally plot) data availability.

    :type plot: bool or str
    :param plot: False for no plot at all, True for interactive window, str for
        output to image file.
    :returns: The :class:`Scanner` instance used, or ``None`` if no waveform
        data was found.
    """
    scanner = Scanner(format=format, verbose=verbose, recursive=recursive,
                      ignore_links=ignore_links)
    # Normalize ``plot`` so that ``None`` behaves like "no plot".
    plot = False if plot is None else plot
    # Without file paths and without a previously saved npz file there is
    # nothing to work on.
    if len(paths) == 0 and npz_input is None:
        raise ValueError("No paths specified and no npz data to load "
                         "specified")
    if npz_input:
        scanner.load_npz(npz_input)
    for path in paths:
        scanner.parse(path)
    if not scanner.data:
        if verbose:
            print("No waveform data found.")
        return None
    if npz_output:
        scanner.save_npz(npz_output)
    kwargs = {"starttime": starttime, "endtime": endtime,
              "seed_ids": seed_ids}
    if not plot:
        # Analysis only, no figure.
        scanner.analyze_parsed_data(print_gaps=print_gaps, **kwargs)
        return scanner
    # Plot-specific options only apply when a figure is produced.
    kwargs["plot_x"] = plot_x
    kwargs["plot_gaps"] = plot_gaps
    kwargs["print_gaps"] = print_gaps
    kwargs["event_times"] = event_times
    if plot is True:
        # Interactive window.
        scanner.plot(outfile=None, show=True, **kwargs)
    else:
        # Plotting to an image file, so switch to non-interactive backend.
        with MatplotlibBackend("AGG", sloppy=False):
            scanner.plot(outfile=plot, show=False, **kwargs)
    return scanner
def xcorr_pick_correction(pick1, trace1, pick2, trace2, t_before, t_after,
                          cc_maxlag, filter=None, filter_options=None,
                          plot=False, filename=None):
    """
    Calculate the correction for the differential pick time determined by
    cross correlation of the waveforms in narrow windows around the pick
    times. For details on the fitting procedure refer to [Deichmann1992]_.

    The parameters depend on the epicentral distance and magnitude range. For
    small local earthquakes (Ml ~0-2, distance ~3-10 km) with consistent
    manual picks the following can be tried::

        t_before=0.05, t_after=0.2, cc_maxlag=0.10,
        filter="bandpass", filter_options={'freqmin': 1, 'freqmax': 20}

    The appropriate parameter sets can and should be determined/verified
    visually using the option `plot=True` on a representative set of picks.

    To get the corrected differential pick time calculate:
    ``((pick2 + pick2_corr) - pick1)``. To get a corrected differential
    travel time using origin times for both events calculate:
    ``((pick2 + pick2_corr - ot2) - (pick1 - ot1))``

    :type pick1: :class:`~obspy.core.utcdatetime.UTCDateTime`
    :param pick1: Time of pick for `trace1`.
    :type trace1: :class:`~obspy.core.trace.Trace`
    :param trace1: Waveform data for `pick1`. Add some time at front/back.
            The appropriate part of the trace is used automatically.
    :type pick2: :class:`~obspy.core.utcdatetime.UTCDateTime`
    :param pick2: Time of pick for `trace2`.
    :type trace2: :class:`~obspy.core.trace.Trace`
    :param trace2: Waveform data for `pick2`. Add some time at front/back.
            The appropriate part of the trace is used automatically.
    :type t_before: float
    :param t_before: Time to start cross correlation window before pick times
            in seconds.
    :type t_after: float
    :param t_after: Time to end cross correlation window after pick times in
            seconds.
    :type cc_maxlag: float
    :param cc_maxlag: Maximum lag/shift time tested during cross correlation
        in seconds.
    :type filter: str
    :param filter: `None` for no filtering or name of filter type
            as passed on to :meth:`~obspy.core.Trace.trace.filter` if filter
            should be used. To avoid artifacts in filtering provide
            sufficiently long time series for `trace1` and `trace2`.
    :type filter_options: dict
    :param filter_options: Filter options that get passed on to
            :meth:`~obspy.core.Trace.trace.filter` if filtering is used.
            Defaults to an empty dict (``None``).
    :type plot: bool
    :param plot: If `True`, a plot window illustrating the alignment of the
            two traces at best cross correlation will be shown. This can and
            should be used to verify the used parameters before running
            automatedly on large data sets.
    :type filename: str
    :param filename: If plot option is selected, specifying a filename here
            (e.g. 'myplot.pdf' or 'myplot.png') will output the plot to a
            file instead of opening a plot window.
    :rtype: (float, float)
    :returns: Correction time `pick2_corr` for `pick2` pick time as a float
            and corresponding correlation coefficient.
    """
    # Avoid the mutable default argument pitfall: an empty dict default
    # would be shared across calls, so normalize None here instead.
    if filter_options is None:
        filter_options = {}
    # perform some checks on the traces
    if trace1.stats.sampling_rate != trace2.stats.sampling_rate:
        msg = "Sampling rates do not match: %s != %s" % \
            (trace1.stats.sampling_rate, trace2.stats.sampling_rate)
        raise Exception(msg)
    if trace1.id != trace2.id:
        msg = "Trace ids do not match: %s != %s" % (trace1.id, trace2.id)
        warnings.warn(msg)
    samp_rate = trace1.stats.sampling_rate
    # don't modify existing traces with filters
    if filter:
        trace1 = trace1.copy()
        trace2 = trace2.copy()
    # check data, apply filter and take correct slice of traces
    slices = []
    for _i, (t, tr) in enumerate(((pick1, trace1), (pick2, trace2))):
        # window includes half the maximum lag on each side so the shifted
        # overlap always stays inside the slice
        start = t - t_before - (cc_maxlag / 2.0)
        end = t + t_after + (cc_maxlag / 2.0)
        duration = end - start
        # check if necessary time spans are present in data
        if tr.stats.starttime > start:
            msg = "Trace %s starts too late." % _i
            raise Exception(msg)
        if tr.stats.endtime < end:
            msg = "Trace %s ends too early." % _i
            raise Exception(msg)
        # warn if there is not at least one window length of extra data on
        # either side to absorb filter edge effects
        if filter and start - tr.stats.starttime < duration:
            msg = "Artifacts from signal processing possible. Trace " + \
                  "%s should have more additional data at the start." % _i
            warnings.warn(msg)
        if filter and tr.stats.endtime - end < duration:
            msg = "Artifacts from signal processing possible. Trace " + \
                  "%s should have more additional data at the end." % _i
            warnings.warn(msg)
        # apply signal processing and take correct slice of data
        if filter:
            tr.data = tr.data.astype(np.float64)
            tr.detrend(type='demean')
            tr.data *= cosine_taper(len(tr), 0.1)
            tr.filter(type=filter, **filter_options)
        slices.append(tr.slice(start, end))
    # cross correlate
    shift_len = int(cc_maxlag * samp_rate)
    cc = correlate(slices[0].data, slices[1].data, shift_len, method='direct')
    _cc_shift, cc_max = xcorr_max(cc)
    # second derivative of the cc function, padded so indices line up with cc
    cc_curvature = np.concatenate((np.zeros(1), np.diff(cc, 2), np.zeros(1)))
    cc_convex = np.ma.masked_where(np.sign(cc_curvature) >= 0, cc)
    cc_concave = np.ma.masked_where(np.sign(cc_curvature) < 0, cc)
    # check results of cross correlation
    if cc_max < 0:
        msg = "Absolute maximum is negative: %.3f. " % cc_max + \
              "Using positive maximum: %.3f" % max(cc)
        warnings.warn(msg)
        cc_max = max(cc)
    if cc_max < 0.8:
        msg = "Maximum of cross correlation lower than 0.8: %s" % cc_max
        warnings.warn(msg)
    # make array with time shifts in seconds corresponding to cc function
    cc_t = np.linspace(-cc_maxlag, cc_maxlag, shift_len * 2 + 1)
    # take the subportion of the cross correlation around the maximum that is
    # convex and fit a parabola.
    # use vertex as subsample resolution best cc fit.
    peak_index = cc.argmax()
    first_sample = peak_index
    # XXX this could be improved..
    while first_sample > 0 and cc_curvature[first_sample - 1] <= 0:
        first_sample -= 1
    last_sample = peak_index
    while last_sample < len(cc) - 1 and cc_curvature[last_sample + 1] <= 0:
        last_sample += 1
    if first_sample == 0 or last_sample == len(cc) - 1:
        msg = "Fitting at maximum lag. Maximum lag time should be increased."
        warnings.warn(msg)
    # work on subarrays
    num_samples = last_sample - first_sample + 1
    if num_samples < 3:
        msg = "Less than 3 samples selected for fit to cross " + \
              "correlation: %s" % num_samples
        raise Exception(msg)
    if num_samples < 5:
        msg = "Less than 5 samples selected for fit to cross " + \
              "correlation: %s" % num_samples
        warnings.warn(msg)
    # quadratic fit for small subwindow
    # (np.polyfit replaces the removed scipy.polyfit alias; with full=True
    # the first two return values are the coefficients and the residuals)
    coeffs, residual = np.polyfit(
        cc_t[first_sample:last_sample + 1],
        cc[first_sample:last_sample + 1], deg=2, full=True)[:2]
    # check results of fit
    if coeffs[0] >= 0:
        msg = "Fitted parabola opens upwards!"
        warnings.warn(msg)
    if residual > 0.1:
        msg = "Residual in quadratic fit to cross correlation maximum " + \
              "larger than 0.1: %s" % residual
        warnings.warn(msg)
    # X coordinate of vertex of parabola gives time shift to correct
    # differential pick time. Y coordinate gives maximum correlation
    # coefficient.
    dt = -coeffs[1] / 2.0 / coeffs[0]
    coeff = (4 * coeffs[0] * coeffs[2] - coeffs[1] ** 2) / (4 * coeffs[0])
    # this is the shift to apply on the time axis of `trace2` to align the
    # traces. Actually we do not want to shift the trace to align it but we
    # want to correct the time of `pick2` so that the traces align without
    # shifting. This is the negative of the cross correlation shift.
    dt = -dt
    pick2_corr = dt
    # plot the results if selected
    if plot is True:
        with MatplotlibBackend(filename and "AGG" or None, sloppy=True):
            import matplotlib.pyplot as plt
            fig = plt.figure()
            ax1 = fig.add_subplot(211)
            tmp_t = np.linspace(0, len(slices[0]) / samp_rate, len(slices[0]))
            ax1.plot(tmp_t, slices[0].data / float(slices[0].data.max()), "k",
                     label="Trace 1")
            ax1.plot(tmp_t, slices[1].data / float(slices[1].data.max()), "r",
                     label="Trace 2")
            ax1.plot(tmp_t - dt, slices[1].data / float(slices[1].data.max()),
                     "g", label="Trace 2 (shifted)")
            ax1.legend(loc="lower right", prop={'size': "small"})
            ax1.set_title("%s" % slices[0].id)
            ax1.set_xlabel("time [s]")
            ax1.set_ylabel("norm. amplitude")
            ax2 = fig.add_subplot(212)
            ax2.plot(cc_t, cc_convex, ls="", marker=".", color="k",
                     label="xcorr (convex)")
            ax2.plot(cc_t, cc_concave, ls="", marker=".", color="0.7",
                     label="xcorr (concave)")
            ax2.plot(cc_t[first_sample:last_sample + 1],
                     cc[first_sample:last_sample + 1], "b.",
                     label="used for fitting")
            tmp_t = np.linspace(cc_t[first_sample], cc_t[last_sample],
                                num_samples * 10)
            # np.polyval replaces the removed scipy.polyval alias
            ax2.plot(tmp_t, np.polyval(coeffs, tmp_t), "b", label="fit")
            ax2.axvline(-dt, color="g", label="vertex")
            ax2.axhline(coeff, color="g")
            ax2.set_xlabel("%.2f at %.3f seconds correction" % (coeff, -dt))
            ax2.set_ylabel("correlation coefficient")
            ax2.set_ylim(-1, 1)
            ax2.set_xlim(cc_t[0], cc_t[-1])
            ax2.legend(loc="lower right", prop={'size': "x-small"})
            if filename:
                fig.savefig(filename)
            else:
                plt.show()
    return (pick2_corr, coeff)
def main(argv=None):
    """
    Entry point for the ``obspy-sds-report`` command line script.

    Parses command line options, determines the list of streams to check in
    the SDS archive (or reuses a previously written list with ``--update``),
    gathers latency and availability information and writes three files next
    to ``--output``: a stream list (``.txt``), an obspy-scan image (``.png``)
    and an HTML report (``.html``).

    :type argv: list of str
    :param argv: Command line arguments (``sys.argv`` is used if ``None``).
    """
    # Output goes to files only, so force a non-interactive backend.
    MatplotlibBackend.switch_backend("AGG", sloppy=False)
    parser = ArgumentParser(
        prog='obspy-sds-report', description=__doc__,
        formatter_class=RawDescriptionHelpFormatter)
    parser.add_argument(
        '-r', '--sds-root', dest='sds_root', required=True,
        help='Root folder of SDS archive.')
    parser.add_argument(
        '-o', '--output', dest='output', required=True,
        help='Full path (absolute or relative) of output files, without '
             'suffix (e.g. ``/tmp/sds_report``).')
    parser.add_argument(
        '-u', '--update', dest='update', default=False, action="store_true",
        help='Only update latency information, reuse previously computed list '
             'of streams to check and data percentage and gap count. Many '
             'other options e.g. regarding stream selection (``--id``, '
             ' ``--location``, ..) and time span of data quality checks '
             '(``--check-quality-days``) will be without effect if this '
             'option is specified. Only updating latency is significantly '
             'faster than a full analysis run, a normal use case is to do a '
             'full run once or twice per day and update latency every 5 or '
             'ten minutes. An exception is raised if an update is specified '
             'but the necessary file is not yet present.')
    parser.add_argument(
        '-l', '--location', dest='locations', action="append",
        help='Location codes to look for (e.g. ``""`` for empty location code '
             'or ``"00"``). This option can be provided multiple times and '
             'must be specified at least once for a full run (i.e. without '
             '``--update`` option). While network/station combinations are '
             'automatically discovered, only streams whose location codes are '
             'provided here will be discovered and taken into account and '
             'ultimately displayed.')
    parser.add_argument(
        '-c', '--channel', dest='channels', action="append",
        help='Channel codes to look for (e.g. specified three times with '
             '``HHZ``, ``EHZ`` and ``ELZ`` to cover all stations that serve '
             'a broad-band, short-period or low gain vertical channel). '
             'This option can be provided multiple times and must be '
             'specified at least once for a full run (i.e. without '
             '``--update`` option). Only one stream per '
             'network/station/location combination will be displayed, '
             'selected by the lowest latency.')
    parser.add_argument(
        '-i', '--id', dest='ids', action="append", default=[],
        help='SEED IDs of streams that should be included in addition to the '
             'autodiscovery of streams controlled by ``--location`` and '
             '``--channel`` options (e.g. ``IU.ANMO..LHZ``). '
             'This option can be provided multiple times.')
    parser.add_argument(
        '--skip', dest='skip', action="append", default=[],
        help='Networks or stations that should be skipped (e.g. ``IU`` or '
             '``IU.ANMO``). This option can be provided multiple times.')
    parser.add_argument(
        '-f', '--format', default="MSEED", choices=ENTRY_POINTS['waveform'],
        help='Waveform format of SDS archive. Should be "MSEED" in most '
             'cases. Use ``None`` or empty string for format autodection '
             '(slower and should not be necessary in most all cases). '
             'Warning: formats that do not support ``headonly`` '
             'option in ``read()`` operation will be significantly slower).')
    parser.add_argument(
        '--check-backwards-days', dest='check_back_days', default=30,
        type=int, help='Check for latency backwards for this many days.')
    parser.add_argument(
        '--check-quality-days', dest='check_quality_days', default=7,
        type=int, help='Calculate and plot data availability and number of '
                       'gaps for a period of this many days.')
    parser.add_argument(
        '--latency-warn', dest='latency_warn', default=3600, type=float,
        help='Latency warning threshold in seconds.')
    parser.add_argument(
        '--latency-warn-color', dest='latency_warn_color', default="#FFFF33",
        help='Latency warning threshold color (valid HTML color string).')
    parser.add_argument(
        '--latency-error', dest='latency_error', default=24 * 3600,
        type=float, help='Latency error threshold in seconds.')
    parser.add_argument(
        '--latency-error-color', dest='latency_error_color',
        default="#E41A1C",
        help='Latency error threshold color (valid HTML color string).')
    parser.add_argument(
        '--percentage-warn', dest='percentage_warn', default=99.5, type=float,
        help='Data availability percentage warning threshold (``0`` to '
             '``100``).')
    parser.add_argument(
        '--gaps-warn', dest='gaps_warn', default=20, type=int,
        help='Gap/overlap number warning threshold.')
    parser.add_argument(
        '--data-quality-warn-color', dest='data_quality_warn_color',
        default="#377EB8",
        help='Data quality (percentage/gap count) warning color '
             '(valid HTML color string).')
    parser.add_argument(
        '--outdated-color', dest='outdated_color', default="#808080",
        help='Color for streams that have no data in check range '
             '(valid HTML color string).')
    parser.add_argument(
        '--ok-color', dest='ok_color', default="#4DAF4A",
        help='Color for streams that pass all checks (valid HTML color '
             'string).')
    parser.add_argument(
        '--background-color', dest='background_color', default="#999999",
        help='Color for background of page (valid HTML color string).')
    parser.add_argument(
        '-V', '--version', action='version',
        version='%(prog)s ' + __version__)
    args = parser.parse_args(argv)
    now = UTCDateTime()
    # oldest data time still considered when looking backwards for latency
    stop_time = now - args.check_back_days * 24 * 3600
    client = Client(args.sds_root)
    # row layout of the stream list file:
    # net, sta, loc, cha, latency, percentage, gap_count
    dtype_streamfile = np.dtype("U10, U30, U10, U10, f8, f8, i8")
    # availability is checked up to one hour before now
    availability_check_endtime = now - 3600
    availability_check_starttime = (
        availability_check_endtime - (args.check_quality_days * 24 * 3600))
    streams_file = args.output + ".txt"
    html_file = args.output + ".html"
    scan_file = args.output + ".png"
    # ``--format None`` / ``--format ""`` means autodetect
    if args.format.upper() == "NONE" or args.format == "":
        args.format = None
    # check whether to set up list of streams to check or use existing list
    # update list of streams once per day at nighttime
    if args.update:
        if not Path(streams_file).is_file():
            msg = ("Update flag specified, but no output of previous full run "
                   "was present in the expected location (as determined by "
                   "``--output`` flag: {})").format(streams_file)
            raise IOError(msg)
        # use existing list of streams and availability information, just
        # update latency
        nslc = np.loadtxt(streams_file, delimiter=",", dtype=dtype_streamfile)
    else:
        if not args.locations or not args.channels:
            msg = ("At least one location code ``--location`` and at least "
                   "one channel code ``--channel`` must be specified.")
            raise ObsPyException(msg)
        nsl = set()
        # get all network/station combinations in SDS archive
        for net, sta in client.get_all_stations():
            if net in args.skip or ".".join((net, sta)) in args.skip:
                continue
            # for all combinations of user specified location and channel
            # codes check if data is in SDS archive
            for loc in args.locations:
                for cha in args.channels:
                    if client.has_data(net, sta, loc, cha):
                        # for now omit channel information, we only include
                        # the channel with lowest latency later on
                        nsl.add((net, sta, loc))
                        break
        nsl = sorted(nsl)
        nslc = []
        # determine which channel to check for each network/station/location
        # combination
        for net, sta, loc in nsl:
            latency = []
            # check latency of all channels that should be checked
            for cha in args.channels:
                latency_ = client.get_latency(
                    net, sta, loc, cha, stop_time=stop_time,
                    check_has_no_data=False)
                # ``None`` (no data) is treated as infinite latency
                latency.append(latency_ or np.inf)
            # only include the channel with lowest latency in our stream list
            cha = args.channels[np.argmin(latency)]
            latency = np.min(latency)
            nslc.append((net, sta, loc, cha, latency))
        # NOTE(review): ``id`` shadows the builtin of the same name here
        for id in args.ids:
            net, sta, loc, cha = id.split(".")
            latency = client.get_latency(
                net, sta, loc, cha, stop_time=stop_time,
                check_has_no_data=False)
            latency = latency or np.inf
            nslc.append((net, sta, loc, cha, latency))
        nslc_ = []
        # request and assemble availability information.
        # this takes pretty long (on network/slow file systems),
        # so we only do it during a full run here, not during update
        for net, sta, loc, cha, latency in nslc:
            percentage, gap_count = client.get_availability_percentage(
                net, sta, loc, cha, availability_check_starttime,
                availability_check_endtime)
            nslc_.append((net, sta, loc, cha, latency, percentage, gap_count))
        nslc = nslc_
        # write stream list and availability information to file
        nslc = np.array(sorted(nslc), dtype=dtype_streamfile)
        np.savetxt(streams_file, nslc, delimiter=",",
                   fmt=["%s", "%s", "%s", "%s", "%f", "%f", "%d"])
        # generate obspy-scan image
        files = []
        seed_ids = set()
        for nslc_ in nslc:
            net, sta, loc, cha, latency, _, _ = nslc_
            # skip streams with no data inside the check range
            if np.isinf(latency) or \
                    latency > args.check_back_days * 24 * 3600:
                continue
            seed_ids.add(".".join((net, sta, loc, cha)))
            files += client._get_filenames(
                net, sta, loc, cha, availability_check_starttime,
                availability_check_endtime)
        scan(files, format=args.format,
             starttime=availability_check_starttime,
             endtime=availability_check_endtime, plot=scan_file,
             verbose=False, recursive=True, ignore_links=False,
             seed_ids=seed_ids, print_gaps=False)
    # request and assemble current latency information
    data = []
    for net, sta, loc, cha, latency, percentage, gap_count in nslc:
        if args.update:
            # latency stored in the file is stale, re-measure it
            latency = client.get_latency(
                net, sta, loc, cha, stop_time=stop_time,
                check_has_no_data=False)
            latency = latency or np.inf
        data.append((net, sta, loc, cha, latency, percentage, gap_count))
    # separate out the long dead streams
    data_normal = []
    data_outdated = []
    for data_ in data:
        latency = data_[4]
        if np.isinf(latency) or latency > args.check_back_days * 24 * 3600:
            data_outdated.append(data_)
        else:
            data_normal.append(data_)
    # write html output to file
    html = _format_html(args, data_normal, data_outdated)
    with open(html_file, "wt") as fh:
        fh.write(html)
def run(argv=None, interactive=True):
    """
    Entry point for the ``obspy-runtests`` command line script.

    Parses command line options, configures NumPy error handling and warning
    filters according to the verbosity flags, exports image-handling flags
    via environment variables and finally hands over to ``run_tests``.

    :type argv: list of str
    :param argv: Command line arguments (``sys.argv`` is used if ``None``).
    :type interactive: bool
    :param interactive: Passed through to ``run_tests``; presumably controls
        interactive prompting (e.g. before report submission) — forced to
        ``False`` when ``--dontask`` is given.
    """
    # Tests may produce figures; force a non-interactive backend.
    MatplotlibBackend.switch_backend("AGG", sloppy=False)
    parser = ArgumentParser(prog='obspy-runtests',
                            description='A command-line program that runs all '
                                        'ObsPy tests.')
    parser.add_argument('-V', '--version', action='version',
                        version='%(prog)s ' + get_git_version())
    parser.add_argument('-v', '--verbose', action='store_true',
                        help='verbose mode')
    parser.add_argument('-q', '--quiet', action='store_true',
                        help='quiet mode')
    parser.add_argument('--raise-all-warnings', action='store_true',
                        help='All warnings are raised as exceptions when this '
                             'flag is set. Only for debugging purposes.')
    # filter options
    # NOTE(review): ``filter`` shadows the builtin of the same name here
    filter = parser.add_argument_group('Module Filter',
                                       'Providing no modules will test all '
                                       'ObsPy modules which do not require an '
                                       'active network connection.')
    filter.add_argument('-a', '--all', action='store_true',
                        dest='test_all_modules',
                        help='test all modules (including network modules)')
    filter.add_argument('-x', '--exclude', action='append',
                        help='exclude given module from test')
    filter.add_argument('tests', nargs='*',
                        help='test modules to run')
    # timing / profile options
    timing = parser.add_argument_group('Timing/Profile Options')
    timing.add_argument('-t', '--timeit', action='store_true',
                        help='shows accumulated run times of each module')
    timing.add_argument('-s', '--slowest', default=0, type=int, dest='n',
                        help='lists n slowest test cases')
    timing.add_argument('-p', '--profile', action='store_true',
                        help='uses cProfile, saves the results to file ' +
                             'obspy.pstats and prints some profiling numbers')
    # reporting options
    report = parser.add_argument_group('Reporting Options')
    report.add_argument('-r', '--report', action='store_true',
                        help='automatically submit a test report')
    report.add_argument('-d', '--dontask', action='store_true',
                        help="don't explicitly ask for submitting a test "
                             "report")
    report.add_argument('-u', '--server', default='tests.obspy.org',
                        help='report server (default is tests.obspy.org)')
    report.add_argument('-n', '--node', dest='hostname', default=HOSTNAME,
                        help='nodename visible at the report server')
    report.add_argument('-l', '--log', default=None,
                        help='append log file to test report')
    report.add_argument('--ci-url', default=None, dest="ci_url",
                        help='URL to Continuous Integration job page.')
    report.add_argument('--pr-url', default=None, dest="pr_url",
                        help='Github (Pull Request) URL.')
    # other options
    others = parser.add_argument_group('Additional Options')
    others.add_argument('--tutorial', action='store_true',
                        help='add doctests in tutorial')
    others.add_argument('--no-flake8', action='store_true',
                        help='skip code formatting test')
    others.add_argument('--keep-images', action='store_true',
                        help='store images created during image comparison '
                             'tests in subfolders of baseline images')
    others.add_argument('--keep-only-failed-images', action='store_true',
                        help='when storing images created during testing, '
                             'only store failed images and the corresponding '
                             'diff images (but not images that passed the '
                             'corresponding test).')
    args = parser.parse_args(argv)
    # set correct verbosity level
    if args.verbose:
        verbosity = 2
        # raise all NumPy warnings
        np.seterr(all='warn')
    elif args.quiet:
        verbosity = 0
        # ignore user and deprecation warnings
        warnings.simplefilter("ignore", DeprecationWarning)
        warnings.simplefilter("ignore", UserWarning)
        # don't ask to send a report
        args.dontask = True
    else:
        verbosity = 1
        # show all NumPy warnings
        np.seterr(all='print')
        # ignore user warnings
        warnings.simplefilter("ignore", UserWarning)
    # whether to raise any warning that's appearing
    if args.raise_all_warnings:
        # raise all NumPy warnings
        np.seterr(all='raise')
        # raise user and deprecation warnings
        warnings.simplefilter("error", UserWarning)
    # check for send report option or environmental settings
    # (``report`` rebinds the argument-group variable above, which is no
    # longer needed at this point)
    if args.report or 'OBSPY_REPORT' in os.environ.keys():
        report = True
    else:
        report = False
    if 'OBSPY_REPORT_SERVER' in os.environ.keys():
        args.server = os.environ['OBSPY_REPORT_SERVER']
    # check interactivity settings
    if interactive and args.dontask:
        interactive = False
    # image-handling flags are communicated to the test machinery via
    # environment variables (presence of the variable is the flag)
    if args.keep_images:
        os.environ['OBSPY_KEEP_IMAGES'] = ""
    if args.keep_only_failed_images:
        os.environ['OBSPY_KEEP_ONLY_FAILED_IMAGES'] = ""
    if args.no_flake8:
        os.environ['OBSPY_NO_FLAKE8'] = ""
    # All arguments are used by the test runner and should not interfere
    # with any other module that might also parse them, e.g. flake8.
    sys.argv = sys.argv[:1]
    return run_tests(verbosity, args.tests, report, args.log, args.server,
                     args.test_all_modules, args.timeit, interactive, args.n,
                     exclude=args.exclude, tutorial=args.tutorial,
                     hostname=args.hostname, ci_url=args.ci_url,
                     pr_url=args.pr_url)
def main(argv=None):
    """
    Entry point for the ``obspy-sds-report`` command line script.

    Parses command line options, determines the list of streams to check in
    the SDS archive (or reuses a previously written list with ``--update``),
    gathers latency and availability information and writes three files next
    to ``--output``: a stream list (``.txt``), an obspy-scan image (``.png``)
    and an HTML report (``.html``).

    :type argv: list of str
    :param argv: Command line arguments (``sys.argv`` is used if ``None``).
    """
    # Output goes to files only, so force a non-interactive backend.
    MatplotlibBackend.switch_backend("AGG", sloppy=False)
    parser = ArgumentParser(
        prog='obspy-sds-report', description=__doc__,
        formatter_class=RawDescriptionHelpFormatter)
    parser.add_argument(
        '-r', '--sds-root', dest='sds_root', required=True,
        help='Root folder of SDS archive.')
    parser.add_argument(
        '-o', '--output', dest='output', required=True,
        help='Full path (absolute or relative) of output files, without '
             'suffix (e.g. ``/tmp/sds_report``).')
    parser.add_argument(
        '-u', '--update', dest='update', default=False, action="store_true",
        help='Only update latency information, reuse previously computed list '
             'of streams to check and data percentage and gap count. Many '
             'other options e.g. regarding stream selection (``--id``, '
             ' ``--location``, ..) and time span of data quality checks '
             '(``--check-quality-days``) will be without effect if this '
             'option is specified. Only updating latency is significantly '
             'faster than a full analysis run, a normal use case is to do a '
             'full run once or twice per day and update latency every 5 or '
             'ten minutes. An exception is raised if an update is specified '
             'but the necessary file is not yet present.')
    parser.add_argument(
        '-l', '--location', dest='locations', action="append",
        help='Location codes to look for (e.g. ``""`` for empty location code '
             'or ``"00"``). This option can be provided multiple times and '
             'must be specified at least once for a full run (i.e. without '
             '``--update`` option). While network/station combinations are '
             'automatically discovered, only streams whose location codes are '
             'provided here will be discovered and taken into account and '
             'ultimately displayed.')
    parser.add_argument(
        '-c', '--channel', dest='channels', action="append",
        help='Channel codes to look for (e.g. specified three times with '
             '``HHZ``, ``EHZ`` and ``ELZ`` to cover all stations that serve '
             'a broad-band, short-period or low gain vertical channel). '
             'This option can be provided multiple times and must be '
             'specified at least once for a full run (i.e. without '
             '``--update`` option). Only one stream per '
             'network/station/location combination will be displayed, '
             'selected by the lowest latency.')
    parser.add_argument(
        '-i', '--id', dest='ids', action="append", default=[],
        help='SEED IDs of streams that should be included in addition to the '
             'autodiscovery of streams controlled by ``--location`` and '
             '``--channel`` options (e.g. ``IU.ANMO..LHZ``). '
             'This option can be provided multiple times.')
    parser.add_argument(
        '--skip', dest='skip', action="append", default=[],
        help='Networks or stations that should be skipped (e.g. ``IU`` or '
             '``IU.ANMO``). This option can be provided multiple times.')
    parser.add_argument(
        '-f', '--format', default="MSEED", choices=ENTRY_POINTS['waveform'],
        help='Waveform format of SDS archive. Should be "MSEED" in most '
             'cases. Use ``None`` or empty string for format autodection '
             '(slower and should not be necessary in most all cases). '
             'Warning: formats that do not support ``headonly`` '
             'option in ``read()`` operation will be significantly slower).')
    parser.add_argument(
        '--check-backwards-days', dest='check_back_days', default=30,
        type=int, help='Check for latency backwards for this many days.')
    parser.add_argument(
        '--check-quality-days', dest='check_quality_days', default=7,
        type=int, help='Calculate and plot data availability and number of '
                       'gaps for a period of this many days.')
    parser.add_argument(
        '--latency-warn', dest='latency_warn', default=3600, type=float,
        help='Latency warning threshold in seconds.')
    parser.add_argument(
        '--latency-warn-color', dest='latency_warn_color', default="#FFFF33",
        help='Latency warning threshold color (valid HTML color string).')
    parser.add_argument(
        '--latency-error', dest='latency_error', default=24*3600, type=float,
        help='Latency error threshold in seconds.')
    parser.add_argument(
        '--latency-error-color', dest='latency_error_color',
        default="#E41A1C",
        help='Latency error threshold color (valid HTML color string).')
    parser.add_argument(
        '--percentage-warn', dest='percentage_warn', default=99.5, type=float,
        help='Data availability percentage warning threshold (``0`` to '
             '``100``).')
    parser.add_argument(
        '--gaps-warn', dest='gaps_warn', default=20, type=int,
        help='Gap/overlap number warning threshold.')
    parser.add_argument(
        '--data-quality-warn-color', dest='data_quality_warn_color',
        default="#377EB8",
        help='Data quality (percentage/gap count) warning color '
             '(valid HTML color string).')
    parser.add_argument(
        '--outdated-color', dest='outdated_color', default="#808080",
        help='Color for streams that have no data in check range '
             '(valid HTML color string).')
    parser.add_argument(
        '--ok-color', dest='ok_color', default="#4DAF4A",
        help='Color for streams that pass all checks (valid HTML color '
             'string).')
    parser.add_argument(
        '--background-color', dest='background_color', default="#999999",
        help='Color for background of page (valid HTML color string).')
    parser.add_argument(
        '-V', '--version', action='version',
        version='%(prog)s ' + __version__)
    args = parser.parse_args(argv)
    now = UTCDateTime()
    # oldest data time still considered when looking backwards for latency
    stop_time = now - args.check_back_days * 24 * 3600
    client = Client(args.sds_root)
    # row layout of the stream list file:
    # net, sta, loc, cha, latency, percentage, gap_count
    dtype_streamfile = np.dtype("U10, U30, U10, U10, f8, f8, i8")
    # availability is checked up to one hour before now
    availability_check_endtime = now - 3600
    availability_check_starttime = (
        availability_check_endtime - (args.check_quality_days * 24 * 3600))
    streams_file = args.output + ".txt"
    html_file = args.output + ".html"
    scan_file = args.output + ".png"
    # ``--format None`` / ``--format ""`` means autodetect
    if args.format.upper() == "NONE" or args.format == "":
        args.format = None
    # check whether to set up list of streams to check or use existing list
    # update list of streams once per day at nighttime
    if args.update:
        if not os.path.isfile(streams_file):
            msg = ("Update flag specified, but no output of previous full run "
                   "was present in the expected location (as determined by "
                   "``--output`` flag: {})").format(streams_file)
            raise IOError(msg)
        # use existing list of streams and availability information, just
        # update latency
        nslc = np.loadtxt(streams_file, delimiter=",", dtype=dtype_streamfile)
    else:
        if not args.locations or not args.channels:
            msg = ("At least one location code ``--location`` and at least "
                   "one channel code ``--channel`` must be specified.")
            raise ObsPyException(msg)
        nsl = set()
        # get all network/station combinations in SDS archive
        for net, sta in client.get_all_stations():
            if net in args.skip or ".".join((net, sta)) in args.skip:
                continue
            # for all combinations of user specified location and channel
            # codes check if data is in SDS archive
            for loc in args.locations:
                for cha in args.channels:
                    if client.has_data(net, sta, loc, cha):
                        # for now omit channel information, we only include
                        # the channel with lowest latency later on
                        nsl.add((net, sta, loc))
                        break
        nsl = sorted(nsl)
        nslc = []
        # determine which channel to check for each network/station/location
        # combination
        for net, sta, loc in nsl:
            latency = []
            # check latency of all channels that should be checked
            for cha in args.channels:
                latency_ = client.get_latency(net, sta, loc, cha,
                                              stop_time=stop_time)
                # ``None`` (no data) is treated as infinite latency
                latency.append(latency_ or np.inf)
            # only include the channel with lowest latency in our stream list
            cha = args.channels[np.argmin(latency)]
            latency = np.min(latency)
            nslc.append((net, sta, loc, cha, latency))
        # NOTE(review): ``id`` shadows the builtin of the same name here
        for id in args.ids:
            net, sta, loc, cha = id.split(".")
            latency = client.get_latency(net, sta, loc, cha,
                                         stop_time=stop_time)
            latency = latency or np.inf
            nslc.append((net, sta, loc, cha, latency))
        nslc_ = []
        # request and assemble availability information.
        # this takes pretty long (on network/slow file systems),
        # so we only do it during a full run here, not during update
        for net, sta, loc, cha, latency in nslc:
            percentage, gap_count = client.get_availability_percentage(
                net, sta, loc, cha, availability_check_starttime,
                availability_check_endtime)
            nslc_.append((net, sta, loc, cha, latency, percentage, gap_count))
        nslc = nslc_
        # write stream list and availability information to file
        nslc = np.array(sorted(nslc), dtype=dtype_streamfile)
        np.savetxt(streams_file, nslc, delimiter=",",
                   fmt=["%s", "%s", "%s", "%s", "%f", "%f", "%d"])
        # generate obspy-scan image
        files = []
        seed_ids = set()
        for nslc_ in nslc:
            net, sta, loc, cha, latency, _, _ = nslc_
            # skip streams with no data inside the check range
            if np.isinf(latency) or \
                    latency > args.check_back_days * 24 * 3600:
                continue
            seed_ids.add(".".join((net, sta, loc, cha)))
            files += client._get_filenames(
                net, sta, loc, cha, availability_check_starttime,
                availability_check_endtime)
        scan(files, format=args.format,
             starttime=availability_check_starttime,
             endtime=availability_check_endtime, plot=scan_file,
             verbose=False, recursive=True, ignore_links=False,
             seed_ids=seed_ids, print_gaps=False)
    # request and assemble current latency information
    data = []
    for net, sta, loc, cha, latency, percentage, gap_count in nslc:
        if args.update:
            # latency stored in the file is stale, re-measure it
            latency = client.get_latency(net, sta, loc, cha,
                                         stop_time=stop_time)
            latency = latency or np.inf
        data.append((net, sta, loc, cha, latency, percentage, gap_count))
    # separate out the long dead streams
    data_normal = []
    data_outdated = []
    for data_ in data:
        latency = data_[4]
        if np.isinf(latency) or latency > args.check_back_days * 24 * 3600:
            data_outdated.append(data_)
        else:
            data_normal.append(data_)
    # write html output to file
    html = _format_html(args, data_normal, data_outdated)
    with open(html_file, "wt") as fh:
        fh.write(html)