def main(argv=None):
    """
    Entry point for the ``obspy-plot`` command-line tool.

    Parses command-line options, reads every given waveform file into a
    single Stream and plots the result, either interactively or into the
    file given via ``--outfile``.
    """
    cli = ArgumentParser(prog='obspy-plot', description=__doc__.strip())
    cli.add_argument('-V', '--version', action='version',
                     version='%(prog)s ' + __version__)
    cli.add_argument('-f', '--format', choices=ENTRY_POINTS['waveform'],
                     help='Waveform format.')
    cli.add_argument('-o', '--outfile', help='Output filename.')
    cli.add_argument('-n', '--no-automerge', dest='automerge',
                     action='store_false',
                     help='Disable automatic merging of matching channels.')
    cli.add_argument('--full', dest='full', action='store_true',
                     help='Disable min/max-plot, i.e. always plot every '
                          'single sample (Stream.plot(..., method="full"), '
                          'for interactive zooming.')
    cli.add_argument('files', nargs='+', help='Files to plot.')
    options = cli.parse_args(argv)

    # When rendering straight to a file, the non-GUI AGG backend suffices.
    if options.outfile is not None:
        MatplotlibBackend.switch_backend("AGG", sloppy=False)

    # Accumulate all input files into one Stream before plotting.
    stream = Stream()
    for filename in options.files:
        stream += read(filename, format=options.format)

    plot_kwargs = {'outfile': options.outfile,
                   'automerge': options.automerge}
    if options.full:
        plot_kwargs['method'] = 'full'
    stream.plot(**plot_kwargs)
def main(argv=None):
    """
    Command-line entry point for ``obspy-plot``.

    Reads all waveform files given on the command line into a single
    Stream and plots them, interactively or into an output file.
    """
    parser = ArgumentParser(prog='obspy-plot', description=__doc__.strip())
    parser.add_argument('-V', '--version', action='version',
                        version='%(prog)s ' + __version__)
    parser.add_argument('-f', '--format', choices=ENTRY_POINTS['waveform'],
                        help='Waveform format.')
    parser.add_argument('-o', '--outfile', help='Output filename.')
    parser.add_argument('-n', '--no-automerge', dest='automerge',
                        action='store_false',
                        help='Disable automatic merging of matching channels.')
    parser.add_argument('--full', dest='full', action='store_true',
                        help='Disable min/max-plot, i.e. always plot every '
                             'single sample (Stream.plot(..., method="full"), '
                             'for interactive zooming.')
    parser.add_argument('files', nargs='+', help='Files to plot.')
    parsed = parser.parse_args(argv)

    # A non-interactive backend is enough when writing straight to disk.
    if parsed.outfile is not None:
        MatplotlibBackend.switch_backend("AGG", sloppy=False)

    # Merge every input file into one Stream.
    waveforms = Stream()
    for waveform_file in parsed.files:
        waveforms += read(waveform_file, format=parsed.format)

    plot_options = dict(outfile=parsed.outfile, automerge=parsed.automerge)
    if parsed.full:
        plot_options['method'] = 'full'
    waveforms.plot(**plot_options)
def main(argv=None):
    """
    Run the ``obspy-plot`` command-line program.

    Collects the waveforms from all files named on the command line into
    one Stream and plots the combined stream.
    """
    arg_parser = ArgumentParser(prog='obspy-plot', description=__doc__.strip())
    arg_parser.add_argument('-V', '--version', action='version',
                            version='%(prog)s ' + __version__)
    arg_parser.add_argument('-f', '--format',
                            choices=ENTRY_POINTS['waveform'],
                            help='Waveform format.')
    arg_parser.add_argument('-o', '--outfile', help='Output filename.')
    arg_parser.add_argument('-n', '--no-automerge', dest='automerge',
                            action='store_false',
                            help='Disable automatic merging of matching '
                                 'channels.')
    arg_parser.add_argument('--full', dest='full', action='store_true',
                            help='Disable min/max-plot, i.e. always plot '
                                 'every single sample (Stream.plot(..., '
                                 'method="full"), for interactive zooming.')
    arg_parser.add_argument('files', nargs='+', help='Files to plot.')
    opts = arg_parser.parse_args(argv)

    if opts.outfile is not None:
        # File output only: switch to the non-GUI AGG backend.
        MatplotlibBackend.switch_backend("AGG", sloppy=False)

    st = Stream()
    for fname in opts.files:
        st += read(fname, format=opts.format)

    # Assemble plot options; 'method' is only set when --full was given.
    settings = {'outfile': opts.outfile, 'automerge': opts.automerge}
    if opts.full:
        settings['method'] = 'full'
    st.plot(**settings)
def main(argv=None):
    """
    Command-line entry point for ``obspy-plot``.

    Reads each waveform file named on the command line into one Stream
    and plots the combined stream, interactively or to ``--outfile``.
    """
    parser = ArgumentParser(prog='obspy-plot', description=__doc__.strip())
    parser.add_argument('-V', '--version', action='version',
                        version='%(prog)s ' + __version__)
    parser.add_argument('-f', '--format', choices=ENTRY_POINTS['waveform'],
                        help='Waveform format.')
    parser.add_argument('-o', '--outfile', help='Output filename.')
    parser.add_argument('-n', '--no-automerge', dest='automerge',
                        action='store_false',
                        help='Disable automatic merging of matching channels.')
    parser.add_argument('files', nargs='+', help='Files to plot.')
    args = parser.parse_args(argv)

    if args.outfile is not None:
        # Writing to a file: the non-GUI AGG backend is enough.
        MatplotlibBackend.switch_backend("AGG", sloppy=False)

    combined = Stream()
    for path in args.files:
        combined += read(path, format=args.format)
    combined.plot(outfile=args.outfile, automerge=args.automerge)
def __enter__(self):
    """
    Set matplotlib defaults for reproducible plotting tests.

    Switches matplotlib to the non-interactive AGG backend, forces an
    English locale, resets rcParams to matplotlib's builtin defaults and
    pins font / text-hinting settings so that rendered images compare
    reliably across machines.

    :returns: self
    """
    MatplotlibBackend.switch_backend("AGG", sloppy=False)
    from matplotlib import font_manager, rcParams, rcdefaults
    import locale
    # Force an English locale so date-related tick labels are stable.
    try:
        locale.setlocale(locale.LC_ALL, native_str('en_US.UTF-8'))
    except Exception:
        try:
            # Windows spelling of the same locale.
            locale.setlocale(locale.LC_ALL,
                             native_str('English_United States.1252'))
        except Exception:
            msg = "Could not set locale to English/United States. " + \
                  "Some date-related tests may fail"
            warnings.warn(msg)
    # set matplotlib builtin default settings for testing
    rcdefaults()
    if self.style is not None:
        # Apply a user-supplied matplotlib style context, if any.
        self.style.__enter__()
    # matplotlib 2.x ships DejaVu Sans instead of Bitstream Vera Sans.
    if MATPLOTLIB_VERSION >= [2, 0, 0]:
        default_font = 'DejaVu Sans'
    else:
        default_font = 'Bitstream Vera Sans'
    rcParams['font.family'] = default_font
    # Probe for the font; any "findfont" warning is recorded into w.
    with warnings.catch_warnings(record=True) as w:
        warnings.filterwarnings('always', 'findfont:.*')
        font_manager.findfont(default_font)
    # NOTE(review): warning emitted after leaving catch_warnings so it is
    # actually shown rather than recorded -- confirm against upstream.
    if w:
        warnings.warn('Unable to find the ' + default_font + ' font. '
                      'Plotting tests will likely fail.')
    # Hinting rcParams may not exist in older matplotlib versions.
    try:
        rcParams['text.hinting'] = False
    except KeyError:
        warnings.warn("could not set rcParams['text.hinting']")
    try:
        rcParams['text.hinting_factor'] = 8
    except KeyError:
        warnings.warn("could not set rcParams['text.hinting_factor']")
    if self.plt_close_all_enter:
        import matplotlib.pyplot as plt
        # Best effort: close figures left over from earlier tests.
        try:
            plt.close("all")
        except Exception:
            pass
    return self
def __enter__(self):
    """
    Set matplotlib defaults so plotting tests are reproducible.

    Forces the AGG backend and an English locale, resets rcParams to
    matplotlib's builtin defaults and pins the font and text-hinting
    settings so images render identically across machines.

    :returns: self
    """
    MatplotlibBackend.switch_backend("AGG", sloppy=False)
    from matplotlib import font_manager, rcParams, rcdefaults
    import locale
    # Date formatting in plots depends on the locale -- force English.
    try:
        locale.setlocale(locale.LC_ALL, native_str('en_US.UTF-8'))
    except Exception:
        try:
            # Fall back to the Windows name for the same locale.
            locale.setlocale(locale.LC_ALL,
                             native_str('English_United States.1252'))
        except Exception:
            msg = "Could not set locale to English/United States. " + \
                  "Some date-related tests may fail"
            warnings.warn(msg)
    # set matplotlib builtin default settings for testing
    rcdefaults()
    if self.style is not None:
        # Enter a user-provided matplotlib style context, if given.
        self.style.__enter__()
    # matplotlib >= 2.0 bundles DejaVu Sans; older releases ship
    # Bitstream Vera Sans.
    if MATPLOTLIB_VERSION >= [2, 0, 0]:
        default_font = 'DejaVu Sans'
    else:
        default_font = 'Bitstream Vera Sans'
    rcParams['font.family'] = default_font
    # Probe for the font; findfont warnings are captured in w.
    with warnings.catch_warnings(record=True) as w:
        warnings.filterwarnings('always', 'findfont:.*')
        font_manager.findfont(default_font)
    # NOTE(review): placed outside catch_warnings so the warning actually
    # propagates instead of being recorded -- confirm against upstream.
    if w:
        warnings.warn('Unable to find the ' + default_font + ' font. '
                      'Plotting tests will likely fail.')
    # Older matplotlib releases may lack the hinting rcParams keys.
    try:
        rcParams['text.hinting'] = False
    except KeyError:
        warnings.warn("could not set rcParams['text.hinting']")
    try:
        rcParams['text.hinting_factor'] = 8
    except KeyError:
        warnings.warn("could not set rcParams['text.hinting_factor']")
    if self.plt_close_all_enter:
        import matplotlib.pyplot as plt
        # Close leftover figures from earlier tests; ignore any failure.
        try:
            plt.close("all")
        except Exception:
            pass
    return self
def main(argv=None):
    """
    Entry point of the ``obspy-plot`` script.

    Loads every file given on the command line into a single Stream and
    renders the plot (to screen, or to ``--outfile`` if given).
    """
    cli = ArgumentParser(prog='obspy-plot', description=__doc__.strip())
    cli.add_argument('-V', '--version', action='version',
                     version='%(prog)s ' + __version__)
    cli.add_argument('-f', '--format', choices=ENTRY_POINTS['waveform'],
                     help='Waveform format.')
    cli.add_argument('-o', '--outfile', help='Output filename.')
    cli.add_argument('-n', '--no-automerge', dest='automerge',
                     action='store_false',
                     help='Disable automatic merging of matching channels.')
    cli.add_argument('files', nargs='+', help='Files to plot.')
    options = cli.parse_args(argv)

    # No GUI needed when the plot goes straight into a file.
    if options.outfile is not None:
        MatplotlibBackend.switch_backend("AGG", sloppy=False)

    stream = Stream()
    for filename in options.files:
        stream += read(filename, format=options.format)
    stream.plot(outfile=options.outfile, automerge=options.automerge)
def __enter__(self):
    """
    Set matplotlib defaults for reproducible plotting tests.

    Switches matplotlib to the non-interactive AGG backend, forces an
    English locale (stable date labels), resets rcParams to matplotlib's
    builtin defaults and pins the font / text-hinting settings.

    :returns: self
    """
    MatplotlibBackend.switch_backend("AGG", sloppy=False)
    from matplotlib import font_manager, rcParams, rcdefaults
    import locale
    # Force an English locale so date-related tick labels are stable.
    # Fix: narrowed from bare ``except:`` so KeyboardInterrupt/SystemExit
    # are no longer swallowed here.
    try:
        locale.setlocale(locale.LC_ALL, native_str('en_US.UTF-8'))
    except Exception:
        try:
            # Windows spelling of the same locale.
            locale.setlocale(locale.LC_ALL,
                             native_str('English_United States.1252'))
        except Exception:
            msg = "Could not set locale to English/United States. " + \
                  "Some date-related tests may fail"
            warnings.warn(msg)
    # set matplotlib builtin default settings for testing
    rcdefaults()
    rcParams['font.family'] = 'Bitstream Vera Sans'
    # Probe for the font; any "findfont" warning is recorded into w.
    with warnings.catch_warnings(record=True) as w:
        warnings.filterwarnings('always', 'findfont:.*')
        font_manager.findfont('Bitstream Vera Sans')
    if w:
        warnings.warn('Unable to find the Bitstream Vera Sans font. '
                      'Plotting tests will likely fail.')
    # Hinting rcParams may not exist in older matplotlib versions.
    try:
        rcParams['text.hinting'] = False
    except KeyError:
        warnings.warn("could not set rcParams['text.hinting']")
    try:
        rcParams['text.hinting_factor'] = 8
    except KeyError:
        warnings.warn("could not set rcParams['text.hinting_factor']")
    return self
def __enter__(self):
    """
    Set matplotlib defaults so plotting tests are reproducible.

    Forces the AGG backend and an English locale, resets rcParams to
    matplotlib's builtin defaults and pins font / hinting settings.

    :returns: self
    """
    MatplotlibBackend.switch_backend("AGG", sloppy=False)
    from matplotlib import font_manager, rcParams, rcdefaults
    import locale
    # Date formatting in plots depends on the locale -- force English.
    # Fix: bare ``except:`` narrowed to ``except Exception:`` so that
    # KeyboardInterrupt/SystemExit still propagate.
    try:
        locale.setlocale(locale.LC_ALL, native_str('en_US.UTF-8'))
    except Exception:
        try:
            # Fall back to the Windows name for the same locale.
            locale.setlocale(locale.LC_ALL,
                             native_str('English_United States.1252'))
        except Exception:
            msg = "Could not set locale to English/United States. " + \
                  "Some date-related tests may fail"
            warnings.warn(msg)
    # set matplotlib builtin default settings for testing
    rcdefaults()
    rcParams['font.family'] = 'Bitstream Vera Sans'
    # Probe for the font; findfont warnings are captured in w.
    with warnings.catch_warnings(record=True) as w:
        warnings.filterwarnings('always', 'findfont:.*')
        font_manager.findfont('Bitstream Vera Sans')
    if w:
        warnings.warn('Unable to find the Bitstream Vera Sans font. '
                      'Plotting tests will likely fail.')
    # Older matplotlib releases may lack the hinting rcParams keys.
    try:
        rcParams['text.hinting'] = False
    except KeyError:
        warnings.warn("could not set rcParams['text.hinting']")
    try:
        rcParams['text.hinting_factor'] = 8
    except KeyError:
        warnings.warn("could not set rcParams['text.hinting_factor']")
    return self
def run(argv=None, interactive=True):
    """
    Entry point for ``obspy-runtests``.

    Parses command-line options, configures warning/NumPy error handling
    according to the chosen verbosity, exports test-related environment
    flags and finally delegates to :func:`run_tests`.

    :param argv: argument list to parse (``None`` uses ``sys.argv``)
    :param interactive: whether to interactively ask about submitting a
        test report; forced to ``False`` when ``--dontask`` is given
    :returns: whatever :func:`run_tests` returns
    """
    MatplotlibBackend.switch_backend("AGG", sloppy=False)
    parser = ArgumentParser(prog='obspy-runtests',
                            description='A command-line program that runs all '
                                        'ObsPy tests.')
    parser.add_argument('-V', '--version', action='version',
                        version='%(prog)s ' + get_git_version())
    parser.add_argument('-v', '--verbose', action='store_true',
                        help='verbose mode')
    parser.add_argument('-q', '--quiet', action='store_true',
                        help='quiet mode')
    parser.add_argument('--raise-all-warnings', action='store_true',
                        help='All warnings are raised as exceptions when this '
                             'flag is set. Only for debugging purposes.')
    # filter options
    # Fix: renamed from ``filter`` to avoid shadowing the builtin.
    filter_group = parser.add_argument_group(
        'Module Filter',
        'Providing no modules will test all '
        'ObsPy modules which do not require an '
        'active network connection.')
    filter_group.add_argument('-a', '--all', action='store_true',
                              dest='test_all_modules',
                              help='test all modules (including network '
                                   'modules)')
    filter_group.add_argument('-x', '--exclude', action='append',
                              help='exclude given module from test')
    filter_group.add_argument('tests', nargs='*', help='test modules to run')
    # timing / profile options
    timing = parser.add_argument_group('Timing/Profile Options')
    timing.add_argument('-t', '--timeit', action='store_true',
                        help='shows accumulated run times of each module')
    timing.add_argument('-s', '--slowest', default=0, type=int, dest='n',
                        help='lists n slowest test cases')
    timing.add_argument('-p', '--profile', action='store_true',
                        help='uses cProfile, saves the results to file ' +
                             'obspy.pstats and prints some profiling numbers')
    # reporting options
    # Fix: renamed from ``report``, which was later reused as a boolean.
    report_group = parser.add_argument_group('Reporting Options')
    report_group.add_argument('-r', '--report', action='store_true',
                              help='automatically submit a test report')
    report_group.add_argument('-d', '--dontask', action='store_true',
                              help="don't explicitly ask for submitting a "
                                   "test report")
    report_group.add_argument('-u', '--server', default='tests.obspy.org',
                              help='report server (default is '
                                   'tests.obspy.org)')
    report_group.add_argument('-n', '--node', dest='hostname',
                              default=HOSTNAME,
                              help='nodename visible at the report server')
    report_group.add_argument('-l', '--log', default=None,
                              help='append log file to test report')
    report_group.add_argument('--ci-url', default=None, dest="ci_url",
                              help='URL to Continuous Integration job page.')
    report_group.add_argument('--pr-url', default=None, dest="pr_url",
                              help='Github (Pull Request) URL.')
    # other options
    others = parser.add_argument_group('Additional Options')
    others.add_argument('--tutorial', action='store_true',
                        help='add doctests in tutorial')
    others.add_argument('--no-flake8', action='store_true',
                        help='skip code formatting test')
    others.add_argument('--keep-images', action='store_true',
                        help='store images created during image comparison '
                             'tests in subfolders of baseline images')
    others.add_argument('--keep-only-failed-images', action='store_true',
                        help='when storing images created during testing, '
                             'only store failed images and the corresponding '
                             'diff images (but not images that passed the '
                             'corresponding test).')
    args = parser.parse_args(argv)
    # set correct verbosity level
    if args.verbose:
        verbosity = 2
        # raise all NumPy warnings
        np.seterr(all='warn')
    elif args.quiet:
        verbosity = 0
        # ignore user and deprecation warnings
        warnings.simplefilter("ignore", DeprecationWarning)
        warnings.simplefilter("ignore", UserWarning)
        # don't ask to send a report
        args.dontask = True
    else:
        verbosity = 1
        # show all NumPy warnings
        np.seterr(all='print')
        # ignore user warnings
        warnings.simplefilter("ignore", UserWarning)
    # whether to raise any warning that's appearing
    if args.raise_all_warnings:
        # raise all NumPy warnings
        np.seterr(all='raise')
        # raise user and deprecation warnings
        warnings.simplefilter("error", UserWarning)
    # check for send report option or environmental settings
    # (idiom fix: membership test directly on os.environ)
    report = bool(args.report or 'OBSPY_REPORT' in os.environ)
    if 'OBSPY_REPORT_SERVER' in os.environ:
        args.server = os.environ['OBSPY_REPORT_SERVER']
    # check interactivity settings
    if interactive and args.dontask:
        interactive = False
    # Empty-valued environment flags act as switches for the test suite.
    if args.keep_images:
        os.environ['OBSPY_KEEP_IMAGES'] = ""
    if args.keep_only_failed_images:
        os.environ['OBSPY_KEEP_ONLY_FAILED_IMAGES'] = ""
    if args.no_flake8:
        os.environ['OBSPY_NO_FLAKE8'] = ""
    # All arguments are used by the test runner and should not interfere
    # with any other module that might also parse them, e.g. flake8.
    sys.argv = sys.argv[:1]
    return run_tests(verbosity, args.tests, report, args.log, args.server,
                     args.test_all_modules, args.timeit, interactive, args.n,
                     exclude=args.exclude, tutorial=args.tutorial,
                     hostname=args.hostname, ci_url=args.ci_url,
                     pr_url=args.pr_url)
def run(argv=None, interactive=True):
    """
    Entry point for ``obspy-runtests``.

    Parses command-line options, configures warning/NumPy error handling
    according to the chosen verbosity, exports test-related environment
    flags and finally delegates to ``run_tests``.

    :param argv: argument list to parse (``None`` uses ``sys.argv``)
    :param interactive: whether to interactively ask about submitting a
        test report; forced to ``False`` when ``--dontask`` is given
    :returns: whatever ``run_tests`` returns
    """
    MatplotlibBackend.switch_backend("AGG", sloppy=False)
    parser = ArgumentParser(prog='obspy-runtests',
                            description='A command-line program that runs all '
                                        'ObsPy tests.')
    parser.add_argument('-V', '--version', action='version',
                        version='%(prog)s ' + get_git_version())
    parser.add_argument('-v', '--verbose', action='store_true',
                        help='verbose mode')
    parser.add_argument('-q', '--quiet', action='store_true',
                        help='quiet mode')
    parser.add_argument('--raise-all-warnings', action='store_true',
                        help='All warnings are raised as exceptions when this '
                             'flag is set. Only for debugging purposes.')
    # filter options
    # NOTE(review): local name shadows the builtin ``filter``.
    filter = parser.add_argument_group(
        'Module Filter',
        'Providing no modules will test all '
        'ObsPy modules which do not require an '
        'active network connection.')
    filter.add_argument('-a', '--all', action='store_true',
                        dest='test_all_modules',
                        help='test all modules (including network modules)')
    filter.add_argument('-x', '--exclude', action='append',
                        help='exclude given module from test')
    filter.add_argument('tests', nargs='*', help='test modules to run')
    # timing / profile options
    timing = parser.add_argument_group('Timing/Profile Options')
    timing.add_argument('-t', '--timeit', action='store_true',
                        help='shows accumulated run times of each module')
    timing.add_argument('-s', '--slowest', default=0, type=int, dest='n',
                        help='lists n slowest test cases')
    timing.add_argument('-p', '--profile', action='store_true',
                        help='uses cProfile, saves the results to file ' +
                             'obspy.pstats and prints some profiling numbers')
    # reporting options
    report = parser.add_argument_group('Reporting Options')
    report.add_argument('-r', '--report', action='store_true',
                        help='automatically submit a test report')
    report.add_argument('-d', '--dontask', action='store_true',
                        help="don't explicitly ask for submitting a test "
                             "report")
    report.add_argument('-u', '--server', default='tests.obspy.org',
                        help='report server (default is tests.obspy.org)')
    report.add_argument('-n', '--node', dest='hostname', default=HOSTNAME,
                        help='nodename visible at the report server')
    report.add_argument('-l', '--log', default=None,
                        help='append log file to test report')
    report.add_argument('--ci-url', default=None, dest="ci_url",
                        help='URL to Continuous Integration job page.')
    report.add_argument('--pr-url', default=None, dest="pr_url",
                        help='Github (Pull Request) URL.')
    # other options
    others = parser.add_argument_group('Additional Options')
    others.add_argument('--tutorial', action='store_true',
                        help='add doctests in tutorial')
    others.add_argument('--no-flake8', action='store_true',
                        help='skip code formatting test')
    others.add_argument('--keep-images', action='store_true',
                        help='store images created during image comparison '
                             'tests in subfolders of baseline images')
    others.add_argument('--keep-only-failed-images', action='store_true',
                        help='when storing images created during testing, '
                             'only store failed images and the corresponding '
                             'diff images (but not images that passed the '
                             'corresponding test).')
    args = parser.parse_args(argv)
    # set correct verbosity level
    if args.verbose:
        verbosity = 2
        # raise all NumPy warnings
        np.seterr(all='warn')
    elif args.quiet:
        verbosity = 0
        # ignore user and deprecation warnings
        warnings.simplefilter("ignore", DeprecationWarning)
        warnings.simplefilter("ignore", UserWarning)
        # don't ask to send a report
        args.dontask = True
    else:
        verbosity = 1
        # show all NumPy warnings
        np.seterr(all='print')
        # ignore user warnings
        warnings.simplefilter("ignore", UserWarning)
    # whether to raise any warning that's appearing
    if args.raise_all_warnings:
        # raise all NumPy warnings
        np.seterr(all='raise')
        # raise user and deprecation warnings
        warnings.simplefilter("error", UserWarning)
    # ignore specific warnings
    # (AGG is forced above, so this matplotlib warning is expected noise)
    msg = ('Matplotlib is currently using agg, which is a non-GUI backend, '
           'so cannot show the figure.')
    warnings.filterwarnings("ignore", message=msg)
    # check for send report option or environmental settings
    if args.report or 'OBSPY_REPORT' in os.environ.keys():
        report = True
    else:
        report = False
    if 'OBSPY_REPORT_SERVER' in os.environ.keys():
        args.server = os.environ['OBSPY_REPORT_SERVER']
    # check interactivity settings
    if interactive and args.dontask:
        interactive = False
    # Empty-valued environment flags act as switches for the test suite.
    if args.keep_images:
        os.environ['OBSPY_KEEP_IMAGES'] = ""
    if args.keep_only_failed_images:
        os.environ['OBSPY_KEEP_ONLY_FAILED_IMAGES'] = ""
    if args.no_flake8:
        os.environ['OBSPY_NO_FLAKE8'] = ""
    # All arguments are used by the test runner and should not interfere
    # with any other module that might also parse them, e.g. flake8.
    sys.argv = sys.argv[:1]
    return run_tests(verbosity, args.tests, report, args.log, args.server,
                     args.test_all_modules, args.timeit, interactive, args.n,
                     exclude=args.exclude, tutorial=args.tutorial,
                     hostname=args.hostname, ci_url=args.ci_url,
                     pr_url=args.pr_url)
def main(argv=None):
    """
    Entry point for ``obspy-scan``.

    Scans the given files/directories for waveform data, computes per-id
    data coverage and gaps, and plots a timing overview (one row per
    SEED id) either interactively or into ``--output``.

    :param argv: argument list to parse (``None`` uses ``sys.argv``)
    """
    parser = ArgumentParser(prog='obspy-scan', description=__doc__.strip(),
                            formatter_class=RawDescriptionHelpFormatter)
    parser.add_argument('-V', '--version', action='version',
                        version='%(prog)s ' + __version__)
    parser.add_argument('-f', '--format', choices=ENTRY_POINTS['waveform'],
                        help='Optional, the file format.\n' +
                             ' '.join(__doc__.split('\n')[-4:]))
    parser.add_argument('-v', '--verbose', action='store_true',
                        help='Optional. Verbose output.')
    parser.add_argument('-q', '--quiet', action='store_true',
                        help='Optional. Be quiet. Overwritten by --verbose '
                             'flag.')
    parser.add_argument('-n', '--non-recursive', action='store_false',
                        dest='recursive',
                        help='Optional. Do not descend into directories.')
    parser.add_argument('-i', '--ignore-links', action='store_true',
                        help='Optional. Do not follow symbolic links.')
    parser.add_argument('--start-time', default=None, type=UTCDateTime,
                        help='Optional, a UTCDateTime compatible string. ' +
                             'Only visualize data after this time and set ' +
                             'time-axis axis accordingly.')
    parser.add_argument('--end-time', default=None, type=UTCDateTime,
                        help='Optional, a UTCDateTime compatible string. ' +
                             'Only visualize data before this time and set ' +
                             'time-axis axis accordingly.')
    parser.add_argument('--id', action='append',
                        help='Optional, a SEED channel identifier '
                             "(e.g. 'GR.FUR..HHZ'). You may provide this " +
                             'option multiple times. Only these ' +
                             'channels will be plotted.')
    parser.add_argument('-t', '--event-time', default=None, type=UTCDateTime,
                        action='append',
                        help='Optional, a UTCDateTime compatible string ' +
                             "(e.g. '2010-01-01T12:00:00'). You may provide " +
                             'this option multiple times. These times get ' +
                             'marked by vertical lines in the plot. ' +
                             'Useful e.g. to mark event origin times.')
    parser.add_argument('-w', '--write', default=None,
                        help='Optional, npz file for writing data '
                             'after scanning waveform files')
    parser.add_argument('-l', '--load', default=None,
                        help='Optional, npz file for loading data '
                             'before scanning waveform files')
    parser.add_argument('--no-x', action='store_true',
                        help='Optional, Do not plot crosses.')
    parser.add_argument('--no-gaps', action='store_true',
                        help='Optional, Do not plot gaps.')
    parser.add_argument('-o', '--output', default=None,
                        help='Save plot to image file (e.g. out.pdf, ' +
                             'out.png) instead of opening a window.')
    parser.add_argument('--print-gaps', action='store_true',
                        help='Optional, prints a list of gaps at the end.')
    parser.add_argument('paths', nargs='*',
                        help='Files or directories to scan.')
    args = parser.parse_args(argv)
    # File output only: the non-GUI AGG backend is sufficient.
    if args.output is not None:
        MatplotlibBackend.switch_backend("AGG", sloppy=False)
    # Print help and exit if no arguments are given
    if len(args.paths) == 0 and args.load is None:
        parser.error('No paths specified.')
    # Use recursively parsing function?
    if args.recursive:
        parse_func = recursive_parse
    else:
        parse_func = parse_file_to_dict
    # Deferred matplotlib imports: the backend must be chosen first.
    from matplotlib.dates import date2num, num2date
    from matplotlib.ticker import FuncFormatter
    from matplotlib.patches import Rectangle
    from matplotlib.collections import PatchCollection
    import matplotlib.pyplot as plt
    fig = plt.figure()
    ax = fig.add_subplot(111)
    # Plot vertical lines if option 'event_time' was specified
    if args.event_time:
        times = [date2num(t.datetime) for t in args.event_time]
        for time in times:
            ax.axvline(time, color='k')
    # From here on start/end times are matplotlib date numbers (floats).
    if args.start_time:
        args.start_time = date2num(args.start_time.datetime)
    if args.end_time:
        args.end_time = date2num(args.end_time.datetime)
    # Generate dictionary containing nested lists of start and end times per
    # station
    data = {}
    samp_int = {}
    counter = 1
    if args.load:
        load_npz(args.load, data, samp_int)
    for path in args.paths:
        counter = parse_func(data, samp_int, path, counter, args.format,
                             verbose=args.verbose, quiet=args.quiet,
                             ignore_links=args.ignore_links)
    if not data:
        if args.verbose or not args.quiet:
            print("No waveform data found.")
        return
    if args.write:
        write_npz(args.write, data, samp_int)
    # either use ids specified by user or use ids based on what data we have
    # parsed
    ids = args.id or list(data.keys())
    # Reverse-sorted so ids read top-to-bottom alphabetically in the plot.
    ids = sorted(ids)[::-1]
    labels = [""] * len(ids)
    if args.verbose or not args.quiet:
        print('\n')
    for _i, _id in enumerate(ids):
        labels[_i] = ids[_i]
        # sort data list and sampling rate list
        if _id in data:
            startend = np.array(data[_id])
            _samp_int = np.array(samp_int[_id])
            indices = np.lexsort((startend[:, 1], startend[:, 0]))
            startend = startend[indices]
            _samp_int = _samp_int[indices]
        else:
            startend = np.array([])
            _samp_int = np.array([])
        if len(startend) == 0:
            # No data for this id: draw a full-width gap if a time window
            # was requested, otherwise skip the row entirely.
            if not (args.start_time and args.end_time):
                continue
            if not args.no_gaps:
                rects = [Rectangle((args.start_time, _i - 0.4),
                                   args.end_time - args.start_time, 0.8)]
                ax.add_collection(PatchCollection(rects, color="r"))
            if args.print_gaps and (args.verbose or not args.quiet):
                print("%s %s %s %.3f" % (
                    _id, args.start_time, args.end_time,
                    args.end_time - args.start_time))
            continue
        # restrict plotting of results to given start/end time
        if args.start_time:
            indices = startend[:, 1] > args.start_time
            startend = startend[indices]
            _samp_int = _samp_int[indices]
            if len(startend) == 0:
                continue
        if args.end_time:
            indices = startend[:, 0] < args.end_time
            startend = startend[indices]
            _samp_int = _samp_int[indices]
            if len(startend) == 0:
                continue
        data_start = startend[:, 0].min()
        data_end = startend[:, 1].max()
        timerange_start = args.start_time or data_start
        timerange_end = args.end_time or data_end
        timerange = timerange_end - timerange_start
        if timerange == 0.0:
            warnings.warn('Zero sample long data for _id=%s, skipping' % _id)
            continue
        startend_compressed = compress_start_end(startend, 1000)
        offset = np.ones(len(startend)) * _i  # generate list of y values
        if not args.no_x:
            ax.plot(startend[:, 0], offset, 'x', linewidth=2)
        ax.hlines(offset[:len(startend_compressed)],
                  startend_compressed[:, 0], startend_compressed[:, 1],
                  'b', linewidth=2, zorder=3)
        # find the gaps
        diffs = startend[1:, 0] - startend[:-1, 1]  # currend.start - last.end
        gapsum = diffs[diffs > 0].sum()
        # if start- and/or endtime is specified, add missing data at start/end
        # to gap sum
        has_gap = False
        # Each expression is falsy when no gap exists, otherwise it is the
        # gap duration (matplotlib date-number difference).
        gap_at_start = (
            args.start_time and
            data_start > args.start_time and
            data_start - args.start_time)
        gap_at_end = (
            args.end_time and
            args.end_time > data_end and
            args.end_time - data_end)
        if args.start_time and gap_at_start:
            gapsum += gap_at_start
            has_gap = True
        if args.end_time and gap_at_end:
            gapsum += gap_at_end
            has_gap = True
        # Percentage of the requested time range covered by data.
        perc = (timerange - gapsum) / timerange
        labels[_i] = labels[_i] + "\n%.1f%%" % (perc * 100)
        # A spacing of more than 1.8 sampling intervals counts as a gap.
        gap_indices = diffs > 1.8 * _samp_int[:-1]
        gap_indices = np.append(gap_indices, False)
        has_gap |= any(gap_indices)
        if has_gap:
            # don't handle last end time as start of gap
            gaps_start = startend[gap_indices, 1]
            gaps_end = startend[np.roll(gap_indices, 1), 0]
            if args.start_time and gap_at_start:
                gaps_start = np.append(gaps_start, args.start_time)
                gaps_end = np.append(gaps_end, data_start)
            if args.end_time and gap_at_end:
                gaps_start = np.append(gaps_start, data_end)
                gaps_end = np.append(gaps_end, args.end_time)
            if not args.no_gaps:
                rects = [Rectangle((start_, offset[0] - 0.4),
                                   end_ - start_, 0.8)
                         for start_, end_ in zip(gaps_start, gaps_end)]
                ax.add_collection(PatchCollection(rects, color="r"))
            if args.print_gaps:
                for start_, end_ in zip(gaps_start, gaps_end):
                    # Convert matplotlib date numbers back to UTCDateTime
                    # for printing.
                    start_, end_ = num2date((start_, end_))
                    start_ = UTCDateTime(start_.isoformat())
                    end_ = UTCDateTime(end_.isoformat())
                    if args.verbose or not args.quiet:
                        print("%s %s %s %.3f" % (_id, start_, end_,
                                                 end_ - start_))
    # Pretty format the plot
    ax.set_ylim(0 - 0.5, len(ids) - 0.5)
    ax.set_yticks(np.arange(len(ids)))
    ax.set_yticklabels(labels, family="monospace", ha="right")
    fig.autofmt_xdate()  # rotate date
    ax.xaxis_date()
    # set custom formatters to always show date in first tick
    formatter = ObsPyAutoDateFormatter(ax.xaxis.get_major_locator())
    formatter.scaled[1 / 24.] = \
        FuncFormatter(decimal_seconds_format_date_first_tick)
    formatter.scaled.pop(1 / (24. * 60.))
    ax.xaxis.set_major_formatter(formatter)
    plt.subplots_adjust(left=0.2)
    # set x-axis limits according to given start/end time
    if args.start_time and args.end_time:
        ax.set_xlim(left=args.start_time, right=args.end_time)
    elif args.start_time:
        ax.set_xlim(left=args.start_time, auto=None)
    elif args.end_time:
        ax.set_xlim(right=args.end_time, auto=None)
    else:
        # No explicit window: pad the data range by 5% on both sides.
        left, right = ax.xaxis.get_data_interval()
        x_axis_range = right - left
        ax.set_xlim(left - 0.05 * x_axis_range,
                    right + 0.05 * x_axis_range)
    if args.output is None:
        plt.show()
    else:
        # Scale the saved figure with the number of rows / spanned days.
        fig.set_dpi(72)
        height = len(ids) * 0.5
        height = max(4, height)
        fig.set_figheight(height)
        plt.tight_layout()
        if not args.start_time or not args.end_time:
            days = ax.get_xlim()
            days = days[1] - days[0]
        else:
            days = args.end_time - args.start_time
        width = max(6, days / 30.)
        width = min(width, height * 4)
        fig.set_figwidth(width)
        plt.subplots_adjust(top=1, bottom=0, left=0, right=1)
        plt.tight_layout()
        fig.savefig(args.output)
    if args.verbose and not args.quiet:
        sys.stdout.write('\n')
def main(argv=None):
    """Entry point for the ``obspy-sds-report`` command line script.

    Scans an SDS waveform archive for latency, data availability percentage
    and gap counts per stream, then writes three artifacts next to each
    other: a stream list (``<output>.txt``), an obspy-scan availability
    image (``<output>.png``) and an HTML status page (``<output>.html``).

    :param argv: Command line arguments to parse; ``None`` means
        ``sys.argv[1:]`` (standard ``argparse`` behavior).
    """
    # Output is always written to files, never shown interactively, so a
    # non-interactive backend is forced up front.
    MatplotlibBackend.switch_backend("AGG", sloppy=False)
    parser = ArgumentParser(
        prog='obspy-sds-report', description=__doc__,
        formatter_class=RawDescriptionHelpFormatter)
    parser.add_argument(
        '-r', '--sds-root', dest='sds_root', required=True,
        help='Root folder of SDS archive.')
    parser.add_argument(
        '-o', '--output', dest='output', required=True,
        help='Full path (absolute or relative) of output files, without '
             'suffix (e.g. ``/tmp/sds_report``).')
    parser.add_argument(
        '-u', '--update', dest='update', default=False, action="store_true",
        help='Only update latency information, reuse previously computed list '
             'of streams to check and data percentage and gap count. Many '
             'other options e.g. regarding stream selection (``--id``, '
             ' ``--location``, ..) and time span of data quality checks '
             '(``--check-quality-days``) will be without effect if this '
             'option is specified. Only updating latency is significantly '
             'faster than a full analysis run, a normal use case is to do a '
             'full run once or twice per day and update latency every 5 or '
             'ten minutes. An exception is raised if an update is specified '
             'but the necessary file is not yet present.')
    parser.add_argument(
        '-l', '--location', dest='locations', action="append",
        help='Location codes to look for (e.g. ``""`` for empty location code '
             'or ``"00"``). This option can be provided multiple times and '
             'must be specified at least once for a full run (i.e. without '
             '``--update`` option). While network/station combinations are '
             'automatically discovered, only streams whose location codes are '
             'provided here will be discovered and taken into account and '
             'ultimately displayed.')
    parser.add_argument(
        '-c', '--channel', dest='channels', action="append",
        help='Channel codes to look for (e.g. specified three times with '
             '``HHZ``, ``EHZ`` and ``ELZ`` to cover all stations that serve '
             'a broad-band, short-period or low gain vertical channel). '
             'This option can be provided multiple times and must be '
             'specified at least once for a full run (i.e. without '
             '``--update`` option). Only one stream per '
             'network/station/location combination will be displayed, '
             'selected by the lowest latency.')
    parser.add_argument(
        '-i', '--id', dest='ids', action="append", default=[],
        help='SEED IDs of streams that should be included in addition to the '
             'autodiscovery of streams controlled by ``--location`` and '
             '``--channel`` options (e.g. ``IU.ANMO..LHZ``). '
             'This option can be provided multiple times.')
    parser.add_argument(
        '--skip', dest='skip', action="append", default=[],
        help='Networks or stations that should be skipped (e.g. ``IU`` or '
             '``IU.ANMO``). This option can be provided multiple times.')
    parser.add_argument(
        '-f', '--format', default="MSEED", choices=ENTRY_POINTS['waveform'],
        help='Waveform format of SDS archive. Should be "MSEED" in most '
             'cases. Use ``None`` or empty string for format autodection '
             '(slower and should not be necessary in most all cases). '
             'Warning: formats that do not support ``headonly`` '
             'option in ``read()`` operation will be significantly slower).')
    parser.add_argument(
        '--check-backwards-days', dest='check_back_days', default=30,
        type=int, help='Check for latency backwards for this many days.')
    parser.add_argument(
        '--check-quality-days', dest='check_quality_days', default=7,
        type=int, help='Calculate and plot data availability and number of '
                       'gaps for a period of this many days.')
    parser.add_argument(
        '--latency-warn', dest='latency_warn', default=3600, type=float,
        help='Latency warning threshold in seconds.')
    parser.add_argument(
        '--latency-warn-color', dest='latency_warn_color', default="#FFFF33",
        help='Latency warning threshold color (valid HTML color string).')
    parser.add_argument(
        '--latency-error', dest='latency_error', default=24*3600, type=float,
        help='Latency error threshold in seconds.')
    parser.add_argument(
        '--latency-error-color', dest='latency_error_color',
        default="#E41A1C",
        help='Latency error threshold color (valid HTML color string).')
    parser.add_argument(
        '--percentage-warn', dest='percentage_warn', default=99.5, type=float,
        help='Data availability percentage warning threshold (``0`` to '
             '``100``).')
    parser.add_argument(
        '--gaps-warn', dest='gaps_warn', default=20, type=int,
        help='Gap/overlap number warning threshold.')
    parser.add_argument(
        '--data-quality-warn-color', dest='data_quality_warn_color',
        default="#377EB8",
        help='Data quality (percentage/gap count) warning color '
             '(valid HTML color string).')
    parser.add_argument(
        '--outdated-color', dest='outdated_color', default="#808080",
        help='Color for streams that have no data in check range '
             '(valid HTML color string).')
    parser.add_argument(
        '--ok-color', dest='ok_color', default="#4DAF4A",
        help='Color for streams that pass all checks (valid HTML color '
             'string).')
    parser.add_argument(
        '--background-color', dest='background_color', default="#999999",
        help='Color for background of page (valid HTML color string).')
    parser.add_argument(
        '-V', '--version', action='version',
        version='%(prog)s ' + __version__)
    args = parser.parse_args(argv)
    now = UTCDateTime()
    # Latency lookups stop searching once they reach this point in the past.
    stop_time = now - args.check_back_days * 24 * 3600
    client = Client(args.sds_root)
    # CSV record layout: net, sta, loc, cha, latency, percentage, gap_count.
    dtype_streamfile = np.dtype("U10, U30, U10, U10, f8, f8, i8")
    # Quality checks end one hour before "now" and cover the requested span.
    availability_check_endtime = now - 3600
    availability_check_starttime = (
        availability_check_endtime - (args.check_quality_days * 24 * 3600))
    streams_file = args.output + ".txt"
    html_file = args.output + ".html"
    scan_file = args.output + ".png"
    # Normalize "None"/"" (any case) to actual None to request autodetection.
    if args.format.upper() == "NONE" or args.format == "":
        args.format = None
    # check whether to set up list of streams to check or use existing list
    # update list of streams once per day at nighttime
    if args.update:
        if not os.path.isfile(streams_file):
            msg = ("Update flag specified, but no output of previous full run "
                   "was present in the expected location (as determined by "
                   "``--output`` flag: {})").format(streams_file)
            raise IOError(msg)
        # use existing list of streams and availability information, just
        # update latency
        nslc = np.loadtxt(streams_file, delimiter=",", dtype=dtype_streamfile)
    else:
        if not args.locations or not args.channels:
            msg = ("At least one location code ``--location`` and at least "
                   "one channel code ``--channel`` must be specified.")
            raise ObsPyException(msg)
        nsl = set()
        # get all network/station combinations in SDS archive
        for net, sta in client.get_all_stations():
            if net in args.skip or ".".join((net, sta)) in args.skip:
                continue
            # for all combinations of user specified location and channel
            # codes check if data is in SDS archive
            for loc in args.locations:
                for cha in args.channels:
                    if client.has_data(net, sta, loc, cha):
                        # for now omit channel information, we only include
                        # the channel with lowest latency later on
                        nsl.add((net, sta, loc))
                        break
        nsl = sorted(nsl)
        nslc = []
        # determine which channel to check for each network/station/location
        # combination
        for net, sta, loc in nsl:
            latency = []
            # check latency of all channels that should be checked
            for cha in args.channels:
                latency_ = client.get_latency(net, sta, loc, cha,
                                              stop_time=stop_time)
                # NOTE(review): ``latency_ or np.inf`` maps both None and a
                # latency of exactly 0 to inf — presumably only None occurs
                # in practice; verify against Client.get_latency.
                latency.append(latency_ or np.inf)
            # only include the channel with lowest latency in our stream list
            cha = args.channels[np.argmin(latency)]
            latency = np.min(latency)
            nslc.append((net, sta, loc, cha, latency))
        # explicitly requested streams are appended regardless of discovery
        for id in args.ids:
            net, sta, loc, cha = id.split(".")
            latency = client.get_latency(net, sta, loc, cha,
                                         stop_time=stop_time)
            latency = latency or np.inf
            nslc.append((net, sta, loc, cha, latency))
        nslc_ = []
        # request and assemble availability information.
        # this takes pretty long (on network/slow file systems),
        # so we only do it during a full run here, not during update
        for net, sta, loc, cha, latency in nslc:
            percentage, gap_count = client.get_availability_percentage(
                net, sta, loc, cha, availability_check_starttime,
                availability_check_endtime)
            nslc_.append((net, sta, loc, cha, latency, percentage, gap_count))
        nslc = nslc_
        # write stream list and availability information to file
        nslc = np.array(sorted(nslc), dtype=dtype_streamfile)
        np.savetxt(streams_file, nslc, delimiter=",",
                   fmt=["%s", "%s", "%s", "%s", "%f", "%f", "%d"])
    # generate obspy-scan image
    files = []
    seed_ids = set()
    for nslc_ in nslc:
        net, sta, loc, cha, latency, _, _ = nslc_
        # streams with no recent data are excluded from the scan image
        if np.isinf(latency) or latency > args.check_back_days * 24 * 3600:
            continue
        seed_ids.add(".".join((net, sta, loc, cha)))
        files += client._get_filenames(
            net, sta, loc, cha, availability_check_starttime,
            availability_check_endtime)
    scan(files, format=args.format, starttime=availability_check_starttime,
         endtime=availability_check_endtime, plot=scan_file, verbose=False,
         recursive=True, ignore_links=False, seed_ids=seed_ids,
         print_gaps=False)
    # request and assemble current latency information
    data = []
    for net, sta, loc, cha, latency, percentage, gap_count in nslc:
        # in update mode the stored latency is stale, so refresh it here
        if args.update:
            latency = client.get_latency(net, sta, loc, cha,
                                         stop_time=stop_time)
            latency = latency or np.inf
        data.append((net, sta, loc, cha, latency, percentage, gap_count))
    # separate out the long dead streams
    data_normal = []
    data_outdated = []
    for data_ in data:
        latency = data_[4]
        if np.isinf(latency) or latency > args.check_back_days * 24 * 3600:
            data_outdated.append(data_)
        else:
            data_normal.append(data_)
    # write html output to file
    html = _format_html(args, data_normal, data_outdated)
    with open(html_file, "wt") as fh:
        fh.write(html)
def main(argv=None):
    """Entry point for the ``obspy-sds-report`` command line script.

    Scans an SDS waveform archive for per-stream latency, data availability
    percentage and gap counts and writes a stream list (``<output>.txt``),
    an obspy-scan availability image (``<output>.png``) and an HTML status
    page (``<output>.html``).

    :param argv: Command line arguments to parse; ``None`` means
        ``sys.argv[1:]`` (standard ``argparse`` behavior).
    """
    # Output goes to files only; force a non-interactive backend up front.
    MatplotlibBackend.switch_backend("AGG", sloppy=False)
    parser = ArgumentParser(
        prog='obspy-sds-report', description=__doc__,
        formatter_class=RawDescriptionHelpFormatter)
    parser.add_argument(
        '-r', '--sds-root', dest='sds_root', required=True,
        help='Root folder of SDS archive.')
    parser.add_argument(
        '-o', '--output', dest='output', required=True,
        help='Full path (absolute or relative) of output files, without '
             'suffix (e.g. ``/tmp/sds_report``).')
    parser.add_argument(
        '-u', '--update', dest='update', default=False, action="store_true",
        help='Only update latency information, reuse previously computed list '
             'of streams to check and data percentage and gap count. Many '
             'other options e.g. regarding stream selection (``--id``, '
             ' ``--location``, ..) and time span of data quality checks '
             '(``--check-quality-days``) will be without effect if this '
             'option is specified. Only updating latency is significantly '
             'faster than a full analysis run, a normal use case is to do a '
             'full run once or twice per day and update latency every 5 or '
             'ten minutes. An exception is raised if an update is specified '
             'but the necessary file is not yet present.')
    parser.add_argument(
        '-l', '--location', dest='locations', action="append",
        help='Location codes to look for (e.g. ``""`` for empty location code '
             'or ``"00"``). This option can be provided multiple times and '
             'must be specified at least once for a full run (i.e. without '
             '``--update`` option). While network/station combinations are '
             'automatically discovered, only streams whose location codes are '
             'provided here will be discovered and taken into account and '
             'ultimately displayed.')
    parser.add_argument(
        '-c', '--channel', dest='channels', action="append",
        help='Channel codes to look for (e.g. specified three times with '
             '``HHZ``, ``EHZ`` and ``ELZ`` to cover all stations that serve '
             'a broad-band, short-period or low gain vertical channel). '
             'This option can be provided multiple times and must be '
             'specified at least once for a full run (i.e. without '
             '``--update`` option). Only one stream per '
             'network/station/location combination will be displayed, '
             'selected by the lowest latency.')
    parser.add_argument(
        '-i', '--id', dest='ids', action="append", default=[],
        help='SEED IDs of streams that should be included in addition to the '
             'autodiscovery of streams controlled by ``--location`` and '
             '``--channel`` options (e.g. ``IU.ANMO..LHZ``). '
             'This option can be provided multiple times.')
    parser.add_argument(
        '--skip', dest='skip', action="append", default=[],
        help='Networks or stations that should be skipped (e.g. ``IU`` or '
             '``IU.ANMO``). This option can be provided multiple times.')
    parser.add_argument(
        '-f', '--format', default="MSEED", choices=ENTRY_POINTS['waveform'],
        help='Waveform format of SDS archive. Should be "MSEED" in most '
             'cases. Use ``None`` or empty string for format autodection '
             '(slower and should not be necessary in most all cases). '
             'Warning: formats that do not support ``headonly`` '
             'option in ``read()`` operation will be significantly slower).')
    parser.add_argument(
        '--check-backwards-days', dest='check_back_days', default=30,
        type=int, help='Check for latency backwards for this many days.')
    parser.add_argument(
        '--check-quality-days', dest='check_quality_days', default=7,
        type=int, help='Calculate and plot data availability and number of '
                       'gaps for a period of this many days.')
    parser.add_argument(
        '--latency-warn', dest='latency_warn', default=3600, type=float,
        help='Latency warning threshold in seconds.')
    parser.add_argument(
        '--latency-warn-color', dest='latency_warn_color', default="#FFFF33",
        help='Latency warning threshold color (valid HTML color string).')
    parser.add_argument(
        '--latency-error', dest='latency_error', default=24 * 3600,
        type=float, help='Latency error threshold in seconds.')
    parser.add_argument(
        '--latency-error-color', dest='latency_error_color',
        default="#E41A1C",
        help='Latency error threshold color (valid HTML color string).')
    parser.add_argument(
        '--percentage-warn', dest='percentage_warn', default=99.5, type=float,
        help='Data availability percentage warning threshold (``0`` to '
             '``100``).')
    parser.add_argument(
        '--gaps-warn', dest='gaps_warn', default=20, type=int,
        help='Gap/overlap number warning threshold.')
    parser.add_argument(
        '--data-quality-warn-color', dest='data_quality_warn_color',
        default="#377EB8",
        help='Data quality (percentage/gap count) warning color '
             '(valid HTML color string).')
    parser.add_argument(
        '--outdated-color', dest='outdated_color', default="#808080",
        help='Color for streams that have no data in check range '
             '(valid HTML color string).')
    parser.add_argument(
        '--ok-color', dest='ok_color', default="#4DAF4A",
        help='Color for streams that pass all checks (valid HTML color '
             'string).')
    parser.add_argument(
        '--background-color', dest='background_color', default="#999999",
        help='Color for background of page (valid HTML color string).')
    parser.add_argument(
        '-V', '--version', action='version',
        version='%(prog)s ' + __version__)
    args = parser.parse_args(argv)
    now = UTCDateTime()
    # Latency lookups stop searching once they reach this point in the past.
    stop_time = now - args.check_back_days * 24 * 3600
    client = Client(args.sds_root)
    # CSV record layout: net, sta, loc, cha, latency, percentage, gap_count.
    dtype_streamfile = np.dtype("U10, U30, U10, U10, f8, f8, i8")
    # Quality checks end one hour before "now" and cover the requested span.
    availability_check_endtime = now - 3600
    availability_check_starttime = (
        availability_check_endtime - (args.check_quality_days * 24 * 3600))
    streams_file = args.output + ".txt"
    html_file = args.output + ".html"
    scan_file = args.output + ".png"
    # Normalize "None"/"" (any case) to actual None to request autodetection.
    if args.format.upper() == "NONE" or args.format == "":
        args.format = None
    # check whether to set up list of streams to check or use existing list
    # update list of streams once per day at nighttime
    if args.update:
        if not Path(streams_file).is_file():
            msg = ("Update flag specified, but no output of previous full run "
                   "was present in the expected location (as determined by "
                   "``--output`` flag: {})").format(streams_file)
            raise IOError(msg)
        # use existing list of streams and availability information, just
        # update latency
        nslc = np.loadtxt(streams_file, delimiter=",", dtype=dtype_streamfile)
    else:
        if not args.locations or not args.channels:
            msg = ("At least one location code ``--location`` and at least "
                   "one channel code ``--channel`` must be specified.")
            raise ObsPyException(msg)
        nsl = set()
        # get all network/station combinations in SDS archive
        for net, sta in client.get_all_stations():
            if net in args.skip or ".".join((net, sta)) in args.skip:
                continue
            # for all combinations of user specified location and channel
            # codes check if data is in SDS archive
            for loc in args.locations:
                for cha in args.channels:
                    if client.has_data(net, sta, loc, cha):
                        # for now omit channel information, we only include
                        # the channel with lowest latency later on
                        nsl.add((net, sta, loc))
                        break
        nsl = sorted(nsl)
        nslc = []
        # determine which channel to check for each network/station/location
        # combination
        for net, sta, loc in nsl:
            latency = []
            # check latency of all channels that should be checked
            for cha in args.channels:
                latency_ = client.get_latency(
                    net, sta, loc, cha, stop_time=stop_time,
                    check_has_no_data=False)
                # NOTE(review): ``latency_ or np.inf`` maps both None and a
                # latency of exactly 0 to inf — presumably only None occurs
                # in practice; verify against Client.get_latency.
                latency.append(latency_ or np.inf)
            # only include the channel with lowest latency in our stream list
            cha = args.channels[np.argmin(latency)]
            latency = np.min(latency)
            nslc.append((net, sta, loc, cha, latency))
        # explicitly requested streams are appended regardless of discovery
        for id in args.ids:
            net, sta, loc, cha = id.split(".")
            latency = client.get_latency(
                net, sta, loc, cha, stop_time=stop_time,
                check_has_no_data=False)
            latency = latency or np.inf
            nslc.append((net, sta, loc, cha, latency))
        nslc_ = []
        # request and assemble availability information.
        # this takes pretty long (on network/slow file systems),
        # so we only do it during a full run here, not during update
        for net, sta, loc, cha, latency in nslc:
            percentage, gap_count = client.get_availability_percentage(
                net, sta, loc, cha, availability_check_starttime,
                availability_check_endtime)
            nslc_.append((net, sta, loc, cha, latency, percentage, gap_count))
        nslc = nslc_
        # write stream list and availability information to file
        nslc = np.array(sorted(nslc), dtype=dtype_streamfile)
        np.savetxt(streams_file, nslc, delimiter=",",
                   fmt=["%s", "%s", "%s", "%s", "%f", "%f", "%d"])
    # generate obspy-scan image
    files = []
    seed_ids = set()
    for nslc_ in nslc:
        net, sta, loc, cha, latency, _, _ = nslc_
        # streams with no recent data are excluded from the scan image
        if np.isinf(latency) or latency > args.check_back_days * 24 * 3600:
            continue
        seed_ids.add(".".join((net, sta, loc, cha)))
        files += client._get_filenames(
            net, sta, loc, cha, availability_check_starttime,
            availability_check_endtime)
    scan(files, format=args.format, starttime=availability_check_starttime,
         endtime=availability_check_endtime, plot=scan_file, verbose=False,
         recursive=True, ignore_links=False, seed_ids=seed_ids,
         print_gaps=False)
    # request and assemble current latency information
    data = []
    for net, sta, loc, cha, latency, percentage, gap_count in nslc:
        # in update mode the stored latency is stale, so refresh it here
        if args.update:
            latency = client.get_latency(
                net, sta, loc, cha, stop_time=stop_time,
                check_has_no_data=False)
            latency = latency or np.inf
        data.append((net, sta, loc, cha, latency, percentage, gap_count))
    # separate out the long dead streams
    data_normal = []
    data_outdated = []
    for data_ in data:
        latency = data_[4]
        if np.isinf(latency) or latency > args.check_back_days * 24 * 3600:
            data_outdated.append(data_)
        else:
            data_normal.append(data_)
    # write html output to file
    html = _format_html(args, data_normal, data_outdated)
    with open(html_file, "wt") as fh:
        fh.write(html)
def main(argv=None):
    """Entry point for the ``obspy-scan`` command line script.

    Scans waveform files (or directories of files), builds per-channel
    start/end time lists, and renders an availability overview plot with
    data segments, crosses at segment starts, red rectangles for gaps and
    optional vertical event-time markers. Shows the plot interactively or
    saves it to ``--output``.

    :param argv: Command line arguments to parse; ``None`` means
        ``sys.argv[1:]`` (standard ``argparse`` behavior).
    """
    parser = ArgumentParser(prog='obspy-scan', description=__doc__.strip(),
                            formatter_class=RawDescriptionHelpFormatter)
    parser.add_argument('-V', '--version', action='version',
                        version='%(prog)s ' + __version__)
    parser.add_argument('-f', '--format', choices=ENTRY_POINTS['waveform'],
                        help='Optional, the file format.\n' +
                             ' '.join(__doc__.split('\n')[-4:]))
    parser.add_argument('-v', '--verbose', action='store_true',
                        help='Optional. Verbose output.')
    parser.add_argument('-q', '--quiet', action='store_true',
                        help='Optional. Be quiet. Overwritten by --verbose '
                             'flag.')
    parser.add_argument('-n', '--non-recursive', action='store_false',
                        dest='recursive',
                        help='Optional. Do not descend into directories.')
    parser.add_argument('-i', '--ignore-links', action='store_true',
                        help='Optional. Do not follow symbolic links.')
    parser.add_argument('--start-time', default=None, type=UTCDateTime,
                        help='Optional, a UTCDateTime compatible string. ' +
                             'Only visualize data after this time and set ' +
                             'time-axis axis accordingly.')
    parser.add_argument('--end-time', default=None, type=UTCDateTime,
                        help='Optional, a UTCDateTime compatible string. ' +
                             'Only visualize data before this time and set ' +
                             'time-axis axis accordingly.')
    parser.add_argument('--id', action='append',
                        help='Optional, a SEED channel identifier '
                             "(e.g. 'GR.FUR..HHZ'). You may provide this " +
                             'option multiple times. Only these ' +
                             'channels will be plotted.')
    parser.add_argument('-t', '--event-time', default=None, type=UTCDateTime,
                        action='append',
                        help='Optional, a UTCDateTime compatible string ' +
                             "(e.g. '2010-01-01T12:00:00'). You may provide " +
                             'this option multiple times. These times get ' +
                             'marked by vertical lines in the plot. ' +
                             'Useful e.g. to mark event origin times.')
    parser.add_argument('-w', '--write', default=None,
                        help='Optional, npz file for writing data '
                             'after scanning waveform files')
    parser.add_argument('-l', '--load', default=None,
                        help='Optional, npz file for loading data '
                             'before scanning waveform files')
    parser.add_argument('--no-x', action='store_true',
                        help='Optional, Do not plot crosses.')
    parser.add_argument('--no-gaps', action='store_true',
                        help='Optional, Do not plot gaps.')
    parser.add_argument('-o', '--output', default=None,
                        help='Save plot to image file (e.g. out.pdf, ' +
                             'out.png) instead of opening a window.')
    parser.add_argument('--print-gaps', action='store_true',
                        help='Optional, prints a list of gaps at the end.')
    parser.add_argument('paths', nargs='*',
                        help='Files or directories to scan.')
    args = parser.parse_args(argv)

    # When saving to file, no window is needed; switch backend before any
    # pyplot import below creates a figure.
    if args.output is not None:
        MatplotlibBackend.switch_backend("AGG", sloppy=False)

    # Print help and exit if no arguments are given
    if len(args.paths) == 0 and args.load is None:
        parser.error('No paths specified.')

    # Use recursively parsing function?
    if args.recursive:
        parse_func = recursive_parse
    else:
        parse_func = parse_file_to_dict

    # matplotlib is imported lazily so the backend switch above takes effect
    from matplotlib.dates import date2num, num2date
    from matplotlib.ticker import FuncFormatter
    from matplotlib.patches import Rectangle
    from matplotlib.collections import PatchCollection
    import matplotlib.pyplot as plt

    fig = plt.figure()
    ax = fig.add_subplot(111)

    # Plot vertical lines if option 'event_time' was specified
    if args.event_time:
        times = [date2num(t.datetime) for t in args.event_time]
        for time in times:
            ax.axvline(time, color='k')

    # NOTE: from here on args.start_time / args.end_time hold matplotlib
    # date numbers (floats), no longer UTCDateTime objects.
    if args.start_time:
        args.start_time = date2num(args.start_time.datetime)
    if args.end_time:
        args.end_time = date2num(args.end_time.datetime)

    # Generate dictionary containing nested lists of start and end times per
    # station
    data = {}
    samp_int = {}
    counter = 1
    if args.load:
        load_npz(args.load, data, samp_int)
    for path in args.paths:
        counter = parse_func(data, samp_int, path, counter, args.format,
                             verbose=args.verbose, quiet=args.quiet,
                             ignore_links=args.ignore_links)
    if not data:
        if args.verbose or not args.quiet:
            print("No waveform data found.")
        return
    if args.write:
        write_npz(args.write, data, samp_int)

    # either use ids specified by user or use ids based on what data we have
    # parsed
    ids = args.id or list(data.keys())
    # reversed sort so the first id ends up at the top of the y-axis
    ids = sorted(ids)[::-1]
    labels = [""] * len(ids)
    if args.verbose or not args.quiet:
        print('\n')
    for _i, _id in enumerate(ids):
        labels[_i] = ids[_i]
        # sort data list and sampling rate list
        if _id in data:
            startend = np.array(data[_id])
            _samp_int = np.array(samp_int[_id])
            indices = np.lexsort((startend[:, 1], startend[:, 0]))
            startend = startend[indices]
            _samp_int = _samp_int[indices]
        else:
            startend = np.array([])
            _samp_int = np.array([])
        if len(startend) == 0:
            # no data at all for this id: if a full time range was given,
            # mark the whole range as one gap, otherwise skip the row
            if not (args.start_time and args.end_time):
                continue
            if not args.no_gaps:
                rects = [Rectangle((args.start_time, _i - 0.4),
                                   args.end_time - args.start_time, 0.8)]
                ax.add_collection(PatchCollection(rects, color="r"))
            if args.print_gaps and (args.verbose or not args.quiet):
                print("%s %s %s %.3f" % (
                    _id, args.start_time, args.end_time,
                    args.end_time - args.start_time))
            continue
        # restrict plotting of results to given start/end time
        if args.start_time:
            indices = startend[:, 1] > args.start_time
            startend = startend[indices]
            _samp_int = _samp_int[indices]
            if len(startend) == 0:
                continue
        if args.end_time:
            indices = startend[:, 0] < args.end_time
            startend = startend[indices]
            _samp_int = _samp_int[indices]
            if len(startend) == 0:
                continue
        data_start = startend[:, 0].min()
        data_end = startend[:, 1].max()
        timerange_start = args.start_time or data_start
        timerange_end = args.end_time or data_end
        timerange = timerange_end - timerange_start
        if timerange == 0.0:
            warnings.warn('Zero sample long data for _id=%s, skipping' % _id)
            continue
        startend_compressed = compress_start_end(startend, 1000)
        offset = np.ones(len(startend)) * _i  # generate list of y values
        if not args.no_x:
            ax.plot(startend[:, 0], offset, 'x', linewidth=2)
        ax.hlines(offset[:len(startend_compressed)],
                  startend_compressed[:, 0], startend_compressed[:, 1],
                  'b', linewidth=2, zorder=3)
        # find the gaps
        diffs = startend[1:, 0] - startend[:-1, 1]  # currend.start - last.end
        gapsum = diffs[diffs > 0].sum()
        # if start- and/or endtime is specified, add missing data at start/end
        # to gap sum
        has_gap = False
        # non-zero (truthy) float when data starts after the requested start
        gap_at_start = (
            args.start_time and
            data_start > args.start_time and
            data_start - args.start_time)
        gap_at_end = (
            args.end_time and
            args.end_time > data_end and
            args.end_time - data_end)
        if args.start_time and gap_at_start:
            gapsum += gap_at_start
            has_gap = True
        if args.end_time and gap_at_end:
            gapsum += gap_at_end
            has_gap = True
        perc = (timerange - gapsum) / timerange
        labels[_i] = labels[_i] + "\n%.1f%%" % (perc * 100)
        # a spacing larger than 1.8 sample intervals counts as a gap
        gap_indices = diffs > 1.8 * _samp_int[:-1]
        gap_indices = np.append(gap_indices, False)
        has_gap |= any(gap_indices)
        if has_gap:
            # don't handle last end time as start of gap
            gaps_start = startend[gap_indices, 1]
            gaps_end = startend[np.roll(gap_indices, 1), 0]
            if args.start_time and gap_at_start:
                gaps_start = np.append(gaps_start, args.start_time)
                gaps_end = np.append(gaps_end, data_start)
            if args.end_time and gap_at_end:
                gaps_start = np.append(gaps_start, data_end)
                gaps_end = np.append(gaps_end, args.end_time)
            if not args.no_gaps:
                rects = [Rectangle((start_, offset[0] - 0.4),
                                   end_ - start_, 0.8)
                         for start_, end_ in zip(gaps_start, gaps_end)]
                ax.add_collection(PatchCollection(rects, color="r"))
            if args.print_gaps:
                for start_, end_ in zip(gaps_start, gaps_end):
                    # convert matplotlib date numbers back to UTCDateTime
                    # for human-readable gap output
                    start_, end_ = num2date((start_, end_))
                    start_ = UTCDateTime(start_.isoformat())
                    end_ = UTCDateTime(end_.isoformat())
                    if args.verbose or not args.quiet:
                        print("%s %s %s %.3f" % (_id, start_, end_,
                                                 end_ - start_))

    # Pretty format the plot
    ax.set_ylim(0 - 0.5, len(ids) - 0.5)
    ax.set_yticks(np.arange(len(ids)))
    ax.set_yticklabels(labels, family="monospace", ha="right")
    fig.autofmt_xdate()  # rotate date
    ax.xaxis_date()
    # set custom formatters to always show date in first tick
    formatter = ObsPyAutoDateFormatter(ax.xaxis.get_major_locator())
    formatter.scaled[1 / 24.] = \
        FuncFormatter(decimal_seconds_format_date_first_tick)
    formatter.scaled.pop(1/(24.*60.))
    ax.xaxis.set_major_formatter(formatter)
    plt.subplots_adjust(left=0.2)
    # set x-axis limits according to given start/end time
    if args.start_time and args.end_time:
        ax.set_xlim(left=args.start_time, right=args.end_time)
    elif args.start_time:
        ax.set_xlim(left=args.start_time, auto=None)
    elif args.end_time:
        ax.set_xlim(right=args.end_time, auto=None)
    else:
        # pad the autoscaled range by 5% on each side
        left, right = ax.xaxis.get_data_interval()
        x_axis_range = right - left
        ax.set_xlim(left - 0.05 * x_axis_range,
                    right + 0.05 * x_axis_range)

    if args.output is None:
        plt.show()
    else:
        # scale figure size with number of rows and covered time span
        fig.set_dpi(72)
        height = len(ids) * 0.5
        height = max(4, height)
        fig.set_figheight(height)
        plt.tight_layout()

        if not args.start_time or not args.end_time:
            days = ax.get_xlim()
            days = days[1] - days[0]
        else:
            days = args.end_time - args.start_time

        width = max(6, days / 30.)
        width = min(width, height * 4)
        fig.set_figwidth(width)
        plt.subplots_adjust(top=1, bottom=0, left=0, right=1)
        plt.tight_layout()

        fig.savefig(args.output)

    if args.verbose and not args.quiet:
        sys.stdout.write('\n')
def run(argv=None, interactive=True):
    """Entry point for the ``obspy-runtests`` command line script.

    Parses command line options, configures verbosity-dependent NumPy
    error handling and warning filters, honours ``OBSPY_*`` environment
    variables, and delegates to ``run_tests``.

    :param argv: Command line arguments to parse; ``None`` means
        ``sys.argv[1:]`` (standard ``argparse`` behavior).
    :param interactive: Whether the user may be prompted (e.g. before
        submitting a test report); forced to ``False`` by ``--dontask``
        or ``--quiet``.
    :returns: The result of ``run_tests``.
    """
    # Tests must not open plot windows; force a non-interactive backend.
    MatplotlibBackend.switch_backend("AGG", sloppy=False)
    parser = ArgumentParser(prog="obspy-runtests",
                            description="A command-line program that runs all "
                                        "ObsPy tests.")
    parser.add_argument("-V", "--version", action="version",
                        version="%(prog)s " + get_git_version())
    parser.add_argument("-v", "--verbose", action="store_true",
                        help="verbose mode")
    parser.add_argument("-q", "--quiet", action="store_true",
                        help="quiet mode")
    # filter options
    filter = parser.add_argument_group(
        "Module Filter",
        "Providing no modules will test all "
        "ObsPy modules which do not require an "
        "active network connection.",
    )
    filter.add_argument("-a", "--all", action="store_true",
                        help="test all modules (including network modules)")
    filter.add_argument("-x", "--exclude", action="append",
                        help="exclude given module from test")
    filter.add_argument("tests", nargs="*",
                        help="test modules to run")
    # timing / profile options
    timing = parser.add_argument_group("Timing/Profile Options")
    timing.add_argument("-t", "--timeit", action="store_true",
                        help="shows accumulated run times of each module")
    timing.add_argument("-s", "--slowest", default=0, type=int, dest="n",
                        help="lists n slowest test cases")
    timing.add_argument(
        "-p", "--profile", action="store_true",
        help="uses cProfile, saves the results to file " +
             "obspy.pstats and prints some profiling numbers",
    )
    # reporting options
    # NOTE: the name ``report`` is rebound to a bool further below; the
    # argument group object is not needed after the options are added.
    report = parser.add_argument_group("Reporting Options")
    report.add_argument("-r", "--report", action="store_true",
                        help="automatically submit a test report")
    report.add_argument(
        "-d", "--dontask", action="store_true",
        help="don't explicitly ask for submitting a test "
             "report"
    )
    report.add_argument("-u", "--server", default="tests.obspy.org",
                        help="report server (default is tests.obspy.org)")
    report.add_argument("-n", "--node", dest="hostname", default=HOSTNAME,
                        help="nodename visible at the report server")
    report.add_argument("-l", "--log", default=None,
                        help="append log file to test report")
    report.add_argument("--ci-url", default=None, dest="ci_url",
                        help="URL to Continuous Integration job page.")
    report.add_argument("--pr-url", default=None, dest="pr_url",
                        help="Github (Pull Request) URL.")
    # other options
    others = parser.add_argument_group("Additional Options")
    others.add_argument("--tutorial", action="store_true",
                        help="add doctests in tutorial")
    others.add_argument("--no-flake8", action="store_true",
                        help="skip code formatting test")
    others.add_argument(
        "--keep-images", action="store_true",
        help="store images created during image comparison "
             "tests in subfolders of baseline images",
    )
    others.add_argument(
        "--keep-only-failed-images", action="store_true",
        help="when storing images created during testing, "
             "only store failed images and the corresponding "
             "diff images (but not images that passed the "
             "corresponding test).",
    )
    args = parser.parse_args(argv)
    # set correct verbosity level
    if args.verbose:
        verbosity = 2
        # raise all NumPy warnings
        np.seterr(all="raise")
        # raise user and deprecation warnings
        warnings.simplefilter("error", UserWarning)
    elif args.quiet:
        verbosity = 0
        # ignore user and deprecation warnings
        warnings.simplefilter("ignore", DeprecationWarning)
        warnings.simplefilter("ignore", UserWarning)
        # don't ask to send a report
        args.dontask = True
    else:
        verbosity = 1
        # show all NumPy warnings
        np.seterr(all="print")
        # ignore user warnings
        warnings.simplefilter("ignore", UserWarning)
    # check for send report option or environmental settings
    if args.report or "OBSPY_REPORT" in os.environ.keys():
        report = True
    else:
        report = False
    if "OBSPY_REPORT_SERVER" in os.environ.keys():
        args.server = os.environ["OBSPY_REPORT_SERVER"]
    # check interactivity settings
    if interactive and args.dontask:
        interactive = False
    # flags are communicated to the test machinery via environment variables;
    # presence of the variable is the signal, so an empty value suffices
    if args.keep_images:
        os.environ["OBSPY_KEEP_IMAGES"] = ""
    if args.keep_only_failed_images:
        os.environ["OBSPY_KEEP_ONLY_FAILED_IMAGES"] = ""
    if args.no_flake8:
        os.environ["OBSPY_NO_FLAKE8"] = ""
    return run_tests(
        verbosity, args.tests, report, args.log, args.server, args.all,
        args.timeit, interactive, args.n, exclude=args.exclude,
        tutorial=args.tutorial, hostname=args.hostname, ci_url=args.ci_url,
        pr_url=args.pr_url,
    )