def test_corner_freqs():

    event_time = UTCDateTime('2001-02-28T18:54:32')
    ALCT_tr = read(os.path.join(datadir, 'ALCTENE.UW..sac'))[0]
    ALCT_dist = 75.9559

    corners_1 = process.get_corner_frequencies(ALCT_tr, event_time, ALCT_dist,
                                               split_method='velocity')
    # np.testing.assert_allclose(corners_1, [0.036, 50.0], atol=0.001)
    np.testing.assert_allclose(corners_1, [3.281554e-02, 50.0], atol=0.001)

    ALCT_tr.stats.starttime += 300
    corners_2 = process.get_corner_frequencies(ALCT_tr, event_time, ALCT_dist,
                                               split_method='velocity')
    assert corners_2 == [-1, -1]

    event_time = UTCDateTime('2016-10-22T17:17:05')
    ALKI_tr = read(os.path.join(datadir, 'ALKIENE.UW..sac'))[0]
    ALKI_dist = 37.87883
    corners_3 = process.get_corner_frequencies(ALKI_tr, event_time, ALKI_dist,
                                               ratio=100000.0,
                                               split_method='velocity')
    assert corners_3 == [-2, -2]
    corners_4 = process.get_corner_frequencies(ALKI_tr, event_time, ALKI_dist,
                                               split_method='velocity')
    # assert corners_4 == [-3, -3]
    assert corners_4 == [-2, -2]


def _test_horizontal_frequencies():
    config = get_config()
    event_time = UTCDateTime('2001-02-28T18:54:32')
    ALCT_tr1 = read(os.path.join(datadir, 'ALCTENE.UW..sac'))[0]
    ALCT_tr2 = read(os.path.join(datadir, 'ALCTENN.UW..sac'))[0]
    stream = [ALCT_tr1, ALCT_tr2]

    ALCT_dist = 75.9559
    processed = process.process_config(
        stream, config=config,
        event_time=event_time, epi_dist=ALCT_dist)
    for trace in processed:
        corners = trace.stats.processing_parameters.corners
        # assert corners['default_high_frequency'] == 50
        np.testing.assert_allclose([corners['default_high_frequency']], [50.0])
        # assert corners['default_low_frequency'] == 0.018310546875
        assert corners['default_low_frequency'] == 0.01595909725588508

    stream[0].stats.channel = 'Z'
    processed = process.process_config(
        stream, config=config,
        event_time=event_time, epi_dist=ALCT_dist)
    corners1 = processed[0].stats.processing_parameters.corners
    high1 = corners1['default_high_frequency']
    low1 = corners1['default_low_frequency']
    assert np.allclose([high1], [50.0])
    # assert low1 == 0.0244140625
    assert low1 == 0.02155036612037732
    corners2 = processed[1].stats.processing_parameters.corners
    high2 = corners2['default_high_frequency']
    low2 = corners2['default_low_frequency']
    # assert high2 == 48.4619140625
    assert high2 == 48.52051157467704
    # assert low2 == 0.018310546875
    assert low2 == 0.01595909725588508
Example #3
def test_get_stream(mock_ntf, data):
    mseeddata = data.read('trace_GE.APE.mseed')

    segment = MockSegment(mseeddata)
    tobspy = time.time()
    stream_obspy = read(BytesIO(mseeddata))
    tobspy = time.time() - tobspy
    tme = time.time()
    stream_me = get_stream(segment)
    tme = time.time() - tme
    # assert we are faster (i.e., that calling read with format='MSEED' is
    # faster than calling it with format=None)
    assert tme < tobspy
    assert (stream_obspy[0].data == stream_me[0].data).all()
    assert not mock_ntf.called

    with pytest.raises(TypeError):
        stream_obspy = read(BytesIO(mseeddata[:5]))
    assert mock_ntf.called

    mock_ntf.reset_mock()
    segment = MockSegment(mseeddata[:5])
    with pytest.raises(ValueError):
        stream_me = get_stream(segment)
    assert not mock_ntf.called
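The test relies on a MockSegment helper that is not shown here; a minimal sketch, assuming get_stream only needs the raw miniSEED bytes exposed as a data attribute:

class MockSegment:
    # hypothetical stand-in for the real test fixture
    def __init__(self, data):
        self.data = data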
Example #4
 def test_plotMultipleTraces(self):
     """
     Plots multiple traces underneath.
     """
     # 1 trace
     st = read()[1]
     with ImageComparison(self.path, 'waveform_1_trace.png') as ic:
         st.plot(outfile=ic.name, automerge=False)
     # 3 traces
     st = read()
     with ImageComparison(self.path, 'waveform_3_traces.png') as ic:
         st.plot(outfile=ic.name, automerge=False)
     # 5 traces
     st = st[1] * 5
     with ImageComparison(self.path, 'waveform_5_traces.png') as ic:
         st.plot(outfile=ic.name, automerge=False)
     # 10 traces
     st = st[1] * 10
     with ImageComparison(self.path, 'waveform_10_traces.png') as ic:
         st.plot(outfile=ic.name, automerge=False)
     # 10 traces - huge numbers
     st = st[1] * 10
     for i, tr in enumerate(st):
         # scale data to have huge numbers
         st[i].data = tr.data * 10 ** i
     with ImageComparison(self.path, 'waveform_10_traces_huge.png') as ic:
         st.plot(outfile=ic.name, automerge=False, equal_scale=False)
     # 10 traces - tiny numbers
     st = st[1] * 10
     for i, tr in enumerate(st):
         # scale data to have tiny numbers
         st[i].data = tr.data / (10 ** i)
     with ImageComparison(self.path, 'waveform_10_traces_tiny.png') as ic:
         st.plot(outfile=ic.name, automerge=False, equal_scale=False)
Example #5
 def test_plot_multiple_traces(self):
     """
     Plots multiple traces underneath.
     """
     # 1 trace
     st = read()[1]
     with ImageComparison(self.path, 'waveform_1_trace.png') as ic:
         st.plot(outfile=ic.name, automerge=False)
     # 3 traces
     st = read()
     with ImageComparison(self.path, 'waveform_3_traces.png') as ic:
         st.plot(outfile=ic.name, automerge=False)
     # 5 traces
     st = st[1] * 5
     with ImageComparison(self.path, 'waveform_5_traces.png') as ic:
         st.plot(outfile=ic.name, automerge=False)
     # 10 traces
     st = st[1] * 10
     with ImageComparison(self.path, 'waveform_10_traces.png') as ic:
         st.plot(outfile=ic.name, automerge=False)
     # 10 traces - huge numbers
     st = st[1] * 10
     for i, tr in enumerate(st):
         # scale data to have huge numbers
         st[i].data = tr.data * 10**i
     with ImageComparison(self.path, 'waveform_10_traces_huge.png') as ic:
         st.plot(outfile=ic.name, automerge=False, equal_scale=False)
     # 10 traces - tiny numbers
     st = st[1] * 10
     for i, tr in enumerate(st):
         # scale data to have tiny numbers
         st[i].data = tr.data / (10**i)
     with ImageComparison(self.path, 'waveform_10_traces_tiny.png') as ic:
         st.plot(outfile=ic.name, automerge=False, equal_scale=False)
Example #6
 def test_plotMultipleTraces(self):
     """
     Plots multiple traces underneath.
     """
     # 1 trace
     st = read()[0]
     with NamedTemporaryFile(suffix='.png') as tf:
         st.plot(outfile=tf.name, automerge=False)
         expected_image = os.path.join(self.path, 'waveform_1_trace.png')
         compare_images(tf.name, expected_image, 0.001)
     # 3 traces
     st = read()
     with NamedTemporaryFile(suffix='.png') as tf:
         st.plot(outfile=tf.name, automerge=False)
         expected_image = os.path.join(self.path, 'waveform_3_traces.png')
         compare_images(tf.name, expected_image, 0.001)
     # 5 traces
     st = st[1] * 5
     with NamedTemporaryFile(suffix='.png') as tf:
         st.plot(outfile=tf.name, automerge=False)
         expected_image = os.path.join(self.path, 'waveform_5_traces.png')
         compare_images(tf.name, expected_image, 0.001)
     # 10 traces
     st = st[1] * 10
     with NamedTemporaryFile(suffix='.png') as tf:
         st.plot(outfile=tf.name, automerge=False)
         expected_image = os.path.join(self.path, 'waveform_10_traces.png')
         compare_images(tf.name, expected_image, 0.001)
     # 10 traces - huge numbers
     st = st[1] * 10
     for i, tr in enumerate(st):
         # scale data to have huge numbers
         st[i].data = tr.data * 10**i
     with NamedTemporaryFile(suffix='.png') as tf:
         st.plot(outfile=tf.name, automerge=False, equal_scale=False)
         expected_image = os.path.join(self.path,
                                       'waveform_10_traces_huge.png')
         compare_images(tf.name, expected_image, 0.001)
     # 10 traces - tiny numbers
     st = st[1] * 10
     for i, tr in enumerate(st):
         # scale data to have tiny numbers
         st[i].data = tr.data / (10**i)
     with NamedTemporaryFile(suffix='.png') as tf:
         st.plot(outfile=tf.name, automerge=False, equal_scale=False)
         expected_image = os.path.join(self.path,
                                       'waveform_10_traces_tiny.png')
         compare_images(tf.name, expected_image, 0.001)
Example #7
 def test_plot_multiple_traces_1_trace(self, image_path):
     """
     Plots multiple traces underneath.
     """
     # 1 trace
     st = read()[1]
     st.plot(outfile=image_path, automerge=False)
Example #8
def test_split():
    tr1 = read(os.path.join(datadir, 'CN.BBB..BHE.sac'))[0]
    success = False
    try:
        process.split_signal_and_noise(tr1, split_method='p_arrival')
        success = True
    except ValueError:
        pass
    assert success is False

    success = False
    try:
        process.split_signal_and_noise(tr1, split_method='velocity')
        success = True
    except ValueError:
        pass
    assert success is False

    success = False
    try:
        process.split_signal_and_noise(tr1, split_method='invalid')
        success = True
    except ValueError:
        pass
    assert success is False
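The try/except/assert pattern above can be written more compactly with pytest; an equivalent sketch for one of the three cases, reusing the read, datadir and process names from the snippet:

import pytest

def test_split_invalid_method():
    tr1 = read(os.path.join(datadir, 'CN.BBB..BHE.sac'))[0]
    # an unsupported split_method is expected to raise
    with pytest.raises(ValueError):
        process.split_signal_and_noise(tr1, split_method='invalid')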
Example #9
def is_obspy(filename, config=None):
    """Check to see if file is a format supported by Obspy (not KNET).

    Note: Currently only SAC and Miniseed are supported.

    Args:
        filename (str):
            Path to possible Obspy format.
        config (dict):
            Dictionary containing configuration.

    Returns:
        bool: True if obspy supported, otherwise False.
    """
    logging.debug("Checking if format is supported by obspy.")
    if config is None:
        config = get_config()
    metadir = config["read"]["metadata_directory"]
    if not os.path.isfile(filename):
        return False
    try:
        stream = read(filename)
        if stream[0].stats._format in IGNORE_FORMATS:
            return False
        if stream[0].stats._format in REQUIRES_XML:
            xmlfile = _get_station_file(filename, stream, metadir)
            if not os.path.isfile(xmlfile):
                return False
            return True
        else:
            return True
    except BaseException:
        return False

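A hypothetical call; the waveform path and the config layout (only the read/metadata_directory key used above) are placeholders:

cfg = {'read': {'metadata_directory': '/data/station_xml'}}
if is_obspy('/data/waveforms/UW.ALCT..ENE.sac', config=cfg):
    stream = read('/data/waveforms/UW.ALCT..ENE.sac')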
Example #10
 def test_plot_multiple_traces_10_traces(self, image_path):
     """
     10 traces
     """
     # 10 traces
     st = read()[1] * 10
     st.plot(outfile=image_path, automerge=False)
Example #11
def is_obspy(filename):
    """Check to see if file is a format supported by Obspy (not KNET).

    Note: Currently only SAC and Miniseed are supported.

    Args:
        filename (str):
            Path to possible Obspy format.
    Returns:
        bool: True if obspy supported, otherwise False.
    """
    logging.debug("Checking if format is supported by obspy.")
    if not os.path.isfile(filename):
        return False
    try:
        stream = read(filename)
        if stream[0].stats._format in IGNORE_FORMATS:
            return False
        if stream[0].stats._format in REQUIRES_XML:
            xmlfile = _get_station_file(filename, stream)
            if not os.path.isfile(xmlfile):
                return False
            return True
        else:
            return True
    except BaseException:
        return False

Example #12
 def on_data(self, trace):
     global i
     global traces
     global fn
     global day
     if i == 1:
         print('Received traces. Checking for existing data...')
         if (os.path.isfile(fn)):
             print('Found %s, reading...' % fn)
             traces = read(fn)
             print('Done.')
         else:
             print('No data found. Creating new blank trace to write to...')
             traces = Trace()
         traces = trace
         print('Trace %s: %s' % (i, trace))
     else:
         print('Trace %s: %s' % (i, trace))
         traces += trace
         if i % 10 == 0:
             print('Saving %s traces to %s...' % (i, fn))
             traces.write(fn, format='MSEED')
             print('Done.')
     i += 1
     if (day != UTCDateTime.now().strftime('%Y.%j')):
         day = UTCDateTime.now().strftime('%Y.%j')
         fn = '%s.%s.%s.%s.D.%s' % (net, sta, loc, cha, day)
         i = 1
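The callback depends on module-level state; a sketch of the globals it expects, with the SEED identifiers below chosen purely as placeholders:

net, sta, loc, cha = 'AM', 'R0000', '00', 'EHZ'  # placeholder SEED codes
day = UTCDateTime.now().strftime('%Y.%j')
fn = '%s.%s.%s.%s.D.%s' % (net, sta, loc, cha, day)
i = 1            # trace counter, reset when the day rolls over
traces = None    # populated on the first received trace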
Example #13
 def test_plot_multiple_traces_10_traces_huge(self, image_path):
     """
     10 traces - huge numbers
     """
     st = read()[1] * 10
     for i, tr in enumerate(st):
         # scale data to have huge numbers
         st[i].data = tr.data * 10**i
     st.plot(outfile=image_path, automerge=False, equal_scale=False)
Example #14
def readallchannels(dataset, operation='eventdir'):
    """
    Wraps obspy.core.stream.read so various seismic file
    formats can be read in one pass.

    Assumes data files are organized in event directories.
    This can be improved

    :type dataset: list
    :param dataset: files to read.
    :rtype: class obspy.core.stream
    :return: class of data streams.
    """
    # 1) Iterate over full files names
    # 2) Get all available channel of each file
    # 3) Read individually in stream.

    # Reading the waveforms
    eventset = []
    waveformset = Stream()
    for e, eventfile in enumerate(dataset):

        eventfile = eventfile.strip()
        waveformset += read(eventfile)
        eventset.append(e)
        (root, ext) = os.path.splitext(eventfile)
        channelset = glob.glob(
            os.path.dirname(eventfile) + '/*' + waveformset[-1].stats.station +
            '*' + ext)

        for c, channelfile in enumerate(channelset):
            if not eventfile == channelfile:
                eventset.append(e)
                waveformset += read(
                    channelfile
                )  # files with incorrect metadata may be ignored (e.g. YHHN in station metadata)

                #df = waveformset[-1].stats.sampling_rate
                #cft = classicSTALTA(waveformset[-1].data, int(5 * df), int(10 * df))
                #plotTrigger(waveformset[-1], cft, 1.5, 0.5)

    return waveformset
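A hypothetical call with one file per event directory (paths are placeholders); sibling channel files of the same station are picked up automatically via the glob:

events = ['event_001/STA01.BHZ.sac', 'event_002/STA02.BHZ.sac']
waveforms = readallchannels(events)
print(waveforms)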
Example #15
 def test_append_not_float32(self):
     """
     Test for not using float32.
     """
     tr = read()[0]
     tr.data = np.require(tr.data, dtype=native_str('>f4'))
     traces = tr / 3
     rtr = RtTrace()
     for trace in traces:
         rtr.append(trace)
Example #16
 def test_appendNotFloat32(self):
     """
     Test for not using float32.
     """
     tr = read()[0]
     tr.data = np.require(tr.data, dtype=native_str('>f4'))
     traces = tr / 3
     rtr = RtTrace()
     for trace in traces:
         rtr.append(trace)
Example #17
def readallchannels(dataset, operation='eventdir'):
    """
    Wraps obspy.core.stream.read so various seismic file
    formats can be read in one pass.

    Assumes data files are organized in event directories.
    This can be improved

    :type dataset: list
    :param dataset: files to read.
    :rtype: class obspy.core.stream
    :return: class of data streams.
    """
    # 1) Iterate over full files names
    # 2) Get all available channel of each file
    # 3) Read individually in stream.

    # Reading the waveforms
    eventset = []
    waveformset = Stream()
    for e, eventfile in enumerate(dataset):

        eventfile = eventfile.strip()
        waveformset += read(eventfile)
        eventset.append(e)
        (root, ext) = os.path.splitext(eventfile)
        channelset = glob.glob(os.path.dirname(eventfile) + '/*' +
                               waveformset[-1].stats.station + '*' + ext)

        for c, channelfile in enumerate(channelset):
            if not eventfile == channelfile:
                eventset.append(e)
                waveformset += read(channelfile)  # files with incorrect metadata may be ignored (e.g. YHHN in station metadata)

                #df = waveformset[-1].stats.sampling_rate
                #cft = classicSTALTA(waveformset[-1].data, int(5 * df), int(10 * df))
                #plotTrigger(waveformset[-1], cft, 1.5, 0.5)

    return waveformset
Example #18
 def test_plotWithLabels(self):
     """
     Plots with labels.
     """
     st = read()
     st.label = u"Title #1 üöä?"
     st[0].label = 'Hello World!'
     st[1].label = u'Hällö Wörld & Marß'
     st[2].label = '*' * 80
     outfile = os.path.join(self.path, 'waveform_labels.png')
     st.plot(outfile=outfile)
Example #19
 def test_plotWithLabels(self):
     """
     Plots with labels.
     """
     st = read()
     st.label = u"Title #1 üöä?"
     st[0].label = 'Hello World!'
     st[1].label = u'Hällö Wörld & Marß'
     st[2].label = '*' * 80
     outfile = os.path.join(self.path, 'waveform_labels.png')
     st.plot(outfile=outfile)
Example #20
def run(catalog_path, trace_folder, dump_folder):
    catalog = Catalog.from_csv(catalog_path)
    trace_writer = TraceWriter(dump_folder)

    for filename in tqdm.tqdm(os.listdir(trace_folder)):
        file_stream = stream.read(os.path.join(trace_folder, filename))
        for trace in file_stream:
            if get_trace_component(trace) != 'Z':
                continue
            if len(catalog.get_trace_picks(trace)) > 0:
                trace_writer.write(trace)
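get_trace_component is not defined in this snippet; a minimal sketch, assuming the component is the last character of the SEED channel code:

def get_trace_component(trace):
    # e.g. 'HHZ' -> 'Z'
    return trace.stats.channel[-1]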
Example #21
 def test_plot_with_labels(self, image_path):
     """
     Plots with labels.
     """
     st = read()
     st.label = u"Title #1 üöä?"
     st[0].label = 'Hello World!'
     st[1].label = u'Hällö Wörld & Marß'
     st[2].label = '*' * 80
     # create and compare image
     st.plot(outfile=image_path)
Example #22
def GetStream(path):

    st = Stream()
    if os.path.isfile(path):
        with open(path, 'r') as f:
            flst = f.readlines()

        for line in flst:
            sacfile = line.strip()
            st += read(sacfile)
    elif os.path.isdir(path):
        flst = os.listdir(path)

        for item in flst:
            sacfile = os.path.join(path, item)
            st += read(sacfile)
    else:
        print("Parameter Error: %s is neither a file nor a directory" % path)
        exit(0)

    return st
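A usage sketch for both input modes (paths are placeholders): a text file listing one SAC path per line, or a directory containing only SAC files:

st = GetStream('sacfiles.txt')  # file with one SAC path per line
st = GetStream('sac_data/')     # directory of SAC files
print(st)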
Example #23
 def test_plotWithLabels(self):
     """
     Plots with labels.
     """
     st = read()
     st.label = u"Title #1 üöä?"
     st[0].label = 'Hello World!'
     st[1].label = u'Hällö Wörld & Marß'
     st[2].label = '*' * 80
     # create and compare image
     with ImageComparison(self.path, 'waveform_labels.png') as ic:
         st.plot(outfile=ic.name)
Example #24
 def test_plot_with_labels(self):
     """
     Plots with labels.
     """
     st = read()
     st.label = u"Title #1 üöä?"
     st[0].label = 'Hello World!'
     st[1].label = u'Hällö Wörld & Marß'
     st[2].label = '*' * 80
     # create and compare image
     with ImageComparison(self.path, 'waveform_labels.png') as ic:
         st.plot(outfile=ic.name)
Example #25
def read_fdsn(filename):
    """Read Obspy data file (SAC, MiniSEED, etc).

    Args:
        filename (str):
            Path to data file.
        kwargs (ref):
            Other arguments will be ignored.
    Returns:
        Stream: StationStream object.
    """
    logging.debug("Starting read_fdsn.")
    if not is_fdsn(filename):
        raise Exception('%s is not a valid Obspy file format.' % filename)

    streams = []
    tstream = read(filename)
    xmlfile = _get_station_file(filename, tstream)
    inventory = read_inventory(xmlfile)
    traces = []
    for ttrace in tstream:
        trace = StationTrace(data=ttrace.data,
                             header=ttrace.stats,
                             inventory=inventory)
        location = ttrace.stats.location

        trace.stats.channel = get_channel_name(
            trace.stats.sampling_rate, trace.stats.channel[1] == 'N',
            inventory.get_orientation(trace.id)['dip'] in [90, -90]
            or trace.stats.channel[2] == 'Z',
            is_channel_north(inventory.get_orientation(trace.id)['azimuth']))

        if trace.stats.location == '':
            trace.stats.location = '--'

        network = ttrace.stats.network
        if network in LOCATION_CODES:
            codes = LOCATION_CODES[network]
            if location in codes:
                sdict = codes[location]
                if sdict['free_field']:
                    trace.stats.standard.structure_type = 'free_field'
                else:
                    trace.stats.standard.structure_type = sdict['description']
        head, tail = os.path.split(filename)
        trace.stats['standard']['source_file'] = tail or os.path.basename(head)
        traces.append(trace)
    stream = StationStream(traces=traces)
    streams.append(stream)

    return streams
Example #26
 def test_plotWithLabels(self):
     """
     Plots with labels.
     """
     st = read()
     st.label = u"Title #1 üöä?"
     st[0].label = 'Hello World!'
     st[1].label = u'Hällö Wörld & Marß'
     st[2].label = '*' * 80
     # create and compare image
     with NamedTemporaryFile(suffix='.png') as tf:
         st.plot(outfile=tf.name)
         # compare images
         expected_image = os.path.join(self.path, 'waveform_labels.png')
         compare_images(tf.name, expected_image, 0.001)
Example #27
def run(catalog_path, signal_folder, output_folder, window_size, event_start_offset):

	window_writer = WindowWriter(
		folder=output_folder,
		window_size=window_size,
		event_start_offset=event_start_offset)

	catalog = Catalog.from_csv(catalog_path)

	for signal_filename in tqdm.tqdm(os.listdir(signal_folder)):
		# Read stream
		st = stream.read(os.path.join(signal_folder, signal_filename))
		# Streams are expected to only have one trace; if there are multiple, only the first one is used
		trace = st[0]
		window_writer.write_windows(trace, catalog)
Example #28
def test_amp_check_trim():

    # read the two sac files for testing
    # one is unedited with a standard maximum amplitude
    # the second has been multiplied so that it fails the amplitude check
    NOWS_tr = read(os.path.join(datadir, 'NOWSENR.sac'))[0]
    NOWS_tr_mul = Trace(data=NOWS_tr.data * 10e9, header=NOWS_tr.stats)

    assert process.check_max_amplitude(NOWS_tr) is True
    assert process.check_max_amplitude(NOWS_tr_mul) is False

    # Check that our trim and window function doesn't affect the ending time
    # of this record
    org_time = UTCDateTime('2001-02-14T22:03:58')
    trim = process.trim_total_window(NOWS_tr, org_time, 32.7195).stats.endtime
    assert NOWS_tr.stats.endtime == trim
Example #29
    def _process(self, input):
        stations = defaultdict(lambda: [])
        # first group all files by station
        for filename in os.listdir(input):
            m = self.pattern.match(filename)
            if m:
                stations[m.group(1)].append(m.group(2))

        # now read and create an obspy stream from each group
        for s, fs in stations.items():
            streaming = obspy.Stream()
            for f in fs:
                filename = os.path.join(input,
                                        '{}.{}.{}'.format(s, f, self.ext))
                streaming = streaming + read(filename)
            self.write('output', [streaming, self.label])
Example #30
    def requestEventWaveformTraces(self, event):
        """
        Method for requesting event data from waveformLocation. Implement this to fetch event waveform data
        """
        streams = Stream()
        if self.__checkStatus() and event.waveform_h:
            info = event.waveform_h[0].waveform_info.lower()
            wfdisc_file_start_i = info.find('.wfdisc')
            wfdisc_file = info[wfdisc_file_start_i - 13:wfdisc_file_start_i + 7]

            year = wfdisc_file[:4]
            yearjul = wfdisc_file[:7]
            if os.path.exists('{0}/{1}/{2}/{3}'.format(self.__file_path, year,
                                                       yearjul, wfdisc_file)):
                streams += read('{0}/{1}/{2}/{3}'.format(
                    self.__file_path, year, yearjul, wfdisc_file))

        return streams
Example #31
def read_fdsn(filename):
    """Read Obspy data file (SAC, MiniSEED, etc).

    Args:
        filename (str):
            Path to data file.
        kwargs (ref):
            Other arguments will be ignored.
    Returns:
        Stream: StationStream object.
    """
    logging.debug("Starting read_fdsn.")
    if not is_fdsn(filename):
        raise Exception('%s is not a valid Obspy file format.' % filename)

    streams = []
    tstream = read(filename)
    xmlfile = _get_station_file(filename, tstream)
    inventory = read_inventory(xmlfile)
    traces = []
    for ttrace in tstream:
        trace = StationTrace(data=ttrace.data,
                             header=ttrace.stats,
                             inventory=inventory)
        location = ttrace.stats.location
        network = ttrace.stats.network
        if network in LOCATION_CODES:
            codes = LOCATION_CODES[network]
            if location in codes:
                sdict = codes[location]
                if sdict['free_field']:
                    trace.stats.standard.structure_type = 'free_field'
                else:
                    trace.stats.standard.structure_type = sdict['description']
        head, tail = os.path.split(filename)
        trace.stats['standard']['source_file'] = tail or os.path.basename(head)
        traces.append(trace)
    stream = StationStream(traces=traces)
    streams.append(stream)

    return streams
Example #32
def extractDataFromArchive(t1,
                           t2,
                           fileNames,
                           fileTimes,
                           wantedStaChas=[['*', '*']]):
    # Return nothing if there is no data
    if len(fileTimes) == 0:
        return EmptyStream()
    # Catch the case where the asked time range is completely outside the archive data availability
    if t1 > fileTimes[-1, 1] or t2 < fileTimes[0, 0]:
        return EmptyStream()
    # Figure out what set of files are wanted
    collectArgs = np.where((fileTimes[:, 0] <= t2)
                           & (fileTimes[:, 1] >= t1))[0]
    stream = EmptyStream()
    flagged = False
    # Read in all of the information
    for aFile in fileNames[collectArgs]:
        # Flag to user if the archive structure has changed
        if not os.path.exists(aFile):
            flagged = True
            continue
        aStream = read(aFile)
        for aSta, aCha in wantedStaChas:
            stream += aStream.select(station=aSta, channel=aCha)
    if flagged:
        print('Archive structure has changed, reload the current archive')
    # Merge traces which are adjacent
    try:
        stream.merge(method=1)
    except Exception:
        stream = RemoveOddRateTraces(stream)
        stream.merge(method=1)
    # If any trace has masked values, split
    if True in [isinstance(tr.data, np.ma.masked_array) for tr in stream]:
        stream = stream.split()
    # Trim to wanted times
    stream.trim(UTCDateTime(t1), UTCDateTime(t2))
    return stream
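Because the function indexes fileNames with a NumPy integer array and slices fileTimes by column, both are expected to be numpy arrays; a hypothetical call with placeholder paths and epoch times:

fileNames = np.array(['arc/day1.mseed', 'arc/day2.mseed'])
fileTimes = np.array([[1000.0, 2000.0],   # [start, end] per file
                      [2000.0, 3000.0]])
st = extractDataFromArchive(1500.0, 2500.0, fileNames, fileTimes,
                            wantedStaChas=[['STA*', 'HH?']])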
Example #33
def is_fdsn(filename):
    """Check to see if file is a format supported by Obspy (not KNET).

    Args:
        filename (str): Path to possible Obspy format.
    Returns:
        bool: True if obspy supported, otherwise False.
    """
    logging.debug("Checking if format is Obspy.")
    if not os.path.isfile(filename):
        return False
    try:
        stream = read(filename)
        if stream[0].stats._format in IGNORE_FORMATS:
            return False
        xmlfile = _get_station_file(filename, stream)
        if not os.path.isfile(xmlfile):
            return False
        return True
    except Exception:
        return False

Example #34
def read(filename, format='MSEED', **kwargs):
    if isinstance(filename, Path):
        filename = str(filename)

    if format in ENTRY_POINTS['waveform'].keys():
        format_ep = ENTRY_POINTS['waveform'][format]
        read_format = load_entry_point(
            format_ep.dist.key, 'obspy.plugin.waveform.%s' % format_ep.name,
            'readFormat')

        st = Stream(stream=read_format(filename, **kwargs))

        # making sure the channel names are upper case
        trs = []
        for tr in st:
            tr.stats.channel = tr.stats.channel.upper()
            trs.append(tr.copy())

        st.traces = trs

        return st
    else:
        return Stream(stream=obsstream.read(filename, format=format, **kwargs))
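A usage sketch: when 'MSEED' is registered in ENTRY_POINTS['waveform'] the plugin reader is used and channel codes are upper-cased; otherwise the call falls through to obspy's own read (paths are placeholders):

from pathlib import Path

st = read(Path('data/event.mseed'))         # plugin path, default format='MSEED'
st2 = read('data/event.sac', format='SAC')  # falls back to obspy's reader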
Example #35
def is_fdsn(filename):
    """Check to see if file is a format supported by Obspy (not KNET).

    Args:
        filename (str): Path to possible Obspy format.
    Returns:
        bool: True if obspy supported, otherwise False.
    """
    logging.debug("Checking if format is Obspy.")
    if not os.path.isfile(filename):
        return False
    try:
        stream = read(filename)
        if stream[0].stats._format in IGNORE_FORMATS:
            return False
        xmlfile = _get_station_file(filename, stream)
        if not os.path.isfile(xmlfile):
            return False
        return True
    except Exception:
        return False

Example #36
 def test_plotMultipleTraces(self):
     """
     Plots multiple traces underneath.
     """
     st = read()
     # 1 trace
     outfile = os.path.join(self.path, 'waveform_1_trace.png')
     st[0].plot(outfile=outfile, automerge=False)
     # 3 traces
     outfile = os.path.join(self.path, 'waveform_3_traces.png')
     st.plot(outfile=outfile, automerge=False)
     # 5 traces
     st = st[0] * 5
     outfile = os.path.join(self.path, 'waveform_5_traces.png')
     st.plot(outfile=outfile, automerge=False)
     # 10 traces
     st = st[0] * 10
     outfile = os.path.join(self.path, 'waveform_10_traces.png')
     st.plot(outfile=outfile, automerge=False)
     # 10 traces - huge numbers
     st = st[0] * 10
     for i, tr in enumerate(st):
         # scale data to have huge numbers
         st[i].data = tr.data * 10 ** i
     outfile = os.path.join(self.path, 'waveform_10_traces_huge.png')
     st.plot(outfile=outfile, automerge=False, equal_scale=False)
     # 10 traces - tiny numbers
     st = st[0] * 10
     for i, tr in enumerate(st):
         # scale data to have tiny numbers
         st[i].data = tr.data / (10 ** i)
     outfile = os.path.join(self.path, 'waveform_10_traces_tiny.png')
     st.plot(outfile=outfile, automerge=False, equal_scale=False)
     # 20 traces
     st = st[0] * 20
     outfile = os.path.join(self.path, 'waveform_20_traces.png')
     st.plot(outfile=outfile, automerge=False)
Example #37
 def test_plotMultipleTraces(self):
     """
     Plots multiple traces underneath.
     """
     st = read()
     # 1 trace
     outfile = os.path.join(self.path, 'waveform_1_trace.png')
     st[0].plot(outfile=outfile, automerge=False)
     # 3 traces
     outfile = os.path.join(self.path, 'waveform_3_traces.png')
     st.plot(outfile=outfile, automerge=False)
     # 5 traces
     st = st[0] * 5
     outfile = os.path.join(self.path, 'waveform_5_traces.png')
     st.plot(outfile=outfile, automerge=False)
     # 10 traces
     st = st[0] * 10
     outfile = os.path.join(self.path, 'waveform_10_traces.png')
     st.plot(outfile=outfile, automerge=False)
     # 10 traces - huge numbers
     st = st[0] * 10
     for i, tr in enumerate(st):
         # scale data to have huge numbers
         st[i].data = tr.data * 10**i
     outfile = os.path.join(self.path, 'waveform_10_traces_huge.png')
     st.plot(outfile=outfile, automerge=False, equal_scale=False)
     # 10 traces - tiny numbers
     st = st[0] * 10
     for i, tr in enumerate(st):
         # scale data to have tiny numbers
         st[i].data = tr.data / (10**i)
     outfile = os.path.join(self.path, 'waveform_10_traces_tiny.png')
     st.plot(outfile=outfile, automerge=False, equal_scale=False)
     # 20 traces
     st = st[0] * 20
     outfile = os.path.join(self.path, 'waveform_20_traces.png')
     st.plot(outfile=outfile, automerge=False)
Example #38
 def getFileLims(self):
     # Stop if no more files to load
     if self.nextFileIdx >= self.nFiles:
         self.cancelLoad()
         return
     nextFile = self.files[self.nextFileIdx]
     # Try the quick read function
     if not self.quickReadFail:
         try:
             startEnds = getMseedStartEnds(nextFile)
             minTime, maxTime = np.min(startEnds[:, 0]), np.max(startEnds[:, 1])
         except:
             print('Quick MSEED reading failed on file: ' + nextFile)
             self.quickReadFail = True
     if self.quickReadFail:
         try:
             st = read(nextFile, headonly=True, format='MSEED')
             stats = [tr.stats for tr in st]
             # Get min and max times from file
             minTime = np.min([stat.starttime for stat in stats]).timestamp
             maxTime = np.max([stat.endtime for stat in stats]).timestamp
         except:
             print('Regular MSEED reading failed on file: ' + nextFile)
             minTime, maxTime = 0, 0  # Assign 0,0 to be removed later
     # Get a measure of progress and update the GUI
     perc = int(100 * (self.nextFileIdx + 1) / self.nFiles)
     if perc > self.progressbar.value():
         self.progressbar.setValue(perc)
     # Update the GUI every Xth loop
     if self.nextFileIdx % 10 == 0:
         QtWidgets.qApp.processEvents()
     # Update the index, and add this min/max time
     self.nextFileIdx += 1
     self.fileMinMaxs.append([minTime, maxTime])
Example #39
  def section_plot(self, assoc_id, files, seconds_ahead = 5, record_length = 100, channel = 'Z'):
    
    station=self.assoc_db.query(Candidate.sta).filter(Candidate.assoc_id==assoc_id).all()
    sta_list=[]
    for sta, in station:
      sta_list.append(str(sta))
    station_single = self.assoc_db.query(Pick.sta).filter(Pick.assoc_id==assoc_id).filter(Pick.locate_flag == None).all()
    for sta, in station_single:
      sta_list.append(str(sta))
    #print sta_list
      
    eve=self.assoc_db.query(Associated).filter(Associated.id==assoc_id).first()
    # Earthquakes' epicenter
    eq_lat = eve.latitude
    eq_lon = eve.longitude
      
    # Reading the waveforms
    ST = Stream()
    for file in files:
      st = read(file)
      ST += st


    # some seismometers use channel codes like BH1, BH2 or BH3, so reassign the channel code as:
    if channel=='E' or channel=='e':
      Chan='E1'
    elif channel=='N' or channel=='n':
      Chan='N2'
    elif channel=='Z' or channel=='z':
      Chan='Z3'
    else:
      print('Please input component E, e, N, n, Z, or z, the default is Z')
    
    # Calculating distance from headers lat/lon
    ST_new = Stream()
    for tr in ST:
      if tr.stats.channel[2] in Chan and tr.stats.station in sta_list:
        if tr.stats.starttime.datetime < eve.ot and tr.stats.endtime.datetime > eve.ot:
          tr.trim(UTCDateTime(eve.ot-timedelta(seconds=seconds_ahead)), UTCDateTime(eve.ot+timedelta(seconds=record_length)))
          ST_new+=tr
    #print ST_new.__str__(extended=True)


    while True:
      ST_new_sta=[]
      for tr in ST_new:
        ST_new_sta.append(tr.stats.station)
      duplicate=list(set([tr for tr in ST_new_sta if ST_new_sta.count(tr)>1]))
      if not duplicate:
        break
      index=[i for (i,j) in enumerate(ST_new_sta) if j==duplicate[-1]]
      i=0
      while True:
        if ST_new[index[i]].stats.npts<ST_new[index[i+1]].stats.npts:
          del ST_new[index[i]]
          break
        elif ST_new[index[i]].stats.npts>=ST_new[index[i+1]].stats.npts:
          del ST_new[index[i+1]]
          break
    #print ST_new.__str__(extended=True)     


    ST_new.detrend('demean')
#     ST_new.filter('bandpass', freqmin=0.1, freqmax=100)

    factor = 10
    numRows = len(ST_new)
    segs = []
    ticklocs = []
    sta = []
    circle_x = []
    circle_y = []
    segs_picks = []
    ticklocs_picks = []
    for tr in ST_new:
      dmax=tr.data.max()
      dmin=tr.data.min()
      data=tr.data/(dmax-dmin)*factor
      t=np.arange(0,round(tr.stats.npts/tr.stats.sampling_rate/tr.stats.delta))*tr.stats.delta # due to the float point arithmetic issue, can not use "t=np.arange(0,tr.stats.npts/tr.stats.sampling_rate,tr.stats.delta)"
      segs.append(np.hstack((data[:,np.newaxis],t[:,np.newaxis])))
      lon,lat = self.tt_stations_db_3D.query(Station3D.longitude,Station3D.latitude).filter(Station3D.sta==tr.stats.station).first()
      distance = int(gps2DistAzimuth(lat,lon,eq_lat,eq_lon)[0]/1000.)  #gps2DistAzimuth return in meters, convert to km by /1000
#       distance=self.assoc_db.query(Candidate.d_km).filter(Candidate.assoc_id==assoc_id).filter(Candidate.sta==tr.stats.station).first()[0]#;print distance,tr.stats.station
      ticklocs.append(distance)
      sta.append(tr.stats.station)
      # DOT plot where picks are picked, notice that for vertical trace plot p is queried from Pick table, s from PickModified table
      # horizontal trace plot p and s queried from PickModified table
      if Chan=='Z3':
        picks_p=self.assoc_db.query(Pick.time).filter(Pick.assoc_id==assoc_id).filter(Pick.sta==tr.stats.station).filter(Pick.chan==tr.stats.channel).filter(Pick.phase=='P').all()
        if not picks_p:
          picks_p=self.assoc_db.query(PickModified.time).filter(PickModified.assoc_id==assoc_id).filter(PickModified.sta==tr.stats.station).filter(PickModified.phase=='P').all()
        picks_s=self.assoc_db.query(PickModified.time).filter(PickModified.assoc_id==assoc_id).filter(PickModified.sta==tr.stats.station).filter(PickModified.phase=='S').all()
#         print picks_p,picks_s
      else:
        picks_p=self.assoc_db.query(PickModified.time).filter(PickModified.assoc_id==assoc_id).filter(PickModified.sta==tr.stats.station).filter(PickModified.phase=='P').all()
        picks_s=self.assoc_db.query(PickModified.time).filter(PickModified.assoc_id==assoc_id).filter(PickModified.sta==tr.stats.station).filter(PickModified.phase=='S').all()
#         print picks_p,picks_s
      picks=picks_p+picks_s
#       picks=self.assoc_db.query(PickModified.time).filter(PickModified.assoc_id==assoc_id).filter(PickModified.sta==tr.stats.station).all()
      for pick, in picks:
        index=int((pick-eve.ot+timedelta(seconds=seconds_ahead)).total_seconds()/tr.stats.delta)#;print pick,eve.ot,index,len(data)
        circle_x.append(distance+data[index])
        circle_y.append(t[index])
        # BAR plot where picks are picked  
        t_picks=np.array([t[index],t[index]])
        data_picks=np.array([data.min(),data.max()])
        segs_picks.append(np.hstack((data_picks[:,np.newaxis],t_picks[:,np.newaxis])))
        ticklocs_picks.append(distance)
    tick_max=max(ticklocs)
    tick_min=min(ticklocs)
    offsets=np.zeros((numRows,2),dtype=float)
    offsets[:,0]=ticklocs
    offsets_picks=np.zeros((len(segs_picks),2),dtype=float)
    offsets_picks[:,0]=ticklocs_picks
    
    #lines=LineCollection(segs,offsets=offsets,transOffset=None,linewidths=.25,colors=[colorConverter.to_rgba(i) for i in ('b','g','r','c','m','y','k')]) #color='gray'
    lines=LineCollection(segs,offsets=offsets,transOffset=None,linewidths=.25,color='gray')
    #lines_picks=LineCollection(segs_picks,offsets=offsets_picks,transOffset=None,linewidths=1,color='r')
    lines_picks=LineCollection(segs_picks,offsets=offsets_picks,transOffset=None,linewidths=1,color='k')
    
    #print sta,ticklocs
    fig=plt.figure(figsize=(15,8))
    ax1 = fig.add_subplot(111)
    #ax1.plot(circle_x,circle_y,'o')  # blue dots indicating where to cross the waveforms
    ax1.plot(circle_x,circle_y,'o',c='gray')
    x0 = tick_min-(tick_max-tick_min)*0.1
    x1 = tick_max+(tick_max-tick_min)*0.1
    ylim(0, record_length)
    xlim(0, x1)
    ax1.add_collection(lines)
    ax1.add_collection(lines_picks)
    ax1.set_xticks(ticklocs)
    ax1.set_xticklabels(sta)
    ax1.invert_yaxis()
    ax1.xaxis.tick_top()
#     ax2 = ax1.twiny()
#     ax2.xaxis.tick_bottom()   
    plt.setp(plt.xticks()[1], rotation=45)
    #xlabel('Station (km)')
    xlabel('channel: '+channel, fontsize=18)
    ylabel('Record Length (s)', fontsize=18)
#     plt.title('Section Plot of Event at %s'%(tr.stats.starttime))
#     plt.tight_layout()
    
    plt.show()
Example #40
from obspy.core.util import gps2DistAzimuth

host = 'http://examples.obspy.org/'
# Files (fmt: SAC)
files = ['TOK.2011.328.21.10.54.OKR01.HHN.inv',
         'TOK.2011.328.21.10.54.OKR02.HHN.inv', 'TOK.2011.328.21.10.54.OKR03.HHN.inv',
         'TOK.2011.328.21.10.54.OKR04.HHN.inv', 'TOK.2011.328.21.10.54.OKR05.HHN.inv',
         'TOK.2011.328.21.10.54.OKR06.HHN.inv', 'TOK.2011.328.21.10.54.OKR07.HHN.inv',
         'TOK.2011.328.21.10.54.OKR08.HHN.inv', 'TOK.2011.328.21.10.54.OKR09.HHN.inv',
         'TOK.2011.328.21.10.54.OKR10.HHN.inv']
# Earthquakes' epicenter
eq_lat = 35.565
eq_lon = -96.792

# Reading the waveforms
st = Stream()
for waveform in files:
	st += read(host + waveform)

# Calculating distance from SAC headers lat/lon
# (trace.stats.sac.stla and trace.stats.sac.stlo)
for tr in st:
	tr.stats.distance = gps2DistAzimuth(tr.stats.sac.stla,
									tr.stats.sac.stlo, eq_lat, eq_lon)[0]
	# Setting Network name for plot title
	tr.stats.network = 'TOK'

st.filter('bandpass', freqmin=0.1, freqmax=10)
# Plot
st.plot(type='section', plot_dx=20e3, recordlength=100,
			time_down=True, linewidth=.25, grid_linewidth=.25)
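gps2DistAzimuth was later moved and renamed in ObsPy; on a current install the equivalent import and call would be:

from obspy.geodetics import gps2dist_azimuth
dist_m = gps2dist_azimuth(tr.stats.sac.stla, tr.stats.sac.stlo,
                          eq_lat, eq_lon)[0]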
Example #41
    def read_from_SDS(self, sds_root, net_name, sta_name, comp_name,
                      starttime=None, endtime=None, rmean=False, taper=False,
                      pad_value=None):
        """
        Read waveform data from an SDS structured archive.  Simple overlaps and
        adjacent traces are merged if possible.

        :param sds_root: root of the SDS archive
        :param net_name: network name
        :param sta_name: station name
        :param comp_name: component name
        :param starttime: Start time of data to be read.
        :param endtime: End time of data to be read.
        :param rmean: If ``True`` removes the mean from the data upon reading.
            If data are segmented, the mean will be removed from all segments
            individually.
        :param taper: If ``True`` applies a cosine taper to the data upon
            reading.  If data are segmented, tapers are applied to all segments
            individually.
        :param pad_value: If this parameter is set, points between
            ``starttime`` and the first point in the file, and points between
            the last point in the file and ``endtime``, will be set to
            ``pad_value``.  You may want to also use the ``rmean`` and
            ``taper`` parameters, depending on the nature of the data.

        :type sds_root: string
        :type net_name: string
        :type sta_name: string
        :type comp_name: string
        :type starttime: ``obspy.core.utcdatetime.UTCDateTime`` object,
            optional
        :type endtime: ``obspy.core.utcdatetime.UTCDateTime`` object, optional
        :type rmean: boolean, optional
        :type taper: boolean, optional
        :type pad_value: float, optional

        :raises UserWarning: If there are no data between ``starttime`` and
            ``endtime``

        """

        logging.info("Reading from SDS structure %s %s %s ..." %
                     (net_name, sta_name, comp_name))

        # Get the complete file list. If a directory, get all the filenames.
        filename = os.path.join(sds_root, net_name, sta_name,
                                "%s.D" % comp_name, "*")
        logging.debug("Reading %s between %s and %s" %
                      (filename, starttime.isoformat(), endtime.isoformat()))
        if os.path.isdir(glob.glob(filename)[0]):
            filename = os.path.join(filename, "*")
        file_glob = glob.glob(filename)

        # read header from all files to keep only those within the time limits
        fnames_within_times = []
        for fname in file_glob:
            st_head = stream.read(fname, headonly=True)
            # retrieve first_start and last_end time for the stream
            # without making any assumptions on order of traces
            first_start = st_head[0].stats.starttime
            last_end = st_head[0].stats.endtime
            # find earliest start time and latest end time in stream
            for tr in st_head:
                if tr.stats.starttime < first_start:
                    first_start = tr.stats.starttime
                if tr.stats.endtime > last_end:
                    last_end = tr.stats.endtime
            # add to list if start or end time are within our requested limits
            if (first_start < endtime and last_end > starttime):
                fnames_within_times.append(fname)

        logging.debug("Found %d files to read" % len(fnames_within_times))

        # now read the full data only for the relevant files
        st = Stream()
        for fname in fnames_within_times:
            st_tmp = read(fname, starttime=starttime, endtime=endtime)
            for tr in st_tmp:
                st.append(tr)
        # and merge nicely
        st.merge(method=-1)

        if st.count() > 1:  # There are gaps after sensible cleanup merging
            logging.info("File contains gaps:")
            st.printGaps()

        # apply rmean if requested
        if rmean:
            logging.info("Removing the mean from single traces.")
            st = stream_rmean(st)

        # apply taper if requested
        if taper:
            logging.info("Tapering single traces.")
            st = stream_taper(st)

        if pad_value is not None:
            try:
                first_tr = st.traces[0]
                # save delta (to save typing)
                delta = first_tr.stats.delta
                if (starttime is not None) and \
                   ((first_tr.stats.starttime - starttime) > delta):
                    logging.debug("Padding with value %f from %s to first\
                                   point in file at %s." %
                                  (pad_value,
                                   starttime.isoformat(),
                                   first_tr.stats.starttime.isoformat()))
                    # find the number of points from starttime to
                    # end of the first trace
                    npts_full_trace = \
                        int(np.floor((first_tr.stats.endtime -
                                      starttime) / delta))+1
                    # find the number of points of the padding section
                    n_pad = npts_full_trace-first_tr.stats.npts
                    # fill the full time range with pad value
                    tr_pad = np.zeros(npts_full_trace)+pad_value
                    # substitute in the data
                    tr_pad[n_pad:] = first_tr.data[:]
                    first_tr.data = tr_pad
                    first_tr.stats.starttime = starttime
                    first_tr.stats.npts = npts_full_trace
                    st.traces[0] = first_tr

                last_tr = st.traces[-1]
                # save delta (to save typing)
                delta = last_tr.stats.delta
                if (endtime is not None) and \
                   ((endtime - last_tr.stats.endtime) > delta):
                    logging.debug("Padding with value %f from last point\
                                   in file at %s to %s." %
                                  (pad_value,
                                   last_tr.stats.endtime.isoformat(),
                                   endtime.isoformat()))
                    # find the number of points from endtime to
                    # start of the last trace
                    npts_full_trace = \
                        int(np.floor((endtime -
                                      last_tr.stats.starttime) / delta))+1
                    # fill the full time range with padd value
                    tr_pad = np.zeros(npts_full_trace)+pad_value
                    # substitute in the data
                    tr_pad[0:last_tr.stats.npts] = last_tr.data[:]
                    last_tr.data = tr_pad
                    last_tr.stats.npts = npts_full_trace
                    st.traces[-1] = last_tr

            except IndexError:
                logging.warning('No data within time limits requested')
                raise UserWarning('No data within time limits requested.')

        try:
            self.stream = st
            self.trace = st.traces[0]
            self.proc = "None"
        except IndexError:
            raise UserWarning('No data within time limits requested.')
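A hypothetical call on an object wf exposing this method, assuming an SDS root laid out as &lt;root&gt;/&lt;net&gt;/&lt;sta&gt;/&lt;comp&gt;.D/ (all values below are placeholders):

from obspy import UTCDateTime

wf.read_from_SDS('/data/SDS', 'UW', 'ALCT', 'ENE',
                 starttime=UTCDateTime('2001-02-28T18:00:00'),
                 endtime=UTCDateTime('2001-02-28T19:00:00'),
                 rmean=True, taper=True, pad_value=0.0)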
Example #42
    def read_from_file(self, filename, format=None, starttime=None,
                       endtime=None, rmean=False, taper=False, pad_value=None):
        """
        Read waveform data from file.  Multiple traces are merged if they
        overlap exactly or are adjacent.

        :param filename: Waveform filename
        :param format: ``obspy`` format type (e.g. 'SAC', 'mseed'...)
        :param starttime: Start time of data to be read.
        :param endtime: End time of data to be read.
        :param rmean: If ``True`` removes the mean from the data upon reading.
            If data are segmented, the mean will be removed from all segments
            individually.
        :param taper: If ``True`` applies a cosine taper to the data upon
            reading.  If data are segmented, tapers are applied to all segments
            individually.
        :param pad_value: If this parameter is set, points between
            ``starttime`` and the first point in the file, and points between
            the last point in the file and ``endtime``, will be set to
            ``pad_value``.  You may want to also use the ``rmean`` and
            ``taper`` parameters, depending on the nature of the data.

        :type format: string
        :type starttime: ``obspy.core.utcdatetime.UTCDateTime`` object
        :type endtime: ``obspy.core.utcdatetime.UTCDateTime`` object
        :type rmean: boolean
        :type taper: boolean
        :type pad_value: float

        :raises UserWarning: If there are no data between ``starttime`` and
            ``endtime``

        """

        logging.debug("Reading from %s..." % filename)
        if format is not None:
            st = stream.read(filename, format, starttime=starttime,
                             endtime=endtime)
        else:
            st = stream.read(filename, starttime=starttime, endtime=endtime)

        st.merge(method=-1)

        if st.count() > 1:  # There are gaps after intelligent merge
            logging.info("File contains gaps:")
            st.printGaps()

        if rmean:
            st = stream_rmean(st)

        if taper:
            st = stream_taper(st)

        if pad_value is not None:
            try:

                first_tr = st.traces[0]
                # save delta (to save typing)
                delta = first_tr.stats.delta
                if (starttime is not None) and \
                   ((first_tr.stats.starttime - starttime) > delta):
                    logging.debug("Padding with value %f from %s to first\
                        point in file at %s." % (pad_value,
                        starttime.isoformat(),
                        first_tr.stats.starttime.isoformat()))
                    # find the number of points from starttime to
                    # end of the first trace
                    npts_full_trace = \
                        int(np.floor((first_tr.stats.endtime -
                                      starttime) / delta))+1
                    # find the number of points of the padding section
                    n_pad = npts_full_trace-first_tr.stats.npts
                    # fill the full time range with pad value
                    tr_pad = np.zeros(npts_full_trace)+pad_value
                    # substitute in the data
                    tr_pad[n_pad:] = first_tr.data[:]
                    first_tr.data = tr_pad
                    first_tr.stats.starttime = starttime
                    first_tr.stats.npts = npts_full_trace
                    st.traces[0] = first_tr

                last_tr = st.traces[-1]
                # save delta (to save typing)
                delta = last_tr.stats.delta
                if (endtime is not None) and \
                   ((endtime - last_tr.stats.endtime) > delta):
                    logging.debug("Padding with value %f from last point \
                    in file at %s to %s." % (pad_value,
                                             last_tr.stats.endtime.isoformat(),
                                             endtime.isoformat()))
                    # find the number of points from endtime to
                    # start of the last trace
                    npts_full_trace = \
                        int(np.floor((endtime -
                                      last_tr.stats.starttime) / delta))+1
                    # fill the full time range with pad value
                    tr_pad = np.zeros(npts_full_trace)+pad_value
                    # substitute in the data
                    tr_pad[0:last_tr.stats.npts] = last_tr.data[:]
                    last_tr.data = tr_pad
                    last_tr.stats.npts = npts_full_trace
                    st.traces[-1] = last_tr

            except IndexError:
                logging.warning('No data within time limits requested')
                raise UserWarning('No data within time limits requested.')

        try:
            self.stream = st
            self.trace = st.traces[0]
            self.proc = "None"
        except IndexError:
            raise UserWarning('No data within time limits requested.')
Example #43
	def analyzeRemove(self, seedpath):
		# ---------------------------------------	
		# Read MSEED files from query and analyze
		# ---------------------------------------	
		print "------pullTraces() Start------\n"	
		os.chdir(seedpath)
		filelist = sorted(os.listdir(seedpath), key=os.path.getctime)
		self.filelist = filelist
		filelen = len(filelist)
		stream = [0 for x in range(filelen)]	# streams = [][] where the second entry denotes the trace index
		i = filelen - 1
		while i >= 0:
			try:
				stream[i] = read(filelist[i])	# read MSEED files from query
			except Exception as e:
				print "Exception pullTraces() (read(MSEED)): " + str(e)
				sys.exit(0)
			i = i - 1

		# Check for masked arrays and 0 fill to create np.ndarray types
		stream = removeMask(stream)
		
		# Remove traces with sample rate = 0.0Hz => NFFT = 0 
		try:
			print "Removing traces with 0.0Hz sampling rate from stream..."
			streamlen = len(stream)	# number of streams (ie stream files)
			self.streamlen = streamlen	
			RM = False	
			i = 0	# reset indexing	
			print "streamlen = %s\n" % str(streamlen)		
			for i in range(streamlen):
				tracelen = stream[i].count()	# number of traces in stream
				if tracelen == 1:
					tr = stream[i][0]	# tmp trace
					if tr.stats['sampling_rate'] == 0.0:
						stream[i].remove(tr)
				elif tracelen > 1:
					j = 0	# stream will change sizes when trace is removed
					while j < stream[i].count():
						if j == stream[i].count():	
							break	# index = num traces 
						tr = stream[i][j]	# tmp trace
						if tr.stats['sampling_rate'] == 0.0:
							if not RM:	
								print "Removing empty traces:"	
								print stream[i]
								print	
								RM = True 
							stream[i].remove(tr)	# rm empty trace
							j = 0	# reset index for new size
						else:
							j = j + 1	# mv to next element	
					if RM:
						print "Final stream with removed traces:"
						print stream[i]
						print	
						RM = False
			self.stream = stream	# new stream with removed traces	
			print "-------pullTraces() Complete-------\n\n"	
		except KeyboardInterrupt:
			print "KeyboardInterrupt pullTraces(): terminating analyzeRemove() method"
			print "Method pullTraces() is terminated!"
			sys.exit(0)
		except Exception as e:
			print "Exception pullTraces(): " + str(e)
			print "Method pullTraces() is terminated!"
			sys.exit(0)
Example #44
def process(pro,
            seg,
            cha,
            sta,
            evt,
            dcen,
            station_inventory,
            amp_ratio_threshold,
            arrival_time_delay,
            savewindow_delta,
            taper_max_percentage,
            snr_window_length,
            remove_response_output,
            remove_response_water_level,
            bandpass_corners,
            bandpass_freq_max,
            bandpass_max_nyquist_ratio,
            multi_event_threshold1,
            multi_event_threshold1_duration,
            multi_event_threshold2,
            coda_window_length,
            coda_subwindow_length,
            coda_subwindow_overlap,
            coda_subwindow_amplitude_threshold,
            **kwargs):
    """
        Processes a single segment.
        This function is supposed to perform calculation and set the attributes of the `pro`
        object (it does not need to return it). These attributes are set in the `models` module
        and the value types should match (meaning an attribute reflecting an integer database
        column should be set with integer values only).
        Exceptions are handled externally and should be consulted by looking at the log messages
        stored in the output database (whose address is given in the `config.yaml` file)
        :param pro: a dict-like object (whose keys can be accessed also as attributes, so
        `pro['whatever]=6 is the same as `pro.whatever=4`) which has to be populated with values
        resulting from processing the given segment.
        :param seg: the segment (i.e., time series data) originating the processing. Its actual
        data can be accessed via `loads(seg.data)` which returns a Stream object. Additional
        fields are accessible via attributes and their names can be inspected via `seg.keys()`
        FIXME: write detailed doc!
        parameters and arguments must not conflict with imported functions
    """

    # convert to UTCDateTime for operations later:
    a_time = UTCDateTime(seg.arrival_time) + arrival_time_delay

    mseed = read(BytesIO(seg.data))  # the raw bytes are miniSEED, so wrap them in BytesIO

    if get_gaps(mseed):
        pro.has_gaps = True
    else:
        if len(mseed) != 1:
            raise ValueError("Mseed has more than one Trace")

        pro.has_gaps = False
        # work on the trace now. All functions will return Traces or scalars, which is better
        # so we can write them to database more easily
        mseed = mseed[0]

        ampratio = amp_ratio(mseed)
        pro.amplitude_ratio = ampratio
        if ampratio >= amp_ratio_threshold:
            pro.is_saturated = True
        else:
            pro.is_saturated = False
            mseed = bandpass(mseed, evt.magnitude, freq_max=bandpass_freq_max,
                             max_nyquist_ratio=bandpass_max_nyquist_ratio, corners=bandpass_corners)

            inv_obj = station_inventory

            mseed_acc = remove_response(mseed, inv_obj, output='ACC',
                                        water_level=remove_response_water_level)
            mseed_vel = remove_response(mseed, inv_obj, output='VEL',
                                        water_level=remove_response_water_level)
            mseed_disp = remove_response(mseed, inv_obj, output='DISP',
                                         water_level=remove_response_water_level)
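            # synthetic Wood-Anderson trace derived from the displacement series
            # (used below for PWA and stored as `wood_anderson_savewindow`)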
            mseed_wa = simulate_wa(mseed_disp)

            mseed_rem_resp = mseed_disp if remove_response_output == 'DISP' else \
                (mseed_vel if remove_response_output == 'VEL' else mseed_acc)

            mseed_cum = cumsum(mseed_rem_resp)

            cum_times = cumtimes(mseed_cum, 0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95)
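            # cumtimes returns one time per threshold passed above, in order:
            # [t05, t10, t25, t50, t75, t90, t95]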

            t05, t10, t90, t95 = cum_times[0], cum_times[1], cum_times[-2], \
                cum_times[-1]

            # mseed_acc_atime_t95 = mseed_acc.slice(a_time, t95)
            # mseed_vel_atime_t95 = mseed_vel.slice(a_time, t95)
            # mseed_wa_atime_t95 = mseed_wa.slice(a_time, t95)

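            # peak ground motions: (time, value) of the absolute maximum within [a_time, t95]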
            t_PGA, PGA = maxabs(mseed_acc, a_time, t95)
            t_PGV, PGV = maxabs(mseed_vel, a_time, t95)
            t_PWA, PWA = maxabs(mseed_wa, a_time, t95)

            # slice the trace here because the result is also used later ...
            mseed_rem_resp_t05_t95 = mseed_rem_resp.slice(t05, t95)

            fft_rem_resp_s = fft(mseed_rem_resp_t05_t95, taper_max_percentage=taper_max_percentage)
            fft_rem_resp_n = fft(mseed_rem_resp, fixed_time=a_time,
                                 window_in_sec=t05-t95,  # negative float (in seconds)
                                 taper_max_percentage=taper_max_percentage)
            # SNR between the (t05, t95) signal window and the pre-arrival noise window
            snr_rem_resp_t05_t95 = snr(fft_rem_resp_s, fft_rem_resp_n, signals_form='fft',
                                       in_db=False)

            fft_rem_resp_s2 = fft(mseed_rem_resp, t10, t90-t10,
                                  taper_max_percentage=taper_max_percentage)
            fft_rem_resp_n2 = fft(mseed_rem_resp, a_time, t10-t90,
                                  taper_max_percentage=taper_max_percentage)
            snr_rem_resp_t10_t90 = snr(fft_rem_resp_s2, fft_rem_resp_n2, signals_form='fft',
                                       in_db=False)

            fft_rem_resp_s3 = fft(mseed_rem_resp, a_time, snr_window_length,
                                  taper_max_percentage=taper_max_percentage)
            fft_rem_resp_n3 = fft(mseed_rem_resp, a_time, -snr_window_length,
                                  taper_max_percentage=taper_max_percentage)
            snr_rem_resp_fixed_window = snr(fft_rem_resp_s3, fft_rem_resp_n3,
                                            signals_form='fft', in_db=False)

            gme = get_multievent  # alias the function so the call below stays within the line width
            double_evt = \
                gme(mseed_cum, t05, t95,
                    threshold_inside_tmin_tmax_percent=multi_event_threshold1,
                    threshold_inside_tmin_tmax_sec=multi_event_threshold1_duration,
                    threshold_after_tmax_percent=multi_event_threshold2)

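            # traces to be stored: tapered slices spanning [a_time - delta, t95 + delta]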
            mseed_rem_resp_savewindow = mseed_rem_resp.slice(a_time-savewindow_delta,
                                                             t95+savewindow_delta).\
                taper(max_percentage=taper_max_percentage)

            wa_savewindow = mseed_wa.slice(a_time-savewindow_delta,
                                           t95+savewindow_delta).\
                taper(max_percentage=taper_max_percentage)

            # deltafreq = dfreq(mseed_rem_resp_t05_t95)

            # write stuff now to instance:
            pro.mseed_rem_resp_savewindow = dumps(mseed_rem_resp_savewindow)
            pro.fft_rem_resp_t05_t95 = dumps(fft_rem_resp_s)
            pro.fft_rem_resp_until_atime = dumps(fft_rem_resp_n)
            pro.wood_anderson_savewindow = dumps(wa_savewindow)
            pro.cum_rem_resp = dumps(mseed_cum)
            pro.pga_atime_t95 = PGA
            pro.pgv_atime_t95 = PGV
            pro.pwa_atime_t95 = PWA
            pro.t_pga_atime_t95 = dtime(t_PGA)
            pro.t_pgv_atime_t95 = dtime(t_PGV)
            pro.t_pwa_atime_t95 = dtime(t_PWA)
            pro.cum_t05 = dtime(t05)
            pro.cum_t10 = dtime(t10)
            pro.cum_t25 = dtime(cum_times[2])
            pro.cum_t50 = dtime(cum_times[3])
            pro.cum_t75 = dtime(cum_times[4])
            pro.cum_t90 = dtime(t90)
            pro.cum_t95 = dtime(t95)
            pro.snr_rem_resp_fixedwindow = snr_rem_resp_fixed_window
            pro.snr_rem_resp_t05_t95 = snr_rem_resp_t05_t95
            pro.snr_rem_resp_t10_t90 = snr_rem_resp_t10_t90
            # pro.amplitude_ratio = Column(Float)
            # pro.is_saturated = Column(Boolean)
            # pro.has_gaps = Column(Boolean)
            pro.double_event_result = double_evt[0]
            pro.secondary_event_time = dtime(double_evt[1])

            # WITH JESSIE IMPLEMENT CODA ANALYSIS:
            # pro.coda_tmax = Column(DateTime)
            # pro.coda_length_sec = Column(Float)
    return pro
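
The three SNR estimates above all follow the same pattern: FFT a signal window
after the arrival time and an equally long noise window before it, then compare
the spectra. A minimal self-contained sketch of that idea with plain numpy and an
obspy Trace `tr` (the helper name and the 30 s default window are illustrative,
not the snippet's own `fft`/`snr` utilities):

import numpy as np

def simple_snr(tr, a_time, window_sec=30.0):
    # equal-length windows after (signal) and before (noise) the arrival time
    signal = tr.slice(a_time, a_time + window_sec).copy()
    noise = tr.slice(a_time - window_sec, a_time).copy()
    for w in (signal, noise):
        w.detrend('linear')
        w.taper(max_percentage=0.05)
    sig_amp = np.abs(np.fft.rfft(signal.data))
    noi_amp = np.abs(np.fft.rfft(noise.data))
    # guard against a dead (all-zero) noise window
    return float(np.sum(sig_amp) / max(np.sum(noi_amp), 1e-20))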
Example #45
'''
filelist = os.listdir(seedpath)
filelist = filter(lambda x: not os.path.isdir(x), filelist)
newest = max(filelist, key=lambda x: os.stat(x).st_mtime)
stream = read(newest)
print stream
'''
seedpath = "/home/agonzales/Documents/ObsPy/examples/agonzales/SeedFiles/"
os.chdir(seedpath)
filelist = sorted(os.listdir(seedpath), key=os.path.getctime)
filelen = len(filelist)
stream = [0 for x in range(filelen)]	# one Stream per file; stream[i][j] is trace j of file i
i = filelen-1
while i >= 0: 
	try:	
		stream[i] = read(filelist[i])	# read MSEED files from query
	except Exception as e:
		print "******Exception found = " + str(e)	
	i = i - 1

streamlen = len(stream)	# number of streams (i.e. stream files) 
trace = {}	# dict of traces for each stream
nfft = 0	# number of fft points, necessary for some filtering
for i in range(streamlen):
	strsel = stream[i]	
	tracelen = len(strsel)	# number of traces in stream
	index = str(i)	
	if tracelen == 1:	# single trace stream
		trace[index] = strsel[0]	# trace 0 in stream i
		nfft = trace[index].count()	
	else:			# multiple trace stream