def test_coincidence(self, peaks):
    """Test the coincidence trigger across two channels."""
    triggers = coin_trig(
        peaks, [('a', 'Z'), ('b', 'Z')], samp_rate=10, moveout=3,
        min_trig=2, trig_int=1)
    # BUG FIX: the original `assert triggers, [(0.45, 100)]` parses as an
    # assert statement with the list as its *message*, so it only checked
    # truthiness and never compared against the expected value.
    assert triggers == [(0.45, 100)]
def test_coincidence(self):
    """Test the coincidence trigger."""
    from eqcorrscan.utils.findpeaks import coin_trig
    # Peaks per channel: (amplitude, sample) tuples for stations 'a', 'b'.
    channel_peaks = [
        [(0.5, 100), (0.3, 800), (0.3, 105)],
        [(0.4, 120), (0.7, 850)],
    ]
    stachans = [('a', 'Z'), ('b', 'Z')]
    triggers = coin_trig(
        channel_peaks, stachans, samp_rate=10, moveout=3, min_trig=2,
        trig_int=1)
    # Both channels fire within the moveout window around sample 100;
    # the averaged trigger should be the only one returned.
    self.assertEqual(triggers, [(0.45, 100)])
def _detect(detector, st, threshold, trig_int, moveout=0, min_trig=0,
            process=True, extract_detections=False, cores=1):
    """
    Detect within continuous data using the subspace method.

    Not to be called directly, use the detector.detect method.

    :type detector: eqcorrscan.core.subspace.Detector
    :param detector: Detector to use.
    :type st: obspy.core.stream.Stream
    :param st: Un-processed stream to detect within using the subspace \
        detector
    :type threshold: float
    :param threshold: Threshold value for detections between 0-1
    :type trig_int: float
    :param trig_int: Minimum trigger interval in seconds.
    :type moveout: float
    :param moveout: Maximum allowable moveout window for non-multiplexed,
        network detection.  See note.
    :type min_trig: int
    :param min_trig: Minimum number of stations exceeding threshold for \
        non-multiplexed, network detection. See note.
    :type process: bool
    :param process: Whether or not to process the stream according to the \
        parameters defined by the detector.  Default is to process the \
        data (True).
    :type extract_detections: bool
    :param extract_detections: Whether to extract waveforms for each \
        detection or not, if true will return detections and streams.
    :type cores: int
    :param cores: Number of cores to use for stream processing.

    :return: list of detections
    :rtype: list of eqcorrscan.core.match_filter.Detection
    """
    detections = []
    # First process the stream
    if process:
        Logger.info('Processing Stream')
        stream, stachans = _subspace_process(
            streams=[st.copy()], lowcut=detector.lowcut,
            highcut=detector.highcut, filt_order=detector.filt_order,
            sampling_rate=detector.sampling_rate,
            multiplex=detector.multiplex, stachans=detector.stachans,
            parallel=True, align=False, shift_len=None, reject=False,
            cores=cores)
    else:
        # Check the sampling rate at the very least
        for tr in st:
            if not tr.stats.sampling_rate == detector.sampling_rate:
                raise ValueError('Sampling rates do not match.')
        stream = [st]
        stachans = detector.stachans
    # BUG FIX: time.clock() was removed in Python 3.8; perf_counter() is
    # the documented replacement for wall-clock interval timing.
    outtic = time.perf_counter()
    # If multiplexed, how many samples do we increment by?
    if detector.multiplex:
        Nc = len(detector.stachans)
    else:
        Nc = 1
    # Here do all ffts
    fft_vars = _do_ffts(detector, stream, Nc)
    Logger.info('Computing detection statistics')
    Logger.info('Preallocating stats matrix')
    stats = np.zeros(
        (len(stream[0]),
         (len(stream[0][0]) // Nc) - (fft_vars[4] // Nc) + 1))
    for det_freq, data_freq_sq, data_freq, i in zip(
            fft_vars[0], fft_vars[1], fft_vars[2],
            np.arange(len(stream[0]))):
        # Calculate det_statistic in frequency domain
        stats[i] = _det_stat_freq(det_freq, data_freq_sq, data_freq,
                                  fft_vars[3], Nc, fft_vars[4],
                                  fft_vars[5])
        Logger.info('Stats matrix is shape %s' % str(stats[i].shape))
    trig_int_samples = detector.sampling_rate * trig_int
    Logger.info('Finding peaks')
    peaks = []
    for i in range(len(stream[0])):
        peaks.append(
            findpeaks.find_peaks2_short(
                arr=stats[i], thresh=threshold,
                trig_int=trig_int_samples))
    if not detector.multiplex:
        # Conduct network coincidence triggering
        peaks = findpeaks.coin_trig(
            peaks=peaks, samp_rate=detector.sampling_rate,
            moveout=moveout, min_trig=min_trig, stachans=stachans,
            trig_int=trig_int)
    else:
        peaks = peaks[0]
    if len(peaks) > 0:
        for peak in peaks:
            # peak is (detect_val, sample-index); convert index to time.
            detecttime = st[0].stats.starttime + \
                (peak[1] / detector.sampling_rate)
            rid = ResourceIdentifier(
                id=detector.name + '_' + str(detecttime),
                prefix='smi:local')
            ev = Event(resource_id=rid)
            cr_i = CreationInfo(author='EQcorrscan',
                                creation_time=UTCDateTime())
            ev.creation_info = cr_i
            # All detection info in Comments for lack of a better idea
            thresh_str = 'threshold=' + str(threshold)
            ccc_str = 'detect_val=' + str(peak[0])
            used_chans = 'channels used: ' +\
                ' '.join([str(pair) for pair in detector.stachans])
            ev.comments.append(Comment(text=thresh_str))
            ev.comments.append(Comment(text=ccc_str))
            ev.comments.append(Comment(text=used_chans))
            for stachan in detector.stachans:
                tr = st.select(station=stachan[0], channel=stachan[1])
                if tr:
                    net_code = tr[0].stats.network
                else:
                    net_code = ''
                pick_tm = detecttime
                wv_id = WaveformStreamID(network_code=net_code,
                                         station_code=stachan[0],
                                         channel_code=stachan[1])
                ev.picks.append(Pick(time=pick_tm, waveform_id=wv_id))
            detections.append(
                Detection(template_name=detector.name,
                          detect_time=detecttime,
                          no_chans=len(detector.stachans),
                          detect_val=peak[0], threshold=threshold,
                          typeofdet='subspace', threshold_type='abs',
                          threshold_input=threshold,
                          chans=detector.stachans, event=ev))
    outtoc = time.perf_counter()
    Logger.info('Detection took %s seconds' % str(outtoc - outtic))
    if extract_detections:
        detection_streams = extract_from_stream(st, detections)
        return detections, detection_streams
    return detections
def _detect(detector, st, threshold, trig_int, moveout=0, min_trig=0,
            process=True, extract_detections=False, debug=0):
    """
    Detect within continuous data using the subspace method.

    Not to be called directly, use the detector.detect method.

    :type detector: eqcorrscan.core.subspace.Detector
    :param detector: Detector to use.
    :type st: obspy.core.stream.Stream
    :param st: Un-processed stream to detect within using the subspace \
        detector
    :type threshold: float
    :param threshold: Threshold value for detections between 0-1
    :type trig_int: float
    :param trig_int: Minimum trigger interval in seconds.
    :type moveout: float
    :param moveout: Maximum allowable moveout window for non-multiplexed,
        network detection.  See note.
    :type min_trig: int
    :param min_trig: Minimum number of stations exceeding threshold for \
        non-multiplexed, network detection. See note.
    :type process: bool
    :param process: Whether or not to process the stream according to the \
        parameters defined by the detector.  Default is to process the \
        data (True).
    :type extract_detections: bool
    :param extract_detections: Whether to extract waveforms for each \
        detection or not, if true will return detections and streams.
    :type debug: int
    :param debug: Debug output level from 0-5.

    :return: list of detections
    :rtype: list of eqcorrscan.core.match_filter.DETECTION
    """
    from eqcorrscan.core import subspace_statistic
    detections = []
    # First process the stream
    if process:
        if debug > 0:
            print('Processing Stream')
        stream, stachans = _subspace_process(
            streams=[st.copy()], lowcut=detector.lowcut,
            highcut=detector.highcut, filt_order=detector.filt_order,
            sampling_rate=detector.sampling_rate,
            multiplex=detector.multiplex, stachans=detector.stachans,
            parallel=True, align=False, shift_len=None, reject=False)
    else:
        # Check the sampling rate at the very least
        for tr in st:
            if not tr.stats.sampling_rate == detector.sampling_rate:
                raise ValueError('Sampling rates do not match.')
        stream = [st]
        stachans = detector.stachans
    # BUG FIX: time.clock() was removed in Python 3.8; perf_counter() is
    # the documented replacement for wall-clock interval timing.
    outtic = time.perf_counter()
    if debug > 0:
        print('Computing detection statistics')
    stats = np.zeros(
        (len(stream[0]),
         len(stream[0][0]) - len(detector.data[0][0]) + 1),
        dtype=np.float32)
    for det_channel, in_channel, i in zip(detector.data, stream[0],
                                          np.arange(len(stream[0]))):
        # Hard typing in Cython loop requires float32 type.
        stats[i] = subspace_statistic.\
            det_statistic(detector=det_channel.astype(np.float32),
                          data=in_channel.data.astype(np.float32))
        if debug > 0:
            print(stats[i].shape)
        if debug > 3:
            plt.plot(stats[i])
            plt.show()
    # statistics
    if detector.multiplex:
        # Multiplexed data interleaves channels, so trigger separation
        # must be scaled by the channel count.
        trig_int_samples = (len(detector.stachans) *
                            detector.sampling_rate * trig_int)
    else:
        trig_int_samples = detector.sampling_rate * trig_int
    if debug > 0:
        print('Finding peaks')
    peaks = []
    for i in range(len(stream[0])):
        peaks.append(
            findpeaks.find_peaks2_short(arr=stats[i], thresh=threshold,
                                        trig_int=trig_int_samples,
                                        debug=debug))
    if not detector.multiplex:
        # Conduct network coincidence triggering
        peaks = findpeaks.coin_trig(
            peaks=peaks, samp_rate=detector.sampling_rate,
            moveout=moveout, min_trig=min_trig, stachans=stachans,
            trig_int=trig_int)
    else:
        peaks = peaks[0]
    if len(peaks) > 0:
        for peak in peaks:
            # Multiplexed sample indices count all channels, so divide by
            # the channel count as well as the sampling rate.
            if detector.multiplex:
                detecttime = st[0].stats.starttime + (
                    peak[1] /
                    (detector.sampling_rate * len(detector.stachans)))
            else:
                detecttime = st[0].stats.starttime + (
                    peak[1] / detector.sampling_rate)
            rid = ResourceIdentifier(
                id=detector.name + '_' + str(detecttime),
                prefix='smi:local')
            ev = Event(resource_id=rid)
            cr_i = CreationInfo(author='EQcorrscan',
                                creation_time=UTCDateTime())
            ev.creation_info = cr_i
            # All detection info in Comments for lack of a better idea
            thresh_str = 'threshold=' + str(threshold)
            ccc_str = 'detect_val=' + str(peak[0])
            used_chans = 'channels used: ' +\
                ' '.join([str(pair) for pair in detector.stachans])
            ev.comments.append(Comment(text=thresh_str))
            ev.comments.append(Comment(text=ccc_str))
            ev.comments.append(Comment(text=used_chans))
            for stachan in detector.stachans:
                tr = st.select(station=stachan[0], channel=stachan[1])
                if tr:
                    net_code = tr[0].stats.network
                else:
                    net_code = ''
                pick_tm = detecttime
                wv_id = WaveformStreamID(network_code=net_code,
                                         station_code=stachan[0],
                                         channel_code=stachan[1])
                ev.picks.append(Pick(time=pick_tm, waveform_id=wv_id))
            detections.append(
                DETECTION(detector.name, detecttime,
                          len(detector.stachans), peak[0], threshold,
                          'subspace', detector.stachans, event=ev))
    outtoc = time.perf_counter()
    print('Detection took %s seconds' % str(outtoc - outtic))
    if extract_detections:
        detection_streams = extract_from_stream(st, detections)
        return detections, detection_streams
    return detections