# imports shared by the examples collected below
import glob
import time
import warnings

import numpy as np
import obspy
from obspy import Trace, UTCDateTime, read
from obspy.signal.cross_correlation import (_find_peaks,
                                            correlate_stream_template,
                                            correlation_detector)
from obspy.signal.trigger import coincidence_trigger


def multiTemplateMatch(stTempLow, stLow, threshLow, stTempHigh, stHigh,
                       threshHigh, numComp, tolerance, distance):

    # make a couple of useful lists
    detectionsTemp = []
    detections = []

    # iterate through each channel
    for s in range(len(stTempLow)):

        # call the template matching function in each band
        detectionsLow, sl = correlation_detector(obspy.Stream(stLow[s]),
                                                 obspy.Stream(stTempLow[s]),
                                                 threshLow, distance)
        detectionsHigh, sh = correlation_detector(obspy.Stream(stHigh[s]),
                                                  obspy.Stream(stTempHigh[s]),
                                                  threshHigh, distance)

        # get all high frequency trigger times for today
        detHighTimes = []
        for i in range(len(detectionsHigh)):
            detHighTimes.append(detectionsHigh[i].get("time"))

        # loop through all low frequency triggers for today
        for i in range(len(detectionsLow)):
            detLowTime = detectionsLow[i].get("time")

            # time difference between the low freq trigger and all high freq
            # triggers; positive values mean the high freq trigger came first
            diffs = np.array(np.subtract(detLowTime, detHighTimes),
                             dtype=float)

            # discard pairs where the low freq trigger leads by more than the tolerance
            diffs[diffs < -1 * tolerance] = np.nan

            # save the low freq trigger if a high freq trigger is sufficiently close
            if len(diffs) > 0 and not np.all(np.isnan(diffs)):
                if np.nanmin(diffs) < tolerance:
                    detectionsTemp.append(detLowTime)

    # sort detections chronologically
    detectionsTemp.sort()

    # save detections only if they show up on the desired number of components
    if len(detectionsTemp) > 0:
        # check each window of numComp consecutive detections; the range end
        # is len - numComp + 1 so the final full window is included
        for d in range(len(detectionsTemp) - numComp + 1):
            if detectionsTemp[d + numComp - 1] - detectionsTemp[d] < tolerance:
                detections.append(detectionsTemp[d])

    return detections
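
A minimal usage sketch for the function above; the band limits, thresholds, tolerance, and distance below are illustrative placeholders rather than tuned values, and ObsPy's bundled example stream stands in for real continuous data:

import obspy

# build per-channel lists of filtered data and 10 s templates
st = obspy.read()
t0 = st[0].stats.starttime + 4

stLow, stHigh, stTempLow, stTempHigh = [], [], [], []
for tr in st:
    low = tr.copy().filter("bandpass", freqmin=0.05, freqmax=1.0)
    high = tr.copy().filter("bandpass", freqmin=1.0, freqmax=10.0)
    stLow.append(low)
    stHigh.append(high)
    stTempLow.append(low.copy().trim(t0, t0 + 10))
    stTempHigh.append(high.copy().trim(t0, t0 + 10))

detections = multiTemplateMatch(stTempLow, stLow, 0.5, stTempHigh, stHigh,
                                0.5, 2, 60, 10)
print(detections)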
Example #2
    def xcorr(self, data, template, detect_value=0.5):
        # pass_step = 0.1  # pass step in seconds

        # samples -> seconds
        # 128 -> 1 sec
        #  64 -> 0.5 sec
        #  32 -> 0.25 sec  (+)
        #  16 -> 0.125 sec

        detections, sims = correlation_detector(stream=data,
                                                templates=template,
                                                heights=detect_value,
                                                distance=self.corr_step,
                                                plot=None)
        if len(detections) != 0:
            # exclude self-correlation hits (similarity of ~1)
            detections = [v for v in detections if v['similarity'] < 0.999]
        return detections, sims
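
A sketch of how this method might be driven; the Detector class, corr_step value, and data below are hypothetical stand-ins (corr_step is assumed to be the minimum separation between detections in seconds):

import obspy
from obspy.signal.cross_correlation import correlation_detector

class Detector:
    # hypothetical host class for the xcorr method above
    def __init__(self, corr_step=0.25):
        self.corr_step = corr_step

det = Detector()
data = obspy.read()  # stands in for continuous data
template = data.slice(data[0].stats.starttime + 4,
                      data[0].stats.starttime + 8)
detections, sims = correlation_detector(stream=data, templates=template,
                                        heights=0.5, distance=det.corr_step,
                                        plot=None)
# as in xcorr above, near-unity hits are self correlations
detections = [v for v in detections if v['similarity'] < 0.999]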
Example #3
    def test_correlate_stream_template_and_correlation_detector(self):
        template = read().filter('highpass', freq=5).normalize()
        pick = UTCDateTime('2009-08-24T00:20:07.73')
        template.trim(pick, pick + 10)
        n1 = len(template[0])
        n2 = 100 * 3600  # 1 hour of samples at 100 Hz
        dt = template[0].stats.delta
        # shift one template Trace
        template[1].stats.starttime += 5
        stream = template.copy()
        np.random.seed(42)
        for tr, trt in zip(stream, template):
            tr.stats.starttime += 24 * 3600
            tr.data = np.random.random(n2) - 0.5  # noise
            if tr.stats.channel[-1] == 'Z':
                tr.data[n1:2 * n1] += 10 * trt.data
                tr.data = tr.data[:-n1]
            tr.data[5 * n1:6 * n1] += 100 * trt.data
            tr.data[20 * n1:21 * n1] += 2 * trt.data
        # make one template trace a bit shorter
        template[2].data = template[2].data[:-n1 // 5]
        # make two stream traces a bit shorter
        stream[0].trim(5, None)
        stream[1].trim(1, 20)
        # second template
        pick2 = stream[0].stats.starttime + 20 * n1 * dt
        template2 = stream.slice(pick2 - 5, pick2 + 5)
        # test cross correlation
        stream_orig = stream.copy()
        template_orig = template.copy()
        ccs = correlate_stream_template(stream, template)
        self.assertEqual(len(ccs), len(stream))
        self.assertEqual(stream[1].stats.starttime, ccs[0].stats.starttime)
        self.assertEqual(stream_orig, stream)
        self.assertEqual(template_orig, template)
        # test if traces with not matching seed ids are discarded
        ccs = correlate_stream_template(stream[:2], template[1:])
        self.assertEqual(len(ccs), 1)
        self.assertEqual(stream_orig, stream)
        self.assertEqual(template_orig, template)
        # test template_time parameter
        ccs1 = correlate_stream_template(stream, template)
        template_time = template[0].stats.starttime + 100
        ccs2 = correlate_stream_template(stream,
                                         template,
                                         template_time=template_time)
        self.assertEqual(len(ccs2), len(ccs1))
        delta = ccs2[0].stats.starttime - ccs1[0].stats.starttime
        self.assertAlmostEqual(delta, 100)
        # test if all three events found
        detections, sims = correlation_detector(stream, template, 0.2, 30)
        self.assertEqual(len(detections), 3)
        dtime = pick + n1 * dt + 24 * 3600
        self.assertAlmostEqual(detections[0]['time'], dtime)
        self.assertEqual(len(sims), 1)
        self.assertEqual(stream_orig, stream)
        self.assertEqual(template_orig, template)
        # test if xcorr stream is suitable for coincidence_trigger
        # result should be the same, return values related
        ccs = correlate_stream_template(stream, template)
        triggers = coincidence_trigger(None,
                                       0.2,
                                       -1,
                                       ccs,
                                       2,
                                       max_trigger_length=30,
                                       details=True)
        self.assertEqual(len(triggers), 2)
        for d, t in zip(detections[1:], triggers):
            self.assertAlmostEqual(np.mean(t['cft_peaks']), d['similarity'])
        # test template_magnitudes
        detections, _ = correlation_detector(stream,
                                             template,
                                             0.2,
                                             30,
                                             template_magnitudes=1)
        self.assertAlmostEqual(detections[1]['amplitude_ratio'], 100, delta=1)
        self.assertAlmostEqual(detections[1]['magnitude'],
                               1 + 8 / 3,
                               delta=0.01)
        self.assertAlmostEqual(detections[2]['amplitude_ratio'], 2, delta=2)
        detections, _ = correlation_detector(stream,
                                             template,
                                             0.2,
                                             30,
                                             template_magnitudes=True)
        self.assertAlmostEqual(detections[1]['amplitude_ratio'], 100, delta=1)
        self.assertNotIn('magnitude', detections[1])
        self.assertEqual(stream_orig, stream)
        self.assertEqual(template_orig, template)
        # test template names
        detections, _ = correlation_detector(stream,
                                             template,
                                             0.2,
                                             30,
                                             template_names='eq')
        self.assertEqual(detections[0]['template_name'], 'eq')
        detections, _ = correlation_detector(stream,
                                             template,
                                             0.2,
                                             30,
                                             template_names=['eq'],
                                             plot=True)
        self.assertEqual(detections[0]['template_name'], 'eq')

        # test similarity parameter with additional constraints
        # test details=True

        def simf(ccs):
            # stack the per-channel correlation traces into a matrix
            ccmatrix = np.array([tr.data for tr in ccs])
            # coincidence condition: more than one component above 0.2
            comp_thres = np.sum(ccmatrix > 0.2, axis=0) > 1
            similarity = ccs[0].copy()
            # mean correlation, zeroed where the coincidence condition fails
            similarity.data = np.mean(ccmatrix, axis=0) * comp_thres
            return similarity

        detections, _ = correlation_detector(stream,
                                             template,
                                             0.1,
                                             30,
                                             similarity_func=simf,
                                             details=True)
        self.assertEqual(len(detections), 2)
        for d in detections:
            self.assertAlmostEqual(np.mean(list(d['cc_values'].values())),
                                   d['similarity'])
        # test if properties from find_peaks function are returned
        detections, sims = correlation_detector(stream,
                                                template,
                                                0.1,
                                                30,
                                                threshold=0.16,
                                                details=True,
                                                similarity_func=simf)
        try:
            from scipy.signal import find_peaks  # noqa
        except ImportError:
            self.assertEqual(len(detections), 2)
            self.assertNotIn('left_threshold', detections[0])
        else:
            self.assertEqual(len(detections), 1)
            self.assertIn('left_threshold', detections[0])
        # also check the _find_peaks function
        distance = int(round(30 / sims[0].stats.delta))
        indices = _find_peaks(sims[0].data, 0.1, distance, distance)
        self.assertEqual(len(indices), 2)
        # test distance parameter
        detections, _ = correlation_detector(stream, template, 0.2, 500)
        self.assertEqual(len(detections), 1)
        # test more than one template
        # just 2 detections for first template, because second template has
        # a higher similarity for third detection
        templates = (template, template2)
        templatetime2 = pick2 - 10
        template_times = (template[0].stats.starttime, templatetime2)
        detections, _ = correlation_detector(stream,
                                             templates, (0.2, 0.3),
                                             30,
                                             plot=stream,
                                             template_times=template_times,
                                             template_magnitudes=(2, 5))
        self.assertGreater(len(detections), 0)
        self.assertIn('template_id', detections[0])
        detections0 = [d for d in detections if d['template_id'] == 0]
        self.assertEqual(len(detections0), 2)
        self.assertEqual(len(detections), 3)
        self.assertAlmostEqual(detections[2]['similarity'], 1)
        self.assertAlmostEqual(detections[2]['magnitude'], 5)
        self.assertEqual(detections[2]['time'], templatetime2)
        # test if everything is correct if template2 and stream do not have
        # any ids in common
        templates = (template, template2[2:])
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            detections, sims = correlation_detector(
                stream[:1],
                templates,
                0.2,
                30,
                plot=True,
                template_times=templatetime2,
                template_magnitudes=2)
        detections0 = [d for d in detections if d['template_id'] == 0]
        self.assertEqual(len(detections0), 3)
        self.assertEqual(len(detections), 3)
        self.assertEqual(len(sims), 2)
        self.assertIsInstance(sims[0], Trace)
        self.assertIs(sims[1], None)
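
The simf helper above illustrates the contract of similarity_func: it receives the Stream of per-channel correlation Traces and must return a single similarity Trace. A standalone sketch of the same coincidence idea (the 0.2 threshold and two-component requirement are arbitrary choices carried over from the test):

import numpy as np
import obspy
from obspy.signal.cross_correlation import correlation_detector

def mean_cc_coincidence(ccs):
    # average the per-channel correlations, zeroing samples where fewer
    # than two channels exceed a correlation of 0.2
    ccmatrix = np.array([tr.data for tr in ccs])
    mask = np.sum(ccmatrix > 0.2, axis=0) >= 2
    similarity = ccs[0].copy()
    similarity.data = np.mean(ccmatrix, axis=0) * mask
    return similarity

stream = obspy.read()  # bundled example data as a stand-in
template = stream.slice(stream[0].stats.starttime + 4,
                        stream[0].stats.starttime + 8)
detections, _ = correlation_detector(stream, template, 0.1, 10,
                                     similarity_func=mean_cc_coincidence)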
Example #4
def multiTemplateMatchDeprecated(path, stat, chans, tempLims, freqLow,
                                 threshLow, freqHigh, threshHigh, tolerance):

    # internal control over detection plotting
    plotting = False

    # define cross correlation search parameters
    distance = 10

    # make a couple of useful lists
    detectionArray = [[], [], []]
    allDetections = []

    # make vector of all filenames for a single channel
    fileMat = []
    filePath = "/media/Data/Data/PIG/MSEED/noIR/" + stat + "/" + chans[0] + "/*"
    files = glob.glob(filePath)
    files.sort()
    fileMat.extend(files)

    # specify a specific file (for testing)
    fileMat = [
        "/media/Data/Data/PIG/MSEED/noIR/PIG2/" + chans[0] + "/2012-05-22." +
        stat + "." + chans[0] + ".noIR.MSEED"
    ]

    # get templates for low and high frequency bands
    stTempLow = makeTemplates(path, stat, "H*", tempLims, freqLow)
    stTempHigh = makeTemplates(path, stat, "H*", tempLims, freqHigh)

    # loop through all files; we will replace the channel string with a wildcard as we go
    for f in range(len(fileMat)):

        timer = time.time()

        # get filename from full path
        fname = fileMat[f]

        # make filename with wildcard channel
        fname = fname.replace(chans[0], "H*")

        # pull out day string for user output
        day = fname.split("/")[9].split(".")[0]

        # read files and do basic preprocessing
        stRaw = obspy.read(fname)
        stRaw.detrend("demean")
        stRaw.detrend("linear")
        stRaw.taper(max_percentage=0.01, max_length=10.)

        # copy the file
        stLow = stRaw.copy()
        stHigh = stRaw.copy()

        # filter the data to each band
        stLow.filter("bandpass", freqmin=freqLow[0], freqmax=freqLow[1])
        stHigh.filter("bandpass", freqmin=freqHigh[0], freqmax=freqHigh[1])

        # call the template matching function in each band
        detectionsLow, sl = correlation_detector(stLow, stTempLow, threshLow,
                                                 distance)
        detectionsHigh, sh = correlation_detector(stHigh, stTempHigh,
                                                  threshHigh, distance)
        # at this point channel consolidation is roughly done;
        # carry out multiband consolidation

        # get all high frequency times
        detHighTimes = []
        for i in range(len(detectionsHigh)):
            detHighTimes.append(detectionsHigh[i].get("time"))

        for i in range(len(detectionsLow)):
            detLowTime = detectionsLow[i].get("time")
            # calculate difference between current detection and all high frequency detections
            diffs = abs(np.subtract(detLowTime, detHighTimes))

            # save the low frequency detection if the minimum difference is below the tolerance
            if len(diffs) > 0 and min(diffs) < tolerance:
                allDetections.append(detLowTime)
        runtime = time.time() - timer
        print("Finished detections for " + day + " in " + str(runtime) +
              " seconds\n")

    # sort detections chronologically
    allDetections.sort()

    # get indices of redundant detections
    removeInd = []
    for d in range(len(allDetections) - 1):
        if allDetections[d + 1] - allDetections[d] < 60:
            removeInd.append(d + 1)

    # replace redundant detections with arbitrary placeholder
    for r in removeInd:
        allDetections[r] = obspy.UTCDateTime(0)

    # make final list of detections and fill with all values not equal to placeholder
    finalDetections = []
    finalDetections[:] = [
        x for x in allDetections if x != obspy.UTCDateTime(0)
    ]

    # give the user some output
    print(
        str(len(finalDetections)) + " detections found over " +
        str(len(fileMat)) + " days \n")

    if plotting:

        # load all traces into one stream for plotting
        stHigh += stLow
        stHigh += stRaw
        plotWinLen = tempLims[1] - tempLims[0]

        # plot each detection
        for d in range(len(finalDetections)):
            stHigh.plot(starttime=finalDetections[d],
                        endtime=finalDetections[d] + plotWinLen,
                        equal_scale=False)

    return finalDetections
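
The matchers in this collection call a makeTemplates helper that is not shown. A minimal reconstruction of what it plausibly does, assuming tempLims is a (starttime, endtime) pair of UTCDateTime and freq is a (freqmin, freqmax) tuple, with the directory layout used above:

def makeTemplates(path, stat, chan, tempLims, freq):
    # hypothetical sketch: read the day file containing the template window,
    # preprocess and filter it like the continuous data, then trim
    day = tempLims[0].strftime("%Y-%m-%d")
    fname = glob.glob(path + "/" + stat + "/" + chan + "/" + day + "*")[0]
    st = obspy.read(fname)
    st.detrend("demean")
    st.detrend("linear")
    st.taper(max_percentage=0.01, max_length=10.)
    st.filter("bandpass", freqmin=freq[0], freqmax=freq[1])
    st.trim(tempLims[0], tempLims[1])
    return st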
Example #5
def multiTemplateMatch(path, stat, chans, tempLimsLow, freqLow, threshLow,
                       tempLimsHigh, freqHigh, threshHigh, tolerance):

    # internal control over detection plotting
    plotting = False

    # make a couple of useful lists
    detectionArray = [[], [], []]
    allDetections = []

    for c in range(len(chans)):

        # make empty vector of detections
        detections = []

        # get templates for low and high frequency bands
        stTempLow = makeTemplates(path, stat, chans[c], tempLimsLow, freqLow)
        stTempHigh = makeTemplates(path, stat, chans[c], tempLimsHigh,
                                   freqHigh)

        # define cross correlation search parameters
        distance = 10

        # make vector of all filenames
        fileMat = []
        filePath = "/media/Data/Data/PIG/MSEED/noIR/" + stat + "/" + chans[
            c] + "/*"
        files = glob.glob(filePath)
        files.sort()
        fileMat.append(files)

        # specify a specific file (for testing)
        fileMat = [[
            "/media/Data/Data/PIG/MSEED/noIR/PIG2/" + chans[c] +
            "/2012-05-22." + stat + "." + chans[c] + ".noIR.MSEED"
        ]]

        # loop through all files
        for f in range(len(fileMat[0])):

            try:

                timer = time.time()

                # get filename from full path
                fname = fileMat[0][f]

                # pull out day string for output
                day = fname.split("/")[9].split(".")[0]

                # read file
                stLow = obspy.read(fname)

                # basic preprocessing
                stLow.detrend("demean")
                stLow.detrend("linear")
                stLow.taper(max_percentage=0.01, max_length=10.)

                # copy the file
                stHigh = stLow.copy()
                stRaw = stLow.copy()

                # filter the data to each band
                stLow.filter("bandpass",
                             freqmin=freqLow[0],
                             freqmax=freqLow[1])
                stHigh.filter("bandpass",
                              freqmin=freqHigh[0],
                              freqmax=freqHigh[1])

                # call the template matching function in each band
                detectionsLow, sl = correlation_detector(
                    stLow, stTempLow, threshLow, distance)
                detectionsHigh, sh = correlation_detector(
                    stHigh, stTempHigh, threshHigh, distance)

                # matrix of time differences between every pair of low and
                # high frequency detections
                differences = np.zeros(
                    (len(detectionsLow), len(detectionsHigh)))

                # calculate detection time difference for each pair of detections
                for n in range(len(detectionsLow)):
                    for m in range(len(detectionsHigh)):
                        differences[n, m] = detectionsLow[n].get(
                            "time") - detectionsHigh[m].get("time")

                # discard pairs where the low frequency detection leads by
                # more than the search distance
                differences[differences < -1 * distance] = np.nan

                try:
                    # find closest low frequency detection for each high frequency detection
                    minDiffs = np.nanmin(differences, axis=0)

                    # keep if the time difference is less than the user-inputted threshold
                    detectDiffs = np.where(minDiffs < tolerance, 1, 0)

                    # fill detection vector with times returned by the high frequency template match
                    for d in range(len(detectDiffs)):
                        if detectDiffs[d] == 1:
                            detections.append(detectionsHigh[d].get("time"))
                    print("Found " + str(len(detections)) + " detections in " +
                          fname)
                except ValueError:
                    # np.nanmin raises ValueError when there are no detection pairs
                    print("No detections for " + fname + "\n")

                runtime = time.time() - timer
                print("Finished detections for " + fname + " in " +
                      str(runtime) + " seconds\n")

            except Exception:
                print("Skipping " + fname + "\n")

        # fill array with detections from each file
        detectionArray[c].append(detections)

    # iterate through first two channels
    for c in range(len(chans) - 1):

        # iterate through list of detections from channel c
        for i in range(len(detectionArray[c][0])):

            # iterate through list of detections from channel c+1
            for j in range(len(detectionArray[c + 1][0])):

                # if both detections are closely spaced in time, add to list of all detections
                if np.abs(detectionArray[c][0][i] -
                          detectionArray[c + 1][0][j]) < tolerance:
                    allDetections.append(detectionArray[c][0][i])

            if c == 0:
                # iterate through list of detections from channel c+2 if c == 0 (compare first and third channel)
                for j in range(len(detectionArray[c + 2][0])):

                    # if both detections are closely spaced in time, add to list of all detections
                    if np.abs(detectionArray[c][0][i] -
                              detectionArray[c + 2][0][j]) < tolerance:
                        allDetections.append(detectionArray[c + 2][0][j])


    # sort detections chronologically
    allDetections.sort()

    # get indices of redundant detections
    removeInd = []
    for d in range(len(allDetections) - 1):
        if allDetections[d + 1] - allDetections[d] < 60:
            removeInd.append(d + 1)

    # replace redundant detections with arbitrary placeholder
    for r in removeInd:
        allDetections[r] = obspy.UTCDateTime(0)

    # make final list of detections and fill with all values not equal to placeholder
    finalDetections = []
    finalDetections[:] = [
        x for x in allDetections if x != obspy.UTCDateTime(0)
    ]

    # give the user some output
    print(
        str(len(finalDetections)) + " detections found over " +
        str(len(fileMat[0])) + " days \n")

    if plotting:

        # load all traces into one stream for plotting
        stHigh += stLow
        stHigh += stRaw
        plotWinLen = tempLimsLow[1] - tempLimsLow[0]

        # plot each detection
        for d in range(len(finalDetections)):
            stHigh.plot(starttime=finalDetections[d],
                        endtime=finalDetections[d] + plotWinLen,
                        equal_scale=False)

    return finalDetections
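
A hypothetical invocation of this variant; the template window, band limits, and thresholds below are placeholders rather than tuned values:

tempLims = (obspy.UTCDateTime("2012-05-22T00:10:00"),
            obspy.UTCDateTime("2012-05-22T00:15:00"))
detections = multiTemplateMatch("/media/Data/Data/PIG/MSEED/noIR", "PIG2",
                                ["HHZ", "HHN", "HHE"], tempLims, (0.01, 0.1),
                                0.6, tempLims, (1.0, 10.0), 0.6, 60)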
Example #6
def multiTemplateMatch(path, stat, chans, tempLimsLow, freqLow, threshLow,
                       tempLimsHigh, freqHigh, threshHigh):

    # make empty vector of detections
    detections = []

    # this variant handles a single channel; use the first entry in chans
    c = 0

    # get templates for low and high frequency bands
    stTempLow = makeTemplates(path, stat, chans[c], tempLimsLow, freqLow)
    stTempHigh = makeTemplates(path, stat, chans[c], tempLimsHigh, freqHigh)

    # define cross correlation search parameters
    distance = 10
    tolerance = 10 * 60  # ten minutes, in seconds

    # make vector of all filenames
    fileMat = []
    filePath = "/media/Data/Data/PIG/MSEED/noIR/" + stat + "/" + chans[c] + "/*"
    files = glob.glob(filePath)
    files.sort()
    fileMat.append(files)

    # specify a specific file (for testing)
    fileMat = [["/media/Data/Data/PIG/MSEED/noIR/PIG2/" + chans[c] +
                "/2012-05-09." + stat + "." + chans[c] + ".noIR.MSEED"]]
                #"/media/Data/Data/PIG/MSEED/noIR/PIG2/HHZ/2012-08-21.PIG2.HHZ.noIR.MSEED",
                #"/media/Data/Data/PIG/MSEED/noIR/PIG2/HHZ/2012-11-10.PIG2.HHZ.noIR.MSEED"]]

    # loop through all files
    for f in range(len(fileMat[0])):

        # get filename from full path
        fname = fileMat[0][f]

        # pull out day string for output
        day = fname.split("/")[9].split(".")[0]

        # read file
        stLow = obspy.read(fname)

        # basic preprocessing
        stLow.detrend("demean")
        stLow.detrend("linear")
        stLow.taper(max_percentage=0.01, max_length=10.)

        # copy the file
        stHigh = stLow.copy()
        stRaw = stLow.copy()

        # filter the data to each band
        stLow.filter("bandpass",freqmin=freqLow[0],freqmax=freqLow[1])
        stHigh.filter("bandpass",freqmin=freqHigh[0],freqmax=freqHigh[1])

        # call the template matching function in each band
        detectionsLow, sl = correlation_detector(stLow, stTempLow, threshLow,
                                                 distance)
        detectionsHigh, sh = correlation_detector(stHigh, stTempHigh,
                                                  threshHigh, distance)

        # matrix of time differences for each pair of low/high frequency detections
        differences = np.zeros((len(detectionsLow), len(detectionsHigh)))

        # calculate detection time difference for each pair of detections
        for n in range(len(detectionsLow)):
            for m in range(len(detectionsHigh)):
                differences[n, m] = (detectionsLow[n].get("time") -
                                     detectionsHigh[m].get("time"))

        # discard pairs where the low frequency detection leads by more than 2 s
        differences[differences < -2] = np.nan

        # find the closest low frequency detection for each high frequency detection
        minDiffs = np.nanmin(differences, axis=0)

        # keep if the time difference is less than the user-inputted threshold
        detectDiffs = np.where(minDiffs < tolerance, 1, 0)

        # fill detection vector with times returned by high frequency template match
        for d in range(len(detectDiffs)):
            if detectDiffs[d] == 1:
                detections.append(detectionsHigh[d].get("time"))

    return detections