Example #1
    def waveletSegment(self, filtnum, wpmode="new"):
        """ Main analysis wrapper (segmentation in batch mode).
            Also do species-specific post-processing.
            Reads data pre-loaded onto self.WF.tree by self.readBatch.
            Args:
            1. filtnum: index of the current filter in self.spInfo (which is a list of filters...)
            2. wpmode: old/new/aa to indicate no/partial/full antialias
            Returns: list of lists of segments found, one per subfilter --> [[subfilter1 segments], [subfilter2 segments]]
        """
        opst = time.time()

        # No resampling here. Will read nodes from self.spInfo, which may already be adjusted

        ### find segments with each subfilter separately
        detected_allsubf = []
        for subfilter in self.spInfo[filtnum]["Filters"]:
            print("Identifying calls using subfilter", subfilter["calltype"])
            goodnodes = subfilter['WaveletParams']["nodes"]

            detected = self.detectCalls(self.WF, nodelist=goodnodes, subfilter=subfilter, rf=True, aa=wpmode!="old")

            # merge neighbours in order to convert the detections into segments
            # note: detected np[0 1 1 1] becomes [[1,3]]
            segmenter = Segment.Segmenter()
            detected = segmenter.convert01(detected)
            detected = segmenter.joinGaps(detected, maxgap=0)
            detected_allsubf.append(detected)
        print("Wavelet segmenting completed in", time.time() - opst)
        return detected_allsubf
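
A note on the two-step conversion above: detectCalls yields a 0/1 detection vector, convert01 turns runs of 1s into [start, end] pairs, and joinGaps merges pairs separated by at most maxgap. Below is a minimal standalone sketch of those two helpers (hypothetical reimplementations for illustration, not the Segment module's code; the inline comment's [[1,3]] suggests inclusive ends, while this sketch uses half-open [start, end)):

import numpy as np

def convert01(det):
    """Convert a 0/1 detection vector into [start, end) index pairs."""
    det = np.asarray(det)
    # pad with zeros so runs touching either edge produce clean transitions
    edges = np.flatnonzero(np.diff(np.concatenate(([0], det, [0]))))
    return [[s, e] for s, e in zip(edges[::2], edges[1::2])]

def joinGaps(segs, maxgap=0):
    """Merge segments whose gap to the previous one is <= maxgap."""
    merged = []
    for s in segs:
        if merged and s[0] - merged[-1][1] <= maxgap:
            merged[-1][1] = s[1]
        else:
            merged.append(list(s))
    return merged

print(joinGaps(convert01([0, 1, 1, 1]), maxgap=0))  # [[1, 4]]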
Example #2
File: CNN.py Project: smarsland/AviaNZ
    def findAllsegments(self, dirName):
        ''' dirName contains manually annotated GT .data files.
        Generates noise segments as the complement to the GT segments
        (i.e. every second not marked as a call is used as noise).
        :returns noise segments [[filename, seg, label], ...]
        '''
        manSegNum = 0
        noiseSegments = []
        segmenter = Segment.Segmenter()
        print('Generating GT...')
        for root, dirs, files in os.walk(dirName):
            for file in files:
                wavFile = os.path.join(root, file)
                if file.lower().endswith('.wav') and os.stat(
                        wavFile).st_size != 0 and file + '.data' in files:
                    # Generate GT files from annotations in dir1
                    segments = Segment.SegmentList()
                    segments.parseJSON(wavFile + '.data')
                    sppSegments = segments.getSpecies(self.species)
                    manSegNum += len(sppSegments)

                    # Currently, we ignore call types here and just
                    # look for all calls for the target species.
                    segments.exportGT(wavFile, self.species, resolution=1.0)

                    print('Determining noise...')
                    autoseg = Segment.SegmentList()
                    for sec in range(
                            math.floor(segments.metadata["Duration"]) - 1):
                        autoseg.addSegment([sec, sec + 1, 0, 0, []])
                    autoSegments = segmenter.joinGaps(autoseg, maxgap=0)

                    print("autoSeg, file", wavFile, autoSegments)
                    for segAuto in autoSegments:
                        noiseSegments.append(
                            [wavFile, segAuto,
                             len(self.calltypes)])

        if manSegNum == 0:
            print("ERROR: no segments for species %s found" % self.species)
            return

        return noiseSegments
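
The docstring describes noise as the complement of the GT segments; the excerpt above seeds 1 s candidates and leaves the actual subtraction to exportGT and downstream code. As a self-contained sketch of that complement operation (hypothetical helper, plain interval lists in seconds):

def complement_segments(gt_segs, duration):
    """Return the intervals of [0, duration] not covered by gt_segs.

    gt_segs: list of [start, end] pairs in seconds, assumed non-overlapping.
    """
    noise, cursor = [], 0.0
    for s, e in sorted(gt_segs):
        if s > cursor:
            noise.append([cursor, s])
        cursor = max(cursor, e)
    if cursor < duration:
        noise.append([cursor, duration])
    return noise

print(complement_segments([[2, 5], [8, 9]], 12))  # [[0, 2], [5, 8], [9, 12]]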
Example #3
def testMC():
    import numpy as np
    import wavio
    import pyqtgraph as pg
    from pyqtgraph.Qt import QtCore, QtGui
    # Segment and convertAmpltoSpec are provided by the enclosing module

    #wavobj = wavio.read('Sound Files/kiwi_1min.wav')
    wavobj = wavio.read('Sound Files/tril1.wav')
    fs = wavobj.rate
    data = wavobj.data  #[:20*fs]

    if data.dtype != 'float':
        data = data.astype('float')  #/ 32768.0

    if data.ndim > 1:
        data = data[:, 0]

    import SignalProc
    sp = SignalProc.SignalProc(data, fs, 256, 128)
    sg = sp.spectrogram(data=data,
                        window_width=256,
                        incr=128,
                        window='Hann',
                        mean_normalise=True,
                        onesided=True,
                        multitaper=False,
                        need_even=False)
    s = Segment.Segmenter(data, sg, sp, fs)

    #print np.shape(sg)

    #s1 = s.medianClip()
    s1, p, t = s.yin(returnSegs=True)
    app = QtGui.QApplication([])

    mw = QtGui.QMainWindow()
    mw.show()
    mw.resize(800, 600)

    win = pg.GraphicsLayoutWidget()
    mw.setCentralWidget(win)
    vb1 = win.addViewBox(enableMouse=False, enableMenu=False, row=0, col=0)
    im1 = pg.ImageItem(enableMouse=False)
    vb1.addItem(im1)
    im1.setImage(10. * np.log10(sg))

    # vb2 = win.addViewBox(enableMouse=False, enableMenu=False, row=1, col=0)
    # im2 = pg.ImageItem(enableMouse=False)
    # vb2.addItem(im2)
    # im2.setImage(c)

    vb3 = win.addViewBox(enableMouse=False, enableMenu=False, row=1, col=0)
    im3 = pg.ImageItem(enableMouse=False)
    vb3.addItem(im3)
    im3.setImage(10. * np.log10(sg))

    vb4 = win.addViewBox(enableMouse=False, enableMenu=False, row=2, col=0)
    im4 = pg.PlotDataItem(enableMouse=False)
    vb4.addItem(im4)
    im4.setData(data)

    for seg in s1:
        a = pg.LinearRegionItem()
        a.setRegion([
            convertAmpltoSpec(seg[0], fs, 128),
            convertAmpltoSpec(seg[1], fs, 128)
        ])
        #a.setRegion([seg[0],seg[1]])
        vb3.addItem(a, ignoreBounds=True)

    QtGui.QApplication.instance().exec_()
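
convertAmpltoSpec is used above to place the LinearRegionItem overlays: it maps a segment boundary in seconds onto a spectrogram column. Assuming standard STFT bookkeeping with hop size incr in samples (a guess at the helper's behaviour, not its verified source), it reduces to:

def convertAmpltoSpec(t, fs, incr):
    """Map a time in seconds to a spectrogram column index,
    given sample rate fs and hop size incr in samples."""
    return t * fs / incr

# e.g. 1.5 s at fs = 16000 with a 128-sample hop lands at column 187.5
print(convertAmpltoSpec(1.5, 16000, 128))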
Example #4
def cluster_by_dist(dir,
                    feature='we',
                    n_mels=24,
                    fs=0,
                    minlen=0.2,
                    f_1=0,
                    f_2=0,
                    denoise=False,
                    single=False,
                    distance='dtw',
                    max_clusters=10):
    """
    Given wav + annotation files,
        1) identify syllables using median clipping/ FIR
        2) generate features WE/MFCC/chroma
        3) calculate DTW distances and decide class/ generate new class
    :param dir: directory of audio and annotations
    :param feature: 'WE' or 'MFCC' or 'chroma'
    :param n_mels: number of mel coefficients for MFCC
    :param fs: preferred sampling frequency; 0 means it is derived from the annotations
    :param minlen: min syllable length in secs
    :param f_1: lower frequency bound; 0 means it is derived from the annotations
    :param f_2: upper frequency bound; 0 means it is derived from the annotations
    :param denoise: wavelet denoise
    :param single: if True, when a segment contains multiple syllables, add only one syllable to the cluster info
    :param distance: 'dtw' or 'xcor'
    :return: possible clusters
    """
    import Segment
    import SignalProc
    from scipy import signal

    # Get flow and fhigh for bandpass from annotations
    lowlist = []
    highlist = []
    srlist = []
    for root, dirs, files in os.walk(str(dir)):
        for file in files:
            if file.endswith('.wav') and file + '.data' in files:
                wavobj = wavio.read(os.path.join(root, file))
                srlist.append(wavobj.rate)
                # Read the annotation
                segments = Segment.SegmentList()
                segments.parseJSON(os.path.join(root, file + '.data'))
                for seg in segments:
                    lowlist.append(seg[2])
                    highlist.append(seg[3])
    print(lowlist)
    print(highlist)
    print(srlist)
    if f_1 == 0:
        f_1 = np.min(lowlist)
    if f_2 == 0:
        f_2 = np.median(highlist)

    if fs == 0:
        arr = np.array([4000, 8000, 16000])
        pos = np.abs(arr - np.median(highlist) * 2).argmin()
        fs = arr[pos]

    print('fs: ', fs)

    if fs > np.min(srlist):
        print(fs)
        fs = np.min(srlist)

    if fs < f_2 * 2 + 50:
        f_2 = fs // 2 - 50

    minlen_samples = minlen * fs

    print('Frequency band:', f_1, '-', f_2)
    print('fs: ', fs)

    # Find the lower and upper bounds (relevant to the frq range), when the range is given
    if feature == 'mfcc' and f_1 != 0 and f_2 != 0:
        mels = librosa.core.mel_frequencies(n_mels=n_mels,
                                            fmin=0.0,
                                            fmax=fs / 2,
                                            htk=False)
        ind_flow = (np.abs(mels - f_1)).argmin()
        ind_fhigh = (np.abs(mels - f_2)).argmin()

    elif feature == 'we' and f_1 != 0 and f_2 != 0:
        linear = np.linspace(0, fs / 2, 62)
        ind_flow = (np.abs(linear - f_1)).argmin()
        ind_fhigh = (np.abs(linear - f_2)).argmin()

    # Ready for clustering
    n_clusters = 0
    clusters = []
    for root, dirs, files in os.walk(str(dir)):
        for file in files:
            if file.endswith('.wav') and file + '.data' in files:
                # Read the annotation
                segments = Segment.SegmentList()
                segments.parseJSON(os.path.join(root, file + '.data'))

                # Sort the segments longest to shortest, to avoid seeding the
                # first class with only one member
                if len(segments) > 0 and segments[0][0] == -1:
                    del segments[0]
                segments_len = [seg[1] - seg[0] for seg in segments]
                inds = np.argsort(segments_len)[::-1]
                sortedsegments = [segments[i] for i in inds]

                # Now find syllables within each segment, median clipping
                for seg in sortedsegments:
                    if seg[0] == -1:
                        continue
                    audiodata, sr = loadFile(filename=os.path.join(root, file),
                                             duration=seg[1] - seg[0],
                                             offset=seg[0],
                                             fs=fs,
                                             denoise=denoise,
                                             f1=f_1,
                                             f2=f_2)
                    start = int(seg[0] * fs)
                    sp = SignalProc.SignalProc(audiodata, fs, 256, 128)
                    sgRaw = sp.spectrogram(audiodata, 256, 128)
                    segment = Segment.Segmenter(data=audiodata,
                                                sg=sgRaw,
                                                sp=sp,
                                                fs=fs,
                                                window_width=256,
                                                incr=128)
                    syls = segment.medianClip(thr=3,
                                              medfiltersize=5,
                                              minaxislength=9,
                                              minSegment=50)
                    if len(syls) == 0:  # Try again with FIR
                        syls = segment.segmentByFIR(threshold=0.05)
                    syls = segment.checkSegmentOverlap(syls)  # merge overlapping segments
                    # convert syllable times from seconds to samples
                    syls = [[int(s[0] * fs), int(s[1] * fs)] for s in syls]

                    if len(syls) == 0:
                        # Sanity check: when syllables are annotated tightly,
                        # median clipping may not detect anything
                        syls = [[0, int((seg[1] - seg[0]) * fs)]]
                    if len(syls) > 1:
                        # TODO: samples to seconds
                        syls = segment.joinGaps(syls, minlen_samples)  # merge short segments
                    if len(syls) == 1 and syls[0][1] - syls[0][0] < minlen_samples:
                        # Sanity check
                        syls = [[0, int((seg[1] - seg[0]) * fs)]]
                    temp = [[np.round((x[0] + start) / fs, 2),
                             np.round((x[1] + start) / fs, 2)] for x in syls]
                    print('\nCurrent:', seg, '--> syllables >', minlen, 'secs ', temp)

                    # Calculate features of the syllables in the current segment.
                    f = []
                    for s in syls:
                        data = audiodata[s[0]:s[1]]
                        if feature == 'mfcc':  # MFCC
                            mfcc = librosa.feature.mfcc(y=data,
                                                        sr=fs,
                                                        n_mfcc=n_mels)
                            if f_1 != 0 and f_2 != 0:
                                # Limit the frequency to the fixed range [f_1, f_2]
                                mfcc = mfcc[ind_flow:ind_fhigh, :]
                            mfcc_delta = librosa.feature.delta(mfcc,
                                                               mode='nearest')
                            mfcc = np.concatenate((mfcc, mfcc_delta), axis=0)
                            mfcc = scale(mfcc, axis=1)
                            # librosa.display.specshow(mfcc, sr=fs, x_axis='time')
                            # m = [i for sublist in mfcc for i in sublist]
                            f.append(mfcc)

                        elif feature == 'we':  # Wavelet Energy
                            ws = WaveletSegment.WaveletSegment(spInfo=[])
                            we = ws.computeWaveletEnergy(data=data,
                                                         sampleRate=fs,
                                                         nlevels=5,
                                                         wpmode='new')
                            we = we.mean(axis=1)
                            if f_1 != 0 and f_2 != 0:
                                # Limit the frequency to the fixed range [f_1, f_2]
                                we = we[ind_flow:ind_fhigh]
                            f.append(we)
                        elif feature == 'chroma':
                            chroma = librosa.feature.chroma_cqt(y=data, sr=fs)
                            # chroma = librosa.feature.chroma_stft(y=data, sr=fs)
                            chroma = scale(chroma, axis=1)
                            f.append(chroma)

                    matched = False
                    if n_clusters == 0:
                        print('**Case 1: First class')
                        newclass = class_create(label=n_clusters,
                                                syl=syls,
                                                features=f,
                                                f_low=seg[2],
                                                f_high=seg[3],
                                                segs=[
                                                    (os.path.join(root,
                                                                  file), seg)
                                                ],
                                                single=single,
                                                dist_method=distance)
                        clusters.append(newclass)
                        n_clusters += 1
                        print('Created new class: Class ', "'",
                              newclass["label"], "'", ',\tIn-class_d: ',
                              newclass["d"], '\tf_low: ', newclass["f_low"],
                              '\tf_high: ', newclass["f_high"])
                        matched = True
                    if not matched:
                        # See if the syllables in the current seg match with any existing class
                        # Keep track of the minimum distance to each class
                        min_ds = []
                        # Shuffle the clusters to avoid bias
                        clusters = random.sample(clusters, len(clusters))
                        for c in range(len(clusters)):
                            # features of the current class c
                            f_c = clusters[c]["features"]
                            # distances to the current class c
                            dist_c = np.zeros((len(f_c), len(f)))
                            for i in range(len(f_c)):
                                for j in range(len(f)):
                                    if distance == 'dtw':
                                        d, _ = librosa.sequence.dtw(
                                            f_c[i], f[j], metric='euclidean')
                                        dist_c[i, j] = d[d.shape[0] -
                                                         1][d.shape[1] - 1]
                                    elif distance == 'xcor':
                                        corr = signal.correlate(f_c[i],
                                                                f[j],
                                                                mode='full')
                                        dist_c[i, j] = np.sum(corr) / max(
                                            len(f_c[i]), len(f[j]))

                            # Min distance to the current class
                            print('Distance to Class ', clusters[c]["label"],
                                  ': ', np.amin(dist_c[dist_c != 0]),
                                  '( In-class distance: ', clusters[c]["d"],
                                  ')')
                            min_ds.append(np.amin(dist_c[dist_c != 0]))

                        # Now get the clusters sorted according to the min dist
                        ind = np.argsort(min_ds)
                        min_ds = np.sort(min_ds)
                        # make the cluster order
                        clusters = [clusters[i] for i in ind]
                        for c in range(len(clusters)):
                            if (clusters[c]["d"] != 0) and min_ds[c] < (
                                    clusters[c]["d"] + clusters[c]["d"] * 0.1):
                                print(
                                    '**Case 2: Found a match with a class > one syllable'
                                )
                                print('Class ', clusters[c]["label"],
                                      ', dist ', min_ds[c])
                                # Update this class
                                clusters[c] = class_update(
                                    cluster=clusters[c],
                                    newfeatures=f,
                                    newf_low=seg[2],
                                    newf_high=seg[3],
                                    newsyl=syls,
                                    newseg=(os.path.join(root, file), seg),
                                    single=single,
                                    dist_method=distance)
                                matched = True
                                break  # found a match, exit from the for loop, go to the next segment

                            elif c < len(clusters) - 1:
                                continue  # continue to the next class

                    # Most classes have been checked by now; if there is still no
                    # match, check the classes with only one data point
                    # (clusters[c]["d"] == 0). Note the arbitrary threshold.
                    if not matched:
                        if distance == 'dtw':
                            thr = 25
                        elif distance == 'xcor':
                            thr = 1000
                        for c in range(len(clusters)):
                            if clusters[c]["d"] == 0 and min_ds[c] < thr:
                                print('**Case 3: In-class dist of ',
                                      clusters[c]["label"], '=',
                                      clusters[c]["d"], 'and this example < ',
                                      thr, ' dist')
                                print('Class ', clusters[c]["label"],
                                      ', dist ', min_ds[c])
                                # Update this class
                                clusters[c] = class_update(
                                    cluster=clusters[c],
                                    newfeatures=f,
                                    newf_low=seg[2],
                                    newf_high=seg[3],
                                    newsyl=syls,
                                    newseg=(os.path.join(root, file), seg),
                                    single=single,
                                    dist_method=distance)
                                matched = True
                                break  # Break the search and go to the next segment

                    # If no match found yet, check the max clusters
                    if not matched:
                        if n_clusters == max_clusters:
                            print(
                                '**Case 4: Reached max classes, therefore adding current seg to the closest '
                                'class... ')
                            # min_ind = np.argmin(min_ds)
                            # classes are sorted in ascending order of distance already
                            for c in range(len(clusters)):
                                if min_ds[c] <= 4 * clusters[c][
                                        "d"] or clusters[c]["d"] == 0:
                                    print('Class ', clusters[c]["label"],
                                          ', dist ', min_ds[c],
                                          '(in-class distance:',
                                          clusters[c]["d"], ')')
                                    # Update this class
                                    clusters[c] = class_update(
                                        cluster=clusters[c],
                                        newfeatures=f,
                                        newf_low=seg[2],
                                        newf_high=seg[3],
                                        newsyl=syls,
                                        newseg=(os.path.join(root, file), seg),
                                        single=single,
                                        dist_method=distance)
                                    matched = True
                                    break
                            if not matched:
                                print('Class ', clusters[0]["label"],
                                      ', dist ', min_ds[0],
                                      '(in-class distance:', clusters[0]["d"],
                                      ')')
                                # Update this class
                                # TODO: don't update the class as it is an outlier?
                                clusters[0] = class_update(
                                    cluster=clusters[0],
                                    newfeatures=f,
                                    newf_low=seg[2],
                                    newf_high=seg[3],
                                    newsyl=syls,
                                    newseg=(os.path.join(root, file), seg),
                                    single=single,
                                    dist_method=distance)
                                matched = True
                            continue  # Continue to next segment

                    #  If still no luck, create a new class
                    if not matched:
                        print('**Case 5: None of Case 1-4')
                        newclass = class_create(label=n_clusters,
                                                syl=syls,
                                                features=f,
                                                f_low=seg[2],
                                                f_high=seg[3],
                                                segs=[
                                                    (os.path.join(root,
                                                                  file), seg)
                                                ],
                                                single=single,
                                                dist_method=distance)
                        print('Created a new class: Class ', n_clusters + 1)
                        clusters.append(newclass)
                        n_clusters += 1
                        print('Created new class: Class ', "'",
                              newclass["label"], "'", ',\tin-class_d: ',
                              newclass["d"], '\tf_low: ', newclass["f_low"],
                              '\tf_high: ', newclass["f_high"])

    print('\n\n--------------Clusters created-------------------')
    clustered_segs = []
    for c in range(len(clusters)):
        print('Class ', clusters[c]['label'], ': ', len(clusters[c]['segs']))
        for s in range(len(clusters[c]['segs'])):
            print('\t', clusters[c]['segs'][s])
            if single:
                clustered_segs.append([
                    clusters[c]['segs'][s][0], clusters[c]['segs'][s][1],
                    [clusters[c]['features'][s]], clusters[c]['label']
                ])
            else:
                clustered_segs.append([
                    clusters[c]['segs'][s][0], clusters[c]['segs'][s][1],
                    clusters[c]['label']
                ])

    # Clustered segments
    print(
        '\n\n################### Clustered segments ############################'
    )
    for s in clustered_segs:
        print(s)
    return clustered_segs, fs, n_clusters
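
The matching step hinges on the accumulated DTW cost between two feature matrices: librosa.sequence.dtw returns a cumulative cost matrix, and the code reads its bottom-right cell (d[d.shape[0] - 1][d.shape[1] - 1], i.e. d[-1, -1]). A minimal self-contained version with random stand-in features:

import numpy as np
import librosa

rng = np.random.default_rng(0)
# two hypothetical feature matrices, shaped (n_features, n_frames) as librosa expects
f_a = rng.standard_normal((20, 40))
f_b = rng.standard_normal((20, 55))

D, wp = librosa.sequence.dtw(f_a, f_b, metric='euclidean')
dist = D[-1, -1]  # accumulated alignment cost, as used for cluster matching above
print('DTW distance:', dist)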
Example #5
                        else:
                            continue

                    audiodata = loadFile(filename=os.path.join(root, file),
                                         duration=seg[1] - seg[0],
                                         offset=seg[0],
                                         fs=fs,
                                         denoise=False)
                    # minlen = minlen * fs
                    start = seg[0]
                    # start = int(seg[0] * fs)
                    sp = SignalProc.SignalProc(256, 128)
                    sp.data = audiodata
                    sp.sampleRate = fs
                    _ = sp.spectrogram(256, 128)
                    segment = Segment.Segmenter(sp, fs)
                    syls = segment.medianClip(thr=3,
                                              medfiltersize=5,
                                              minaxislength=9,
                                              minSegment=50)
                    if len(syls) == 0:  # Sanity check
                        segment = Segment.Segmenter(sp, fs)
                        syls = segment.medianClip(thr=2,
                                                  medfiltersize=5,
                                                  minaxislength=9,
                                                  minSegment=50)
                    syls = segment.checkSegmentOverlap(syls)  # merge overlapping segments
                    #syls = segment.joinGaps(syls, minlen)
                    # syls = [[int(s[0] * fs) + start, int(s[1] * fs + start)] for s in syls]
                    syls = [[s[0] + start, s[1] + start] for s in syls]
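
medianClip is retried above with a lower threshold when the first pass finds nothing. The underlying idea of median clipping is to keep spectrogram cells that exceed a multiple of both their row and column medians, then read segments off the surviving time columns. A rough sketch of that idea (assuming a (freq, time) array; illustrative only, not AviaNZ's actual implementation):

import numpy as np

def median_clip_mask(sg, thr=3.0):
    """Keep cells that exceed thr times their row median AND their column median."""
    rowmed = np.median(sg, axis=1, keepdims=True)  # per-frequency-row median
    colmed = np.median(sg, axis=0, keepdims=True)  # per-time-column median
    return (sg > thr * rowmed) & (sg > thr * colmed)

def columns_to_segments(mask):
    """Turn time columns with any surviving cell into [start, end) column pairs."""
    active = mask.any(axis=0).astype(int)
    edges = np.flatnonzero(np.diff(np.concatenate(([0], active, [0]))))
    return list(zip(edges[::2], edges[1::2]))

# usage: segs_in_columns = columns_to_segments(median_clip_mask(sg, thr=3))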
Example #6
    def detectFile(self, speciesStr, filters):
        """ Actual worker for a file in the detection loop.
            Does not return anything - for use with external try/catch
        """
        # Segment over pages separately, to allow dealing with large files smoothly:
        # TODO: page size fixed for now
        samplesInPage = 900 * 16000
        # (ceil division for large integers)
        numPages = (self.datalength - 1) // samplesInPage + 1

        # Actual segmentation happens here:
        for page in range(numPages):
            print("Segmenting page %d / %d" % (page + 1, numPages))
            start = page * samplesInPage
            end = min(start + samplesInPage, self.datalength)
            thisPageLen = (end - start) / self.sampleRate

            if thisPageLen < 2 and self.method != "Click":
                print("Warning: can't process short file ends (%.2f s)" %
                      thisPageLen)
                continue

            # Process
            if speciesStr == "Any sound":
                # Create spectrogram for median clipping etc
                if not hasattr(self, 'sp'):
                    self.sp = SignalProc.SignalProc(
                        self.config['window_width'], self.config['incr'])
                self.sp.data = self.audiodata[start:end]
                self.sp.sampleRate = self.sampleRate
                _ = self.sp.spectrogram(window='Hann',
                                        mean_normalise=True,
                                        onesided=True,
                                        multitaper=False,
                                        need_even=False)
                self.seg = Segment.Segmenter(self.sp, self.sampleRate)
                # thisPageSegs = self.seg.bestSegments()
                thisPageSegs = self.seg.medianClip(thr=3.5)
                # Post-process
                # 1. Delete windy segments
                # 2. Merge neighbours
                # 3. Delete short segments
                print("Segments detected: ", len(thisPageSegs))
                print("Post-processing...")
                maxgap = int(self.maxgap.value()) / 1000
                minlen = int(self.minlen.value()) / 1000
                maxlen = int(self.maxlen.value()) / 1000
                post = Segment.PostProcess(configdir=self.configdir,
                                           audioData=self.audiodata[start:end],
                                           sampleRate=self.sampleRate,
                                           segments=thisPageSegs,
                                           subfilter={},
                                           cert=0)
                if self.wind:
                    post.wind()
                post.joinGaps(maxgap)
                post.deleteShort(minlen)
                # avoid extra long segments (for Isabel)
                post.splitLong(maxlen)

                # adjust segment starts for 15min "pages"
                if start != 0:
                    for seg in post.segments:
                        seg[0][0] += start / self.sampleRate
                        seg[0][1] += start / self.sampleRate
                # attach mandatory "Don't Know"s etc and put on self.segments
                self.makeSegments(post.segments)
                del self.seg
                gc.collect()
            else:
                if self.method != "Click":
                    # read in the page and resample as needed
                    self.ws.readBatch(self.audiodata[start:end],
                                      self.sampleRate,
                                      d=False,
                                      spInfo=filters,
                                      wpmode="new")

                data_test = []
                click_label = 'None'
                for speciesix in range(len(filters)):
                    print("Working with recogniser:", filters[speciesix])
                    if self.method != "Click":
                        # note: using 'recaa' mode = partial antialias
                        thisPageSegs = self.ws.waveletSegment(speciesix,
                                                              wpmode="new")
                    else:
                        click_label, data_test, gen_spec = self.ClickSearch(
                            self.sp.sg, self.filename)
                        print('number of detected clicks = ', gen_spec)
                        thisPageSegs = []

                    # Post-process:
                    # CNN-classify, delete windy, rainy segments, check for FundFreq, merge gaps etc.
                    print("Segments detected (all subfilters): ", thisPageSegs)
                    if not self.testmode:
                        print("Post-processing...")
                    # postProcess currently operates on single-level list of segments,
                    # so we run it over subfilters for wavelets:
                    spInfo = filters[speciesix]
                    for filtix in range(len(spInfo['Filters'])):
                        if not self.testmode:
                            # TODO THIS IS FULL POST-PROC PIPELINE FOR BIRDS AND BATS
                            # -- Need to check how this should interact with the testmode
                            CNNmodel = None
                            if 'CNN' in spInfo:
                                if spInfo['CNN']['CNN_name'] in self.CNNDicts.keys():
                                    # This list contains the model itself, plus parameters for running it
                                    CNNmodel = self.CNNDicts[spInfo['CNN']['CNN_name']]

                            if self.method == "Click":
                                # bat-style CNN:
                                model = CNNmodel[0]
                                thr1 = CNNmodel[5][0]
                                thr2 = CNNmodel[5][1]
                                if click_label == 'Click':
                                    # we enter in the cnn only if we got a click
                                    sg_test = np.ndarray(
                                        shape=(np.shape(data_test)[0],
                                               np.shape(data_test[0][0])[0],
                                               np.shape(data_test[0][0])[1]),
                                        dtype=float)
                                    spec_id = []
                                    print('Number of file spectrograms = ',
                                          np.shape(data_test)[0])
                                    for j in range(np.shape(data_test)[0]):
                                        maxg = np.max(data_test[j][0][:])
                                        sg_test[j][:] = data_test[j][0][:] / maxg
                                        spec_id.append(data_test[j][1:3])

                                    # CNN classification of clicks
                                    x_test = sg_test
                                    test_images = x_test.reshape(
                                        x_test.shape[0], 6, 512, 1)
                                    test_images = test_images.astype('float32')

                                    # recovering labels
                                    predictions = model.predict(test_images)
                                    # predictions is an array (#images x #classes) whose entries are the class probabilities

                                    # Create a label (list of dicts with species, certs) for the single segment
                                    print('Assessing file label...')
                                    label = self.File_label(predictions,
                                                            thr1=thr1,
                                                            thr2=thr2)
                                    print('CNN detected: ', label)
                                    if len(label) > 0:
                                        # Convert the annotation into a full segment in self.segments
                                        thisPageStart = start / self.sampleRate
                                        self.makeSegments([
                                            thisPageStart, thisPageLen, label
                                        ])
                                else:
                                    # do not create any segments
                                    print("Nothing detected")
                            else:
                                # bird-style CNN and other processing:
                                post = Segment.PostProcess(
                                    configdir=self.configdir,
                                    audioData=self.audiodata[start:end],
                                    sampleRate=self.sampleRate,
                                    tgtsampleRate=spInfo["SampleRate"],
                                    segments=thisPageSegs[filtix],
                                    subfilter=spInfo['Filters'][filtix],
                                    CNNmodel=CNNmodel,
                                    cert=50)
                                print("Segments detected after WF: ",
                                      len(thisPageSegs[filtix]))
                                if self.wind and self.useWindF(
                                        spInfo['Filters'][filtix]['FreqRange'][0],
                                        spInfo['Filters'][filtix]['FreqRange'][1]):
                                    post.wind()

                                if CNNmodel:
                                    print('Post-processing with CNN')
                                    post.CNN()
                                if 'F0' in spInfo['Filters'][filtix] and 'F0Range' in spInfo['Filters'][filtix]:
                                    if spInfo['Filters'][filtix]["F0"]:
                                        print("Checking for fundamental frequency...")
                                        post.fundamentalFrq()

                                post.joinGaps(maxgap=spInfo['Filters'][filtix]
                                              ['TimeRange'][3])
                                post.deleteShort(minlength=spInfo['Filters']
                                                 [filtix]['TimeRange'][0])

                                # adjust segment starts for 15min "pages"
                                if start != 0:
                                    for seg in post.segments:
                                        seg[0][0] += start / self.sampleRate
                                        seg[0][1] += start / self.sampleRate
                                # attach filter info and put on self.segments:
                                self.makeSegments(post.segments,
                                                  self.species[speciesix],
                                                  spInfo["species"],
                                                  spInfo['Filters'][filtix])
                        else:
                            # TODO: THIS IS testmode. NOT USING ANY BAT STUFF THEN
                            # I.E. testmode not adapted to bats
                            post = Segment.PostProcess(
                                configdir=self.configdir,
                                audioData=self.audiodata[start:end],
                                sampleRate=self.sampleRate,
                                tgtsampleRate=spInfo["SampleRate"],
                                segments=thisPageSegs[filtix],
                                subfilter=spInfo['Filters'][filtix],
                                CNNmodel=None,
                                cert=50)
                            # adjust segment starts for 15min "pages"
                            if start != 0:
                                for seg in post.segments:
                                    seg[0][0] += start / self.sampleRate
                                    seg[0][1] += start / self.sampleRate
                            # attach filter info and put on self.segments:
                            self.makeSegments(post.segments,
                                              self.species[speciesix],
                                              spInfo["species"],
                                              spInfo['Filters'][filtix])
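
Two small patterns in detectFile are worth isolating: ceil division for the page count, and shifting page-local segment times by start / sampleRate before storing them, as both adjustment loops above do. A compact sketch:

def pages(datalength, samplesInPage=900 * 16000):
    """Yield (start, end) sample ranges; the page count is a ceil division."""
    numPages = (datalength - 1) // samplesInPage + 1
    for page in range(numPages):
        start = page * samplesInPage
        yield start, min(start + samplesInPage, datalength)

sampleRate = 16000
for start, end in pages(2 * 900 * 16000 + 5):
    offset = start / sampleRate  # add this to page-local segment times
    print(start, end, offset)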