Example #1

# SampleChunk and ChunkExtractor are assumed to be defined in the surrounding module
def sum_streams(stream1, stream2):
    """Add two sample streams together
    
    No attempt is made to align the time elements of the two streams if they
    do not match; the time values from stream1 are used for the result. The
    iterator terminates when either of the two streams ends.
    
    This is a generator function.
    
    stream1 (iterable of SampleChunk objects)
    stream2 (iterable of SampleChunk objects)
        The two sample streams to have their corresponding values added together.
        
    Yields a sample stream.
    """

    ex1 = ChunkExtractor(stream1)
    ex2 = ChunkExtractor(stream2)

    while True:
        c1 = ex1.next_chunk()
        c2 = ex2.next_chunk()

        if c1 is None or c2 is None:
            break

        # SampleChunk.samples are numpy arrays, so "+" adds them elementwise
        if len(c1.samples) != len(c2.samples):
            # Uneven chunk lengths; sum only the overlapping samples
            size = min(len(c1.samples), len(c2.samples))
            filt = c1.samples[:size] + c2.samples[:size]
        else:
            filt = c1.samples + c2.samples

        yield SampleChunk(filt, c1.start_time, c1.sample_period)
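A minimal usage sketch for sum_streams. The make_stream helper below is hypothetical, assuming SampleChunk(samples, start_time, sample_period) holds a numpy array of samples as in the code above:

import numpy as np

def make_stream(values, chunk_len, sample_period=1.0e-6):
    # Wrap a flat numpy array into a stream of SampleChunk objects
    for i in range(0, len(values), chunk_len):
        yield SampleChunk(values[i:i + chunk_len], i * sample_period, sample_period)

s1 = make_stream(np.ones(100), 30)
s2 = make_stream(np.full(100, 2.0), 30)
total = np.concatenate([c.samples for c in sum_streams(s1, s2)])
# total holds 100 values of 3.0; chunk times are taken from s1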
Example #3

import collections
import itertools

import numpy as np

# SampleChunk, ChunkExtractor, find_hist_peaks and find_bot_top_hist_peaks are
# assumed to be defined in the surrounding module
def find_logic_levels(samples, max_samples=20000, buf_size=2000):
    '''Automatically determine the binary logic levels of a digital signal.
    
    This function consumes up to max_samples from samples in an attempt
    to build a buffer containing a representative set of samples at high
    and low logic levels. Fewer than max_samples may be consumed if an edge
    is found and the remaining half of the buffer is filled before the
    max_samples threshold is reached.

    Warning: this function is insensitive to any edge transition that
    occurs within the first 100 samples. If the distribution of samples
    is heavily skewed toward one level over the other, None may be returned.
    To be reliable, a set of samples should contain more than one edge or
    a solitary edge after the 400th sample.
    
    samples (iterable of SampleChunk objects)
        An iterable sample stream. Each element is a SampleChunk containing
        an array of samples.

    max_samples (int)
        The maximum number of samples to consume from the samples iterable.
        This should be at least 2x buf_size and will be coerced to that value
        if it is less.
        
    buf_size (int)
        The maximum size of the sample buffer to analyze for logic levels.
        This should be less than max_samples. 
        
    Returns a 2-tuple (low, high) representing the logic levels of the samples.
    Returns None if fewer than two peaks are found in the sample histogram.

    '''

    # Get a minimal pool of samples containing both logic levels.
    # We use a statistical measure to find a likely first edge to minimize
    # the chance that our buffer doesn't contain any edge transitions.
    
    
    et_buf_size = buf_size // 10 # accumulate stats on 1/10 buf_size samples before edge search
    mvavg_size = 10
    noise_filt_size = 3
    
    S_FIND_EDGE = 0
    S_FINISH_BUF = 1
    
    state = S_FIND_EDGE
    sc = 0
    
    # Coerce max_samples to ensure that an edge occurring toward the end of an initial
    # buf_size samples can be centered in the buffer.
    if max_samples < 2 * buf_size:
        max_samples = 2 * buf_size


    # Perform an initial analysis to determine the edge threshold of the samples
    samp_it, samp_dly_it, et_it = itertools.tee(samples, 3)
    
    et_cex = ChunkExtractor(et_it)
    et_samples = et_cex.next_samples(et_buf_size)


    # We will create two moving averages of this pool of data.
    # The first has a short period (noise_filt_size = 3 samples) meant to smooth out
    # isolated spikes of noise. The second (mvavg_size = 10 samples) creates a smoother
    # waveform representing the local mean for the creation of the differences later.
    nf_mvavg_buf = collections.deque(maxlen=noise_filt_size) # noise filter
    noise_filtered = []
    et_mvavg_buf = collections.deque(maxlen=mvavg_size)
    et_mvavg = []
    for ns in et_samples:
        nf_mvavg_buf.append(ns)
        noise_filtered.append(sum(nf_mvavg_buf) / len(nf_mvavg_buf)) # calculate moving avg.
        et_mvavg_buf.append(ns)
        et_mvavg.append(sum(et_mvavg_buf) / len(et_mvavg_buf)) # calculate moving avg.

    # The magnitude difference between the samples and their moving average indicates where
    # steady state samples are and where edge transitions are. 
    mvavg_diff = [abs(x - y) for x, y in zip(noise_filtered, et_mvavg)]

    # The "noise" difference is the same as above but with the moving average delay removed.
    # This minimizes the peaks from edge transitions and is more representative of the noise level
    # in the signal.
    noise_diff = [abs(x - y) for x, y in zip(noise_filtered, et_mvavg[(mvavg_size//2)-1:])]
    noise_threshold = max(noise_diff) * 1.5
    
    # The noise threshold gives us a simple test for the presence of edges in the initial
    # pool of data. This will guide our determination of the edge threshold for filling the
    # edge detection buffer.
    edges_present = max(mvavg_diff) > noise_threshold

    # NOTE: This test for edges present will not work reliably for slowly changing edges
    # (highly oversampled) especially when the SNR is low (<20dB). This should not pose an issue
    # as in this case the edge_threshold (set with 5x multiplier instead of 0.6x) will stay low
    # enough to permit edge detection in the next stage.

    # The test for edges present will also fail when the initial samples are a periodic signal
    # with a short period relative to the sample rate. To cover this case we compute an
    # auto-correlation and look for more than one peak indicating the presence of periodicity.
    acorr_edges_present = False
    if not edges_present:
        norm_noise_filt = np.asarray(noise_filtered) - np.mean(noise_filtered)
        auto_corr = np.correlate(norm_noise_filt, norm_noise_filt, 'same')

        ac_max = np.max(auto_corr)
        if ac_max > 0.0:
            # Take the right half of the auto-correlation and normalize to 1000.0
            norm_ac = auto_corr[len(auto_corr)//2:] / ac_max * 1000.0
            ac_peaks = find_hist_peaks(norm_ac, thresh_scale=1.0)
            if len(ac_peaks) > 1:
                p1_max = np.max(norm_ac[ac_peaks[1][0]:ac_peaks[1][1]+1])
                if p1_max > 500.0:
                    acorr_edges_present = True

    if edges_present or acorr_edges_present:
        edge_threshold = max(mvavg_diff) * 0.6
    else:
        # Just noise
        edge_threshold = max(mvavg_diff) * 5

    # For synthetic waveforms with no noise present and no edges in the initial samples we
    # will get an edge_threshold of 0.0. In this case we just set the threshold high enough
    # to detect a deviation from 0.0 for any reasonable real-world input.
    edge_threshold = max(edge_threshold, 1.0e-9)

    del et_it
    
    # We have established the edge threshold. We will now construct the moving avg. difference
    # again. This time, any difference above the threshold will be an indicator of an edge
    # transition.

    if acorr_edges_present:
        samp_cex = ChunkExtractor(samp_it)
        buf = samp_cex.next_samples(buf_size)
        state = S_FINISH_BUF
    else:
    
        mvavg_buf = collections.deque(maxlen=mvavg_size)
        mvavg_dly_buf = collections.deque(maxlen=mvavg_size)
        buf = collections.deque(maxlen=buf_size)

        # skip initial samples to create disparity between samp_cex and dly_cex
        samp_cex = ChunkExtractor(samp_it)
        dly_cex = ChunkExtractor(samp_dly_it)
        delay_samples = 100
        samp_cex.next_samples(delay_samples)

        end_loop = False
        while True:
            cur_samp = samp_cex.next_samples()
            cur_dly_samp = dly_cex.next_samples()

            if cur_samp is None:
                break
        
            for i in range(len(cur_samp)):
            
                ns = cur_samp[i]
                sc += 1
                
                buf.append(ns)
                
                if state == S_FIND_EDGE:
                    if sc > (max_samples - buf_size):
                        end_loop = True
                        break

                    mvavg_buf.append(ns)
                    mvavg = sum(mvavg_buf) / len(mvavg_buf)  # calculate moving avg.
                    mvavg_dly_buf.append(cur_dly_samp[i])
                    mvavg_dly = sum(mvavg_dly_buf) / len(mvavg_dly_buf)  # calculate moving avg.
                    if abs(mvavg_dly - mvavg) > edge_threshold:
                        # This is likely an edge event
                        state = S_FINISH_BUF
                        if len(buf) < buf_size // 2:
                            buf_remaining = buf_size - len(buf)
                        else:
                            buf_remaining = buf_size // 2
                            
                else: # S_FINISH_BUF
                    # Accumulate samples until the edge event is in the middle of the
                    # buffer or the buffer is filled
                    buf_remaining -= 1
                    if buf_remaining <= 0 and len(buf) >= buf_size:
                        end_loop = True
                        break

            if end_loop:
                break
            

    # If we didn't see any edges in the buffered sample data then abort
    # before the histogram analysis
    if state != S_FINISH_BUF:
        return None

    try:
        logic_levels = find_bot_top_hist_peaks(buf, 100, use_kde=True)
    except ValueError:
        logic_levels = None


    return logic_levels
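A usage sketch for find_logic_levels. The waveform construction is illustrative; it assumes SampleChunk comes from the same module as the function:

import numpy as np

sample_period = 1.0e-6
# 20 cycles of a noisy 0.0 V / 3.3 V square wave, 500 samples per half cycle
wave = np.tile(np.concatenate([np.zeros(500), np.full(500, 3.3)]), 20)
wave = wave + np.random.normal(0.0, 0.05, len(wave))

chunks = (SampleChunk(wave[i:i + 1000], i * sample_period, sample_period)
          for i in range(0, len(wave), 1000))

levels = find_logic_levels(chunks)
if levels is not None:
    low, high = levels  # expect low near 0.0 and high near 3.3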
Example #6

import numpy as np
from scipy import signal

# SampleChunk, ChunkExtractor, approximate_bandwidth and min_rise_time are
# assumed to be defined in the surrounding module
def filter_waveform(samples, sample_rate, rise_time, ripple_db=60.0, chunk_size=10000):
    """Apply a bandwidth limiting low-pass filter to a sample stream
    
    This is a generator function.
    
    samples (iterable of SampleChunk objects)
        An iterable sample stream to be filtered.
    
    sample_rate (float)
        The sample rate of the sample stream.
    
    rise_time (float)
        Rise (and fall) time for the filtered samples.
    
    ripple_db (float)
        Noise suppression in dB for the bandwidth filter stop band. This should
        be a positive value.
        
    chunk_size (int)
        Internal FIR filter sample pool size. This can generally be ignored. To support
        streaming of samples, the FIR filter operation is done piecewise so we don't have
        to consume the entire input before producing filtered output. Larger values will
        reduce the number of filter operations performed. Excessively small values will
        waste time due to the reprocessing of overlapping samples between successive pools.
    
    Yields a stream of SampleChunk objects.
    """

    sample_period = 1.0 / sample_rate
    nyquist = sample_rate / 2.0
    edge_bw = approximate_bandwidth(rise_time)
    transition_bw = edge_bw * 4.0  # This gives a nice smooth transition with no Gibbs effect
    cutoff_hz = edge_bw

    if cutoff_hz > nyquist:
        min_rise = min_rise_time(sample_rate)
        raise ValueError("Rise time is too fast for current sample rate (min: {0})".format(min_rise))

    N, beta = signal.kaiserord(ripple_db, transition_bw / nyquist)
    taps = signal.firwin(N, cutoff_hz / nyquist, window=("kaiser", beta))

    # Filter delay
    # delay = 0.5 * (N-1) / sample_rate

    if chunk_size < 2 * N:
        chunk_size = 2 * N

    samp_ce = ChunkExtractor(samples)

    # Get a pool of samples
    spool = np.zeros((chunk_size + N - 1,), dtype=float)

    # Prime the initial portion of the pool with data that will be filtered out
    prime_size = N - N // 2
    sc = samp_ce.next_chunk(prime_size)
    if sc is not None:
        spool[0 : N // 2 - 1] += sc.samples[0]  # Pad the first part of the pool with a copy of the first sample
        spool[N // 2 - 1 : N - 1] = sc.samples

        while True:
            # Fill the pool with samples
            sc = samp_ce.next_chunk(chunk_size)
            if sc is None:
                break

            spool[N - 1 : len(sc.samples) + N - 1] = sc.samples
            valid_samples = len(sc.samples) + N - 1

            filt = signal.lfilter(
                taps, 1.0, spool[:valid_samples]
            )  # NOTE: there may be an off-by-one error in the slice

            # copy end samples to start of pool
            spool[0 : N - 1] = spool[chunk_size : chunk_size + N - 1]

            yield SampleChunk(filt[N - 1 : valid_samples], sc.start_time, sample_period)
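A usage sketch for filter_waveform. The input is illustrative; it assumes SampleChunk and the module's helper functions (approximate_bandwidth, min_rise_time) are available:

import numpy as np

sample_rate = 1.0e6
period = 1.0 / sample_rate
# Ideal square wave: ten 200-sample cycles
square = np.tile(np.concatenate([np.zeros(100), np.ones(100)]), 10)

chunks = (SampleChunk(square[i:i + 500], i * period, period)
          for i in range(0, len(square), 500))

# Band-limit the edges to roughly a 10 microsecond rise time
filtered = filter_waveform(chunks, sample_rate, rise_time=10.0e-6)
smoothed = np.concatenate([c.samples for c in filtered])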