def sum_streams(stream1, stream2):
    '''Add two sample streams together

    The time elements of each stream will not be aligned if they do not match.
    Instead the time values from stream1 are used for the result. The iterator
    terminates when either of the two streams ends.

    This is a generator function.

    stream1 (iterable of SampleChunk objects)
    stream2 (iterable of SampleChunk objects)
        The two sample streams to have their corresponding values added together.

    Yields a sample stream.
    '''

    extractor_a = ChunkExtractor(stream1)
    extractor_b = ChunkExtractor(stream2)

    while True:
        chunk_a = extractor_a.next_chunk()
        chunk_b = extractor_b.next_chunk()

        # Stop as soon as either stream is exhausted
        if chunk_a is None or chunk_b is None:
            return

        # Sum over the overlapping prefix; when the chunks are the same
        # length this is simply an element-wise add of both full chunks.
        common = min(len(chunk_a.samples), len(chunk_b.samples))
        summed = chunk_a.samples[:common] + chunk_b.samples[:common]

        # Timing metadata is taken from stream1's chunk
        yield SampleChunk(summed, chunk_a.start_time, chunk_a.sample_period)
# Exemplo n.º 2 (scraped page marker — commented out so the file parses)
def sum_streams(stream1, stream2):
    """Add two sample streams together

    The time elements of each stream will not be aligned if they do not match.
    Instead the time values from stream1 are used for the result. The iterator
    terminates when either of the two streams ends.

    This is a generator function.

    stream1 (iterable of SampleChunk objects)
    stream2 (iterable of SampleChunk objects)
        The two sample streams to have their corresponding values added together.

    Yields a sample stream.
    """

    left = ChunkExtractor(stream1)
    right = ChunkExtractor(stream2)

    while True:
        lc = left.next_chunk()
        rc = right.next_chunk()

        # End of either stream terminates the generator
        if lc is None or rc is None:
            break

        lsamp, rsamp = lc.samples, rc.samples
        if len(lsamp) == len(rsamp):
            combined = lsamp + rsamp
        else:
            # Trim both chunks to their common length before adding
            n = min(len(lsamp), len(rsamp))
            combined = lsamp[:n] + rsamp[:n]

        # stream1 supplies the timing information for the output
        yield SampleChunk(combined, lc.start_time, lc.sample_period)
def filter_waveform(samples,
                    sample_rate,
                    rise_time,
                    ripple_db=60.0,
                    chunk_size=10000):
    '''Apply a bandwidth limiting low-pass filter to a sample stream
    
    This is a generator function.
    
    samples (iterable of SampleChunk objects)
        An iterable sample stream to be filtered.
    
    sample_rate (float)
        The sample rate for converting the sample stream.
    
    rise_time (float)
        Rise (and fall) time for the filtered samples.
    
    ripple_db (float)
        Noise suppression in dB for the bandwidth filter stop band. This should
        be a positive value.
        
    chunk_size (int)
        Internal FIR filter sample pool size. This can generally be ignored. To support
        streaming of samples, the FIR filter operation is done piecewise so we don't have
        to consume the entire input before producing filtered output. Larger values will
        reduce the number of filter operations performed. Excessively small values will
        waste time due to the reprocessing of overlapping samples between successive pools.
    
    Yields a stream of SampleChunk objects.

    Raises ValueError if rise_time is too fast for the given sample_rate
    (i.e. the required cutoff frequency exceeds the Nyquist rate).
    '''

    sample_period = 1.0 / sample_rate
    nyquist = sample_rate / 2.0
    edge_bw = approximate_bandwidth(rise_time)
    transition_bw = edge_bw * 4.0  # This gives a nice smooth transition with no Gibbs effect
    cutoff_hz = edge_bw

    if cutoff_hz > nyquist:
        min_rise = min_rise_time(sample_rate)
        raise ValueError(
            'Rise time is too fast for current sample rate (min: {0})'.format(
                min_rise))

    # Design a Kaiser-windowed FIR low-pass filter meeting the ripple spec
    N, beta = signal.kaiserord(ripple_db, transition_bw / nyquist)
    taps = signal.firwin(N, cutoff_hz / nyquist, window=('kaiser', beta))

    # Filter delay
    # delay = 0.5 * (N-1) / sample_rate

    # The pool must hold at least two filter lengths so each pass yields output
    if chunk_size < 2 * N:
        chunk_size = 2 * N

    samp_ce = ChunkExtractor(samples)

    # Get a pool of samples
    # FIX: np.float was removed in NumPy 1.24; np.float64 is the same dtype
    spool = np.zeros((chunk_size + N - 1, ), dtype=np.float64)

    # Prime the initial portion of the pool with data that will be filtered out
    prime_size = N - N // 2
    sc = samp_ce.next_chunk(prime_size)
    if sc is not None:
        spool[0:N // 2 - 1] += sc.samples[
            0]  # Pad the first part of the pool with a copy of the first sample
        spool[N // 2 - 1:N - 1] = sc.samples

        while True:
            # Fill the pool with samples
            sc = samp_ce.next_chunk(chunk_size)
            if sc is None:
                break

            spool[N - 1:len(sc.samples) + N - 1] = sc.samples
            valid_samples = len(sc.samples) + N - 1

            filt = signal.lfilter(
                taps, 1.0, spool[:valid_samples]
            )  #NOTE: there may be an off-by-one error in the slice

            # copy end samples to start of pool so successive passes overlap by N-1
            spool[0:N - 1] = spool[chunk_size:chunk_size + N - 1]

            # The first N-1 filtered samples are transient from the previous
            # pool's overlap region and are discarded.
            yield SampleChunk(filt[N - 1:valid_samples], sc.start_time,
                              sample_period)
# Exemplo n.º 4 (scraped page marker — commented out so the file parses)
def filter_waveform(samples, sample_rate, rise_time, ripple_db=60.0, chunk_size=10000):
    """Apply a bandwidth limiting low-pass filter to a sample stream
    
    This is a generator function.
    
    samples (iterable of SampleChunk objects)
        An iterable sample stream to be filtered.
    
    sample_rate (float)
        The sample rate for converting the sample stream.
    
    rise_time (float)
        Rise (and fall) time for the filtered samples.
    
    ripple_db (float)
        Noise suppression in dB for the bandwidth filter stop band. This should
        be a positive value.
        
    chunk_size (int)
        Internal FIR filter sample pool size. This can generally be ignored. To support
        streaming of samples, the FIR filter operation is done piecewise so we don't have
        to consume the entire input before producing filtered output. Larger values will
        reduce the number of filter operations performed. Excessively small values will
        waste time due to the reprocessing of overlapping samples between successive pools.
    
    Yields a stream of SampleChunk objects.

    Raises ValueError if rise_time is too fast for the given sample_rate
    (i.e. the required cutoff frequency exceeds the Nyquist rate).
    """

    sample_period = 1.0 / sample_rate
    nyquist = sample_rate / 2.0
    edge_bw = approximate_bandwidth(rise_time)
    transition_bw = edge_bw * 4.0  # This gives a nice smooth transition with no Gibbs effect
    cutoff_hz = edge_bw

    if cutoff_hz > nyquist:
        min_rise = min_rise_time(sample_rate)
        raise ValueError("Rise time is too fast for current sample rate (min: {0})".format(min_rise))

    # Design a Kaiser-windowed FIR low-pass filter meeting the ripple spec
    N, beta = signal.kaiserord(ripple_db, transition_bw / nyquist)
    taps = signal.firwin(N, cutoff_hz / nyquist, window=("kaiser", beta))

    # Filter delay
    # delay = 0.5 * (N-1) / sample_rate

    # The pool must hold at least two filter lengths so each pass yields output
    if chunk_size < 2 * N:
        chunk_size = 2 * N

    samp_ce = ChunkExtractor(samples)

    # Get a pool of samples
    # FIX: np.float was removed in NumPy 1.24; np.float64 is the same dtype
    spool = np.zeros((chunk_size + N - 1,), dtype=np.float64)

    # Prime the initial portion of the pool with data that will be filtered out
    prime_size = N - N // 2
    sc = samp_ce.next_chunk(prime_size)
    if sc is not None:
        spool[0 : N // 2 - 1] += sc.samples[0]  # Pad the first part of the pool with a copy of the first sample
        spool[N // 2 - 1 : N - 1] = sc.samples

        while True:
            # Fill the pool with samples
            sc = samp_ce.next_chunk(chunk_size)
            if sc is None:
                break

            spool[N - 1 : len(sc.samples) + N - 1] = sc.samples
            valid_samples = len(sc.samples) + N - 1

            filt = signal.lfilter(
                taps, 1.0, spool[:valid_samples]
            )  # NOTE: there may be an off-by-one error in the slice

            # copy end samples to start of pool so successive passes overlap by N-1
            spool[0 : N - 1] = spool[chunk_size : chunk_size + N - 1]

            # The first N-1 filtered samples are transient from the previous
            # pool's overlap region and are discarded.
            yield SampleChunk(filt[N - 1 : valid_samples], sc.start_time, sample_period)