# Example #1
def microphone_update(stream):
    """Read one audio frame, update the rolling window, and visualize it.

    Reads ``samples_per_frame`` int16 samples from ``stream``, normalizes
    them to [-1, 1), appends them to the rolling window ``y_roll``, and
    either blanks the display (volume below threshold) or renders the
    Mel-filterbank spectrum of the window via ``visualize``.

    Side effects: mutates the module-level ``y_roll``, updates ``volume``
    and ``mel_gain``, pumps the GUI event loop, and prints the FPS.
    """
    global y_roll
    # np.frombuffer replaces the deprecated np.fromstring for binary input;
    # the resulting int16 array is identical.
    y = np.frombuffer(stream.read(samples_per_frame), dtype=np.int16)
    # Normalize 16-bit samples to the range [-1, 1)
    y = y / 2.0**15
    # Shift the rolling window left by one frame and append the newest frame
    y_roll = np.roll(y_roll, -1, axis=0)
    y_roll[-1, :] = np.copy(y)
    y_data = np.concatenate(y_roll, axis=0)
    # Track mean signal power as a slowly-varying volume estimate
    volume.update(np.nanmean(y_data**2))

    if volume.value < config.MIN_VOLUME_THRESHOLD:
        print('No audio input. Volume below threshold. Volume:', volume.value)
        # Blank the strip when there is effectively no input
        visualize(np.tile(0.0, config.N_PIXELS))
    else:
        # Transform the window into the frequency domain (Hamming-windowed FFT)
        XS, YS = dsp.fft(y_data, window=np.hamming)
        # Keep only non-negative frequencies (real-input FFT is symmetric)
        YS = YS[XS >= 0.0]
        XS = XS[XS >= 0.0]
        # Project the magnitude spectrum onto the Mel filterbank
        YS = np.atleast_2d(np.abs(YS)).T * dsp.mel_y.T
        YS = np.sum(YS, axis=0)**2.0
        # Mirror the spectrum and resample it to the LED strip length
        mel = np.concatenate((YS[::-1], YS))
        mel = interpolate(mel, config.N_PIXELS)
        # Square to emphasize peaks before gain normalization
        mel = (mel)**2.
        # Normalize by a rolling gain estimate so output stays in [0, 1]
        mel_gain.update(mel)
        mel = mel / mel_gain.value
        visualize(mel)

    GUI.app.processEvents()
    print('FPS {:.0f} / {:.0f}'.format(frames_per_second(), config.FPS))
# Example #2
def microphone_update(stream):
    """Read one audio frame, update the rolling window, and drive the LEDs.

    Reads ``samples_per_frame`` int16 samples from ``stream``, normalizes
    them to [-1, 1), appends them to the rolling window ``y_roll``, and
    either blanks the strip (volume below threshold) or maps the
    Mel-filterbank spectrum onto the LEDs via ``visualization_effect``.

    Side effects: mutates the module-level ``y_roll`` and
    ``buffer_overflows``, updates ``volume``, ``mel_gain`` and
    ``mel_smoothing``, writes ``led.pixels``, and optionally updates the
    GUI plots and prints the FPS.
    """
    # Declare all module-level state up front (the original declared
    # buffer_overflows inside the except branch).
    global y_roll, prev_rms, prev_exp, buffer_overflows
    try:
        # np.frombuffer replaces the deprecated np.fromstring for binary data
        y = np.frombuffer(stream.read(samples_per_frame), dtype=np.int16)
    except IOError:
        # Buffer overflow: reuse the most recent frame from the rolling
        # window. That frame is already normalized to [-1, 1), so the
        # scaling in the else-branch must NOT be applied again (the
        # original divided it by 2**15 a second time, collapsing the
        # fallback frame to near-zero).
        y = y_roll[config.N_ROLLING_HISTORY - 1, :]
        print('Buffer overflows: {0}'.format(buffer_overflows))
        buffer_overflows += 1
    else:
        # Separate a stereo loopback feed into two channels and keep
        # only the left channel for further processing
        if config.USE_LOOPBACK:
            y = np.reshape(y, (int(config.MIC_RATE / config.FPS), 2))
            y = y[:, 0]
        # Normalize 16-bit samples to the range [-1, 1)
        y = y / 2.0**15
    # Shift the rolling window left by one frame and append the newest frame
    y_roll = np.roll(y_roll, -1, axis=0)
    y_roll[-1, :] = np.copy(y)
    y_data = np.concatenate(y_roll, axis=0)
    # Track mean signal power as a slowly-varying volume estimate
    volume.update(np.nanmean(y_data**2))

    if volume.value < config.MIN_VOLUME_THRESHOLD:
        print('No audio input. Volume below threshold. Volume:', volume.value)
        # Blank all three color channels
        led.pixels = np.tile(0, (3, config.N_PIXELS))
        led.update()
    else:
        # Transform audio input into the frequency domain
        XS, YS = dsp.fft(y_data, window=np.hamming)
        # Remove half of the FFT data because of symmetry
        YS = YS[:len(YS) // 2]
        XS = XS[:len(XS) // 2]
        # Construct a Mel filterbank from the FFT data
        mel = np.atleast_2d(np.abs(YS)).T * dsp.mel_y.T
        # Scale data to values more suitable for visualization
        mel = np.mean(mel, axis=0)
        mel = mel**2.0
        # Gain normalization: track the smoothed peak so output stays in [0, 1]
        mel_gain.update(np.max(gaussian_filter1d(mel, sigma=1.0)))
        mel = mel / mel_gain.value
        mel = mel_smoothing.update(mel)
        # Map filterbank output onto the LED strip
        output = visualization_effect(mel)
        led.pixels = output
        led.update()
        if config.USE_GUI:
            # Plot filterbank output
            x = np.linspace(config.MIN_FREQUENCY, config.MAX_FREQUENCY,
                            len(mel))
            mel_curve.setData(x=x, y=fft_plot_filter.update(mel))
            # Plot the color channels
            r_curve.setData(y=led.pixels[0])
            g_curve.setData(y=led.pixels[1])
            b_curve.setData(y=led.pixels[2])
            app.processEvents()
    if config.DISPLAY_FPS:
        print('FPS {:.0f} / {:.0f}'.format(frames_per_second(), config.FPS))
# Example #3
def microphone_update(stream):
    """Read one audio frame, update the rolling window, and plot the spectrum.

    Reads ``samples_per_frame`` int16 samples from ``stream``, normalizes
    them to [-1, 1), appends them to the rolling window ``y_roll``, computes
    the Mel-filterbank spectrum of the window, peak-normalizes it, renders
    it through ``led_vis3``, and updates the plot.

    Side effects: mutates the module-level ``y_roll``, updates ``YS_peak``,
    redraws the plot, pumps the GUI event loop, and prints the FPS.
    """
    global y_roll
    # Retrieve new audio samples and construct the rolling window.
    # np.frombuffer replaces the deprecated np.fromstring for binary input.
    y = np.frombuffer(stream.read(samples_per_frame), dtype=np.int16)
    # Normalize 16-bit samples to the range [-1, 1)
    y = y / 2.0**15
    y_roll = np.roll(y_roll, -1, axis=0)
    y_roll[-1, :] = np.copy(y)
    y_data = np.concatenate(y_roll, axis=0)
    # NOTE(review): a commented-out onset-detection prototype (spectral
    # flux / NWPD / RCD followers) was removed here as dead code.

    # Transform the window into the frequency domain (Hamming-windowed FFT)
    XS, YS = dsp.fft(y_data, window=np.hamming)
    # Keep only non-negative frequencies (real-input FFT is symmetric)
    YS = YS[XS >= 0.0]
    XS = XS[XS >= 0.0]
    # Project the magnitude spectrum onto the Mel filterbank
    YS = np.atleast_2d(np.abs(YS)).T * dsp.mel_y.T
    YS = np.sum(YS, axis=0)**2.0
    # (Removed `YS = np.diff(YS, n=0)` — np.diff with n=0 returns the
    # input as-is, so the call was a no-op.)
    # Normalize by a rolling peak estimate so output stays in [0, 1]
    YS_peak.update(np.max(YS))
    YS /= YS_peak.value

    # Resample to the LED strip length if the filterbank size differs
    if len(YS) != config.N_PIXELS:
        YS = interpolate(YS, config.N_PIXELS)

    YS = led_vis3(YS)

    # Plot the rendered output
    plot_x = np.array(range(1, len(YS) + 1))
    plot_y = [YS]
    update_plot_1(plot_x, plot_y)
    app.processEvents()
    print('FPS {:.0f} / {:.0f}'.format(frames_per_second(), config.FPS))