Ejemplo n.º 1
0
def led_visualization(onset_values):
    """Map onset strengths onto LED brightness and push a frame to the strip.

    Parameters
    ----------
    onset_values : np.ndarray
        Onset envelope, normalized to roughly [0, 1], one value per band.

    Returns
    -------
    np.ndarray
        The brightness array that was displayed.
    """
    # Visualizations that we want to use (normalized to ~[0, 1])
    #pixels_A = update_leds_6(onset_values)
    #pixels_B = update_leds_4(onset_values)
    # Combine the effects by taking the product
    #brightness = pixels_A #* pixels_B
    brightness = update_leds_6(onset_values**2.0)
    brightness = gaussian_filter1d(brightness, 4.0)
    #brightness = hyperbolic_tan(brightness)
    brightness = bloom_peaks(brightness)**2.
    # Solid white color map.
    # NOTE(review): the rainbow_gen() call that used to sit here computed a
    # color map and immediately discarded it (it was unconditionally
    # overwritten by the np.tile line below), so the dead call was removed.
    # Re-enable the commented lines to restore the rainbow color map:
    # color = rainbow_gen(onset_values.shape[0],
    #                     speed=1., center=0.5, width=0.5,
    #                     f=[1.1, .5, .2]) * 255.0
    color = np.tile(255.0, (config.N_PIXELS, 3))
    # color = rainbow(onset_values.shape[0]) * 255.0
    # Scale each color channel by the brightness envelope
    pixels = (brightness * color.T).T
    pixels = leak_saturated_pixels(pixels)
    pixels = np.clip(pixels, 0., 255.)
    # Apply low-pass filter to the output
    pixels_filt.update(np.copy(pixels))
    # Display values on the LED strip
    led.pixels = np.round(pixels_filt.value).astype(int)
    led.update()
    return brightness
Ejemplo n.º 2
0
def ambiance():
    """Fill the entire strip with a static purple ambiance color."""
    for idx in range(config.N_PIXELS):
        # RGB (74, 0, 106): a deep purple
        r[idx], g[idx], b[idx] = 74, 0, 106
    led.pixels = np.array([r, g, b])
    led.update(components)
def microphone_update(audio_samples):
    """Per-frame audio callback: gate on volume, run the FFT/mel pipeline,
    push the selected effect to the LED strip and refresh the GUI plots.

    audio_samples: 1-D array of signed 16-bit PCM samples for this frame.
    """
    global y_roll, prev_rms, prev_exp, prev_fps_update
    # Normalize samples between 0 and 1
    y = audio_samples / 2.0**15
    # Construct a rolling window of audio samples
    y_roll[:-1] = y_roll[1:]
    y_roll[-1, :] = np.copy(y)
    y_data = np.concatenate(y_roll, axis=0).astype(np.float32)

    # Peak amplitude over the rolling window acts as a simple noise gate
    vol = np.max(np.abs(y_data))
    if vol < config.MIN_VOLUME_THRESHOLD:
        print('No audio input. Volume below threshold. Volume:', vol)
        # Blank the strip while below the gate
        led.pixels = np.tile(0, (3, config.N_PIXELS))
        led.update()
    else:
        # Transform audio input into the frequency domain
        N = len(y_data)
        N_zeros = 2**int(np.ceil(np.log2(N))) - N
        # Pad with zeros until the next power of two
        y_data *= fft_window
        y_padded = np.pad(y_data, (0, N_zeros), mode='constant')
        YS = np.abs(np.fft.rfft(y_padded)[:N // 2])
        # Construct a Mel filterbank from the FFT data
        mel = np.atleast_2d(YS).T * dsp.mel_y.T
        # Scale data to values more suitable for visualization
        # mel = np.sum(mel, axis=0)
        mel = np.sum(mel, axis=0)
        mel = mel**2.0
        # Gain normalization
        mel_gain.update(np.max(gaussian_filter1d(mel, sigma=1.0)))
        mel /= mel_gain.value
        mel = mel_smoothing.update(mel)
        # Map filterbank output onto LED strip
        output = visualization_effect(mel)
        led.pixels = output
        led.update()
        if config.USE_GUI:
            # Plot filterbank output
            x = np.linspace(config.MIN_FREQUENCY, config.MAX_FREQUENCY,
                            len(mel))
            mel_curve.setData(x=x, y=fft_plot_filter.update(mel))
            # Plot the color channels
            r_curve.setData(y=led.pixels[0])
            g_curve.setData(y=led.pixels[1])
            b_curve.setData(y=led.pixels[2])
            int_led = led.pixels.astype(int)
            # One brush per pixel, colored with that pixel's RGB value
            combined_plot.setBrush([
                get_qt_brush(rgb[0], rgb[1], rgb[2])
                for rgb in np.stack([int_led[0], int_led[1], int_led[2]],
                                    axis=1)
            ])

    if config.USE_GUI:
        app.processEvents()

    if config.DISPLAY_FPS:
        fps = frames_per_second()
        # Rate-limit FPS printing to at most once every half second
        if time.time() - 0.5 > prev_fps_update:
            prev_fps_update = time.time()
            print('FPS {:.0f} / {:.0f}'.format(fps, config.FPS))
Ejemplo n.º 4
0
def microphone_update(stream):
    """Read one frame of audio from *stream*, run the mel pipeline and
    update the LED strip (plus the GUI plots when enabled).

    stream: a PyAudio-style stream whose read() returns raw int16 bytes.
    """
    global y_roll, prev_rms, prev_exp
    # Retrieve and normalize the new audio samples
    try:
        # BUG FIX: np.fromstring is deprecated (and removed in recent
        # NumPy) for binary input; np.frombuffer performs the same int16
        # reinterpretation without a copy.
        y = np.frombuffer(stream.read(samples_per_frame), dtype=np.int16)

    except IOError:
        # On buffer overflow, reuse the most recent frame from the window
        y = y_roll[config.N_ROLLING_HISTORY - 1, :]
        global buffer_overflows
        print('Buffer overflows: {0}'.format(buffer_overflows))
        buffer_overflows += 1

    # Seperates a stereo feed into the left and right streams,
    # then pulls out the left channel for further processing
    if config.USE_LOOPBACK:
        y = np.reshape(y, (int(config.MIC_RATE / config.FPS), 2))
        y = y[:, 0]
    # Normalize samples between 0 and 1
    y = y / 2.0**15
    # Construct a rolling window of audio samples
    y_roll = np.roll(y_roll, -1, axis=0)
    y_roll[-1, :] = np.copy(y)
    y_data = np.concatenate(y_roll, axis=0)
    # Track mean-square volume with an exponential filter
    volume.update(np.nanmean(y_data**2))

    if volume.value < config.MIN_VOLUME_THRESHOLD:
        print('No audio input. Volume below threshold. Volume:', volume.value)
        led.pixels = np.tile(0, (3, config.N_PIXELS))
        led.update()
    else:
        # Transform audio input into the frequency domain
        XS, YS = dsp.fft(y_data, window=np.hamming)
        # Remove half of the FFT data because of symmetry
        YS = YS[:len(YS) // 2]
        XS = XS[:len(XS) // 2]
        # Construct a Mel filterbank from the FFT data
        mel = np.atleast_2d(np.abs(YS)).T * dsp.mel_y.T
        # Scale data to values more suitable for visualization
        mel = np.mean(mel, axis=0)
        mel = mel**2.0
        # Gain normalization
        mel_gain.update(np.max(gaussian_filter1d(mel, sigma=1.0)))
        mel = mel / mel_gain.value
        mel = mel_smoothing.update(mel)
        # Map filterbank output onto LED strip
        output = visualization_effect(mel)
        led.pixels = output
        led.update()
        if config.USE_GUI:
            # Plot filterbank output
            x = np.linspace(config.MIN_FREQUENCY, config.MAX_FREQUENCY,
                            len(mel))
            mel_curve.setData(x=x, y=fft_plot_filter.update(mel))
            # Plot the color channels
            r_curve.setData(y=led.pixels[0])
            g_curve.setData(y=led.pixels[1])
            b_curve.setData(y=led.pixels[2])
            app.processEvents()
    if config.DISPLAY_FPS:
        print('FPS {:.0f} / {:.0f}'.format(frames_per_second(), config.FPS))
Ejemplo n.º 5
0
def CylonBounce(red, green, blue, EyeSize, SpeedDelay, ReturnDelay):
    """Bounce a bright "eye" of EyeSize pixels back and forth across the
    strip, with dimmed (1/10 brightness) border pixels around it.

    red/green/blue: eye color channels.
    SpeedDelay: per-step delay in milliseconds.
    ReturnDelay: pause at each end of the sweep, in milliseconds.
    """
    global r, g, b
    # Sweep forward
    for i in range(config.N_PIXELS - EyeSize - 2):
        r[i] = red / 10
        g[i] = green / 10
        b[i] = blue / 10
        for j in range(EyeSize):
            r[i + j] = red
            g[i + j] = green
            b[i + j] = blue
        r[i + EyeSize + 1] = red / 10
        g[i + EyeSize + 1] = green / 10
        b[i + EyeSize + 1] = blue / 10
        led.pixels = np.array([r, g, b])
        led.update(components)
        time.sleep(SpeedDelay / 1000)
    time.sleep(ReturnDelay / 1000)

    # Sweep backward
    for i in range(config.N_PIXELS - EyeSize - 2, 0, -1):
        r[i] = red / 10
        g[i] = green / 10
        b[i] = blue / 10
        for j in range(EyeSize):
            r[i + j] = red
            g[i + j] = green
            b[i + j] = blue
        r[i + EyeSize + 1] = red / 10
        g[i + EyeSize + 1] = green / 10
        b[i + EyeSize + 1] = blue / 10
        # BUG FIX: the reverse sweep never refreshed led.pixels before
        # calling update(), so the backward motion was never displayed.
        led.pixels = np.array([r, g, b])
        led.update(components)
        time.sleep(SpeedDelay / 1000)

    time.sleep(ReturnDelay / 1000)
Ejemplo n.º 6
0
def microphone_update(audio_samples):
    """Minimal audio callback: rolling window, volume gate, then
    FFT -> mel filterbank -> visualization effect -> LED strip.
    (No GUI plotting or FPS reporting in this variant.)
    """
    global y_roll, prev_rms, prev_exp, prev_fps_update
    # Normalize signed 16-bit samples to roughly [-1, 1]
    y = audio_samples / 2.0**15
    # Shift the rolling window and append the newest frame
    y_roll[:-1] = y_roll[1:]
    y_roll[-1, :] = np.copy(y)
    y_data = np.concatenate(y_roll, axis=0).astype(np.float32)

    # Peak amplitude over the window acts as a simple noise gate
    vol = np.max(np.abs(y_data))
    if vol < config.MIN_VOLUME_THRESHOLD:
        # Below the gate: report and blank the strip
        print('No audio input. Volume below threshold. Volume:', vol)
        led.pixels = np.tile(0, (3, config.N_PIXELS))
        led.update()
    else:
        # Zero-pad to the next power of two before the FFT
        N = len(y_data)
        N_zeros = 2**int(np.ceil(np.log2(N))) - N
        y_data *= fft_window
        y_padded = np.pad(y_data, (0, N_zeros), mode='constant')
        YS = np.abs(np.fft.rfft(y_padded)[:N // 2])
        # Project the magnitude spectrum onto the mel filterbank
        mel = np.atleast_2d(YS).T * dsp.mel_y.T
        mel = np.sum(mel, axis=0)
        mel = mel**2.0
        # Normalize by a smoothed running gain, then smooth over time
        mel_gain.update(np.max(gaussian_filter1d(mel, sigma=1.0)))
        mel /= mel_gain.value
        mel = mel_smoothing.update(mel)
        # Render the current effect and push it to the LEDs
        output = visualization_effect(mel)
        led.pixels = output
        led.update()
Ejemplo n.º 7
0
def update_leds_now():
    """Rotate the hue of both 100-pixel strips, then write strip 0
    (mirrored) to pixels 0-99 and strip 1 to pixels 100-199.
    """
    global updates_this_second
    global strip_0_pixels, strip_1_pixels
    global shift
    shift = (shift + 0.01) % 1.0
    updates_this_second += 1

    for pixel_array in [strip_0_pixels, strip_1_pixels]:  #really inefficient
        for num in range(0, 100):
            # Convert the pixel to HSV so the hue can be rotated
            hsv = (colorsys.rgb_to_hsv(
                float(pixel_array[0][num]) / 255,
                float(pixel_array[1][num]) / 255,
                float(pixel_array[2][num]) / 255))
            # PERF FIX: convert back once per pixel instead of making
            # three identical hsv_to_rgb calls (one per channel).
            new_r, new_g, new_b = colorsys.hsv_to_rgb(
                (hsv[0] + shift) % 1.0, hsv[1], hsv[2])
            pixel_array[0][num] = new_r * 255
            pixel_array[1][num] = new_g * 255
            pixel_array[2][num] = new_b * 255

    # Strip 0 is written reversed (flipped end-to-end)
    led.pixels[0][0:100] = np.flip(strip_0_pixels[0])
    led.pixels[1][0:100] = np.flip(strip_0_pixels[1])
    led.pixels[2][0:100] = np.flip(strip_0_pixels[2])
    led.pixels[0][100:200] = (strip_1_pixels[0])
    led.pixels[1][100:200] = (strip_1_pixels[1])
    led.pixels[2][100:200] = (strip_1_pixels[2])
    led.update()
Ejemplo n.º 8
0
def rainbow(wait_ms=20, iterations=1):
    """Animate a moving rainbow across the whole strip.

    wait_ms: delay between frames, in milliseconds.
    iterations: number of full 256-step color cycles to run.
    """
    frame_delay = wait_ms / 1000.0
    for step in range(256 * iterations):
        for pixel in range(config.N_PIXELS):
            # Spread the 256-position color wheel across the strip,
            # offset by the current animation step
            wheel_pos = round(((pixel * 256 / config.N_PIXELS) + step)) & 255
            r[pixel], g[pixel], b[pixel] = wheel(wheel_pos)
        led.pixels = np.array([r, g, b])
        led.update(components)
        time.sleep(frame_delay)
Ejemplo n.º 9
0
def clear():
    """Turn every pixel off (all three channels set to zero)."""
    global r, g, b
    for idx in range(config.N_PIXELS):
        r[idx], g[idx], b[idx] = 0, 0, 0
    led.pixels = np.array([r, g, b])
    led.update(components)
def microphone_update(audio_samples):
    """Audio callback: on silence, fade the previous frame toward black;
    otherwise run the mel pipeline and drive the visualizer and GUI.

    audio_samples: 1-D array of signed 16-bit PCM samples for this frame.
    """
    global y_roll, prev_rms, prev_exp, prev_fps_update
    # Normalize samples between 0 and 1
    y = audio_samples / 2.0**15
    # Construct a rolling window of audio samples
    y_roll[:-1] = y_roll[1:]
    y_roll[-1, :] = np.copy(y)
    y_data = np.concatenate(y_roll, axis=0).astype(np.float32)

    vol = np.max(np.abs(y_data))
    if vol < config.MIN_VOLUME_THRESHOLD:
        if config.USE_GUI:
            gui.label_error.setText("No audio input. Volume below threshold.")
        else:
            print("No audio input. Volume below threshold. Volume: {}".format(vol))
        # Fade the previous output toward black instead of cutting to off
        visualizer.prev_output = np.multiply(visualizer.prev_output, 0.95)
        led.pixels = visualizer.prev_output
        led.update()
    else:
        # Transform audio input into the frequency domain
        N = len(y_data)
        N_zeros = 2**int(np.ceil(np.log2(N))) - N
        # Pad with zeros until the next power of two
        y_data *= fft_window
        y_padded = np.pad(y_data, (0, N_zeros), mode='constant')
        YS = np.abs(np.fft.rfft(y_padded)[:N // 2])
        # Construct a Mel filterbank from the FFT data
        mel = np.atleast_2d(YS).T * dsp.mel_y.T
        # Scale data to values more suitable for visualization
        # mel = np.sum(mel, axis=0)
        mel = np.sum(mel, axis=0)
        mel = mel**2.0
        # Gain normalization
        mel_gain.update(np.max(gaussian_filter1d(mel, sigma=1.0)))
        mel /= mel_gain.value
        mel = mel_smoothing.update(mel)
        # Map filterbank output onto LED strip
        led.pixels = visualizer.get_vis(mel)
        led.update()
        if config.USE_GUI:
            # Plot filterbank output
            x = np.linspace(config.MIN_FREQUENCY, config.MAX_FREQUENCY, len(mel))
            gui.mel_curve.setData(x=x, y=fft_plot_filter.update(mel))
            gui.label_error.setText("")
    # BUG FIX: `fps` was only assigned inside the USE_GUI branch, so
    # DISPLAY_FPS without USE_GUI raised NameError below. Track whether
    # it has been computed and compute it at most once per call.
    fps = None
    if config.USE_GUI:
        fps = frames_per_second()
        if time.time() - 0.5 > prev_fps_update:
            prev_fps_update = time.time()
        app.processEvents()
        # Plot the color channels
        gui.r_curve.setData(y=led.pixels[0])
        gui.g_curve.setData(y=led.pixels[1])
        gui.b_curve.setData(y=led.pixels[2])
        # Update fps counter
        gui.label_fps.setText('{:.0f} / {:.0f} FPS'.format(fps, config.FPS))
    if config.DISPLAY_FPS:
        if fps is None:
            fps = frames_per_second()
        print('FPS {:.0f} / {:.0f}'.format(fps, config.FPS))
Ejemplo n.º 11
0
def microphone_update(audio_samples):
    """Audio callback (dict-style config): gate on volume, run the mel
    pipeline, render the currently selected effect and update the GUI.

    audio_samples: 1-D array of signed 16-bit PCM samples for this frame.
    """
    global y_roll, prev_rms, prev_exp, prev_fps_update, n_frame
    # Normalize samples between 0 and 1
    y = audio_samples / 32767.0
    # Construct a rolling window of audio samples
    y_roll[:-1] = y_roll[1:]
    y_roll[-1, :] = np.copy(y)
    y_data = np.concatenate(y_roll, axis=0).astype(np.float32)

    # Peak amplitude over the window acts as a simple noise gate
    vol = np.max(np.abs(y_data))
    if vol < config['MIN_VOLUME_THRESHOLD']:
        if config['TURN_OFF_ON_SILENCE']:
            # Four channels here (presumably RGBW — confirm against led)
            led.pixels = np.tile(0, (4, config['N_PIXELS']))
            led.update(config)
    else:
        # Transform audio input into the frequency domain
        N = len(y_data)
        N_zeros = 2**int(np.ceil(np.log2(N))) - N
        # Pad with zeros until the next power of two
        y_data *= fft_window
        y_padded = np.pad(y_data, (0, N_zeros), mode='constant')
        YS = np.abs(np.fft.rfft(y_padded)[:N // 2])
        # Construct a Mel filterbank from the FFT data
        mel = np.atleast_2d(YS).T * dsp.mel_y.T
        # Scale data to values more suitable for visualization
        # mel = np.sum(mel, axis=0)
        mel = np.sum(mel, axis=0)
        mel = mel**2.0
        # Gain normalization
        mel_gain.update(np.max(gaussian_filter1d(mel, sigma=1.0)))
        mel /= mel_gain.value
        mel = mel_smoothing.update(mel)
        # Map filterbank output onto LED strip
        output = effects[config['SELECTED_VISUALIZATION'] % len(effects)](mel)
        led.pixels = output
        led.update(config)
        n_frame += 1
        if config['USE_GUI']:
            # Plot filterbank output
            x = np.linspace(config['MIN_FREQUENCY'], config['MAX_FREQUENCY'],
                            len(mel))
            mel_curve.setData(x=x, y=fft_plot_filter.update(mel))
            # Plot the color channels
            #            r_curve.setData(y=led.pixels[0])
            #            g_curve.setData(y=led.pixels[1])
            #            b_curve.setData(y=led.pixels[2])
            r_curve.setData(y=output[0])
            g_curve.setData(y=output[1])
            b_curve.setData(y=output[2])
    if config['USE_GUI']:
        app.processEvents()

    if config['DISPLAY_FPS']:
        fps = frames_per_second()
        # Rate-limit FPS printing to at most once every half second
        if time.time() - 0.5 > prev_fps_update:
            prev_fps_update = time.time()
            print('FPS {:.0f} / {:.0f}'.format(fps, config['FPS']))
Ejemplo n.º 12
0
def colorWipe(rcolor, gcolor, bcolor, wait_ms=50):
    """Wipe color across display a pixel at a time.

    rcolor/gcolor/bcolor: channel values to wipe in.
    wait_ms: delay between lighting each pixel, in milliseconds.
    """
    # BUG FIX: the docstring used to sit AFTER the `global` statement,
    # where it is just a dead string literal — moved to the proper
    # position so it is a real docstring.
    global r, g, b
    for i in range(config.N_PIXELS):
        r[i] = rcolor
        g[i] = gcolor
        b[i] = bcolor
        led.pixels = np.array([r, g, b])
        led.update(components)
        time.sleep(wait_ms / 1000.0)
def microphone_update(audio_samples):
    """Audio callback: gate on volume, run the FFT/mel pipeline, push the
    current visualization effect to the LEDs and refresh the GUI plots.

    audio_samples: 1-D array of signed 16-bit PCM samples for this frame.
    """
    global y_roll, prev_rms, prev_exp, prev_fps_update
    # Normalize samples between 0 and 1
    y = audio_samples / 2.0**15
    # Construct a rolling window of audio samples
    y_roll[:-1] = y_roll[1:]
    y_roll[-1, :] = np.copy(y)
    y_data = np.concatenate(y_roll, axis=0).astype(np.float32)

    # Peak amplitude over the window acts as a simple noise gate
    vol = np.max(np.abs(y_data))
    if vol < config.MIN_VOLUME_THRESHOLD:
        print('No audio input. Volume below threshold. Volume:', vol)
        led.pixels = np.tile(0, (3, config.N_PIXELS))
        led.update()
    else:
        # Transform audio input into the frequency domain
        N = len(y_data)
        N_zeros = 2**int(np.ceil(np.log2(N))) - N
        # Pad with zeros until the next power of two
        y_data *= fft_window
        y_padded = np.pad(y_data, (0, N_zeros), mode='constant')
        YS = np.abs(np.fft.rfft(y_padded)[:N // 2])
        # Construct a Mel filterbank from the FFT data
        mel = np.atleast_2d(YS).T * dsp.mel_y.T
        # Scale data to values more suitable for visualization
        # mel = np.sum(mel, axis=0)
        mel = np.sum(mel, axis=0)
        mel = mel**2.0
        # Gain normalization
        mel_gain.update(np.max(gaussian_filter1d(mel, sigma=1.0)))
        mel /= mel_gain.value
        mel = mel_smoothing.update(mel)
        # Map filterbank output onto LED strip
        output = visualization_effect(mel)
        led.pixels = output
        led.update()
        if config.USE_GUI:
            # Plot filterbank output
            x = np.linspace(config.MIN_FREQUENCY, config.MAX_FREQUENCY, len(mel))
            mel_curve.setData(x=x, y=fft_plot_filter.update(mel))
            # Plot the color channels
            r_curve.setData(y=led.pixels[0])
            g_curve.setData(y=led.pixels[1])
            b_curve.setData(y=led.pixels[2])
    if config.USE_GUI:
        app.processEvents()

    if config.DISPLAY_FPS:
        fps = frames_per_second()
        # Rate-limit FPS printing to at most once every half second
        if time.time() - 0.5 > prev_fps_update:
            prev_fps_update = time.time()
            print('FPS {:.0f} / {:.0f}'.format(fps, config.FPS))
Ejemplo n.º 14
0
def start_everything():
    """Start the audio-capture and LED-streaming worker threads."""
    global _audio_reader, _light_streamer
    # Prime the LED strip before the workers take over.
    # NOTE(review): np.tile(1, ...) writes 1s (near-black), not 0s —
    # the original comment said "off"; confirm which is intended.
    led.pixels = np.tile(1, (3, config.N_PIXELS))
    led.update()
    # Queue carrying audio frames from the reader to the streamer
    sample_queue = queue.Queue()
    _audio_reader = microphone.AudioReader(sample_queue, config.MIC_RATE,
                                           config.FPS)
    _light_streamer = LightStreamer(sample_queue)
    _audio_reader.start()
    _light_streamer.start()
Ejemplo n.º 15
0
def theaterChaseRainbow(wait_ms=50):
    """Rainbow-colored movie-theater chaser: lights every third pixel
    (offset cycling 0..2) with wheel colors, shows the frame, clears.

    wait_ms: delay between chase steps, in milliseconds.
    """
    for j in range(256):
        for q in range(3):
            for i in range(0, config.N_PIXELS, 3):
                # BUG FIX: guard the index — when N_PIXELS is not a
                # multiple of 3, i + q could run past the end of the strip
                if i + q < config.N_PIXELS:
                    r[i + q], g[i + q], b[i + q] = wheel((i + j) % 255)
            led.pixels = np.array([r, g, b])
            led.update(components)
            time.sleep(wait_ms / 1000.0)
            # Clear the pixels lit in this step (shown on the next frame)
            for i in range(0, config.N_PIXELS, 3):
                if i + q < config.N_PIXELS:
                    r[i + q] = 0
                    g[i + q] = 0
                    b[i + q] = 0
            led.pixels = np.array([r, g, b])
Ejemplo n.º 16
0
def led_vis3(x):
    """Grayscale energy visualization: each pixel compares the current
    overall energy against its own running filter and lights up on peaks.

    x: onset/spectrum values for this frame.
    Returns the normalized (0-1) grayscale brightness that was displayed.
    """
    # Overall signal energy (sqrt compresses the dynamic range)
    energy = np.mean(x**.5)
    pixels = np.tile(0.0, config.N_PIXELS)
    # E[i] is a per-pixel filter of the energy; a pixel lights when the
    # instantaneous energy exceeds its own filtered value
    for i in range(N):
        E[i].update(energy)
        pixels[i] = hyperbolic_tan(max(energy / E[i].value - 1.0, 0))

    # Grayscale: copy the same brightness into all three channels
    color = np.tile(0.0, (3, config.N_PIXELS))
    color[0, :] = pixels
    color[1, :] = pixels
    color[2, :] = pixels
    color = color.T * 255.0
    # Low-pass filter the output before display
    pixels_filt.update(color)
    led.pixels = np.round(pixels_filt.value).astype(int)
    led.update()
    return (color[:, 0] + color[:, 1] + color[:, 2]) / (3. * 255.0)
Ejemplo n.º 17
0
def theaterChase(rcolor, gcolor, bcolor, wait_ms):
    """Movie theater light style chaser animation.

    Lights every third pixel (offset cycling 0..2) with the given color,
    shows the frame, then clears those pixels again.
    wait_ms: delay between chase steps, in milliseconds.
    """
    # BUG FIX: the docstring used to sit after the `global` statement,
    # where it is a dead string literal — moved to the proper position.
    global r, g, b
    for q in range(3):
        for i in range(0, config.N_PIXELS, 3):
            # BUG FIX: guard the index — when N_PIXELS is not a multiple
            # of 3, i + q could run past the end of the strip
            if i + q < config.N_PIXELS:
                r[i + q] = rcolor
                g[i + q] = gcolor
                b[i + q] = bcolor
        led.pixels = np.array([r, g, b])
        led.update(components)
        time.sleep(wait_ms / 1000.0)
        # Clear the pixels lit in this step (shown on the next frame)
        for i in range(0, config.N_PIXELS, 3):
            if i + q < config.N_PIXELS:
                r[i + q] = 0
                g[i + q] = 0
                b[i + q] = 0
        led.pixels = np.array([r, g, b])
Ejemplo n.º 18
0
def thunder():
    """Sweep the strip to warm white one pixel at a time, then sweep it
    back to black — a simple lightning-flash effect."""
    step_delay = 7 / 1000.0

    # Light each pixel warm white (255, 255, 153) in sequence
    for idx in range(config.N_PIXELS):
        r[idx], g[idx], b[idx] = 255, 255, 153
        led.pixels = np.array([r, g, b])
        time.sleep(step_delay)
        led.update(components)

    # Then darken each pixel in sequence
    for idx in range(config.N_PIXELS):
        r[idx], g[idx], b[idx] = 0, 0, 0
        led.pixels = np.array([r, g, b])
        time.sleep(step_delay)
        led.update(components)
Ejemplo n.º 19
0
def stop_everything():
    """Shut down the audio reader and light streamer threads, then reset
    the LED strip."""
    global _audio_reader, _light_streamer
    if _audio_reader and _audio_reader.is_alive():
        _audio_reader.shutdown()
        _audio_reader.join(1)  # wait up to 1 s for a clean exit
        _audio_reader = None
    if _light_streamer and _light_streamer.is_alive():
        _light_streamer.shutdown()
        _light_streamer.join(1)  # wait up to 1 s for a clean exit
        _light_streamer = None

    # Reset the strip.
    # NOTE(review): np.tile(1, ...) writes 1s (near-black), not 0s —
    # the original comment said "off"; confirm which is intended.
    led.pixels = np.tile(1, (3, config.N_PIXELS))
    led.update()
Ejemplo n.º 20
0
def Fire(Cooling, Sparking, SpeedDelay):
    """Render one frame of a Fire2012-style flame animation.

    Cooling: how aggressively cells cool down (larger = shorter flames).
    Sparking: 0-255 likelihood of igniting a new spark near the bottom.
    SpeedDelay: delay after the frame, in milliseconds.
    """
    global heat
    # Step 1: cool every cell a little, at random
    for i in range(config.N_PIXELS):
        cooldown = randint(0, round(((Cooling * 10) / config.N_PIXELS) + 2))
        if cooldown > heat[i]:
            heat[i] = 0
        else:
            heat[i] = heat[i] - cooldown
    # Step 2: heat drifts upward and diffuses
    for k in range(config.N_PIXELS - 1, 2, -1):
        heat[k] = (heat[k - 1] + heat[k - 2] + heat[k - 2]) / 3
    # Step 3: randomly ignite a new spark near the bottom of the strip
    if randint(0, 255) < Sparking:
        y = randint(0, 7)
        heat[y] = randint(160, 255)
    # Step 4: map heat values to colors and show the frame.
    # BUG FIX: removed a leftover per-pixel debug print of heat[j] that
    # flooded stdout and throttled the frame rate.
    for j in range(config.N_PIXELS):
        r[j], g[j], b[j] = setPixelHeatColor(heat[j])
    led.pixels = np.array([r, g, b])
    led.update(components)
    time.sleep(SpeedDelay / 1000)
Ejemplo n.º 21
0
def microphone_update(audio_samples):
    """Audio callback (logger variant): early-return on silence, then run
    the FFT/mel pipeline and push the current effect to the LED strip.

    audio_samples: 1-D array of signed 16-bit PCM samples for this frame.
    """
    global y_roll, prev_rms, prev_exp, prev_fps_update
    # Normalize samples between 0 and 1
    y = audio_samples / 2.0**15
    # Construct a rolling window of audio samples
    y_roll[:-1] = y_roll[1:]
    y_roll[-1, :] = np.copy(y)
    y_data = np.concatenate(y_roll, axis=0).astype(np.float32)

    # Peak amplitude over the window acts as a simple noise gate
    vol = np.max(np.abs(y_data))
    if vol < config.MIN_VOLUME_THRESHOLD:
        logger.info('No audio input. Volume below threshold. Volume: %f' % vol)
        led.pixels = np.tile(0, (3, config.N_PIXELS))
        led.update()
        return
    # Transform audio input into the frequency domain
    N = len(y_data)
    N_zeros = 2**int(np.ceil(np.log2(N))) - N
    # Pad with zeros until the next power of two
    y_data *= fft_window
    y_padded = np.pad(y_data, (0, N_zeros), mode='constant')
    YS = np.abs(np.fft.rfft(y_padded)[:N // 2])
    # Construct a Mel filterbank from the FFT data
    mel = np.atleast_2d(YS).T * dsp.mel_y.T
    # Scale data to values more suitable for visualization
    # mel = np.sum(mel, axis=0)
    mel = np.sum(mel, axis=0)
    mel = mel**2.0
    # Gain normalization
    mel_gain.update(np.max(gaussian_filter1d(mel, sigma=1.0)))
    mel /= mel_gain.value
    mel = mel_smoothing.update(mel)
    # Map filterbank output onto LED strip
    output = visualization_effect(mel)
    led.pixels = output
    led.update()

    if config.DISPLAY_FPS:
        fps = frames_per_second()
        # Rate-limit FPS logging to at most once every half second
        if time.time() - 0.5 > prev_fps_update:
            prev_fps_update = time.time()
            logger.info('FPS {:.0f} / {:.0f}'.format(fps, config.FPS))
    return
Ejemplo n.º 22
0
def snow():
    """Endless falling-snow effect on the blue channel.

    Each pixel's value ramps up toward a random goal, then ramps back
    down to zero, after which a new goal may be chosen at random.
    Runs forever (blocking loop).
    """
    while True:
        for i in range(len(rain_current)):
            if rain_increasing[i]:
                # Ramp up until the goal is reached, then reverse
                if rain_current[i] >= rain_goal[i]:
                    rain_increasing[i] = False
                    rain_current[i] -= 10
                else:
                    rain_current[i] += 10
            else:
                if rain_current[i] != 0:
                    rain_current[i] -= 10
                else:
                    # Occasionally start a new flake: the % 32 test makes
                    # ignition sparse so flakes appear intermittently
                    temp = randrange(50, 200)
                    if temp % 32 == 0:
                        rain_goal[i] = temp
                        rain_increasing[i] = True
        led.pixels = np.array([r, g, rain_current])
        led.update(components)
        time.sleep(50 / 1000.0)
Ejemplo n.º 23
0
    def microphone_update(audio_samples):
        """Per-frame audio callback: gate on volume, run the FFT/mel
        pipeline, render the active effect and refresh the GUI.

        audio_samples: 1-D array of signed 16-bit PCM samples.
        """
        global y_roll
        # Normalize samples between 0 and 1
        y = audio_samples / 2.0**15
        # Construct a rolling window of audio samples
        y_roll[:-1] = y_roll[1:]
        y_roll[-1, :] = np.copy(y)
        y_data = np.concatenate(y_roll, axis=0).astype(np.float32)

        # Peak amplitude over the window acts as a simple noise gate
        vol = np.max(np.abs(y_data))
        if vol < config.MIN_VOLUME_THRESHOLD:
            print('No audio input. Volume below threshold. Volume:', vol)
            led.pixels = np.tile(0, (3, config.N_PIXELS))
            led.update()
        else:
            # Transform audio input into the frequency domain
            N = len(y_data)
            N_zeros = 2**int(np.ceil(np.log2(N))) - N
            # Pad with zeros until the next power of two
            y_data *= fft_window
            y_padded = np.pad(y_data, (0, N_zeros), mode='constant')
            YS = np.abs(np.fft.rfft(y_padded)[:N // 2])
            # Construct a Mel filterbank from the FFT data
            mel = np.atleast_2d(YS).T * dsp.mel_y.T
            # Scale data to values more suitable for visualization
            # mel = np.sum(mel, axis=0)
            mel = np.sum(mel, axis=0)
            mel = mel**2.0
            # Gain normalization
            mel_gain.update(np.max(gaussian_filter1d(mel, sigma=1.0)))
            mel /= mel_gain.value
            mel = mel_smoothing.update(mel)
            # Map filterbank output onto LED strip
            output = effect(mel)
            led.pixels = output
            led.update()
            if config.USE_GUI:
                gui.update(mel)
        if config.USE_GUI:
            app.processEvents()
def microphone_update(audio_samples):
    """Audio callback: build a rolling sample window, gate on volume, and
    drive the scrolling visualization from the mel spectrum.

    audio_samples: 1-D array of signed 16-bit PCM samples for this frame.
    """
    global samples_roll
    # Normalize samples between 0 and 1
    normalised_samples = audio_samples / 2.0**15
    # Construct a rolling window of audio samples
    samples_roll[:-1] = samples_roll[1:]
    samples_roll[-1, :] = np.copy(normalised_samples)
    sample_data = np.concatenate(samples_roll, axis=0).astype(np.float32)

    # Normalise Brightness from Amplitude
    vol = np.max(np.abs(sample_data))
    # Cancel noise when no sound is played
    if vol < 0.0001:
        vol = 0
        print('No audio input. Volume below threshold. Volume:', vol)
        led.pixels = np.tile(0, (3, config.N_PIXELS))
        led.update()
    else:
        # Transform audio input into the frequency domain
        N = len(sample_data)
        N_zeros = 2**int(np.ceil(np.log2(N))) - N # 2^11 - 1470
        # Pad with zeros until the next power of two
        sample_data *= fft_window
        sample_padded = np.pad(sample_data, (0, N_zeros), mode='constant')
        YS = np.abs(np.fft.rfft(sample_padded)[:N // 2])
        # Construct a Mel filterbank from the FFT data
        mel = np.atleast_2d(YS).T * dsp.mel_y.T
        # Scale data to values more suitable for visualization
        mel = np.sum(mel, axis=0)
        mel = mel**2.0
        # Gain normalization
        exp_filter.update(np.max(gaussian_filter1d(mel, sigma=1.0)))
        mel /= exp_filter.value
        mel = mel_smoothing.update(mel)
        # Map filterbank output onto LED strip
        output = visualize_scroll(mel)
        led.pixels = output
        led.update()
Ejemplo n.º 25
0
def visualize(y):
    """Render spectrum values *y* as RGB with per-channel blurring, then
    show the result on the LED strip and the GUI plots."""
    # Upsample to one value per pixel and scale to the 0-255 range
    y = np.copy(interpolate(y, config.N_PIXELS)) * 255.0
    # Blur the color channels with different strengths
    r = gaussian_filter1d(y, sigma=0.15)
    g = gaussian_filter1d(y, sigma=2.0)
    b = gaussian_filter1d(y, sigma=0.0)
    # Take the geometric mean of the raw and normalized histograms
    r = np.sqrt(r * normalize(r))
    g = np.sqrt(g * normalize(g))
    b = np.sqrt(b * normalize(b))
    # Update the low pass filters for each color channel
    r_filt.update(r)
    g_filt.update(g)
    b_filt.update(b)
    # Update the LED strip values
    # NOTE(review): pixels are indexed [pixel, channel] here, unlike the
    # [channel, pixel] layout used elsewhere — confirm led.pixels shape.
    led.pixels[:, 0] = r_filt.value
    led.pixels[:, 1] = g_filt.value
    led.pixels[:, 2] = b_filt.value
    # Update the GUI plots
    GUI.curve[0][0].setData(x=range(len(r_filt.value)), y=r_filt.value)
    GUI.curve[0][1].setData(x=range(len(g_filt.value)), y=g_filt.value)
    GUI.curve[0][2].setData(x=range(len(b_filt.value)), y=b_filt.value)
    led.update()
Ejemplo n.º 26
0
    def microphone_update(self, audio_samples):
        """
        Update the leds and gui with the rgb values taken from the audio
        sample.

        audio_samples: one frame of raw audio passed to audio_to_rgb().
        """

        # Convert the audio frame into pixel colors plus the mel spectrum
        led.pixels, mel = self.audio_to_rgb(audio_samples)
        led.update()
        if self.configs['use_gui']:
            # Plot filterbank output
            x = np.linspace(self.configs['min_frequency'],
                            self.configs['max_frequency'], len(mel))
            mel_curve.setData(x=x, y=self.fft_plot_filter.update(mel))
            # Plot the color channels
            r_curve.setData(y=led.pixels[0])
            g_curve.setData(y=led.pixels[1])
            b_curve.setData(y=led.pixels[2])
        if self.configs['use_gui']:
            app.processEvents()

        if self.configs['display_fps']:
            fps = frames_per_second()
            # Rate-limit FPS printing to at most once every half second
            if time.time() - 0.5 > self.prev_fps_update:
                self.prev_fps_update = time.time()
                print('FPS {:.0f} / {:.0f}'.format(fps, self.configs['fps']))
Ejemplo n.º 27
0
def radiate(beats, energy, beat_speed=.6, max_length=7, min_beats=1):
    """Spawn "beat" pulses that radiate outward from the strip center.

    beats: boolean array of per-subband beat detections.
    energy: per-subband energy used to scale pulse brightness.
    beat_speed: pulse travel speed passed to Beat.
    max_length: maximum pulse length in pixels.
    min_beats: minimum simultaneous beats required to spawn a pulse.
    """
    N_beats = len(beats[beats == True])

    if N_beats > 0 and N_beats >= min_beats:
        index_to_color = rainbow()
        # Beat properties
        beat_power = float(N_beats) / config.N_SUBBANDS
        # energy = np.copy(energy)
        # energy -= np.min(energy)
        # energy /= (np.max(energy) - np.min(energy))
        beat_brightness = np.round(256.0 / config.N_SUBBANDS)
        beat_brightness *= np.sqrt(config.N_SUBBANDS / N_beats)
        beat_brightness *= 1.3
        beat_length = int(np.sqrt(beat_power) * max_length)
        beat_length = max(beat_length, 2)
        # BUG FIX: N_PIXELS / 2 yields a float in Python 3, which is
        # rejected as an array shape — use integer division.
        beat_pixels = np.tile(0.0, (config.N_PIXELS // 2, 3))
        for i in range(len(beats)):
            if beats[i]:
                beat_color = np.round(index_to_color[i] * beat_brightness *
                                      energy[i] / 2.0)
                beat_pixels[:beat_length] += beat_color
        beat_pixels = np.clip(beat_pixels, 0.0, 255.0)
        beat = Beat(beat_pixels, beat_speed)
        radiate.beats = np.append(radiate.beats, beat)

    # Pixels that will be displayed on the LED strip (half the strip,
    # mirrored below to fill the whole strip).
    # BUG FIX: same float-shape issue as above — integer division.
    pixels = np.zeros((config.N_PIXELS // 2, 3))
    if len(radiate.beats):
        pixels += sum([b.pixels for b in radiate.beats])
    for b in radiate.beats:
        b.update_pixels()
    # Drop pulses that have finished travelling
    radiate.beats = [b for b in radiate.beats if not b.finished()]
    pixels = np.append(pixels[::-1], pixels, axis=0)
    pixels = np.clip(pixels, 0.0, 255.0)
    led.pixels = np.round(pixels).astype(int)
    led.update()
Ejemplo n.º 28
0
def led_vis2(x):
    """Peak-bloom visualization with an edge glow driven by overall energy.

    x: onset/spectrum values for this frame.
    Returns the normalized (0-1) grayscale brightness that was displayed.
    """
    # Energy relative to its running mean (only positive excess is used)
    energy = np.mean(x**.5)
    mean_energy.update(energy)
    energy = energy / mean_energy.value - 1.0
    # Exponential glow at both ends of the strip, scaled by excess energy
    edge = np.exp(-10 * np.linspace(0, 1, len(x)))
    edge = edge + edge[::-1]
    edge *= max(energy, 0)
    edge /= 2.0

    x = gaussian_filter1d(x, 3.0)
    x = update_leds_6(x)
    # Each channel gets a different bloom width/blur for color separation
    red = bloom_peaks(x**1.0, width=1, blur_factor=1.5)
    green = bloom_peaks(x**1.0, width=2, blur_factor=0.5)
    blue = bloom_peaks(x**1.0, width=1, blur_factor=0.5)
    # Set LEDs: per-channel edge weighting tints the glow toward blue
    color = np.tile(0.0, (3, config.N_PIXELS))
    color[0, :] = 1.0 * edge + red * 1.0
    color[1, :] = 1.2 * edge + green * 1.0
    color[2, :] = 1.5 * edge + blue * 1.0
    color = color.T * 255.0
    # Low-pass filter the output before display
    pixels_filt.update(color)
    led.pixels = np.round(pixels_filt.value).astype(int)
    led.update()
    return (color[:, 0] + color[:, 1] + color[:, 2]) / (3. * 255.0)
            spectrum_label.setText('Spectrum', color=inactive_color)

        def spectrum_click(x):
            """Mouse handler: select the spectrum visualization and mark
            its label as the active one."""
            global visualization_effect
            visualization_effect = visualize_spectrum
            energy_label.setText('Energy', color=inactive_color)
            scroll_label.setText('Scroll', color=inactive_color)
            spectrum_label.setText('Spectrum', color=active_color)

        # Create effect "buttons" (labels with click event)
        energy_label = pg.LabelItem('Energy')
        scroll_label = pg.LabelItem('Scroll')
        spectrum_label = pg.LabelItem('Spectrum')
        energy_label.mousePressEvent = energy_click
        scroll_label.mousePressEvent = scroll_click
        spectrum_label.mousePressEvent = spectrum_click
        energy_click(0)
        # Layout
        layout.nextRow()
        layout.addItem(freq_label, colspan=3)
        layout.nextRow()
        layout.addItem(freq_slider, colspan=3)
        layout.nextRow()
        layout.addItem(energy_label)
        layout.addItem(scroll_label)
        layout.addItem(spectrum_label)
    # Initialize LEDs
    led.update()
    # Start listening to live audio stream
    microphone.start_stream(microphone_update)
Ejemplo n.º 30
0
def microphone_update(audio_samples):
    """Process one chunk of raw microphone samples and drive the LED strip.

    Below the volume threshold, a slowly rotating color-wheel "ambient"
    animation fades in; otherwise the rolling audio window is transformed
    to a Mel spectrogram and rendered by the active visualization effect.

    Parameters
    ----------
    audio_samples : ndarray
        Raw signed 16-bit PCM samples for the current frame.
    """
    global y_roll, prev_rms, prev_exp, prev_fps_update, WheelPosition, silent_timeout, brigh
    # Normalize 16-bit samples to the range [-1, 1]
    y = audio_samples / 2.0**15
    # Construct a rolling window of audio samples (shift left, append newest)
    y_roll[:-1] = y_roll[1:]
    y_roll[-1, :] = np.copy(y)
    y_data = np.concatenate(y_roll, axis=0).astype(np.float32)
    vol = np.max(np.abs(y_data))
    if vol < config.MIN_VOLUME_THRESHOLD:
        # Silence: advance the color wheel one step per frame, wrapping at 256.
        if WheelPosition < 256:
            r_fact, g_fact, b_fact = Wheel(WheelPosition)
            WheelPosition += 1
        else:
            WheelPosition = 0
            r_fact, g_fact, b_fact = Wheel(WheelPosition)
        # Uniform strip color, scaled by the fade-in brightness (0..400).
        # Was hard-coded to 60 pixels; use config.N_PIXELS for consistency
        # with the rest of the file.
        r = np.ones((config.N_PIXELS,), dtype=int) * r_fact * brigh / 400
        g = np.ones((config.N_PIXELS,), dtype=int) * g_fact * brigh / 400
        b = np.ones((config.N_PIXELS,), dtype=int) * b_fact * brigh / 400
        led.pixels = np.array([r, g, b])
        led.update()

        # Hold dark for `silent_timeout` frames, then ramp brightness up.
        if silent_timeout > 0:
            silent_timeout -= 1
        elif brigh < 400:
            brigh += 1

    else:
        # Audio detected: reset the ambient fade state.
        silent_timeout = 40
        brigh = 0
        # Transform audio input into the frequency domain
        N = len(y_data)
        N_zeros = 2**int(np.ceil(np.log2(N))) - N
        # Window, then pad with zeros up to the next power of two
        y_data *= fft_window
        y_padded = np.pad(y_data, (0, N_zeros), mode='constant')
        YS = np.abs(np.fft.rfft(y_padded)[:N // 2])
        # Construct a Mel filterbank from the FFT data
        mel = np.atleast_2d(YS).T * dsp.mel_y.T
        # Scale data to values more suitable for visualization
        mel = np.sum(mel, axis=0)
        mel = mel**2.0
        # Gain normalization against a smoothed running maximum
        mel_gain.update(np.max(gaussian_filter1d(mel, sigma=1.0)))
        mel /= mel_gain.value
        mel = mel_smoothing.update(mel)
        # Map filterbank output onto LED strip
        output = visualization_effect(mel)
        led.pixels = output
        led.update()
        if config.USE_GUI:
            # Plot filterbank output
            x = np.linspace(config.MIN_FREQUENCY, config.MAX_FREQUENCY, len(mel))
            mel_curve.setData(x=x, y=fft_plot_filter.update(mel))
            # Plot the color channels
            r_curve.setData(y=led.pixels[0])
            g_curve.setData(y=led.pixels[1])
            b_curve.setData(y=led.pixels[2])
    if config.USE_GUI:
        app.processEvents()

    if config.DISPLAY_FPS:
        fps = frames_per_second()
        # Throttle the FPS printout to twice per second
        if time.time() - 0.5 > prev_fps_update:
            prev_fps_update = time.time()
            print('FPS {:.0f} / {:.0f}'.format(fps, config.FPS))
    # NOTE(review): removed GUI label/layout construction and a recursive
    # microphone.start_stream(microphone_update) call that had been pasted
    # into this function's DISPLAY_FPS branch — that code belongs in the
    # one-time GUI setup (see visualization_start), not in the per-frame
    # audio callback.
def visualization_start():
    """Push the initial LED state, then block on the live audio stream.

    `microphone_update` is registered as the per-frame callback for the
    microphone stream.
    """
    # Paint the strip once before any audio arrives.
    led.update()
    # Hand control to the audio loop; runs until the stream stops.
    microphone.start_stream(microphone_update)