def color_cycle():
    """Cycle the strip through one lit color channel at a time, forever.

    Starts with only the 4th (white) channel fully on, then rolls the
    channel axis once per second so the lit channel advances through
    R -> G -> B -> W. Intended as a standalone test pattern; never returns.
    """
    # Build the initial frame directly: all channels off except white at 255.
    # (Replaces the old zeros + np.append construction; same values/dtype.)
    pixels = np.zeros(shape=(3, config.N_PIXELS))
    led.pixels = np.append(pixels, np.tile(255, (1, config.N_PIXELS)), axis=0)
    while True:
        # Shift the lit row to the next channel (wraps 3 -> 0).
        led.pixels = np.roll(led.pixels, 1, axis=0)
        led.update()
        time.sleep(1)
def microphone_update(audio_samples):
    """Process one chunk of raw microphone samples and refresh the LEDs.

    Normalizes the samples (assumed signed 16-bit PCM — TODO confirm against
    the microphone module), maintains a rolling window, gates on a minimum
    volume threshold, converts the window to a Mel-filterbank spectrum,
    feeds that to the active visualization effect, and optionally updates
    the GUI plots and prints the frame rate.
    """
    # NOTE(review): prev_rms / prev_exp are declared global but never
    # assigned here — presumably used by other (removed) code; verify.
    global y_roll, prev_rms, prev_exp, prev_fps_update
    # Normalize samples between 0 and 1
    y = audio_samples / 2.0**15
    # Construct a rolling window of audio samples
    y_roll[:-1] = y_roll[1:]
    y_roll[-1, :] = np.copy(y)
    y_data = np.concatenate(y_roll, axis=0).astype(np.float32)

    vol = np.max(np.abs(y_data))
    if vol < config.MIN_VOLUME_THRESHOLD:
        # Too quiet to visualize meaningfully: blank the strip.
        led.pixels *= 0
        led.update()
    else:
        # Transform audio input into the frequency domain
        N = len(y_data)
        N_zeros = 2**int(np.ceil(np.log2(N))) - N
        # Pad with zeros until the next power of two
        y_data *= fft_window
        y_padded = np.pad(y_data, (0, N_zeros), mode='constant')
        YS = np.abs(np.fft.rfft(y_padded)[:N // 2])
        # Construct a Mel filterbank from the FFT data
        mel = np.atleast_2d(YS).T * dsp.mel_y.T
        # Scale data to values more suitable for visualization
        mel = np.sum(mel, axis=0)
        mel = mel**2.0
        # Gain normalization
        mel_gain.update(np.max(gaussian_filter1d(mel, sigma=1.0)))
        mel /= mel_gain.value
        mel = mel_smoothing.update(mel)
        # Map filterbank output onto LED strip
        output = visualization_effect(mel)
        led.pixels = output
        led.update()
        if config.USE_GUI:
            # Plot filterbank output
            x = np.linspace(config.MIN_FREQUENCY, config.MAX_FREQUENCY, len(mel))
            mel_curve.setData(x=x, y=fft_plot_filter.update(mel))
            # Plot the color channels
            r_curve.setData(y=led.pixels[0])
            g_curve.setData(y=led.pixels[1])
            b_curve.setData(y=led.pixels[2])
    if config.USE_GUI:
        app.processEvents()

    if config.DISPLAY_FPS:
        fps = frames_per_second()
        # Throttle console output to at most twice per second.
        if time.time() - 0.5 > prev_fps_update:
            prev_fps_update = time.time()
            print('FPS {:.0f} / {:.0f}'.format(fps, config.FPS))
def single_color(color='white'):
    """Paint the whole strip with a single user-chosen color.

    Opens a Tk color-picker dialog and sets every pixel to the chosen RGB
    value (the 4th, white, channel is left off). If the dialog is
    cancelled, the strip is left unchanged.

    NOTE: the ``color`` parameter is kept for interface compatibility but
    is currently unused — the dialog result always wins.
    """
    rgb = askcolor(title="Tkinter Color Chooser")
    # askcolor returns (None, None) when the dialog is cancelled; the
    # original code would crash indexing into None here.
    if rgb is None or rgb[0] is None:
        return
    r, g, b = (int(c) for c in rgb[0])
    # One row per channel (R, G, B, W); white stays off.
    led.pixels = np.vstack([
        np.tile(r, (1, config.N_PIXELS)),
        np.tile(g, (1, config.N_PIXELS)),
        np.tile(b, (1, config.N_PIXELS)),
        np.tile(0, (1, config.N_PIXELS)),
    ])
    led.update()
    time.sleep(1)
    # Second update pushes the frame again after the pause (kept from the
    # original — presumably works around a dropped first frame; verify).
    led.update()
def roll():
    """LED strand test: chase R, G, B and W dots along the strip forever."""
    # (Removed dead local `single_color_full_bri = np.zeros` — it bound the
    # np.zeros function itself and was never used.)
    # Turn all pixels off
    led.pixels *= 0
    led.pixels[0, 0] = 255  # Set 1st pixel red
    led.pixels[1, 1] = 255  # Set 2nd pixel green
    led.pixels[2, 2] = 255  # Set 3rd pixel blue
    led.pixels[3, 3] = 255  # Set 4th pixel white
    print('Starting LED strand test')
    while True:
        # Advance every lit pixel one position along the strip (wraps).
        led.pixels = np.roll(led.pixels, 1, axis=1)
        led.update()
        time.sleep(0.01)
# NOTE(review): the four setText() calls below look like the tail of an
# effect-click handler whose `def` line is outside this chunk — they dim
# the other effect "buttons" and highlight BPM. Confirm against the full
# file; they reference labels that are (re)created just afterwards.
energy_label.setText('Energy', color=inactive_color)
scroll_label.setText('Scroll', color=inactive_color)
spectrum_label.setText('Spectrum', color=inactive_color)
bpm_label.setText('BPM', color=active_color)
# Create effect "buttons" (labels with click event)
energy_label = pg.LabelItem('Energy')
scroll_label = pg.LabelItem('Scroll')
spectrum_label = pg.LabelItem('Spectrum')
bpm_label = pg.LabelItem('BPM')
energy_label.mousePressEvent = energy_click
scroll_label.mousePressEvent = scroll_click
spectrum_label.mousePressEvent = spectrum_click
bpm_label.mousePressEvent = bpm_click
# Select the "Energy" effect by default (argument is the ignored event).
energy_click(0)
# Layout: frequency controls on their own rows, then the four effect buttons.
layout.nextRow()
layout.addItem(freq_label, colspan=4)
layout.nextRow()
layout.addItem(freq_slider, colspan=4)
layout.nextRow()
layout.addItem(energy_label)
layout.addItem(scroll_label)
layout.addItem(spectrum_label)
layout.addItem(bpm_label)
# Initialize LEDs
led.update()
# Start listening to live audio stream; drives microphone_update per chunk.
microphone.start_stream(microphone_update)
def clear_all():
    """Turn every pixel off and push the blank frame to the strip."""
    # In-place zeroing (*=) mutates the existing pixel buffer rather than
    # rebinding led.pixels to a new array.
    led.pixels *= 0
    led.update()