# Audio FFT spectrum visualizer (OpenMV).
#
# Streams stereo microphone audio via an ISR callback; the helper functions
# below render the FFT magnitude spectrum and a segmented level bar onto the
# RGB565 framebuffer.

from ulab import numpy as np
from ulab import scipy as sp

SIZE = 512 // 4  # number of spectrum columns drawn (also the display height)

raw_buf = None  # latest unprocessed audio buffer handed over by the callback

# Framebuffer: SIZE x SIZE spectrum area plus a 50-px strip for level bars.
fb = image.Image(SIZE + 50, SIZE, image.RGB565, copy_to_fb=True)

audio.init(channels=2, frequency=16000, gain=24, highpass=0.9883)


def audio_callback(buf):
    # NOTE: do not call any function that allocates memory — this runs in
    # interrupt context. Binding a reference is allocation-free.
    global raw_buf
    if raw_buf is None:  # keep only one pending buffer; drop extras
        raw_buf = buf


# Start audio streaming
audio.start_streaming(audio_callback)


def draw_fft(img, fft_buf):
    """Draw the FFT magnitude spectrum of *fft_buf* as vertical lines on *img*."""
    peak = max(fft_buf)
    if peak == 0:
        # Silent input: avoid division by zero (inf/NaN would crash int() below).
        return
    fft_buf = (fft_buf / peak) * SIZE          # normalize to display height
    fft_buf = np.log10(fft_buf + 1) * 20       # compress dynamic range (dB-like)
    color = (0xFF, 0x0F, 0x00)
    for i in range(SIZE):
        img.draw_line(i, SIZE, i, SIZE - int(fft_buf[i]), color, 1)


def draw_audio_bar(img, level, offset):
    """Draw a segmented volume bar for *level* (0-100) at x = SIZE + offset on *img*.

    Fix: the original ignored the *img* parameter and drew on the global ``fb``;
    callers passing ``fb`` see identical output, but the parameter now works.
    """
    blk_size = SIZE // 10
    color = (0xFF, 0x00, 0xF0)
    blk_space = blk_size // 4
    for i in range(int(round(level / 10))):
        img.draw_rectangle(SIZE + offset,
                           SIZE - ((i + 1) * blk_size) + blk_space,
                           20, blk_size - blk_space, color, 1, True)
# Micro-speech keyword spotting (OpenMV): continuously listens for "Yes"/"No"
# with a TFLite model and blinks the green LED for "Yes", red otherwise.

import audio, time, tf, micro_speech, pyb

labels = ['Silence', 'Unknown', 'Yes', 'No']

led_red = pyb.LED(1)
led_green = pyb.LED(2)

model = tf.load('/model.tflite')
speech = micro_speech.MicroSpeech()

audio.init(channels=1, frequency=16000, gain=24, highpass=0.9883)

# Start audio streaming
audio.start_streaming(speech.audio_callback)

try:
    while True:
        # Run micro-speech without a timeout and filter detections by label index
        # (2 = "Yes", 3 = "No").
        idx = speech.listen(model, timeout=0, threshold=0.78, filter=[2, 3])
        led = led_green if idx == 2 else led_red
        print(labels[idx])
        for _ in range(4):  # blink the matching LED
            led.on()
            time.sleep_ms(25)
            led.off()
            time.sleep_ms(25)
finally:
    # Stop streaming. In the original this line sat after the infinite loop and
    # was unreachable; running it in ``finally`` guarantees the microphone is
    # released on Ctrl-C / any exception.
    audio.stop_streaming()