Example n. 1
        # (fragment: the enclosing callback's `def` line is outside this chunk)
        # Forward STFT of the current frame, then apply the frequency-domain
        # processing configured on `stft` (presumably the beamforming
        # weights — TODO confirm against the full file).
        stft.analysis(audio)
        stft.process()

        # Send audio back to the browser
        # Inverse STFT: sum the processed channels into channel 0, duplicate
        # it to channel 1, and mute any remaining channels before sending.
        audio[:, 0] = np.sum(stft.synthesis(), axis=1)
        audio[:, 1] = audio[:, 0]
        audio[:, 2:] = 0
        browserinterface.send_audio(audio)

    else:  # continue estimating covariance matrix
        bf.estimate_cov(audio)


"""Interface features"""
browserinterface.register_when_new_config(init)
browserinterface.register_handle_data(beamform_audio)
polar_chart = browserinterface.add_handler(name="Beam pattern",
                                           type='base:polar:line',
                                           parameters={
                                               'title': 'Beam pattern',
                                               'series': ['Intensity'],
                                               'numPoints': num_angles
                                           })
"""START"""
browserinterface.start()
browserinterface.change_config(buffer_frames=buffer_size,
                               channels=num_channels,
                               rate=sampling_freq,
                               volume=80)
browserinterface.loop_callbacks()
Example n. 2
        print("Did not receive expected audio!")
        return

    # apply stft and istft for a few windows

    stft.analysis(audio[:,0])
    spectrum = (20 * np.log10(np.abs(stft.X[:height])))
    spectrogram.send_data(spectrum.tolist())


    if led_ring:
        numpix = led_ring.num_pixels // 2
        ma_len = int(np.floor(spectrum.shape[0] / numpix))
        spec = np.convolve(np.ones(ma_len) / ma_len, spectrum)[ma_len//2::ma_len]
        spec2 = np.concatenate((spec[:numpix], spec[numpix::-1]))
        led_ring.lightify(vals=spec2)


"""Interface functions"""
browserinterface.register_when_new_config(init)
browserinterface.register_handle_data(handle_data)

spectrogram = browserinterface.add_handler(name="Heat Map", type='base:spectrogram',
        parameters={'width': width, 'height': height, 'min': min_val, 'max': max_val, 'delta_freq': sampling_freq / fft_size})

"""START"""
browserinterface.start()
browserinterface.change_config(channels=num_channels, buffer_frames=buffer_size, volume=80, rate=sampling_freq)
browserinterface.loop_callbacks()

Example n. 3
    # (fragment: the enclosing handler's `def` is outside this chunk)
    # Drive the LEDs from the pre-processing spectrum, then count the frame.
    light_spectrum(spectrum_before)

    frame_num += 1


"""Interface functions"""
browserinterface.register_when_new_config(init)
browserinterface.register_handle_data(handle_data)
if viz:
    time_plot = browserinterface.add_handler(
        "Time domain", 'base:graph:line', {
            'xName': 'Duration',
            'min': -1,
            'max': 1,
            'xLimitNb': (sampling_freq / under * num_sec),
            'series': [{
                'name': 'Signal',
                'color': 'blue'
            }]
        })
    c_magnitude = browserinterface.add_handler(
        "Frequency Magnitude", 'base:graph:line', {
            'min': 0,
            'max': 250,
            'xName': 'Frequency',
            'series': [{
                'name': '1'
            }, {
                'name': '2'
            }]
        if led_ring: led_ring.lightify_mono(rgb=[255, 0, 0], realtime=True)
    frame_num += 1


"""Interface features"""
browserinterface.register_when_new_config(when_config)
browserinterface.register_handle_data(apply_vad)
chart = browserinterface.add_handler(
    "Speech Detection", 'base:graph:line', {
        'xName':
        'Duration',
        'min':
        -10000,
        'max':
        10000,
        'xLimitNb': (sampling_freq / under * num_sec),
        'series': [{
            'name': 'Voice',
            'color': 'green'
        }, {
            'name': 'Unvoiced',
            'color': 'red'
        }]
    })
"""START"""
browserinterface.start()
browserinterface.change_config(channels=num_channels,
                               buffer_frames=buffer_size,
                               rate=sampling_freq,
                               volume=80)
browserinterface.loop_callbacks()