def __init__(self):
    """Set up the audio buffer, the /voice publisher, and detection parameters."""
    # Rolling sample buffer plus a Hamming window used for the FFT.
    self.data = collections.deque([0.] * length, maxlen=length)
    self.wnd = np.array(window.hamming(length))  # For FFT

    # Whistle-grouping state.  TODO: maybe break into an object.
    self.lastWhistleTime = None
    self.group = []
    self.th = None
    self.finishedUpdate = False

    # ROS publisher for detected voice/whistle events.
    self.pub = rospy.Publisher("/voice", String, queue_size=10)

    # Detection parameters.
    # TODO: expose these through rospy.get_param('~..')
    self.searchFreq = 100        # Hz; minimum time between checking for whistles
    self.minAmp = 0.0001         # minimum absolute amplitude of a whistle
    self.minSharp = 0.0001       # minimum "sharpness" of a whistle
    self.maxSlope = 0.02         # log Hz per second; above this it's two whistles
    self.slopeAvgLen = 5         # number of samples to average the slope
    self.minWhistleLen = 0.3     # seconds; shorter whistles aren't processed
                                 # (expect strange behavior if == 0)
    self.maxVariance = 0.5       # maximum frequency variance within one whistle
    self.tonicResetLen = 2       # seconds until the scale resets
    self.maxGroupSpacing = 3     # seconds between whistles in the same group
def partial():
    """One gliding partial; queues the next partial event before playing."""
    # Schedule the next track/partial event on the mixer.
    smix.add(octave_duration, partial_cached())
    # Frequency sweep: a 1-to-2 geometric (octave) ramp, rescaled onto
    # the [min_freq, max_freq] range.
    scale = 2 ** line(duration, finish=True)
    partial_freq = (scale - 1) * (max_freq - min_freq) + min_freq
    # Squared-Hamming envelope hides the partial's onset and offset clicks.
    env = [amp ** 2 for amp in window.hamming(int(round(duration)))]
    # Emit the enveloped sinusoid, mirroring each sample into ``data``.
    for sample in env * sinusoid(partial_freq) / noctaves:
        data.append(sample)
        yield sample
def partial():
    """Generate one partial sweep and schedule its successor."""
    # Next track/partial event goes on the mixer right away.
    smix.add(octave_duration, partial_cached())
    # Octave-based frequency sequence: ``line`` ramps 0..1, so ``scale``
    # ramps 1..2; shift/scale it into the desired frequency band.
    scale = 2 ** line(duration, finish=True)
    partial_freq = (scale - 1) * (max_freq - min_freq) + min_freq
    # Envelope (squared Hamming) to "hide" the partial beginning/ending.
    env = [w ** 2 for w in window.hamming(int(round(duration)))]
    # The generator proper: yield enveloped samples, copying them to ``data``.
    for el in env * sinusoid(partial_freq) / noctaves:
        data.append(el)
        yield el
from __future__ import division from audiolazy import sHz, chunks, AudioIO, line, pi, window from matplotlib import pyplot as plt from matplotlib.animation import FuncAnimation from numpy.fft import rfft import numpy as np import collections, sys, threading # AudioLazy init rate = 44100 s, Hz = sHz(rate) ms = 1e-3 * s length = 2**12 data = collections.deque([0.] * length, maxlen=length) wnd = np.array(window.hamming(length)) # For FFT api = sys.argv[1] if sys.argv[1:] else None # Choose API via command-line chunks.size = 1 if api == "jack" else 16 # Creates a data updater callback def update_data(): with AudioIO(api=api) as rec: for el in rec.record(rate=rate): data.append(el) if update_data.finish: break # Creates the data updater thread
from __future__ import division from audiolazy import sHz, chunks, AudioIO, line, pi, window from matplotlib import pyplot as plt from matplotlib.animation import FuncAnimation from numpy.fft import rfft import numpy as np import collections, sys, threading # AudioLazy init rate = 44100 s, Hz = sHz(rate) ms = 1e-3 * s length = 2 ** 12 data = collections.deque([0.] * length, maxlen=length) wnd = np.array(window.hamming(length)) # For FFT api = sys.argv[1] if sys.argv[1:] else None # Choose API via command-line chunks.size = 1 if api == "jack" else 16 # Creates a data updater callback def update_data(): with AudioIO(api=api) as rec: for el in rec.record(rate=rate): data.append(el) if update_data.finish: break # Creates the data updater thread update_data.finish = False th = threading.Thread(target=update_data)