def partial():
    smix.add(octave_duration, partial_cached())  # Next track/partial event
    # Octave-based frequency values sequence
    scale = 2 ** line(duration, finish=True)
    partial_freq = (scale - 1) * (max_freq - min_freq) + min_freq
    # Envelope to "hide" the partial beginning/ending
    env = [k ** 2 for k in window.hamming(int(round(duration)))]
    # The generator, properly:
    for el in env * sinusoid(partial_freq) / noctaves:
        data.append(el)
        yield el
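
# A minimal sketch of what line() contributes above, assuming the AudioLazy
# semantics used throughout these snippets: line(dur, begin=0, end=1,
# finish=False) yields "dur" evenly spaced samples, so 2 ** line(...) turns a
# linear ramp into an exponential, one-octave-per-unit frequency sweep.
# The duration below is a stand-in; printed values are approximate.
from audiolazy import line

dur = 5  # stand-in duration, in samples
print(list(line(dur, 0, 1, finish=True)))       # [0.0, 0.25, 0.5, 0.75, 1.0]
print(list(2 ** line(dur, 0, 1, finish=True)))  # ~[1.0, 1.19, 1.41, 1.68, 2.0]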
def muda_filtro(self, novos_filtros, window):
    """
    Changes the applied filter, ensuring that there is no "click" when
    doing so.
    """
    #self.chamando = True
    novo_filtro = CascadeFilter(novos_filtros)
    last = self.stream.last
    self.stream.limit(0).append(line(self.release, last, 0))
    self.stream = ChangeableStream(novo_filtro(self.input))
    self.stream.last = last
    self.streamix.add(0, self.stream)
def on_key_down(evt):
    # Ignores key up if it came together with a key down (debounce)
    global has_after
    if has_after:
        tk.after_cancel(has_after)
        has_after = None
    ch = evt.char
    if ch not in cstreams and ch in notes:
        # Prepares the synth
        freq = notes[ch]
        cs = ChangeableStream(level)
        env = line(attack, 0, level).append(cs)
        snd = env * synth(freq * Hz)
        # Mix it, storing the ChangeableStream to be changed afterwards
        cstreams[ch] = cs
        smix.add(0, snd)
        gain = remain * pamp
        out.add(dur / copies, sig * gain)
        remain -= gain
    return out


#
# Audio mixture
#
tracks = 3  # besides unpitched track
dur_note = 120 * ms
dur_perc = 100 * ms
smix = Streamix()

# Pitched tracks based on a 1:2 triangular wave
table = TableLookup(line(100, -1, 1).append(line(200, 1, -1)).take(inf))
for track in xrange(tracks):
    env = adsr(dur_note, a=20 * ms, d=10 * ms, s=0.8, r=30 * ms) / 1.7 / tracks
    smix.add(0, geometric_delay(new_note_track(env, table), 80 * ms, 2))

# Unpitched tracks
pfuncs = [unpitched_low] * 4 + [unpitched_high]
snd = chain.from_iterable(choice(pfuncs)(dur_perc, randint(0, 1))
                          for unused in zeros())
smix.add(0, geometric_delay(snd * (1 - 1 / 1.7), 20 * ms, 1))

#
# Finishes (save in a wave file)
#
data = lowpass(5000 * Hz)(smix).limit(180 * s)
fname = "audiolazy_save_and_memoize_synth.wav"
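
# A small sketch unpacking the TableLookup argument above, under the same
# AudioLazy semantics: the period is 100 samples ramping -1 -> 1 followed by
# 200 samples back down (a 1:2 asymmetric triangle), and take(inf) simply
# materializes that finite Stream as a list. Printed endpoints are approximate.
from audiolazy import inf, line

period = line(100, -1, 1).append(line(200, 1, -1)).take(inf)
print(len(period))                         # 300 samples per cycle
print(period[0], period[100], period[-1])  # about -1, 1, -1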
def on_key_up_process(evt):
    ch = evt.char
    if ch in cstreams:
        cstreams[ch].limit(0).append(line(release, level, 0))
        del cstreams[ch]
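
# The key-down/key-up handlers above avoid clicks by ramping the level with
# line() instead of switching it instantly (muda_filtro does the same for the
# filter output). A self-contained sketch of that fade-in / hold / fade-out
# shape, with the rate and durations below being stand-in values:
from audiolazy import line, ones, sHz

rate = 44100                   # stand-in sample rate
s, Hz = sHz(rate)
ms = 1e-3 * s                  # samples per millisecond
attack, sustain, release = int(30 * ms), int(200 * ms), int(50 * ms)
env = line(attack, 0, 1).append(ones(sustain)).append(line(release, 1, 0))
samples = list(env)            # finite: ramps up to 1, holds, ramps back down
print(len(samples), samples[0], max(samples), samples[-1])  # ends near zero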
def update_data():
    with AudioIO(api=api) as rec:
        for el in rec.record(rate=rate):
            data.append(el)
            if update_data.finish:
                break


# Creates the data updater thread
update_data.finish = False
th = threading.Thread(target=update_data)
th.start()  # Already start updating data

# Plot setup
fig = plt.figure("AudioLazy in a Matplotlib animation", facecolor='#cccccc')

time_values = np.array(list(line(length, -length / ms, 0)))
time_ax = plt.subplot(2, 1, 1,
                      xlim=(time_values[0], time_values[-1]),
                      ylim=(-1., 1.), axisbg="black")
time_ax.set_xlabel("Time (ms)")
time_plot_line = time_ax.plot([], [], linewidth=2, color="#00aaff")[0]

dft_max_min, dft_max_max = .01, 1.
freq_values = np.array(line(length, 0, 2 * pi / Hz).take(length // 2 + 1))
freq_ax = plt.subplot(2, 1, 2,
                      xlim=(freq_values[0], freq_values[-1]),
                      ylim=(0., .5 * (dft_max_max + dft_max_min)),
                      axisbg="black")
freq_ax.set_xlabel("Frequency (Hz)")
freq_plot_line = freq_ax.plot([], [], linewidth=2, color="#00aaff")[0]
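
# A sketch of how the time axis above comes out, with stand-in rate and DFT
# length (the example defines its own): ms is the number of samples in one
# millisecond, so -length / ms is minus the plotted window duration in ms.
from audiolazy import line, sHz

rate = 44100
s, Hz = sHz(rate)
ms = 1e-3 * s                  # samples per millisecond (44.1 here)
length = 2 ** 12               # stand-in DFT / window size
t = list(line(length, -length / ms, 0))
print(len(t), t[0], t[-1])     # 4096 values, from about -92.9 ms up to ~0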
def run(self):
    self.start()

    # Set up some variables preserved between search iterations
    whistle = []     # This whistle so far
    logWhistle = []
    whistleLen = 0   # The whistle's length, in seconds
    lastTime = time.clock()
    tonic = 0  # Freq, in Hz, of the tonic note off of which other whistles will be judged

    try:  # Catch ctrl-c nicely. May no longer be needed with rospy.
        while not rospy.is_shutdown():
            # TODO: make sure same self.data is not processed twice
            array_data = np.array(self.data)
            spectrum = np.abs(np.fft.rfft(array_data * self.wnd)) / length
            freqs = np.array(line(length, 0, 2 * pi / Hz).take(length // 2 + 1))
            # The first freq is 0, the last freq value is half of rate, and
            # there are length/2 + 1 of them (so the index of the last is
            # length/2). So, each freq is about rate/length * its index (+/- 1 ?)

            lower_bound = 400  # Lowest freq (Hz) to look for
            lower_bound_i = int((lower_bound * length) / rate)
            spectrum = spectrum[lower_bound_i:]
            freqs = freqs[lower_bound_i:]

            kernel = (-0.1, -0.2, 0.6, -0.2, -0.1)
            # Trim so that the elements still correspond to the freqs
            d = np.convolve(spectrum, kernel)[2:-2]
            maxi = np.argmax(d)
            freq = freqs[maxi]
            sharpness = d[maxi]
            amplitude = spectrum[maxi]

            # Do timing stuff just before we record the time, to prevent off-by-1ish
            thisTime = time.clock()
            timeSinceLast = thisTime - lastTime
            lastTime = thisTime

            if self.minAmp < amplitude and self.minSharp < sharpness:
                whistle += [freq]
                logWhistle += [np.log(freq)]
                whistleLen += timeSinceLast
                # Take the log mean to find the central frequency
                whistleAvg = np.exp(np.mean(logWhistle))

                if self.tonicResetLen <= whistleLen:
                    if np.var(logWhistle) < self.maxVariance:
                        tonic = whistleAvg
                        rospy.loginfo("Reset tonic to " + str(tonic) + " Hz")
                    else:
                        rospy.loginfo("Variance too high")
                else:
                    if self.slopeAvgLen + 1 <= len(whistle):
                        diffs = np.subtract(logWhistle[-self.slopeAvgLen:],
                                            logWhistle[-self.slopeAvgLen - 1:-1])
                        if np.sqrt(np.mean(np.square(diffs))) <= self.maxSlope:
                            pass
                        else:
                            if self.minWhistleLen <= whistleLen < self.tonicResetLen:
                                cmd = self.processWhistle(whistle, whistleLen,
                                                          tonic, whistleAvg)
                                if cmd:
                                    #print('cmd', cmd)
                                    rospy.loginfo("Sent voice command: \"" + cmd + "\"")
                                    self.pub.publish(cmd)
                                    self.group = []
                                else:
                                    rospy.logerr("Failed to parse whistles.")
                            whistle = []
                            logWhistle = []
                            whistleLen = 0
            else:  # if amplitude or sharpness is too low
                pass  # TODO: What is this doing here?
                # if self.minWhistleLen <= whistleLen < self.tonicResetLen:
                #     processWhistle(whistle, whistleLen, tonic)
                whistle = []
                logWhistle = []
                whistleLen = 0

            # Try to keep times as close to self.searchFreq as possible
            if timeSinceLast < (1 / self.searchFreq):
                time.sleep((1 / self.searchFreq) - timeSinceLast)
    except KeyboardInterrupt:
        pass

    self.stop()
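
# The bin-frequency comment above can be checked directly: sHz gives
# Hz == 2 * pi / rate, so 2 * pi / Hz is just the rate, and line() spans
# 0 .. rate in "length" steps; keeping the first length // 2 + 1 values gives
# the rfft bin centres, matching NumPy's rfftfreq. Rate and length below are
# stand-in values.
import numpy as np
from math import pi
from audiolazy import line, sHz

rate = 44100
s, Hz = sHz(rate)
length = 2 ** 12
freqs = np.array(line(length, 0, 2 * pi / Hz).take(length // 2 + 1))
assert np.allclose(freqs, np.fft.rfftfreq(length, d=1.0 / rate))
print(freqs[0], freqs[1], freqs[-1])  # 0.0, rate / length, rate / 2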
"""
Plots ISO/FDIS 226:2003 equal loudness contour curves

This is based on figure A.1 of ISO226, and needs Scipy and Matplotlib
"""
from __future__ import division
from audiolazy import exp, line, ln, phon2dB, xrange
import pylab

title = "ISO226 equal loudness curves"
freqs = list(exp(line(2048, ln(20), ln(12500), finish=True)))
pylab.figure(title, figsize=[8, 4.5], dpi=120)

# Plots threshold
freq2dB_threshold = phon2dB.iso226(None)  # Threshold
pylab.plot(freqs, freq2dB_threshold(freqs), color="blue", linestyle="--")
pylab.text(300, 5, "Hearing threshold", fontsize=8,
           horizontalalignment="right")

# Plots 20 to 80 phons
for loudness in xrange(20, 81, 10):  # in phons
    freq2dB = phon2dB.iso226(loudness)
    pylab.plot(freqs, freq2dB(freqs), color="black")
    pylab.text(850, loudness + 2, "%d phon" % loudness, fontsize=8,
               horizontalalignment="center")
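
# One detail worth noting in the script above: exp(line(2048, ln(20),
# ln(12500), finish=True)) walks linearly in log-frequency, i.e. it builds a
# geometrically spaced axis from 20 Hz to 12.5 kHz. A sketch of the
# equivalence, assuming a NumPy recent enough to provide geomspace:
import numpy as np
from audiolazy import exp, line, ln

freqs = list(exp(line(2048, ln(20), ln(12500), finish=True)))
assert np.allclose(freqs, np.geomspace(20, 12500, 2048))
print(freqs[0], freqs[-1])  # 20.0 and 12500.0, up to rounding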
def scalloping_loss(wnd):
    """ Positive number with the scalloping loss in dB. """
    return -dB20(abs(sum(wnd * cexp(line(len(wnd), 0, -1j * pi)))) / sum(wnd))
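
# A hedged usage sketch for scalloping_loss, reusing AudioLazy's window
# StrategyDict as the other snippets here do (the imports of the original
# script -- dB20, cexp, line -- are assumed to be in scope). The result is the
# worst-case level drop for a tone falling halfway between DFT bins; for a
# Hamming window the commonly quoted figure is roughly 1.8 dB.
from audiolazy import window

wnd = window.hamming(1024)
print(scalloping_loss(wnd))  # expected to be about 1.8 (dB)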
"ol75": overlap_correlation(wnd_full, .25 * full_size) * 100, "ol50": overlap_correlation(wnd_full, .5 * full_size) * 100, } table.append([to_string(wnd_data[k]) for k in schema]) wnd_symm = wnd + [wnd[0]] full_spectrum = np.hstack([spectrum[::-1], spectrum[1:-1]]) - spectrum[0] smallest_peak_idx = min(get_peaks(spectrum), key=spectrum.__getitem__) ymin = (spectrum[smallest_peak_idx] - spectrum[0] - 5) // 10 * 10 fig, (time_ax, freq_ax) = plt.subplots(2, 1, num=name) time_ax.vlines(np.arange(- size // 2, size // 2 + 1), 0, wnd_symm) time_ax.set(xlim=(-(size // 2), size // 2), ylim=(-.1, 1.1), xlabel="Time (samples)", title=name) freq_ax.plot(list(line(full_size, -1, 1)), full_spectrum) freq_ax.set(xlim=(-1, 1), ylim=(ymin, 0), ylabel="dB", xlabel="Frequency (% of the Nyquist frequency)") fig.tight_layout() # Prints the table and other text contents print(__doc__) print(""" Schema ------ """) for row in rst_table([(v, schema_full[k]) for k, v in iteritems(schema)], ["Column", "Description"]): print(row) print(""" Windows and Figures of Merit