def player(self, value):
  """Property setter for the output audio player.

  Besides storing *value*, this bootstraps the FM synthesis graph
  (volume, carrier-frequency and modulator-frequency ControlStream
  instances) and immediately starts the playing thread.

  Parameters
  ----------
  value :
    The player object (presumably an AudioIO instance — confirm with
    the caller); must provide a ``play(stream)`` method.
  """
  # Also initialize playing thread
  self._player = value
  self.volume_ctrl = ControlStream(.2)    # Linear gain, starts at 0.2
  self.carrier_ctrl = ControlStream(220)  # Carrier frequency control
  self.mod_ctrl = ControlStream(440)      # Modulator frequency control
  # Phase modulation: the carrier's phase is itself a sinusoid
  sound = sinusoid(freq=self.carrier_ctrl * Hz,
                   phase=sinusoid(self.mod_ctrl * Hz)
                  ) * self.volume_ctrl
  # BUG FIX: the original called ``player.play(sound)``, but ``player``
  # is not a local name here (the property object at class scope is not
  # visible from inside a method body), so that either raises NameError
  # or silently resolves to an unrelated module-level global. Use the
  # player this setter just stored instead.
  self.playing_thread = self._player.play(sound)
def __init__(self, parent):
  """Build the shaped, borderless, always-on-top frame.

  Sets up the paint/erase event handlers, the periodic draw timer and
  the rotation-angle stream consumed by the drawing code.
  """
  # Compose the frame style flag by flag
  style_flags = wx.FRAME_SHAPED         # Allows wx.SetShape
  style_flags |= wx.FRAME_NO_TASKBAR
  style_flags |= wx.STAY_ON_TOP
  style_flags |= wx.NO_BORDER
  super(McFMFrame, self).__init__(parent, style=style_flags)
  # Swallow background-erase events to avoid flicker
  self.Bind(wx.EVT_ERASE_BACKGROUND, lambda evt: None)
  # Zeroed cached sizes guarantee update_sizes runs on the very first
  # on_paint call
  self._paint_width, self._paint_height = 0, 0
  self.ClientSize = (FIRST_WIDTH, FIRST_HEIGHT)
  self.Bind(wx.EVT_PAINT, self.on_paint)
  # Periodic redraw: timer event -> on_draw_timer
  self._draw_timer = wx.Timer(self)
  self.Bind(wx.EVT_TIMER, self.on_draw_timer, self._draw_timer)
  self.on_draw_timer()  # Draw once right away, before the timer fires
  # Endless stream of rotation angles, wrapping at a full turn
  self.angstep = ControlStream(pi / 90)
  self.rotstream = modulo_counter(modulo=2 * pi, step=self.angstep)
  self.rotation_data = iter(self.rotstream)
}

# Initialization
rate = 44100
s, Hz = sHz(rate)  # Unit multipliers: seconds -> samples, hertz -> rad/sample
inertia_dur = 1 * s
# Moving-average filter used to smooth (de-click) every parameter change
inertia_filter = maverage(rint(inertia_dur))

api = sys.argv[1] if sys.argv[1:] else None # Choose API via command-line
chunks.size = 1 if api == "jack" else 16
# NOTE(review): ``api`` is parsed above but never forwarded to AudioIO();
# elsewhere in this project the call is AudioIO(api=api) — confirm whether
# that was intended here as well.
with AudioIO() as player:
  first_coeffs = formants[vowels[0]]

  # These are signals to be changed during the synthesis
  f1 = ControlStream(first_coeffs[0] * Hz)
  f2 = ControlStream(first_coeffs[1] * Hz)
  gain = ControlStream(0) # For fading in

  # Creates the playing signal
  # Two resonators in cascade; the smoothed control streams are skipped by
  # inertia_dur, presumably to discard the filters' warm-up transient —
  # TODO confirm against maverage/skip semantics.
  filt = CascadeFilter([
    resonator.z_exp(inertia_filter(f1).skip(inertia_dur), 400 * Hz),
    resonator.z_exp(inertia_filter(f2).skip(inertia_dur), 2000 * Hz),
  ])
  sig = filt((saw_table)(100 * Hz)) * inertia_filter(gain)

  th = player.play(sig)

  # Step through the vowels, retargeting the formant control streams.
  # (Loop body continues beyond this chunk.)
  for vowel in vowels:
    coeffs = formants[vowel]
    print("Now playing: ", vowel)
    f1.value = coeffs[0] * Hz
  # Formant table tail: vowel -> [F1, F2] center frequencies in hertz
  # (dict literal opens before this chunk)
  "a": [850, 1610],
  "æ": [820, 1530],
  "ɑ": [750, 940],
  "ɒ": [700, 760],
  "ʌ": [600, 1170],
  "ɔ": [500, 700],
  "ɤ": [460, 1310],
  "o": [360, 640],
  "ɯ": [300, 1390],
  "u": [250, 595],
}

# Half-second moving average to smooth (de-click) control-value changes
inertia_filter = maverage(rint(.5 * s))

with AudioIO() as player:
  # Formant frequency controls; updated per vowel in the loop below.
  # NOTE(review): f2 starts at ``pi`` (Nyquist in rad/sample) while f1
  # starts at 0 — confirm these initial values are intentional.
  f1, f2 = ControlStream(0), ControlStream(pi)
  gain = ControlStream(0)
  # Two resonators in cascade, one per formant
  filt = CascadeFilter([
    resonator.z_exp(inertia_filter(f1), 400 * Hz),
    resonator.z_exp(inertia_filter(f2), 2000 * Hz),
  ])
  # Sawtooth excitation shaped by the formant filter, with smoothed gain
  sig = filt((saw_table)(100 * Hz)) * inertia_filter(gain)
  player.play(sig)

  # Step through the vowels, retargeting both formant controls.
  # (Loop body continues beyond this chunk.)
  vowels = "aɛiɒu"
  for vowel in vowels:
    coeffs = formants[vowel]
    print("Now playing: ", vowel)
    f1.value = coeffs[0] * Hz
    f2.value = coeffs[1] * Hz
# + 1.16e-3 * z ** -3 + 2.90e-4 * z ** -4) / # (1 - 3.26 * z ** -1 + 4.04 * z ** -2 # - 2.25 * z ** -3 + .474 * z ** -4)) wp = np.array([freq + tol, 2 * freq - tol]) # Bandpass range in rad/sample ws = np.array([freq - tol, 2 * freq + tol]) # Bandstop range in rad/sample order, new_wp_divpi = buttord(wp / pi, ws / pi, gpass=dB10(.6), gstop=dB10(.4)) ssfilt = butter(order, new_wp_divpi, btype="bandpass") filt_high = ZFilter(ssfilt[0].tolist(), ssfilt[1].tolist()) ## Likewise, using the equation directly this one would be: #filt_high = ((2.13e-3 * (1 - z ** -6) - 6.39e-3 * (z ** -2 - z ** -4)) / # (1 - 4.99173 * z ** -1 + 10.7810 * z ** -2 - 12.8597 * z ** -3 # + 8.93092 * z ** -4 - 3.42634 * z ** -5 + .569237 * z ** -6)) gain_low = ControlStream(0) gain_high = ControlStream(0) low = filt_low(white_noise()) high = filt_high(white_noise()) low /= 2 * max(low.take(2000)) high /= 2 * max(high.take(2000)) api = sys.argv[1] if sys.argv[1:] else None # Choose API via command-line chunks.size = 1 if api == "jack" else 16 with AudioIO(api=api) as player: player.play(low * gain_low + high * gain_high) gain_low.value = 1 while True: gain_high.value = 0 sleep(1)