def demo_song():
    synth = WaveSynth()
    notes = {note: key_freq(49 + i) for i, note in enumerate(
        ['A', 'A#', 'B', 'C', 'C#', 'D', 'D#', 'E', 'F', 'F#', 'G', 'G#'])}
    tempo = 0.3

    def instrument(freq, duration):
        harmonics = [(1, 1), (2, 1 / 2), (4, 1 / 4), (6, 1 / 6)]
        a = synth.harmonics(freq, duration, harmonics)
        return a.envelope(0.05, 0.2, 0.8, 0.5)

    print("Synthesizing tones...")
    quarter_notes = {note: instrument(notes[note], tempo) for note in notes}
    half_notes = {note: instrument(notes[note], tempo * 2) for note in notes}
    full_notes = {note: instrument(notes[note], tempo * 4) for note in notes}
    song = "A A B. A D. C#.. ; A A B. A E. D.. ; A A A. F#.. D C#.. B ; G G F#.. D E D ; ; "\
           "A A B. A D C#.. ; A A B. A E D. ; A A A. F#.. D C#.. B ; G G F#.. D E D ; ; "
    with Output(synth.samplerate, synth.samplewidth, 1) as out:
        for note in song.split():
            if note == ";":
                print()
                time.sleep(tempo * 2)
                continue
            print(note, end=" ", flush=True)
            if note.endswith(".."):
                sample = full_notes[note[:-2]]
            elif note.endswith("."):
                sample = half_notes[note[:-1]]
            else:
                sample = quarter_notes[note]
            out.play_sample(sample)
        print()
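# The song demo above relies on key_freq() to map piano key numbers to
# frequencies. A minimal stand-in for that mapping, assuming standard 12-tone
# equal temperament with piano key 49 tuned to A4 = 440 Hz (the library's own
# key_freq may differ in details):
def key_freq_sketch(key_number, a4=440.0):
    # each semitone multiplies the frequency by 2**(1/12); key 49 is the reference
    return a4 * 2.0 ** ((key_number - 49) / 12.0)

assert abs(key_freq_sketch(49) - 440.0) < 1e-9   # A4
assert abs(key_freq_sketch(61) - 880.0) < 1e-9   # A5, one octave up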
def play_console(filename_or_stream):
    with wave.open(filename_or_stream, 'r') as wav:
        samplewidth = wav.getsampwidth()
        samplerate = wav.getframerate()
        nchannels = wav.getnchannels()
        bar_width = 60
        update_rate = 20   # lower this if you hear the sound crackle!
        levelmeter = LevelMeter(rms_mode=False, lowest=-50.0)
        with Output(samplerate, samplewidth, nchannels, int(update_rate / 4)) as out:
            while True:
                frames = wav.readframes(samplerate // update_rate)
                if not frames:
                    break
                sample = Sample.from_raw_frames(frames, samplewidth, samplerate, nchannels)
                # 'async' is a reserved word since Python 3.7; this assumes the
                # keyword argument has been renamed accordingly in the Output API.
                out.play_sample(sample, async_=True)
                levelmeter.update(sample)
                # print the peak meter roughly halfway through the sample
                time.sleep(sample.duration * 0.4)
                levelmeter.print(bar_width)
    print("\ndone")
    input("Enter to exit:")
def main(args):
    if len(args) < 1:
        raise SystemExit("Mixes one or more audio files. Arguments: inputfile...")
    hqresample = AudiofileToWavStream.supports_hq_resample()
    if not hqresample:
        print("WARNING: ffmpeg isn't compiled with libsoxr, so hq resampling is not supported.")
    wav_streams = [AudiofileToWavStream(filename, hqresample=hqresample) for filename in args]
    with StreamMixer(wav_streams, endless=True) as mixer:
        mixed_samples = iter(mixer)
        with Output(mixer.samplerate, mixer.samplewidth, mixer.nchannels) as output:
            levelmeter = LevelMeter(rms_mode=False, lowest=-50)
            temp_stream = AudiofileToWavStream("samples/909_crash.wav", hqresample=hqresample)
            for timestamp, sample in mixed_samples:
                levelmeter.update(sample)
                output.play_sample(sample)
                time.sleep(sample.duration * 0.4)
                levelmeter.print(bar_width=60)
                if 5.0 <= timestamp <= 5.1:
                    mixer.add_stream(temp_stream)
                if 10.0 <= timestamp <= 10.1:
                    sample = Sample("samples/909_crash.wav").normalize()
                    mixer.add_sample(sample)
    print("done.")
def stereo_pan():
    synth = WaveSynth()
    # panning a stereo source:
    wave = Sample("samples/SOS 020.wav").clip(6, 12).normalize().fadein(0.5).fadeout(0.5).lock()
    osc = Sine(0.4)
    panning = wave.copy().pan(lfo=osc).fadeout(0.2)
    with Output.for_sample(panning) as out:
        out.play_sample(panning)
    # panning a generated mono source:
    fm = Sine(0.5, 0.1999, bias=0.2)
    wave = synth.triangle(220, 5, fm_lfo=fm).lock()
    osc = Sine(0.4)
    panning = wave.copy().pan(lfo=osc).fadeout(0.2)
    with Output.for_sample(panning) as out:
        out.play_sample(panning)
def demo_tones():
    synth = WaveSynth()
    with Output(nchannels=1) as out:
        for wave in [synth.square_h, synth.square, synth.sine, synth.triangle,
                     synth.sawtooth, synth.sawtooth_h]:
            print(wave.__name__)
            for note, freq in list(notes[4].items())[6:]:
                print("   {:f} hz".format(freq))
                sample = wave(freq, duration=0.4).fadein(0.02).fadeout(0.1)
                out.play_sample(sample)
        print("pulse")
        for note, freq in list(notes[4].items())[6:]:
            print("   {:f} hz".format(freq))
            sample = synth.pulse(freq, duration=0.4, pulsewidth=0.1).fadein(0.02).fadeout(0.1)
            out.play_sample(sample)
        print("harmonics (only odd)")
        for note, freq in list(notes[3].items())[6:]:
            print("   {:f} hz".format(freq))
            # harmonic numbers 1, 3, 5, 7, 9 with amplitude 1/n
            harmonics = [(n, 1 / n) for n in range(1, 5 * 2, 2)]
            sample = synth.harmonics(freq, 0.4, harmonics).fadein(0.02).fadeout(0.1)
            out.play_sample(sample)
        print("noise")
        sample = synth.white_noise(duration=1.5).fadein(0.1).fadeout(0.1)
        out.play_sample(sample)
def bells():
    def makebell(freq):
        synth = WaveSynth()
        duration = 2
        divider = 2.2823535
        fm = Triangle(freq / divider, amplitude=0.5)
        s = synth.sine(freq, duration, fm_lfo=fm)
        # apply an ADSR envelope that resembles a bell's amplitude curve,
        # see http://www.hibberts.co.uk/make.htm
        s.envelope(0, duration * 0.25, .5, duration * 0.75)
        s.echo(2, 5, 0.06, 0.6)
        return s.make_32bit(False)

    b_l1 = makebell(key_freq(56))
    b_l2 = makebell(key_freq(60))
    b_h1 = makebell(key_freq(78)).amplify(0.7)
    b_h2 = makebell(key_freq(82)).amplify(0.7)
    b_h3 = makebell(key_freq(84)).amplify(0.7)
    bells = b_l1.mix_at(1.0, b_h1)
    bells.mix_at(1.5, b_h2)
    bells.mix_at(2, b_h3)
    bells.mix_at(3, b_l2)
    bells.mix_at(4, b_h2)
    bells.mix_at(4.5, b_h3)
    bells.mix_at(5, b_h1)
    bells.make_16bit()
    with Output.for_sample(bells) as out:
        out.play_sample(bells)
def envelope():
    from matplotlib import pyplot as plot
    synth = WaveSynth()
    freq = 440
    s = synth.triangle(freq, duration=1)
    s.envelope(0.05, 0.1, 0.6, 0.4)
    plot.title("ADSR envelope")
    plot.plot(s.get_frame_array())
    plot.show()
    with Output(nchannels=1) as out:
        out.play_sample(s)
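# The envelope(attack, decay, sustain_level, release) call above shapes the
# sample's amplitude through the classic ADSR stages. A minimal sketch of such
# a gain curve, assuming linear segments (the library's own envelope
# implementation may shape the segments differently):
def adsr_gain(t, attack, decay, sustain_level, release, total_duration):
    # gain at time t (seconds) for a note lasting total_duration seconds
    sustain_end = total_duration - release
    if t < attack:
        return t / attack                      # attack: rise from 0 to 1
    if t < attack + decay:
        frac = (t - attack) / decay            # decay: fall from 1 to sustain level
        return 1.0 - frac * (1.0 - sustain_level)
    if t < sustain_end:
        return sustain_level                   # sustain: hold
    if t < total_duration:
        frac = (t - sustain_end) / release     # release: fall from sustain level to 0
        return sustain_level * (1.0 - frac)
    return 0.0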
def modulate_amp():
    from matplotlib import pyplot as plot
    synth = WaveSynth()
    freq = 220
    s1 = synth.triangle(freq, duration=2)
    m = synth.sine(2, duration=2, amplitude=0.4, bias=0.5)
    s1.modulate_amp(m)
    plot.title("Amplitude modulation by another waveform")
    plot.plot(s1.get_frame_array())
    plot.show()
    with Output(nchannels=1) as out:
        out.play_sample(s1)
    s1 = synth.triangle(freq, duration=2)
    m = Sine(3, amplitude=0.4, bias=0.5)
    s1.modulate_amp(m)
    plot.title("Amplitude modulation by an oscillator")
    plot.plot(s1.get_frame_array())
    plot.show()
    with Output(nchannels=1) as out:
        out.play_sample(s1)
def echo_sample():
    synth = WaveSynth(samplerate=22050)
    lfo = Linear(1, -0.0001, min_value=-99999)
    s = synth.pulse(220, .5, fm_lfo=lfo).fadeout(.2)
    with Output(s.samplerate, s.samplewidth, s.nchannels) as out:
        e = s.copy().echo(1, 4, 0.5, 0.4)     # echo
        out.play_sample(e)
        e = s.copy().echo(1, 30, 0.15, 0.5)   # simple "reverberation" (simulated using fast echos)
        out.play_sample(e)
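# Conceptually, the echo effect above mixes progressively delayed, progressively
# quieter copies of the source back onto itself. A standalone sketch of that
# idea over a list of float samples; illustrative only, Sample.echo's exact
# signature and behavior may differ:
def echo_sketch(samples, samplerate, amount, delay, decay):
    # mix 'amount' delayed copies, each 'delay' seconds apart,
    # attenuated by a factor of 'decay' per copy
    delay_frames = int(delay * samplerate)
    out = list(samples) + [0.0] * (delay_frames * amount)
    gain = 1.0
    for n in range(1, amount + 1):
        gain *= decay
        offset = n * delay_frames
        for i, v in enumerate(samples):
            out[offset + i] += v * gain
    return out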
def echo_lfo():
    synth = WaveSynth(22050)
    s = Sine(440, amplitude=25000, samplerate=synth.samplerate)
    s = EnvelopeFilter(s, .2, .2, 0, 0, 1.5, stop_at_end=True)
    s = EchoFilter(s, .15, 5, 0.3, 0.6)
    s = ClipFilter(s, -32000, 32000)
    frames = [int(v) for v in s]
    import matplotlib.pyplot as plot
    plot.plot(frames)
    plot.show()
    samp = Sample.from_array(frames, synth.samplerate, 1)
    with Output.for_sample(samp) as out:
        out.play_sample(samp)
def vibrato():
    synth = WaveSynth()
    duration = 3

    def make_sample(freq):
        fmfm = Linear(0, 0.002, max_value=99999)
        fm = Sine(0.05, amplitude=0.5, fm_lfo=fmfm)
        s1 = synth.sawtooth(freq, duration, amplitude=0.6, fm_lfo=fm)
        s1.envelope(0.01, 0.1, 0.6, 2)
        return s1

    with Output(synth.samplerate, nchannels=1) as out:
        for f in [220, 330, 440]:
            sample = make_sample(f)
            out.play_sample(sample)
def chords():
    synth = WaveSynth()
    with Output(nchannels=1) as out:
        for rootnote in octave_notes:
            chord_keys = major_chord_keys(rootnote, 4)
            print("chord", rootnote,
                  ["{0} {1}".format(note, octave) for note, octave in chord_keys])
            freqs = [notes[octave][key] for key, octave in chord_keys]
            for i in range(1, len(freqs)):
                assert freqs[i] > freqs[i - 1]
            samples = [synth.sine(freq, 1.5, amplitude=0.333) for freq in freqs]
            s = samples[0].mix(samples[1]).mix(samples[2]).fadein(0.1).fadeout(0.1)
            out.play_sample(s)
def pwm():
    from matplotlib import pyplot as plot
    synth = WaveSynth(samplerate=1000)
    pwm_lfo = Sine(0.05, amplitude=0.49, bias=0.5, samplerate=synth.samplerate)
    s1 = synth.pulse(4, amplitude=0.6, duration=20, pwm_lfo=pwm_lfo)
    plot.figure(figsize=(16, 4))
    plot.title("Pulse width modulation")
    plot.ylim([-35000, 35000])
    plot.plot(s1.get_frame_array())
    plot.show()
    with Output(nchannels=1) as out:
        synth = WaveSynth()
        lfo2 = Sine(0.2, amplitude=0.48, bias=0.5)
        s1 = synth.pulse(440 / 6, amplitude=0.5, duration=6, fm_lfo=None, pwm_lfo=lfo2)
        out.play_sample(s1)
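# The pwm() demo sweeps the duty cycle of a pulse wave with a slow sine LFO;
# the 0.5 bias plus an amplitude just under 0.5 keeps the duty cycle inside the
# valid 0..1 range. The same idea stripped to plain Python, for illustration:
import math

def pwm_sketch(freq, duration, samplerate, lfo_freq=0.05):
    # pulse wave whose duty cycle follows a slow sine LFO
    out = []
    for n in range(int(duration * samplerate)):
        t = n / samplerate
        duty = 0.5 + 0.49 * math.sin(2 * math.pi * lfo_freq * t)
        phase = (freq * t) % 1.0
        out.append(1.0 if phase < duty else -1.0)
    return out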
def fm():
    synth = WaveSynth(samplerate=8000)
    from matplotlib import pyplot as plot
    freq = 2000
    lfo1 = Sine(1, amplitude=0.4, samplerate=synth.samplerate)
    s1 = synth.sine(freq, duration=3, fm_lfo=lfo1)
    plot.title("Spectrogram")
    plot.ylabel("Freq")
    plot.xlabel("Time")
    plot.specgram(s1.get_frame_array(), Fs=synth.samplerate, noverlap=90, cmap=plot.cm.gist_heat)
    plot.show()
    with Output(nchannels=1, samplerate=22050) as out:
        synth = WaveSynth(samplerate=22050)
        freq = 440
        lfo1 = Linear(5, samplerate=synth.samplerate)
        lfo1 = EnvelopeFilter(lfo1, 1, 0.5, 0.5, 0.5, 1)
        s1 = synth.sine(freq, duration=3, fm_lfo=lfo1)
        s_all = s1.copy()
        out.play_sample(s1)
        lfo1 = Sine(1, amplitude=0.2, samplerate=synth.samplerate)
        s1 = synth.sine(freq, duration=2, fm_lfo=lfo1)
        s_all.join(s1)
        out.play_sample(s1)
        lfo1 = Sine(freq / 17, amplitude=0.5, samplerate=synth.samplerate)
        s1 = synth.sine(freq, duration=2, fm_lfo=lfo1)
        s_all.join(s1)
        out.play_sample(s1)
        lfo1 = Sine(freq / 6, amplitude=0.5, samplerate=synth.samplerate)
        s1 = synth.sine(freq, duration=2, fm_lfo=lfo1)
        s_all.join(s1)
        out.play_sample(s1)
        lfo1 = Sine(1, amplitude=0.4, samplerate=synth.samplerate)
        s1 = synth.triangle(freq, duration=2, fm_lfo=lfo1)
        s_all.join(s1)
        out.play_sample(s1)
        freq = 440 * 2
        lfo1 = Sine(freq / 80, amplitude=0.4, samplerate=synth.samplerate)
        s1 = synth.triangle(freq, duration=2, fm_lfo=lfo1)
        s_all.join(s1)
        out.play_sample(s1)
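# The frequency modulation used throughout fm() can be written in
# phase-modulation form: the carrier's phase is offset by a slower sine.
# A standalone sketch of the concept, not the library's fm_lfo mechanism:
import math

def fm_sine_sketch(carrier_freq, mod_freq, mod_index, duration, samplerate):
    # y(t) = sin(2*pi*fc*t + I*sin(2*pi*fm*t))
    out = []
    for n in range(int(duration * samplerate)):
        t = n / samplerate
        out.append(math.sin(2 * math.pi * carrier_freq * t
                            + mod_index * math.sin(2 * math.pi * mod_freq * t)))
    return out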
def main(track_file, outputfile=None, interactive=False):
    discard_unused = not interactive
    if interactive:
        repl = Repl(discard_unused_instruments=discard_unused)
        repl.do_load(track_file)
        repl.cmdloop("Interactive Samplebox session. Type 'help' for help on commands.")
    else:
        song = Song()
        song.read(track_file, discard_unused_instruments=discard_unused)
        with Output() as out:
            if out.supports_streaming:
                # mix and stream the output to the speakers in real time
                print("Mixing and streaming to speakers...")
                out.play_samples(song.mix_generator(), False)
                print("\r ")
            else:
                # output can't stream; fall back to mixing everything to a wav file first
                print("(Sorry, streaming audio is not possible, perhaps because you don't have pyaudio installed?)")
                song.mix(outputfile)
                mix = Sample(wave_file=outputfile)
                print("Playing sound...")
                out.play_sample(mix)
class SynthGUI(tk.Frame):
    def __init__(self, master=None):
        super().__init__(master)
        self.master.title("Synthesizer")
        self.keypress_sema = Semaphore(1)
        self.osc_frame = tk.Frame(self)
        self.oscillators = []
        self.piano_frame = tk.Frame(self)
        self.piano = PianoKeyboardGUI(self.piano_frame, self)
        self.piano.pack(side=tk.BOTTOM)
        filter_frame = tk.LabelFrame(self, text="Filters etc.", padx=10, pady=10)
        self.envelope_filters = [
            EnvelopeFilterGUI(filter_frame, "1", self),
            EnvelopeFilterGUI(filter_frame, "2", self),
            EnvelopeFilterGUI(filter_frame, "3", self)]
        self.echo_filter = EchoFilterGUI(filter_frame, self)
        for ev in self.envelope_filters:
            ev.pack(side=tk.LEFT, anchor=tk.N)
        self.arp_filter = ArpeggioFilterGUI(filter_frame, self)
        self.arp_filter.pack(side=tk.LEFT, anchor=tk.N)
        f = tk.Frame(filter_frame)
        self.tremolo_filter = TremoloFilterGUI(f, self)
        self.tremolo_filter.pack(side=tk.TOP)
        lf = tk.LabelFrame(f, text="A4 tuning")
        lf.pack(pady=(4, 0))
        lf = tk.LabelFrame(f, text="Performance")
        self.samplerate_choice = tk.IntVar()
        self.samplerate_choice.set(44100)
        tk.Label(lf, text="Samplerate:").pack(anchor=tk.W)
        subf = tk.Frame(lf)
        tk.Radiobutton(subf, variable=self.samplerate_choice, value=44100, text="44.1 kHz",
                       pady=0, command=self.create_synth).pack(side=tk.LEFT)
        tk.Radiobutton(subf, variable=self.samplerate_choice, value=22050, text="22 kHz",
                       pady=0, command=self.create_synth).pack(side=tk.LEFT)
        subf.pack()
        tk.Label(lf, text="Piano key response:").pack(anchor=tk.W)
        subf = tk.Frame(lf)
        self.rendering_choice = tk.StringVar()
        self.rendering_choice.set("realtime")
        tk.Radiobutton(subf, variable=self.rendering_choice, value="realtime", text="realtime",
                       pady=0).pack(side=tk.LEFT)
        tk.Radiobutton(subf, variable=self.rendering_choice, value="render", text="render",
                       pady=0).pack(side=tk.LEFT)
        subf.pack()
        lf.pack(pady=(4, 0))
        f.pack(side=tk.LEFT, anchor=tk.N)
        self.echo_filter.pack(side=tk.LEFT, anchor=tk.N)
        misc_frame = tk.Frame(filter_frame, padx=10)
        tk.Label(misc_frame, text="To Speaker:").pack(pady=(5, 0))
        self.to_speaker_lb = tk.Listbox(misc_frame, width=8, height=5,
                                        selectmode=tk.MULTIPLE, exportselection=0)
        self.to_speaker_lb.pack()
        lf = tk.LabelFrame(misc_frame, text="A4 tuning")
        self.a4_choice = tk.IntVar()
        self.a4_choice.set(440)
        tk.Radiobutton(lf, variable=self.a4_choice, value=440, text="440 Hz", pady=0).pack()
        tk.Radiobutton(lf, variable=self.a4_choice, value=432, text="432 Hz", pady=0).pack()
        lf.pack(pady=(4, 0))
        tk.Button(misc_frame, text="Load preset", command=self.load_preset).pack()
        tk.Button(misc_frame, text="Save preset", command=self.save_preset).pack()
        for _ in range(5):
            self.add_osc_to_gui()
        self.to_speaker_lb.select_set(4)
        self.osc_frame.pack(side=tk.TOP, padx=10)
        filter_frame.pack(side=tk.TOP)
        misc_frame.pack(side=tk.RIGHT, anchor=tk.N)
        self.piano_frame.pack(side=tk.TOP, padx=10, pady=10)
        self.statusbar = tk.Label(self, text="<status>", relief=tk.RIDGE)
        self.statusbar.pack(side=tk.BOTTOM, fill=tk.X)
        self.pack()
        self.synth = self.output = None
        self.create_synth()
        self.playing_note = False
        self.current_note = None
        self.echos_ending_time = 0
        self.arpeggio_playing = False

    def create_synth(self):
        samplerate = self.samplerate_choice.get()
        self.synth = WaveSynth(samplewidth=2, samplerate=samplerate)
        if self.output is not None:
            self.output.close()
        self.output = Output(self.synth.samplerate, self.synth.samplewidth, 1, queuesize=2)

    def add_osc_to_gui(self):
        osc_nr = len(self.oscillators)
        fm_sources = ["osc " + str(n + 1) for n in range(osc_nr)]
        osc_pane = OscillatorGUI(self.osc_frame, self, "Oscillator " + str(osc_nr + 1),
                                 fm_sources=fm_sources, pwm_sources=fm_sources)
        osc_pane.pack(side=tk.LEFT, anchor=tk.N, padx=10, pady=10)
        self.oscillators.append(osc_pane)
        self.to_speaker_lb.insert(tk.END, "osc " + str(osc_nr + 1))

    def create_osc(self, from_gui, all_oscillators, is_audio=False):
        def create_unfiltered_osc():
            def create_chord_osc(clazz, **arguments):
                if is_audio and self.arp_filter.input_mode.get().startswith("chords"):
                    chord_keys = major_chord_keys(self.current_note[0], self.current_note[1])
                    if self.arp_filter.input_mode.get() == "chords3":
                        chord_keys = list(chord_keys)[:-1]
                    a4freq = self.a4_choice.get()
                    chord_freqs = [note_freq(note, octave, a4freq) for note, octave in chord_keys]
                    self.statusbar["text"] = "major chord: " + " ".join(note for note, octave in chord_keys)
                    oscillators = []
                    arguments["amplitude"] /= len(chord_freqs)
                    for f in chord_freqs:
                        arguments["frequency"] = f
                        oscillators.append(clazz(**arguments))
                    return MixingFilter(*oscillators)
                else:
                    # no chord (or an LFO instead of an audio oscillator):
                    # return one osc for just the given frequency
                    return clazz(**arguments)

            waveform = from_gui.input_waveformtype.get()
            amp = from_gui.input_amp.get()
            bias = from_gui.input_bias.get()
            if waveform == "noise":
                return WhiteNoise(amplitude=amp, bias=bias, samplerate=self.synth.samplerate)
            elif waveform == "linear":
                startlevel = from_gui.input_lin_start.get()
                increment = from_gui.input_lin_increment.get()
                minvalue = from_gui.input_lin_min.get()
                maxvalue = from_gui.input_lin_max.get()
                return Linear(startlevel, increment, minvalue, maxvalue)
            else:
                freq = from_gui.input_freq.get()
                phase = from_gui.input_phase.get()
                pw = from_gui.input_pw.get()
                fm_choice = from_gui.input_fm.get()
                pwm_choice = from_gui.input_pwm.get()
                if fm_choice in (None, "", "<none>"):
                    fm = None
                elif fm_choice.startswith("osc"):
                    osc_num = int(fm_choice.split()[1])
                    fm = self.create_osc(all_oscillators[osc_num - 1], all_oscillators)
                else:
                    raise ValueError("invalid fm choice")
                if pwm_choice in (None, "", "<none>"):
                    pwm = None
                elif pwm_choice.startswith("osc"):
                    osc_num = int(pwm_choice.split()[1])
                    pwm = self.create_osc(all_oscillators[osc_num - 1], all_oscillators)
                else:
                    raise ValueError("invalid pwm choice")
                if waveform == "pulse":
                    return create_chord_osc(Pulse, frequency=freq, amplitude=amp, phase=phase,
                                            bias=bias, pulsewidth=pw, fm_lfo=fm, pwm_lfo=pwm,
                                            samplerate=self.synth.samplerate)
                elif waveform == "harmonics":
                    harmonics = self.parse_harmonics(from_gui.harmonics_text.get(1.0, tk.END))
                    return create_chord_osc(Harmonics, frequency=freq, harmonics=harmonics,
                                            amplitude=amp, phase=phase, bias=bias, fm_lfo=fm,
                                            samplerate=self.synth.samplerate)
                else:
                    o = {
                        "sine": Sine,
                        "triangle": Triangle,
                        "sawtooth": Sawtooth,
                        "sawtooth_h": SawtoothH,
                        "square": Square,
                        "square_h": SquareH,
                    }[waveform]
                    return create_chord_osc(o, frequency=freq, amplitude=amp, phase=phase,
                                            bias=bias, fm_lfo=fm, samplerate=self.synth.samplerate)

        def envelope(osc, envelope_gui):
            adsr_src = envelope_gui.input_source.get()
            if adsr_src not in (None, "", "<none>"):
                osc_num = int(adsr_src.split()[1])
                if from_gui is self.oscillators[osc_num - 1]:
                    return envelope_gui.filter(osc)
            return osc

        osc = create_unfiltered_osc()
        for ev in self.envelope_filters:
            osc = envelope(osc, ev)
        return osc

    def parse_harmonics(self, harmonics):
        parsed = []
        for harmonic in harmonics.split():
            num, frac = harmonic.split(",")
            num = int(num)
            if '/' in frac:
                numerator, denominator = frac.split("/")
            else:
                numerator, denominator = frac, 1
            frac = float(numerator) / float(denominator)
            parsed.append((num, frac))
        return parsed

    def do_play(self, osc):
        if osc.input_waveformtype.get() == "linear":
            self.statusbar["text"] = "cannot output linear osc to speakers"
            return
        duration = 1
        osc.set_title_status("TO SPEAKER")
        osc.after(duration * 1000, lambda: osc.set_title_status(None))
        o = self.create_osc(osc, all_oscillators=self.oscillators, is_audio=True)
        o = self.apply_filters(o)
        sample = self.generate_sample(iter(o), 1)
        with Output(self.synth.samplerate, self.synth.samplewidth, 1) as out:   # mono
            # 'async' is a reserved word since Python 3.7; this assumes the
            # keyword argument has been renamed accordingly in the Output API.
            out.play_sample(sample, async_=True)

    def do_plot(self, osc):
        if not plot:
            self.statusbar["text"] = "Cannot plot! To plot things, you need to have matplotlib installed!"
            return
        o = self.create_osc(osc, all_oscillators=self.oscillators)
        o = iter(o)
        frames = [next(o) for _ in range(self.synth.samplerate)]
        plot.figure(figsize=(16, 4))
        plot.title("Waveform")
        plot.plot(frames)
        plot.show()
        # @todo properly integrate matplotlib in the tkinter gui because the above causes gui freeze problems
        # see http://matplotlib.org/examples/user_interfaces/embedding_in_tk2.html

    def generate_sample(self, oscillator, duration, use_fade=False):
        o = oscillator   # iter(oscillator)
        scale = 2 ** (8 * self.synth.samplewidth - 1)
        try:
            frames = [int(next(o) * scale) for _ in range(int(self.synth.samplerate * duration))]
        except StopIteration:
            return None
        else:
            sample = Sample.from_array(frames, self.synth.samplerate, 1)
            if use_fade:
                sample.fadein(0.05).fadeout(0.1)
            return sample

    def continue_play_note(self, oscillator, first=True):
        if self.echos_ending_time and time.time() >= self.echos_ending_time:
            self.stop_playing_note()
            return
        if not self.playing_note:
            sample = self.generate_sample(oscillator, 0.1)
            if sample:
                sample.fadeout(0.1)
                if sample.samplewidth != self.synth.samplewidth:
                    print("16 bit overflow!")   # XXX
                    sample.make_16bit()
                self.output.play_sample(sample, async_=True)
            return
        if first:
            sample = self.generate_sample(oscillator, 0.1)
            if sample:
                sample.fadein(0.05)
                if sample.samplewidth != self.synth.samplewidth:
                    print("16 bit overflow!")   # XXX
                    sample.make_16bit()
                self.output.play_sample(sample, async_=True)
        sample = self.generate_sample(oscillator, 0.1)
        if sample:
            if sample.samplewidth != self.synth.samplewidth:
                print("16 bit overflow!")   # XXX
                sample.make_16bit()
            self.output.play_sample(sample, async_=True)
            self.after_idle(lambda: self.continue_play_note(oscillator, False))

    def render_and_play_note(self, oscillator, max_duration=4):
        duration = 0
        for ev in self.envelope_filters:
            duration = max(duration, ev.duration)
        if duration == 0:
            duration = max_duration
        duration = min(duration, max_duration)
        sample = self.generate_sample(oscillator, duration)
        if sample:
            sample.fadein(0.05).fadeout(0.05)
            if sample.samplewidth != self.synth.samplewidth:
                print("16 bit overflow!")   # XXX
                sample.make_16bit()
            self.output.play_sample(sample, async_=True)

    def stop_playing_note(self):
        self.playing_note = False
        to_speaker = [self.oscillators[i] for i in self.to_speaker_lb.curselection()]
        for osc in to_speaker:
            osc.set_title_status(None)
        self.keypress_sema.release()

    def pressed(self, event, note, octave, released=False):
        if self.arpeggio_playing:
            if not released:
                # arp still playing... stop it
                self.arpeggio_playing = False
            return
        a4freq = self.a4_choice.get()
        if self.arp_filter.input_mode.get().startswith("arp"):
            if released:
                self._pressed([0, 1, 2], released=True)
                return
            chord_keys = major_chord_keys(note, octave)
            if self.arp_filter.input_mode.get() == "arpeggio3":
                chord_keys = list(chord_keys)[:-1]
            chord_freqs = [note_freq(note, octave, a4freq) for note, octave in chord_keys]
            self.statusbar["text"] = "arpeggio: " + " ".join(note for note, octave in chord_keys)
            self.arpeggio_playing = True
            self._pressed(chord_freqs)
        else:
            self.statusbar["text"] = "ok"
            freq = note_freq(note, octave, a4freq)
            self.current_note = (note, octave, freq)
            self._pressed(freq, released)

    def _pressed(self, freqs, released=False):
        # freqs can be a single frequency or a sequence of freqs (ARP)
        if isinstance(freqs, (tuple, list)):
            freq = freqs[0]
            arpeggio = True
        else:
            freq = freqs
            arpeggio = False
        if arpeggio and not self.arpeggio_playing:
            # stop the running arp cycle
            return
        to_speaker = [self.oscillators[i] for i in self.to_speaker_lb.curselection()]
        if not to_speaker:
            self.statusbar["text"] = "No oscillators connected to speaker output!"
            return
        if not arpeggio:
            if released:
                # only stop the sound immediately if no echo filter is enabled
                if not self.echos_ending_time:
                    self.stop_playing_note()
                return
            if not self.keypress_sema.acquire(blocking=False):
                self.statusbar["text"] = "can't play new note - previous one still playing (monophonic, sorry)"
                return
        for osc in self.oscillators:
            if osc.input_freq_keys.get():
                osc.input_freq.set(freq * osc.input_freq_keys_ratio.get())
        for osc in to_speaker:
            if osc.input_waveformtype.get() == "linear":
                self.statusbar["text"] = "cannot output linear osc to speakers"
                return
            else:
                osc.set_title_status("TO SPEAKER")
        oscs = [self.create_osc(osc, self.oscillators, is_audio=True) for osc in to_speaker]
        mixed_osc = MixingFilter(*oscs) if len(oscs) > 1 else oscs[0]
        if not arpeggio:
            # at this time you can't use filters when using arpeggio
            mixed_osc = self.apply_filters(mixed_osc)
        current_echos_duration = getattr(mixed_osc, "echo_duration", 0)
        if current_echos_duration > 0:
            self.echos_ending_time = time.time() + current_echos_duration
        else:
            self.echos_ending_time = 0
        self.playing_note = True
        if arpeggio:
            # cycle the arp notes
            freqs.append(freqs[0])
            freqs = freqs[1:]
            rate = self.arp_filter.input_rate.get()
            duration = rate * self.arp_filter.input_ratio.get() / 100.0
            self.after_idle(lambda: self.render_and_play_note(iter(mixed_osc), max_duration=duration))
            self.after(int(rate * 1000 * 0.95), lambda: self._pressed(freqs))
        else:
            # normal, single note
            self.output.wipe_queue()
            if self.rendering_choice.get() == "render":
                self.statusbar["text"] = "rendering note sample..."
                self.after_idle(lambda: self.render_and_play_note(iter(mixed_osc)))
                self.after_idle(lambda: self.stop_playing_note())
            else:
                self.after_idle(lambda: self.continue_play_note(iter(mixed_osc)))

    def apply_filters(self, output_oscillator):
        output_oscillator = self.tremolo_filter.filter(output_oscillator)
        output_oscillator = self.echo_filter.filter(output_oscillator)
        return output_oscillator

    def load_preset(self):
        file = askopenfile(filetypes=[("Synth presets", "*.ini")])
        cf = ConfigParser()
        cf.read_file(file)
        file.close()
        # general settings
        self.samplerate_choice.set(cf["settings"]["samplerate"])
        self.rendering_choice.set(cf["settings"]["rendering"])
        self.a4_choice.set(cf["settings"]["a4tuning"])
        self.to_speaker_lb.selection_clear(0, tk.END)
        to_speaker = cf["settings"]["to_speaker"]
        to_speaker = tuple(to_speaker.split(','))
        for o in to_speaker:
            self.to_speaker_lb.selection_set(int(o) - 1)
        for section in cf.sections():
            if section.startswith("oscillator"):
                num = int(section.split('_')[1]) - 1
                osc = self.oscillators[num]
                for name, value in cf[section].items():
                    getattr(osc, name).set(value)
                osc.waveform_selected()
            elif section.startswith("envelope"):
                num = int(section.split('_')[1]) - 1
                env = self.envelope_filters[num]
                for name, value in cf[section].items():
                    getattr(env, name).set(value)
            elif section == "arpeggio":
                for name, value in cf[section].items():
                    getattr(self.arp_filter, name).set(value)
            elif section == "tremolo":
                for name, value in cf[section].items():
                    getattr(self.tremolo_filter, name).set(value)
            elif section == "echo":
                for name, value in cf[section].items():
                    getattr(self.echo_filter, name).set(value)
        self.statusbar["text"] = "preset loaded."

    def save_preset(self):
        file = asksaveasfile(filetypes=[("Synth presets", "*.ini")])
        cf = ConfigParser(dict_type=collections.OrderedDict)
        # general settings
        cf.add_section("settings")
        cf["settings"]["samplerate"] = str(self.samplerate_choice.get())
        cf["settings"]["rendering"] = self.rendering_choice.get()
        cf["settings"]["to_speaker"] = ",".join(str(v + 1) for v in self.to_speaker_lb.curselection())
        cf["settings"]["a4tuning"] = str(self.a4_choice.get())
        # oscillators
        for num, osc in enumerate(self.oscillators, 1):
            section = "oscillator_" + str(num)
            cf.add_section(section)
            for name, var in vars(osc).items():
                if name.startswith("input_"):
                    cf[section][name] = str(var.get())
        # adsr envelopes
        for num, env_filter in enumerate(self.envelope_filters, 1):
            section = "envelope_" + str(num)
            cf.add_section(section)
            for name, var in vars(env_filter).items():
                if name.startswith("input_"):
                    cf[section][name] = str(var.get())
        # echo
        cf.add_section("echo")
        for name, var in vars(self.echo_filter).items():
            if name.startswith("input_"):
                cf["echo"][name] = str(var.get())
        # tremolo
        cf.add_section("tremolo")
        for name, var in vars(self.tremolo_filter).items():
            if name.startswith("input_"):
                cf["tremolo"][name] = str(var.get())
        # arpeggio
        cf.add_section("arpeggio")
        for name, var in vars(self.arp_filter).items():
            if name.startswith("input_"):
                cf["arpeggio"][name] = str(var.get())
        cf.write(file)
        file.close()
def a440():
    synth = WaveSynth(samplerate=44100, samplewidth=4)
    a440 = synth.sine(440, duration=3)
    with Output.for_sample(a440) as out:
        out.play_sample(a440)
class Player:
    update_rate = 50         # 50 ms = 20 updates/sec
    levelmeter_lowest = -40  # dB
    xfade_duration = 7
    async_buffers = 2

    def __init__(self, app, trackframes):
        self.app = app
        self.trackframes = trackframes
        self.app.after(self.update_rate, self.tick)
        self.stopping = False
        self.mixer = StreamMixer([], endless=True)
        self.output = Output(self.mixer.samplerate, self.mixer.samplewidth,
                             self.mixer.nchannels, queuesize=self.async_buffers)
        self.mixed_samples = iter(self.mixer)
        self.levelmeter = LevelMeter(rms_mode=False, lowest=self.levelmeter_lowest)
        for tf in self.trackframes:
            tf.player = self
        player_thread = Thread(target=self._play_sample_in_thread, name="jukebox_sampleplayer")
        player_thread.daemon = True
        player_thread.start()

    def skip(self, trackframe):
        if trackframe.state != TrackFrame.state_needtrack and trackframe.stream:
            trackframe.stream.close()
            trackframe.stream = None
        trackframe.display_track(None, None, None, "(next track...)")
        trackframe.state = TrackFrame.state_switching

    def stop(self):
        self.stopping = True
        for tf in self.trackframes:
            if tf.stream:
                tf.stream.close()
                tf.stream = None
            tf.state = TrackFrame.state_needtrack
        self.mixer.close()
        self.output.close()

    def tick(self):
        # the actual decoding and sound playing is done in a background thread
        self._levelmeter()
        self._load_song()
        self._play_song()
        self._crossfade()
        if not self.stopping:
            self.app.after(self.update_rate, self.tick)

    def _play_sample_in_thread(self):
        """
        This is run in a background thread to avoid GUI interactions interfering with audio output.
        """
        while True:
            if self.stopping:
                break
            _, sample = next(self.mixed_samples)
            if sample and sample.duration > 0:
                # 'async' is a reserved word since Python 3.7; this assumes the
                # keyword argument has been renamed accordingly in the Output API.
                self.output.play_sample(sample, async_=True)
                self.levelmeter.update(sample)   # will be updated from the gui thread
            else:
                self.levelmeter.reset()
                time.sleep(self.update_rate / 1000 * 2)   # avoid hogging the cpu while no samples are played

    def _levelmeter(self):
        self.app.update_levels(self.levelmeter.level_left, self.levelmeter.level_right)

    def _load_song(self):
        if self.stopping:
            return   # make sure we don't load new songs when the player is shutting down
        for tf in self.trackframes:
            if tf.state == TrackFrame.state_needtrack:
                track = self.app.pop_playlist_track()
                if track:
                    tf.track = track
                    tf.state = TrackFrame.state_idle

    def _play_song(self):
        def start_stream(tf, filename, volume):
            def _start_from_thread():
                # start loading the track from a thread to avoid gui stutters when loading takes a bit of time
                tf.stream = AudiofileToWavStream(filename, hqresample=hqresample)
                self.mixer.add_stream(tf.stream, [tf.volumefilter])
                tf.playback_started = datetime.datetime.now()
                tf.state = TrackFrame.state_playing
                tf.volume = volume
            tf.state = TrackFrame.state_loading
            Thread(target=_start_from_thread, name="stream_loader").start()

        for tf in self.trackframes:
            if tf.state == TrackFrame.state_playing:
                remaining = tf.track_duration - (datetime.datetime.now() - tf.playback_started)
                remaining = remaining.total_seconds()
                tf.time = datetime.timedelta(seconds=math.ceil(remaining))
                if tf.stream.closed and tf.time.total_seconds() <= 0:
                    self.skip(tf)   # stream ended!
            elif tf.state == TrackFrame.state_idle:
                if tf.xfade_state == TrackFrame.state_xfade_fadingin:
                    # if we're set to fading in, regardless of other tracks, we start playing as well
                    start_stream(tf, tf.track["location"], 0)
                elif not any(tf for tf in self.trackframes
                             if tf.state in (TrackFrame.state_playing, TrackFrame.state_loading)):
                    # if there is no other track currently playing (or loading), it's our turn!
                    start_stream(tf, tf.track["location"], 100)
            elif tf.state == TrackFrame.state_switching:
                tf.state = TrackFrame.state_needtrack

    def _crossfade(self):
        for tf in self.trackframes:
            # nearing the end of the track? then start a fade out
            if tf.state == TrackFrame.state_playing \
                    and tf.xfade_state == TrackFrame.state_xfade_nofade \
                    and tf.time.total_seconds() <= self.xfade_duration:
                tf.xfade_state = TrackFrame.state_xfade_fadingout
                tf.xfade_started = datetime.datetime.now()
                tf.xfade_start_volume = tf.volume
                # fade in the first other track that is currently idle
                for other_tf in self.trackframes:
                    if tf is not other_tf and other_tf.state == TrackFrame.state_idle:
                        other_tf.xfade_state = TrackFrame.state_xfade_fadingin
                        other_tf.xfade_started = datetime.datetime.now()
                        other_tf.xfade_start_volume = 0
                        other_tf.volume = 0
                        break
        for tf in self.trackframes:
            if tf.xfade_state == TrackFrame.state_xfade_fadingin:
                # fading in, slide volume up from 0 to 100%
                volume = 100 * (datetime.datetime.now() - tf.xfade_started).total_seconds() / self.xfade_duration
                tf.volume = min(volume, 100)
                if volume >= 100:
                    tf.xfade_state = TrackFrame.state_xfade_nofade   # fade reached the end
            elif tf.xfade_state == TrackFrame.state_xfade_fadingout:
                # fading out, slide volume down from what it was at to 0%
                fade_progress = (datetime.datetime.now() - tf.xfade_started)
                fade_progress = (self.xfade_duration - fade_progress.total_seconds()) / self.xfade_duration
                volume = max(0, tf.xfade_start_volume * fade_progress)
                tf.volume = max(volume, 0)
                if volume <= 0:
                    tf.xfade_state = TrackFrame.state_xfade_nofade   # fade reached the end

    def play_sample(self, sample):
        def unmute(trf, vol):
            if trf:
                trf.volume = vol
        if sample and sample.duration > 0:
            for tf in self.trackframes:
                if tf.state == TrackFrame.state_playing:
                    old_volume = tf.mute_volume(40)
                    self.mixer.add_sample(sample, lambda mtf=tf, vol=old_volume: unmute(mtf, vol))
                    break
            else:
                self.mixer.add_sample(sample)
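# Both fade directions in _crossfade() above are linear in wall-clock time:
# the incoming track ramps from 0 to 100 over xfade_duration seconds while the
# outgoing one ramps from its starting volume down to 0. The volume math,
# isolated for clarity (illustrative helper, not part of the Player class):
def xfade_volumes(elapsed, xfade_duration, out_start_volume):
    progress = min(max(elapsed / xfade_duration, 0.0), 1.0)
    fade_in = 100.0 * progress                      # incoming track: 0 -> 100
    fade_out = out_start_volume * (1.0 - progress)  # outgoing track: start -> 0
    return fade_in, fade_out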
class LevelGUI(tk.Frame):
    def __init__(self, audio_source, master=None):
        self.lowest_level = -50
        super().__init__(master)
        self.master.title("Levels")
        self.pbvar_left = tk.IntVar()
        self.pbvar_right = tk.IntVar()
        pbstyle = ttk.Style()
        pbstyle.theme_use("classic")
        pbstyle.configure("green.Vertical.TProgressbar", troughcolor="gray", background="light green")
        pbstyle.configure("yellow.Vertical.TProgressbar", troughcolor="gray", background="yellow")
        pbstyle.configure("red.Vertical.TProgressbar", troughcolor="gray", background="orange")
        frame = tk.LabelFrame(self, text="Left")
        frame.pack(side=tk.LEFT)
        tk.Label(frame, text="dB").pack()
        self.pb_left = ttk.Progressbar(frame, orient=tk.VERTICAL, length=300,
                                       maximum=-self.lowest_level, variable=self.pbvar_left,
                                       mode='determinate', style='yellow.Vertical.TProgressbar')
        self.pb_left.pack()
        frame = tk.LabelFrame(self, text="Right")
        frame.pack(side=tk.LEFT)
        tk.Label(frame, text="dB").pack()
        self.pb_right = ttk.Progressbar(frame, orient=tk.VERTICAL, length=300,
                                        maximum=-self.lowest_level, variable=self.pbvar_right,
                                        mode='determinate', style='yellow.Vertical.TProgressbar')
        self.pb_right.pack()
        frame = tk.LabelFrame(self, text="Info")
        self.info = tk.Label(frame, text="", justify=tk.LEFT)
        frame.pack()
        self.info.pack(side=tk.TOP)
        self.pack()
        self.update_rate = 19   # lower this if you hear the sound crackle!
        self.open_audio_file(audio_source)
        self.after_idle(self.update)

    def open_audio_file(self, filename_or_stream):
        self.wave = wave.open(filename_or_stream, 'r')
        self.samplewidth = self.wave.getsampwidth()
        self.samplerate = self.wave.getframerate()
        self.nchannels = self.wave.getnchannels()
        self.levelmeter = LevelMeter(rms_mode=False, lowest=self.lowest_level)
        self.audio_out = Output(self.samplerate, self.samplewidth, self.nchannels, int(self.update_rate / 4))
        filename = filename_or_stream if isinstance(filename_or_stream, str) else "<stream>"
        info = "Source:\n{}\n\nRate: {:g} kHz\nBits: {}\nChannels: {}".format(
            filename, self.samplerate / 1000, 8 * self.samplewidth, self.nchannels)
        self.info.configure(text=info)

    def update(self, *args, **kwargs):
        frames = self.wave.readframes(self.samplerate // self.update_rate)
        if not frames:
            self.pbvar_left.set(0)
            self.pbvar_right.set(0)
            print("done!")
            return
        sample = Sample.from_raw_frames(frames, self.samplewidth, self.samplerate, self.nchannels)
        # 'async' is a reserved word since Python 3.7; this assumes the
        # keyword argument has been renamed accordingly in the Output API.
        self.audio_out.play_sample(sample, async_=True)
        time.sleep(sample.duration / 3)   # update the meter partway through the sample
        left, peak_l, right, peak_r = self.levelmeter.update(sample)
        self.pbvar_left.set(left - self.lowest_level)
        self.pbvar_right.set(right - self.lowest_level)
        if left > -3:
            self.pb_left.configure(style="red.Vertical.TProgressbar")
        elif left > -6:
            self.pb_left.configure(style="yellow.Vertical.TProgressbar")
        else:
            self.pb_left.configure(style="green.Vertical.TProgressbar")
        if right > -3:
            self.pb_right.configure(style="red.Vertical.TProgressbar")
        elif right > -6:
            self.pb_right.configure(style="yellow.Vertical.TProgressbar")
        else:
            self.pb_right.configure(style="green.Vertical.TProgressbar")
        self.after(self.update_rate, self.update)
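# The LevelMeter readings driving the progress bars above are peak levels in dB
# relative to full scale. The core conversion can be sketched as follows,
# assuming a list of signed integer samples (the library's LevelMeter adds
# decay, RMS mode and per-channel handling on top of this):
import math

def peak_db(samples, samplewidth, lowest=-50.0):
    # peak level in dBFS, clamped to a lower bound
    maxvalue = 2 ** (8 * samplewidth - 1) - 1
    peak = max((abs(s) for s in samples), default=0)
    if peak == 0:
        return lowest
    return max(20.0 * math.log10(peak / maxvalue), lowest)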
class Player:
    async_queue_size = 3     # larger means less chance of skips, but more latency
    update_rate = 40         # larger means less cpu usage, but more chance of skips
    levelmeter_lowest = -40  # dB

    def __init__(self, app):
        self.app = app
        self.app.after(self.update_rate, self.tick)
        self.app.firstTrackFrame.play()
        self.stopping = False
        self.mixer = StreamMixer([], endless=True)
        self.output = Output(self.mixer.samplerate, self.mixer.samplewidth,
                             self.mixer.nchannels, queuesize=self.async_queue_size)
        self.mixed_samples = iter(self.mixer)
        self.levelmeter = LevelMeter(rms_mode=False, lowest=self.levelmeter_lowest)

    def stop(self):
        self.stopping = True
        self.app.firstTrackFrame.close_stream()
        self.app.secondTrackFrame.close_stream()
        self.mixer.close()
        self.output.close()

    def switch_player(self):
        """
        The actual switching of the main track player.
        Note that it can be playing already because of the fade-in mixing.
        """
        first_is_playing = self.app.firstTrackFrame.playing
        self.app.firstTrackFrame.play(not first_is_playing)
        self.app.secondTrackFrame.play(first_is_playing)

    def tick(self):
        if self.output.queue_size() <= self.async_queue_size / 2:
            self.app.firstTrackFrame.tick(self.mixer)
            self.app.secondTrackFrame.tick(self.mixer)
            _, sample = next(self.mixed_samples)
            if sample and sample.duration > 0:
                # 'async' is a reserved word since Python 3.7; this assumes the
                # keyword argument has been renamed accordingly in the Output API.
                self.output.play_sample(sample, async_=True)
                left, _, right, _ = self.levelmeter.update(sample)
                self.app.update_levels(left, right)
            else:
                self.levelmeter.reset()
                self.app.update_levels(self.levelmeter.level_left, self.levelmeter.level_right)
        if not self.stopping:
            self.app.after(self.update_rate, self.tick)

    def play_sample(self, sample):
        if sample and sample.duration > 0:
            self.mixer.add_sample(sample)

    def start_play_other(self):
        # @todo fix the track switching and fadein/fadeout, it's a bit of a mess
        if self.app.firstTrackFrame.playing:
            other_track = self.app.secondTrackFrame
        else:
            other_track = self.app.firstTrackFrame
        other_track.start_fadein()