def __init__(self, samples_to_load: Dict[str, Tuple[Union[str, Sample], int]]) -> None:
    global samples
    samples.clear()
    self.output = Output(mixing="mix")
    if any(isinstance(smp, str) for smp, _ in samples_to_load.values()):
        print("Loading sound files...")
    for name, (filename, max_simultaneously) in samples_to_load.items():
        if isinstance(filename, Sample):
            samples[name] = filename
        else:
            data = pkgutil.get_data(__name__, "sounds/" + filename)
            if data:
                tmp = tempfile.NamedTemporaryFile(delete=False, suffix=".ogg")
                try:
                    tmp.write(data)
                    tmp.close()
                    samples[name] = Sample(streaming.AudiofileToWavStream(tmp.name), name).stereo()
                finally:
                    os.remove(tmp.name)
            else:
                raise SystemExit("corrupt package; sound data is missing")
        self.output.set_sample_play_limit(name, max_simultaneously)
    print("Sound API initialized:", self.output.audio_api)
def _audio_playback(self, pcm_stream):
    # thread 3: audio playback
    levelmeter = LevelMeter()

    def played(sample):
        if self.client.stream_title != self.stream_title:
            self.stream_title = self.client.stream_title
            if self.song_title_callback:
                self.song_title_callback(self.stream_title)
            else:
                print("\n\nNew Song:", self.stream_title, "\n")
        levelmeter.update(sample)
        if self.update_ui:
            self.update_ui(levelmeter, None)
        else:
            levelmeter.print(60, True)

    with Output(mixing="sequential", frames_per_chunk=44100//4) as output:
        output.register_notify_played(played)
        while not self._stop_playback:
            try:
                audio = pcm_stream.read(44100 * 2 * 2 // 20)
                if not audio:
                    break
            except (IOError, ValueError):
                break
            else:
                if not self._stop_playback:
                    sample = Sample.from_raw_frames(audio, 2, 44100, 2)
                    output.play_sample(sample)
def demo_song():
    synth = WaveSynth()
    notes = {note: key_freq(49+i) for i, note in enumerate(
        ['A', 'A#', 'B', 'C', 'C#', 'D', 'D#', 'E', 'F', 'F#', 'G', 'G#'])}
    tempo = 0.3

    def synth_sample(freq, duration):
        harmonics = [(1, 1), (2, 1/2), (4, 1/4), (6, 1/6)]
        a = synth.harmonics(freq, duration, harmonics)
        return a.envelope(0.05, 0.2, 0.8, 0.5)

    silence = Sample.from_array([0]*int(synth.samplerate*tempo*2), synth.samplerate, numchannels=1)
    song = "A A B. A D. C#.. ; A A B. A E. D.. ; A A A. F#.. D C#.. B ; G G F#.. D E D ; ; "\
           "A A B. A D C#.. ; A A B. A E D. ; A A A. F#.. D C#.. B ; G G F#.. D E D ; ; "
    with Output(synth.samplerate, synth.samplewidth, 1, mixing="sequential", queue_size=50) as out:
        for note in song.split():
            if note == ";":
                print()
                out.play_sample(silence)
                continue
            print(note, end=" ", flush=True)
            if note.endswith(".."):
                sample = synth_sample(notes[note[:-2]], tempo*4)
            elif note.endswith("."):
                sample = synth_sample(notes[note[:-1]], tempo*2)
            else:
                sample = synth_sample(notes[note], tempo)
            out.play_sample(sample)
        print()
        out.wait_all_played()
def main(args):
    if len(args) < 1:
        raise SystemExit("Mixes one or more audio files. Arguments: inputfile...")
    hqresample = AudiofileToWavStream.supports_hq_resample()
    if not hqresample:
        print("WARNING: ffmpeg isn't compiled with libsoxr, so hq resampling is not supported.")
    # use hq resampling when it is actually available, otherwise the check above is pointless
    wav_streams = [AudiofileToWavStream(filename, hqresample=hqresample) for filename in args]
    with StreamMixer(wav_streams, endless=False) as mixer:
        mixed_samples = iter(mixer)
        with Output(mixer.samplerate, mixer.samplewidth, mixer.nchannels,
                    mixing="sequential", queue_size=50) as output:
            if not output.supports_streaming:
                raise RuntimeError("need api that supports streaming")
            levelmeter = LevelMeter(rms_mode=False, lowest=-50)

            def update_and_print_meter(sample):
                levelmeter.update(sample)
                levelmeter.print(bar_width=60)

            output.register_notify_played(update_and_print_meter)
            for timestamp, sample in mixed_samples:
                output.play_sample(sample)
            output.wait_all_played()
    print("\ndone.")
def bells():
    def makebell(freq):
        synth = WaveSynth()
        duration = 2
        divider = 2.2823535
        fm = Triangle(freq/divider, amplitude=0.5)
        s = synth.sine(freq, duration, fm_lfo=fm)
        # apply ADSR envelope that resembles bell amp curve, see http://www.hibberts.co.uk/make.htm
        s.envelope(0, duration*0.25, .5, duration*0.75)
        s.echo(2, 5, 0.06, 0.6)
        return s.make_32bit(False)

    b_l1 = makebell(key_freq(56))
    b_l2 = makebell(key_freq(60))
    b_h1 = makebell(key_freq(78)).amplify(0.7)
    b_h2 = makebell(key_freq(82)).amplify(0.7)
    b_h3 = makebell(key_freq(84)).amplify(0.7)
    bells = b_l1.mix_at(1.0, b_h1)
    bells.mix_at(1.5, b_h2)
    bells.mix_at(2, b_h3)
    bells.mix_at(3, b_l2)
    bells.mix_at(4, b_h2)
    bells.mix_at(4.5, b_h3)
    bells.mix_at(5, b_h1)
    bells.make_16bit()
    with Output.for_sample(bells) as out:
        out.play_sample(bells)
        out.wait_all_played()
def main(track_file, outputfile=None, interactive=False):
    discard_unused = not interactive
    if interactive:
        repl = Repl(discard_unused_instruments=discard_unused)
        repl.do_load(track_file)
        repl.cmdloop("Interactive Samplebox session. Type 'help' for help on commands.")
    else:
        song = Song()
        song.read(track_file, discard_unused_instruments=discard_unused)
        with Output(mixing="sequential", queue_size=1) as out:
            if out.supports_streaming:
                # mix and stream output in real time
                print("Mixing and streaming to speakers...")
                samples = out.normalized_samples(song.mix_generator())
                for s in samples:
                    out.play_sample(s)
                out.wait_all_played()
                print("\r ")
            else:
                # output can't stream, so fall back to mixing everything to a wav file first
                print("(Sorry, streaming audio is not possible, install one of the audio libraries that supports that)")
                song.mix(outputfile)
                mix = Sample(wave_file=outputfile)
                print("Playing sound...")
                out.play_sample(mix)
                out.wait_all_played()
def play_console(filename_or_stream):
    with wave.open(filename_or_stream, 'r') as wav:
        samplewidth = wav.getsampwidth()
        samplerate = wav.getframerate()
        nchannels = wav.getnchannels()
        bar_width = 60
        levelmeter = LevelMeter(rms_mode=False, lowest=-50.0)
        with Output(samplerate, samplewidth, nchannels, mixing="sequential") as out:
            print("Audio API used:", out.audio_api)
            if not out.supports_streaming:
                raise RuntimeError("need api that supports streaming")
            out.register_notify_played(levelmeter.update)
            while True:
                frames = wav.readframes(samplerate//update_rate)
                if not frames:
                    break
                sample = Sample.from_raw_frames(frames, wav.getsampwidth(), wav.getframerate(), wav.getnchannels())
                out.play_sample(sample)
                levelmeter.print(bar_width)
            while out.still_playing():
                time.sleep(1/update_rate)
                levelmeter.print(bar_width)
            out.wait_all_played()
    print("\nDone. Enter to exit:")
    input()
def stereo_pan():
    synth = WaveSynth()
    # panning a stereo source:
    wave = Sample("samples/SOS 020.wav").clip(6, 12).normalize().fadein(0.5).fadeout(0.5).lock()
    osc = Sine(0.4)
    panning = wave.copy().pan(lfo=osc).fadeout(0.2)
    with Output.for_sample(panning) as out:
        out.play_sample(panning)
        out.wait_all_played()
    # panning a generated mono source:
    fm = Sine(0.5, 0.1999, bias=0.2)
    wave = synth.triangle(220, 5, fm_lfo=fm).lock()
    osc = Sine(0.4)
    panning = wave.copy().pan(lfo=osc).fadeout(0.2)
    with Output.for_sample(panning) as out:
        out.play_sample(panning)
        out.wait_all_played()
def echo_sample():
    synth = WaveSynth(samplerate=44100)
    lfo = Linear(1, -0.0001, min_value=-99999)
    s = synth.pulse(220, .5, fm_lfo=lfo).fadeout(.2)
    with Output(s.samplerate, s.samplewidth, s.nchannels) as out:
        e = s.copy().echo(1, 4, 0.5, 0.4)    # echo
        out.play_sample(e)
        e = s.copy().echo(1, 30, 0.15, 0.5)  # simple "reverberation" (simulated using fast echos)
        out.play_sample(e)
        out.wait_all_played()
def envelope():
    from matplotlib import pyplot as plot
    synth = WaveSynth()
    freq = 440
    s = synth.triangle(freq, duration=1)
    s.envelope(0.05, 0.1, 0.6, 0.4)
    plot.title("ADSR envelope")
    plot.plot(s.get_frame_array())
    plot.show()
    with Output(nchannels=1) as out:
        out.play_sample(s)
        out.wait_all_played()
def chords():
    synth = WaveSynth()
    with Output(nchannels=1, mixing="sequential", queue_size=1) as out:
        for rootnote in octave_notes:
            chord_keys = major_chord_keys(rootnote, 4)
            print("chord", rootnote, ["{0} {1}".format(note, octave) for note, octave in chord_keys])
            freqs = [notes[octave][key] for key, octave in chord_keys]
            for i in range(1, len(freqs)):
                assert freqs[i] > freqs[i-1]
            samples = [synth.sine(freq, 1.5, amplitude=0.333) for freq in freqs]
            s = samples[0].mix(samples[1]).mix(samples[2]).fadein(0.1).fadeout(0.1)
            out.play_sample(s)
        out.wait_all_played()
def modulate_amp():
    from matplotlib import pyplot as plot
    synth = WaveSynth()
    freq = 220
    s1 = synth.triangle(freq, duration=2)
    m = synth.sine(2, duration=2, amplitude=0.4, bias=0.5)
    s1.modulate_amp(m)
    plot.title("Amplitude modulation by another waveform")
    plot.plot(s1.get_frame_array())
    plot.show()
    with Output(nchannels=1) as out:
        out.play_sample(s1)
        out.wait_all_played()
    s1 = synth.triangle(freq, duration=2)
    m = Sine(3, amplitude=0.4, bias=0.5)
    s1.modulate_amp(m)
    plot.title("Amplitude modulation by an oscillator")
    plot.plot(s1.get_frame_array())
    plot.show()
    with Output(nchannels=1) as out:
        out.play_sample(s1)
        out.wait_all_played()
def echo_lfo():
    synth = WaveSynth(22050)
    s = Sine(440, amplitude=25000, samplerate=synth.samplerate)
    s = EnvelopeFilter(s, .2, .2, 0, 0, 1.5, stop_at_end=True)
    s = EchoFilter(s, .15, 5, 0.3, 0.6)
    s = ClipFilter(s, -32000, 32000)
    frames = [int(v) for v in s]
    import matplotlib.pyplot as plot
    plot.plot(frames)
    plot.show()
    samp = Sample.from_array(frames, synth.samplerate, 1)
    with Output.for_sample(samp) as out:
        out.play_sample(samp)
        out.wait_all_played()
def vibrato():
    synth = WaveSynth()
    duration = 3

    def make_sample(freq):
        fmfm = Linear(0, 0.002, max_value=99999)
        fm = Sine(0.05, amplitude=0.5, fm_lfo=fmfm)
        s1 = synth.sawtooth(freq, duration, amplitude=0.6, fm_lfo=fm)
        s1.envelope(0.01, 0.1, 0.6, 2)
        return s1

    with Output(synth.samplerate, nchannels=1, mixing="sequential") as out:
        for f in [220, 330, 440]:
            sample = make_sample(f)
            out.play_sample(sample)
        out.wait_all_played()
def pwm():
    from matplotlib import pyplot as plot
    synth = WaveSynth(samplerate=1000)
    pwm_lfo = Sine(0.05, amplitude=0.49, bias=0.5, samplerate=synth.samplerate)
    s1 = synth.pulse(4, amplitude=0.6, duration=20, pwm_lfo=pwm_lfo)
    plot.figure(figsize=(16, 4))
    plot.title("Pulse width modulation")
    plot.ylim([-35000, 35000])
    plot.plot(s1.get_frame_array())
    plot.show()
    with Output(nchannels=1) as out:
        synth = WaveSynth()
        lfo2 = Sine(0.2, amplitude=0.48, bias=0.5)
        s1 = synth.pulse(440/6, amplitude=0.5, duration=6, fm_lfo=None, pwm_lfo=lfo2)
        out.play_sample(s1)
        # s1.write_wav("pwmtest.wav")
        out.wait_all_played()
def fm():
    synth = WaveSynth(samplerate=8000)
    from matplotlib import pyplot as plot
    freq = 2000
    lfo1 = Sine(1, amplitude=0.4, samplerate=synth.samplerate)
    s1 = synth.sine(freq, duration=3, fm_lfo=lfo1)
    plot.title("Spectrogram")
    plot.ylabel("Freq")
    plot.xlabel("Time")
    plot.specgram(s1.get_frame_array(), Fs=synth.samplerate, noverlap=90, cmap=plot.cm.gist_heat)
    plot.show()
    with Output(nchannels=1, mixing="sequential") as out:
        synth = WaveSynth()
        freq = 440
        lfo1 = Linear(5, samplerate=synth.samplerate)
        lfo1 = EnvelopeFilter(lfo1, 1, 0.5, 0.5, 0.5, 1)
        s1 = synth.sine(freq, duration=3, fm_lfo=lfo1)
        s_all = s1.copy()
        out.play_sample(s1)
        lfo1 = Sine(1, amplitude=0.2, samplerate=synth.samplerate)
        s1 = synth.sine(freq, duration=2, fm_lfo=lfo1)
        s_all.join(s1)
        out.play_sample(s1)
        lfo1 = Sine(freq/17, amplitude=0.5, samplerate=synth.samplerate)
        s1 = synth.sine(freq, duration=2, fm_lfo=lfo1)
        s_all.join(s1)
        out.play_sample(s1)
        lfo1 = Sine(freq/6, amplitude=0.5, samplerate=synth.samplerate)
        s1 = synth.sine(freq, duration=2, fm_lfo=lfo1)
        s_all.join(s1)
        out.play_sample(s1)
        lfo1 = Sine(1, amplitude=0.4, samplerate=synth.samplerate)
        s1 = synth.triangle(freq, duration=2, fm_lfo=lfo1)
        s_all.join(s1)
        out.play_sample(s1)
        freq = 440*2
        lfo1 = Sine(freq/80, amplitude=0.4, samplerate=synth.samplerate)
        s1 = synth.triangle(freq, duration=2, fm_lfo=lfo1)
        s_all.join(s1)
        out.play_sample(s1)
        # s_all.write_wav("fmtestall.wav")
        out.wait_all_played()
def demo_song(profiling=False):
    synth = WaveSynth()
    notes = {note: key_freq(49+i) for i, note in enumerate(
        ['A', 'A#', 'B', 'C', 'C#', 'D', 'D#', 'E', 'F', 'F#', 'G', 'G#'])}
    tempo = 0.3

    def instrument(freq, duration):
        harmonics = [(1, 1), (2, 1/2), (4, 1/4), (6, 1/6)]
        a = synth.harmonics(freq, duration, harmonics)
        return a.envelope(0.05, 0.2, 0.8, 0.5)

    print("Synthesizing tones...")
    perf_c = time.perf_counter()
    quarter_notes = {note: instrument(notes[note], tempo) for note in notes}
    half_notes = {note: instrument(notes[note], tempo*2) for note in notes}
    full_notes = {note: instrument(notes[note], tempo*4) for note in notes}
    silence = Sample.from_array([0]*int(synth.samplerate*tempo*2), synth.samplerate, numchannels=1)
    if profiling:
        print(time.perf_counter()-perf_c)
    else:
        song = "A A B. A D. C#.. ; A A B. A E. D.. ; A A A. F#.. D C#.. B ; G G F#.. D E D ; ; "\
               "A A B. A D C#.. ; A A B. A E D. ; A A A. F#.. D C#.. B ; G G F#.. D E D ; ; "
        with Output(synth.samplerate, synth.samplewidth, 1, mixing="sequential") as out:
            for note in song.split():
                if note == ";":
                    print()
                    out.play_sample(silence)
                    continue
                print(note, end=" ", flush=True)
                if note.endswith(".."):
                    sample = full_notes[note[:-2]]
                elif note.endswith("."):
                    sample = half_notes[note[:-1]]
                else:
                    sample = quarter_notes[note]
                out.play_sample(sample)
            print()
            out.wait_all_played()
def demo_tones():
    synth = WaveSynth()
    with Output(nchannels=1, mixing="sequential", queue_size=2) as out:
        for wave in [synth.square_h, synth.square, synth.sine, synth.triangle, synth.sawtooth, synth.sawtooth_h]:
            print(wave.__name__)
            for note, freq in list(notes[4].items())[6:]:
                print(" {:f} hz".format(freq))
                sample = wave(freq, duration=0.4).fadein(0.02).fadeout(0.1)
                out.play_sample(sample)
        print("pulse")
        for note, freq in list(notes[4].items())[6:]:
            print(" {:f} hz".format(freq))
            sample = synth.pulse(freq, duration=0.4, pulsewidth=0.1).fadein(0.02).fadeout(0.1)
            out.play_sample(sample)
        print("harmonics (only odd)")
        for note, freq in list(notes[3].items())[6:]:
            print(" {:f} hz".format(freq))
            harmonics = [(n, 1/n) for n in range(1, 5*2, 2)]   # harmonics 1, 3, 5, 7, 9
            sample = synth.harmonics(freq, 0.4, harmonics).fadein(0.02).fadeout(0.1)
            out.play_sample(sample)
        print("noise")
        sample = synth.white_noise(frequency=440, duration=1.5).fadein(0.1).fadeout(0.1)
        out.play_sample(sample)
        out.wait_all_played()
class Player:
    update_rate = 50            # 50 ms = 20 updates/sec
    levelmeter_lowest = -40     # dB
    xfade_duration = 7

    def __init__(self, app, trackframes):
        self.app = app
        self.trackframes = trackframes
        self.app.after(self.update_rate, self.tick)
        self.stopping = False
        self.mixer = StreamMixer([], endless=True)
        self.output = Output(self.mixer.samplerate, self.mixer.samplewidth, self.mixer.nchannels,
                             mixing="sequential", queue_size=2)
        self.mixed_samples = iter(self.mixer)
        self.levelmeter = LevelMeter(rms_mode=False, lowest=self.levelmeter_lowest)
        self.output.register_notify_played(self.levelmeter.update)
        for tf in self.trackframes:
            tf.player = self
        player_thread = Thread(target=self._play_sample_in_thread, name="jukebox_sampleplayer")
        player_thread.daemon = True
        player_thread.start()

    def skip(self, trackframe):
        if trackframe.state != TrackFrame.state_needtrack and trackframe.stream:
            trackframe.stream.close()
            trackframe.stream = None
        trackframe.display_track(None, None, None, "(next track...)")
        trackframe.state = TrackFrame.state_switching

    def stop(self):
        self.stopping = True
        for tf in self.trackframes:
            if tf.stream:
                tf.stream.close()
                tf.stream = None
            tf.state = TrackFrame.state_needtrack
        self.mixer.close()
        self.output.close()

    def tick(self):
        # the actual decoding and sound playing is done in a background thread
        self._levelmeter()
        self._load_song()
        self._play_song()
        self._crossfade()
        if not self.stopping:
            self.app.after(self.update_rate, self.tick)

    def _play_sample_in_thread(self):
        """
        This is run in a background thread to avoid GUI interactions interfering with audio output.
        """
        while True:
            if self.stopping:
                break
            _, sample = next(self.mixed_samples)
            if sample and sample.duration > 0:
                self.output.play_sample(sample)
            else:
                self.levelmeter.reset()
                time.sleep(self.update_rate/1000*2)   # avoid hogging the cpu while no samples are played

    def _levelmeter(self):
        self.app.update_levels(self.levelmeter.level_left, self.levelmeter.level_right)

    def _load_song(self):
        if self.stopping:
            return   # make sure we don't load new songs when the player is shutting down
        for tf in self.trackframes:
            if tf.state == TrackFrame.state_needtrack:
                track = self.app.pop_playlist_track()
                if track:
                    tf.track = track
                    tf.state = TrackFrame.state_idle

    def _play_song(self):
        def start_stream(tf, filename, volume):
            def _start_from_thread():
                # start loading the track from a thread to avoid gui stutters when loading takes a bit of time
                tf.stream = AudiofileToWavStream(filename, hqresample=hqresample)
                self.mixer.add_stream(tf.stream, [tf.volumefilter])
                tf.playback_started = datetime.datetime.now()
                tf.state = TrackFrame.state_playing
                tf.volume = volume
            tf.state = TrackFrame.state_loading
            Thread(target=_start_from_thread, name="stream_loader").start()

        for tf in self.trackframes:
            if tf.state == TrackFrame.state_playing:
                remaining = tf.track_duration - (datetime.datetime.now() - tf.playback_started)
                remaining = remaining.total_seconds()
                tf.time = datetime.timedelta(seconds=math.ceil(remaining))
                if tf.stream.closed and tf.time.total_seconds() <= 0:
                    self.skip(tf)   # stream ended!
            elif tf.state == TrackFrame.state_idle:
                if tf.xfade_state == TrackFrame.state_xfade_fadingin:
                    # if we're set to fading in, regardless of other tracks, we start playing as well
                    start_stream(tf, tf.track["location"], 0)
                elif not any(tf for tf in self.trackframes
                             if tf.state in (TrackFrame.state_playing, TrackFrame.state_loading)):
                    # if there is no other track currently playing (or loading), it's our turn!
                    start_stream(tf, tf.track["location"], 100)
            elif tf.state == TrackFrame.state_switching:
                tf.state = TrackFrame.state_needtrack

    def _crossfade(self):
        for tf in self.trackframes:
            # nearing the end of the track? then start a fade out
            if tf.state == TrackFrame.state_playing \
                    and tf.xfade_state == TrackFrame.state_xfade_nofade \
                    and tf.time.total_seconds() <= self.xfade_duration:
                tf.xfade_state = TrackFrame.state_xfade_fadingout
                tf.xfade_started = datetime.datetime.now()
                tf.xfade_start_volume = tf.volume
                # fade in the first other track that is currently idle
                for other_tf in self.trackframes:
                    if tf is not other_tf and other_tf.state == TrackFrame.state_idle:
                        other_tf.xfade_state = TrackFrame.state_xfade_fadingin
                        other_tf.xfade_started = datetime.datetime.now()
                        other_tf.xfade_start_volume = 0
                        other_tf.volume = 0
                        break
        for tf in self.trackframes:
            if tf.xfade_state == TrackFrame.state_xfade_fadingin:
                # fading in, slide volume up from 0 to 100%
                volume = 100 * (datetime.datetime.now() - tf.xfade_started).total_seconds() / self.xfade_duration
                tf.volume = min(volume, 100)
                if volume >= 100:
                    tf.xfade_state = TrackFrame.state_xfade_nofade   # fade reached the end
            elif tf.xfade_state == TrackFrame.state_xfade_fadingout:
                # fading out, slide volume down from what it was at to 0%
                fade_progress = (datetime.datetime.now() - tf.xfade_started)
                fade_progress = (self.xfade_duration - fade_progress.total_seconds()) / self.xfade_duration
                volume = max(0, tf.xfade_start_volume * fade_progress)
                tf.volume = max(volume, 0)
                if volume <= 0:
                    tf.xfade_state = TrackFrame.state_xfade_nofade   # fade reached the end

    def play_sample(self, sample):
        def unmute(trf, vol):
            if trf:
                trf.volume = vol

        if sample and sample.duration > 0:
            for tf in self.trackframes:
                if tf.state == TrackFrame.state_playing:
                    old_volume = tf.mute_volume(40)
                    self.mixer.add_sample(sample, lambda mtf=tf, vol=old_volume: unmute(mtf, vol))
                    break
            else:
                self.mixer.add_sample(sample)
def a440():
    synth = WaveSynth(samplerate=44100, samplewidth=4)
    a440 = synth.sine(440, duration=2)
    with Output.for_sample(a440) as out:
        out.play_sample(a440)
        out.wait_all_played()
class LevelGUI(tk.Frame):
    def __init__(self, audio_source, master=None):
        self.lowest_level = -50
        super().__init__(master)
        self.master.title("Levels")
        self.pbvar_left = tk.IntVar()
        self.pbvar_right = tk.IntVar()
        pbstyle = ttk.Style()
        pbstyle.theme_use("classic")
        pbstyle.configure("green.Vertical.TProgressbar", troughcolor="gray", background="light green")
        pbstyle.configure("yellow.Vertical.TProgressbar", troughcolor="gray", background="yellow")
        pbstyle.configure("red.Vertical.TProgressbar", troughcolor="gray", background="orange")

        frame = tk.LabelFrame(self, text="Left")
        frame.pack(side=tk.LEFT)
        tk.Label(frame, text="dB").pack()
        self.pb_left = ttk.Progressbar(frame, orient=tk.VERTICAL, length=300, maximum=-self.lowest_level,
                                       variable=self.pbvar_left, mode='determinate',
                                       style='yellow.Vertical.TProgressbar')
        self.pb_left.pack()

        frame = tk.LabelFrame(self, text="Right")
        frame.pack(side=tk.LEFT)
        tk.Label(frame, text="dB").pack()
        self.pb_right = ttk.Progressbar(frame, orient=tk.VERTICAL, length=300, maximum=-self.lowest_level,
                                        variable=self.pbvar_right, mode='determinate',
                                        style='yellow.Vertical.TProgressbar')
        self.pb_right.pack()

        frame = tk.LabelFrame(self, text="Info")
        self.info = tk.Label(frame, text="", justify=tk.LEFT)
        frame.pack()
        self.info.pack(side=tk.TOP)
        self.pack()
        self.open_audio_file(audio_source)
        self.after_idle(self.update)
        self.after_idle(self.stream_audio)

    def open_audio_file(self, filename_or_stream):
        wav = wave.open(filename_or_stream, 'r')
        self.samplewidth = wav.getsampwidth()
        self.samplerate = wav.getframerate()
        self.nchannels = wav.getnchannels()
        self.samplestream = iter(SampleStream(wav, self.samplerate // 10))
        self.levelmeter = LevelMeter(rms_mode=False, lowest=self.lowest_level)
        self.audio_out = Output(self.samplerate, self.samplewidth, self.nchannels,
                                mixing="sequential", queue_size=3)
        print("Audio API used:", self.audio_out.audio_api)
        if not self.audio_out.supports_streaming:
            raise RuntimeError("need api that supports streaming")
        self.audio_out.register_notify_played(self.levelmeter.update)
        filename = filename_or_stream if isinstance(filename_or_stream, str) else "<stream>"
        info = "Source:\n{}\n\nRate: {:g} kHz\nBits: {}\nChannels: {}"\
            .format(filename, self.samplerate/1000, 8*self.samplewidth, self.nchannels)
        self.info.configure(text=info)

    def stream_audio(self):
        try:
            sample = next(self.samplestream)
            self.audio_out.play_sample(sample)
            self.after(20, self.stream_audio)
        except StopIteration:
            self.audio_out.close()

    def update(self):
        if not self.audio_out.still_playing():
            self.pbvar_left.set(0)
            self.pbvar_right.set(0)
            print("done!")
            return
        left, peak_l = self.levelmeter.level_left, self.levelmeter.peak_left
        right, peak_r = self.levelmeter.level_right, self.levelmeter.peak_right
        self.pbvar_left.set(left-self.lowest_level)
        self.pbvar_right.set(right-self.lowest_level)
        if left > -3:
            self.pb_left.configure(style="red.Vertical.TProgressbar")
        elif left > -6:
            self.pb_left.configure(style="yellow.Vertical.TProgressbar")
        else:
            self.pb_left.configure(style="green.Vertical.TProgressbar")
        if right > -3:
            self.pb_right.configure(style="red.Vertical.TProgressbar")
        elif right > -6:
            self.pb_right.configure(style="yellow.Vertical.TProgressbar")
        else:
            self.pb_right.configure(style="green.Vertical.TProgressbar")
        self.after(1000//update_rate, self.update)
import os
import time

from synthplayer.sample import Sample
from synthplayer.playback import Output


s1 = Sample("samples/909_clap.wav").normalize()
s2 = Sample("samples/909_hi_tom.wav").normalize()
s3 = Sample("samples/909_ride.wav").normalize()
s4 = Sample("samples/Drop the bass now.wav").normalize()
s5 = Sample("samples/909_snare_drum.wav").normalize()
s6 = Sample("samples/909_hihat_closed.wav").normalize()
s6_soft = s6.copy().amplify(0.2)

with Output(mixing="sequential", queue_size=3) as out:
    print("\nPlaying samples with sequential mixing mode.")
    print("This takes care of playing samples only after the previous one has finished,")
    print("but you cannot mix any sounds. It's ideal for playback of a single sound source,")
    print("such as an audio clip or audio stream that comes in chunks.")
    out.play_sample(s1)
    out.play_sample(s2)
    out.play_sample(s3)
    out.play_sample(s4)
    out.play_sample(s5)
    out.play_sample(s5)
    out.play_sample(s5)
    out.play_sample(s6)
    print("\nwaiting till all sounds have played...")
    out.wait_all_played()
    print("\nEnter to continue:")
# Instead, just play it _as a single huge sample_ where the sample itself
# takes care of dynamically producing its audio data chunks.
print("Streaming mp3 using realtime mixer...")
counter = 1


def played_callback(sample):
    global counter
    print(" played sound chunk", counter, end="\r")
    counter += 1


with AudiofileToWavStream("example_mixes/track3.mp3") as wavstream:
    sample = StreamingSample(wavstream, wavstream.name)
    hihat = Sample("samples/909_hihat_closed.wav").normalize()
    with Output(mixing="mix", frames_per_chunk=afmt.rate//10) as out:
        out.register_notify_played(played_callback)
        # as an example, we show the capability of real time mixing by adding some other samples in the timeline
        out.play_sample(hihat, delay=0.0)
        out.play_sample(hihat, delay=0.5)
        out.play_sample(hihat, delay=1.0)
        out.play_sample(hihat, delay=1.5)
        out.play_sample(sample, delay=2.0)
        out.wait_all_played()   # the mixer itself takes care of grabbing new data as needed


# ** Streaming a large mp3 file using the sequential mixing output **
# This is more efficient for just playing large music files,
# and can be done by simply playing sample chunks one after another.
print("Streaming mp3 using sequential mixer...")
with AudiofileToWavStream("example_mixes/track3.mp3") as wavstream:
    with SampleStream(wavstream, afmt.rate//10) as samples:
# imports needed by this snippet (the rest of the original file's imports are assumed)
from typing import Any, Dict

import Pyro4
from Pyro4.util import SerializerBase

from synthplayer import sample
from synthplayer.playback import Output


def sample_deserializer(classname: str, data: Dict[str, Any]) -> sample.Sample:
    return sample.Sample.from_raw_frames(data["frames"], data["samplewidth"],
                                         data["samplerate"], data["nchannels"], data["name"])


Pyro4.config.SERIALIZER = "marshal"
SerializerBase.register_dict_to_class("synthplayer.sample.Sample", sample_deserializer)

synth = Pyro4.Proxy("PYRONAME:synth.wavesynth")
synth.setup(44100)
with Output(44100, nchannels=1, samplewidth=2, mixing="sequential") as output:
    silence = sample.Sample.from_raw_frames(b"", samplewidth=2, samplerate=44100, numchannels=1)
    silence.add_silence(0.1)
    output.play_sample(synth.sine(220, .5))
    output.play_sample(silence)
    output.play_sample(synth.sine(330, .5))
    output.play_sample(silence)
    output.play_sample(synth.sine(440, .5))
    output.play_sample(silence)
    output.play_sample(synth.sine(550, .5))
    output.play_sample(silence)
    output.play_sample(synth.sine(660, .5))
    output.play_sample(silence)
class SynthGUI(tk.Frame):
    def __init__(self, master=None):
        super().__init__(master)
        self.master.title("Software FM/PWM Synthesizer | synthplayer lib v" + synthplayer.__version__)
        self.waveform_area = tk.Frame(self)
        self.osc_frame = tk.Frame(self)
        self.oscillators = []
        self.piano_frame = tk.Frame(self)
        self.pianokeys_gui = PianoKeyboardGUI(self.piano_frame, self)
        self.pianokeys_gui.pack(side=tk.BOTTOM)
        filter_frame = tk.LabelFrame(self, text="Filters etc.", padx=10, pady=10)
        self.envelope_filter_guis = [
            EnvelopeFilterGUI(filter_frame, "1", self),
            EnvelopeFilterGUI(filter_frame, "2", self),
            EnvelopeFilterGUI(filter_frame, "3", self)]
        self.echo_filter_gui = EchoFilterGUI(filter_frame, self)
        for ev in self.envelope_filter_guis:
            ev.pack(side=tk.LEFT, anchor=tk.N)
        self.arp_filter_gui = ArpeggioFilterGUI(filter_frame, self)
        self.arp_filter_gui.pack(side=tk.LEFT, anchor=tk.N)
        f = tk.Frame(filter_frame)
        self.tremolo_filter_gui = TremoloFilterGUI(f, self)
        self.tremolo_filter_gui.pack(side=tk.TOP)
        lf = tk.LabelFrame(f, text="A4 tuning")
        lf.pack(pady=(4, 0))
        lf = tk.LabelFrame(f, text="Performance")
        self.samplerate_choice = tk.IntVar()
        self.samplerate_choice.set(22050)
        tk.Label(lf, text="Samplerate:").pack(anchor=tk.W)
        subf = tk.Frame(lf)
        tk.Radiobutton(subf, variable=self.samplerate_choice, value=44100, text="44.1 kHz",
                       fg=lf.cget('fg'), selectcolor=lf.cget('bg'), pady=0,
                       command=self.create_synth).pack(side=tk.LEFT)
        tk.Radiobutton(subf, variable=self.samplerate_choice, value=22050, text="22 kHz",
                       fg=lf.cget('fg'), selectcolor=lf.cget('bg'), pady=0,
                       command=self.create_synth).pack(side=tk.LEFT)
        subf.pack()
        tk.Label(lf, text="Piano key response:").pack(anchor=tk.W)
        subf = tk.Frame(lf)
        self.rendering_choice = tk.StringVar()
        self.rendering_choice.set("realtime")
        tk.Radiobutton(subf, variable=self.rendering_choice, value="realtime", text="realtime", pady=0,
                       fg=lf.cget('fg'), selectcolor=lf.cget('bg')).pack(side=tk.LEFT)
        tk.Radiobutton(subf, variable=self.rendering_choice, value="render", text="render", pady=0,
                       fg=lf.cget('fg'), selectcolor=lf.cget('bg')).pack(side=tk.LEFT)
        subf.pack()
        lf.pack(pady=(4, 0))
        f.pack(side=tk.LEFT, anchor=tk.N)
        self.echo_filter_gui.pack(side=tk.LEFT, anchor=tk.N)
        misc_frame = tk.Frame(filter_frame, padx=10)
        tk.Label(misc_frame, text="To Speaker:").pack(pady=(5, 0))
        self.to_speaker_lb = tk.Listbox(misc_frame, width=8, height=5, selectmode=tk.MULTIPLE, exportselection=0)
        self.to_speaker_lb.pack()
        lf = tk.LabelFrame(misc_frame, text="A4 tuning")
        self.a4_choice = tk.IntVar()
        self.a4_choice.set(440)
        tk.Radiobutton(lf, variable=self.a4_choice, value=440, text="440 Hz", pady=0,
                       fg=lf.cget('fg'), selectcolor=lf.cget('bg')).pack()
        tk.Radiobutton(lf, variable=self.a4_choice, value=432, text="432 Hz", pady=0,
                       fg=lf.cget('fg'), selectcolor=lf.cget('bg')).pack()
        lf.pack(pady=(4, 0))
        tk.Button(misc_frame, text="Load preset", command=self.load_preset).pack()
        tk.Button(misc_frame, text="Save preset", command=self.save_preset).pack()
        for _ in range(5):
            self.add_osc_to_gui()
        self.to_speaker_lb.select_set(4)
        self.waveform_area.pack(side=tk.TOP)
        self.osc_frame.pack(side=tk.TOP, padx=10)
        filter_frame.pack(side=tk.TOP)
        misc_frame.pack(side=tk.RIGHT, anchor=tk.N)
        self.piano_frame.pack(side=tk.TOP, padx=10, pady=10)
        self.statusbar = tk.Label(self, text="<status>", relief=tk.RIDGE)
        self.statusbar.pack(side=tk.BOTTOM, fill=tk.X)
        self.pack()
        self.synth = self.output = None
        self.create_synth()
        self.echos_ending_time = 0
        self.currently_playing = {}     # (note, octave) -> sid
        self.arp_after_id = 0
        showwarning("garbled sound output",
                    "When using miniaudio 1.20+, the audio could be garbled (not always the case). "
                    "I haven't had time yet to debug and fix this. Sorry for any inconvenience.")

    def bind_keypress(self, key, note, octave):
        def kbpress(event):
            self.pressed_keyboard(note, octave, False)

        def kbrelease(event):
            self.pressed_keyboard(note, octave, True)

        self.master.bind(key, kbpress)
        if key == '[':
            key = "bracketleft"
        if key == ']':
            key = "bracketright"
        self.master.bind("<KeyRelease-%s>" % key, kbrelease)

    def create_synth(self):
        samplerate = self.samplerate_choice.get()
        self.synth = WaveSynth(samplewidth=2, samplerate=samplerate)
        if self.output is not None:
            self.output.close()
        self.output = Output(self.synth.samplerate, self.synth.samplewidth, 1, mixing="mix")

    def add_osc_to_gui(self):
        osc_nr = len(self.oscillators)
        fm_sources = ["osc "+str(n+1) for n in range(osc_nr)]
        osc_pane = OscillatorGUI(self.osc_frame, self, "Oscillator "+str(osc_nr+1),
                                 fm_sources=fm_sources, pwm_sources=fm_sources)
        osc_pane.pack(side=tk.LEFT, anchor=tk.N, padx=10, pady=10)
        self.oscillators.append(osc_pane)
        self.to_speaker_lb.insert(tk.END, "osc "+str(osc_nr+1))

    def create_osc(self, note, octave, freq, from_gui, all_oscillators, is_audio=False):
        def create_unfiltered_osc():
            def create_chord_osc(clazz, **arguments):
                if is_audio and self.arp_filter_gui.input_mode.get().startswith("chords"):
                    chord_keys = major_chord_keys(note, octave)
                    if self.arp_filter_gui.input_mode.get() == "chords3":
                        chord_keys = list(chord_keys)[:-1]
                    a4freq = self.a4_choice.get()
                    chord_freqs = [note_freq(n, o, a4freq) for n, o in chord_keys]
                    self.statusbar["text"] = "major chord: "+" ".join(n for n, o in chord_keys)
                    oscillators = []
                    arguments["amplitude"] /= len(chord_freqs)
                    for f in chord_freqs:
                        arguments["frequency"] = f
                        oscillators.append(clazz(**arguments))
                    return MixingFilter(*oscillators)
                else:
                    # no chord (or an LFO instead of an audio output oscillator): return one osc for only the given frequency
                    return clazz(**arguments)

            waveform = from_gui.input_waveformtype.get()
            amp = from_gui.input_amp.get()
            bias = from_gui.input_bias.get()
            if waveform == "noise":
                return WhiteNoise(freq, amplitude=amp, bias=bias, samplerate=self.synth.samplerate)
            elif waveform == "linear":
                startlevel = from_gui.input_lin_start.get()
                increment = from_gui.input_lin_increment.get()
                minvalue = from_gui.input_lin_min.get()
                maxvalue = from_gui.input_lin_max.get()
                return Linear(startlevel, increment, minvalue, maxvalue)
            else:
                phase = from_gui.input_phase.get()
                pw = from_gui.input_pw.get()
                fm_choice = from_gui.input_fm.get()
                pwm_choice = from_gui.input_pwm.get()
                if fm_choice in (None, "", "<none>"):
                    fm = None
                elif fm_choice.startswith("osc"):
                    osc_num = int(fm_choice.split()[1])
                    osc = all_oscillators[osc_num - 1]
                    fm = self.create_osc(note, octave, osc.input_freq.get(),
                                         all_oscillators[osc_num-1], all_oscillators)
                else:
                    raise ValueError("invalid fm choice")
                if pwm_choice in (None, "", "<none>"):
                    pwm = None
                elif pwm_choice.startswith("osc"):
                    osc_num = int(pwm_choice.split()[1])
                    osc = all_oscillators[osc_num-1]
                    pwm = self.create_osc(note, octave, osc.input_freq.get(), osc, all_oscillators)
                else:
                    raise ValueError("invalid pwm choice")
                if waveform == "pulse":
                    return create_chord_osc(Pulse, frequency=freq, amplitude=amp, phase=phase,
                                            bias=bias, pulsewidth=pw, fm_lfo=fm, pwm_lfo=pwm,
                                            samplerate=self.synth.samplerate)
                elif waveform == "harmonics":
                    harmonics = self.parse_harmonics(from_gui.harmonics_text.get(1.0, tk.END))
                    return create_chord_osc(Harmonics, frequency=freq, harmonics=harmonics,
                                            amplitude=amp, phase=phase, bias=bias, fm_lfo=fm,
                                            samplerate=self.synth.samplerate)
                else:
                    o = {
                        "sine": Sine,
                        "triangle": Triangle,
                        "sawtooth": Sawtooth,
                        "sawtooth_h": SawtoothH,
                        "square": Square,
                        "square_h": SquareH,
                        "semicircle": Semicircle,
                        "pointy": Pointy,
                    }[waveform]
                    return create_chord_osc(o, frequency=freq, amplitude=amp, phase=phase,
                                            bias=bias, fm_lfo=fm, samplerate=self.synth.samplerate)

        def envelope(osc, envelope_gui):
            adsr_src = envelope_gui.input_source.get()
            if adsr_src not in (None, "", "<none>"):
                osc_num = int(adsr_src.split()[1])
                if from_gui is self.oscillators[osc_num-1]:
                    return envelope_gui.filter(osc)
            return osc

        osc = create_unfiltered_osc()
        for ev in self.envelope_filter_guis:
            osc = envelope(osc, ev)
        return osc

    def parse_harmonics(self, harmonics):
        parsed = []
        for harmonic in harmonics.split():
            num, frac = harmonic.split(",")
            num = int(num)
            if '/' in frac:
                numerator, denominator = frac.split("/")
            else:
                numerator, denominator = frac, 1
            frac = float(numerator)/float(denominator)
            parsed.append((num, frac))
        return parsed

    def do_play(self, osc):
        if osc.input_waveformtype.get() == "linear":
            self.statusbar["text"] = "cannot output linear osc to speakers"
            return
        duration = 1.0
        osc.set_title_status("TO SPEAKER")
        self.update()
        osc.after(int(duration*1000), lambda: osc.set_title_status(None))
        o = self.create_osc(None, None, osc.input_freq.get(), osc,
                            all_oscillators=self.oscillators, is_audio=True)
        o = self.apply_filters(o)
        sample = self.generate_sample(o, duration)
        if sample.samplewidth != self.synth.samplewidth:
            print("16 bit overflow!")  # XXX
            sample = sample.make_16bit()
        self.output.play_sample(sample)
        self.after(1000, lambda: osc.set_title_status(""))

    def do_close_waveform(self):
        for child in self.waveform_area.winfo_children():
            child.destroy()

    def do_plot(self, osc):
        if not matplotlib:
            self.statusbar["text"] = "Cannot plot! To plot things, you need to have matplotlib installed!"
            return
        o = self.create_osc(None, None, osc.input_freq.get(), osc, all_oscillators=self.oscillators).blocks()
        blocks = list(itertools.islice(o, self.synth.samplerate//params.norm_osc_blocksize))
        # integrating matplotlib in tkinter, see http://matplotlib.org/examples/user_interfaces/embedding_in_tk2.html
        fig = Figure(figsize=(8, 2), dpi=100)
        axis = fig.add_subplot(111)
        axis.plot(sum(blocks, []))
        axis.set_title("Waveform")
        self.do_close_waveform()
        canvas = FigureCanvasTkAgg(fig, master=self.waveform_area)
        canvas.get_tk_widget().pack(side=tk.LEFT, fill=tk.BOTH, expand=1)
        canvas.draw()
        close_waveform = tk.Button(self.waveform_area, text="Close waveform", command=self.do_close_waveform)
        close_waveform.pack(side=tk.RIGHT)

    def generate_sample(self, oscillator: Oscillator, duration: float, use_fade: bool = False) -> Optional[Sample]:
        scale = 2**(8*self.synth.samplewidth-1)
        blocks = oscillator.blocks()
        try:
            sample_blocks = list(next(blocks) for _ in range(int(self.synth.samplerate*duration/params.norm_osc_blocksize)))
            float_frames = sum(sample_blocks, [])
            frames = [int(v*scale) for v in float_frames]
        except StopIteration:
            return None
        else:
            sample = Sample.from_array(frames, self.synth.samplerate, 1)
            if use_fade:
                sample.fadein(0.05).fadeout(0.1)
            return sample

    def render_and_play_note(self, oscillator: Oscillator, max_duration: float = 4) -> None:
        duration = 0
        for ev in self.envelope_filter_guis:
            duration = max(duration, ev.duration)
        if duration == 0:
            duration = 1
        duration = min(duration, max_duration)
        sample = self.generate_sample(oscillator, duration)
        if sample:
            sample.fadein(0.05).fadeout(0.05)
            if sample.samplewidth != self.synth.samplewidth:
                print("16 bit overflow!")  # XXX
                sample.make_16bit()
            self.output.play_sample(sample)

    keypresses = collections.defaultdict(float)        # (note, octave) -> timestamp
    keyrelease_counts = collections.defaultdict(int)   # (note, octave) -> int

    def _key_release(self, note, octave):
        # mechanism to filter out key repeats
        self.keyrelease_counts[(note, octave)] -= 1
        if self.keyrelease_counts[(note, octave)] <= 0:
            self.pressed(note, octave, True)

    def pressed_keyboard(self, note, octave, released=False):
        if released:
            self.keyrelease_counts[(note, octave)] += 1
            self.after(400, lambda n=note, o=octave: self._key_release(n, o))
        else:
            time_since_previous = time.time() - self.keypresses[(note, octave)]
            self.keypresses[(note, octave)] = time.time()
            if time_since_previous < 0.8:
                # assume auto-repeat, and do nothing
                return
            self.pressed(note, octave)

    def pressed(self, note, octave, released=False):
        if self.arp_filter_gui.input_mode.get().startswith("arp"):
            if released:
                if self.arp_after_id:
                    self.after_cancel(self.arp_after_id)   # stop the arp cycle
                    self.statusbar["text"] = "ok"
                    self.arp_after_id = 0
                return
            chord_keys = major_chord_keys(note, octave)
            if self.arp_filter_gui.input_mode.get() == "arpeggio3":
                chord_keys = list(chord_keys)[:-1]
            self.statusbar["text"] = "arpeggio: "+" ".join(note for note, octave in chord_keys)
            self.play_note(chord_keys)
        else:
            self.statusbar["text"] = "ok"
            self.play_note([(note, octave)], released)

    def play_note(self, list_of_notes, released=False):
        # list of notes to play (length 1 = just one note, more elements = arpeggiator list)
        to_speaker = [self.oscillators[i] for i in self.to_speaker_lb.curselection()]
        if not to_speaker:
            self.statusbar["text"] = "No oscillators connected to speaker output!"
            return
        if released:
            for note, octave in list_of_notes:
                if (note, octave) in self.currently_playing:
                    # stop the note
                    sid = self.currently_playing[(note, octave)]
                    self.output.stop_sample(sid)
            return
        first_note, first_octave = list_of_notes[0]
        first_freq = note_freq(first_note, first_octave, self.a4_choice.get())
        for osc in self.oscillators:
            if osc.input_freq_keys.get():
                osc.input_freq.set(first_freq*osc.input_freq_keys_ratio.get())
        for osc in to_speaker:
            if osc.input_waveformtype.get() == "linear":
                self.statusbar["text"] = "cannot output linear osc to speakers"
                return
            else:
                osc.set_title_status("TO SPEAKER")
        oscs_to_play = []
        for note, octave in list_of_notes:
            freq = note_freq(note, octave, self.a4_choice.get())
            oscs = [self.create_osc(note, octave, freq * osc.input_freq_keys_ratio.get(), osc,
                                    self.oscillators, is_audio=True) for osc in to_speaker]
            mixed_osc = MixingFilter(*oscs) if len(oscs) > 1 else oscs[0]
            self.echos_ending_time = 0
            if len(list_of_notes) <= 1:
                # you can't use filters and echo when using arpeggio for now
                mixed_osc = self.apply_filters(mixed_osc)
                current_echos_duration = getattr(mixed_osc, "echo_duration", 0)
                if current_echos_duration > 0:
                    self.echos_ending_time = time.time() + current_echos_duration
            oscs_to_play.append(mixed_osc)
        if len(list_of_notes) > 1:
            rate = self.arp_filter_gui.input_rate.get()
            duration = rate * self.arp_filter_gui.input_ratio.get() / 100.0
            self.statusbar["text"] = "playing ARP ({0}) from note {1} {2}".format(
                len(oscs_to_play), first_note, first_octave)
            for index, (note, octave) in enumerate(list_of_notes):
                sample = StreamingOscSample(oscs_to_play[index], self.synth.samplerate, duration)
                sid = self.output.play_sample(sample, delay=rate*index)
                self.currently_playing[(note, octave)] = sid
            self.arp_after_id = self.after(int(rate * len(list_of_notes) * 1000),
                                           lambda: self.play_note(list_of_notes))   # repeat arp!
        else:
            # normal, single note
            if self.rendering_choice.get() == "render":
                self.statusbar["text"] = "rendering note sample..."
                self.after_idle(lambda: self.render_and_play_note(mixed_osc))
            else:
                self.statusbar["text"] = "playing note {0} {1}".format(first_note, first_octave)
                sample = StreamingOscSample(oscs_to_play[0], self.synth.samplerate)
                sid = self.output.play_sample(sample)
                self.currently_playing[(first_note, first_octave)] = sid

        def reset_osc_title_status():
            for osc in to_speaker:
                osc.set_title_status("")
        self.after(1000, reset_osc_title_status)

    def apply_filters(self, output_oscillator):
        output_oscillator = self.tremolo_filter_gui.filter(output_oscillator)
        output_oscillator = self.echo_filter_gui.filter(output_oscillator)
        return output_oscillator

    def load_preset(self):
        file = askopenfile(filetypes=[("Synth presets", "*.ini")])
        cf = ConfigParser()
        cf.read_file(file)
        file.close()
        # general settings
        self.samplerate_choice.set(cf["settings"]["samplerate"])
        self.rendering_choice.set(cf["settings"]["rendering"])
        self.a4_choice.set(cf["settings"]["a4tuning"])
        self.to_speaker_lb.selection_clear(0, tk.END)
        to_speaker = cf["settings"]["to_speaker"]
        to_speaker = tuple(to_speaker.split(','))
        for o in to_speaker:
            self.to_speaker_lb.selection_set(int(o)-1)
        for section in cf.sections():
            if section.startswith("oscillator"):
                num = int(section.split('_')[1])-1
                osc = self.oscillators[num]
                for name, value in cf[section].items():
                    getattr(osc, name).set(value)
                osc.waveform_selected()
            elif section.startswith("envelope"):
                num = int(section.split('_')[1])-1
                env = self.envelope_filter_guis[num]
                for name, value in cf[section].items():
                    getattr(env, name).set(value)
            elif section == "arpeggio":
                for name, value in cf[section].items():
                    getattr(self.arp_filter_gui, name).set(value)
            elif section == "tremolo":
                for name, value in cf[section].items():
                    getattr(self.tremolo_filter_gui, name).set(value)
            elif section == "echo":
                for name, value in cf[section].items():
                    getattr(self.echo_filter_gui, name).set(value)
        self.statusbar["text"] = "preset loaded."

    def save_preset(self):
        file = asksaveasfile(filetypes=[("Synth presets", "*.ini")])
        cf = ConfigParser(dict_type=collections.OrderedDict)
        # general settings
        cf.add_section("settings")
        cf["settings"]["samplerate"] = str(self.samplerate_choice.get())
        cf["settings"]["rendering"] = self.rendering_choice.get()
        cf["settings"]["to_speaker"] = ",".join(str(v+1) for v in self.to_speaker_lb.curselection())
        cf["settings"]["a4tuning"] = str(self.a4_choice.get())
        # oscillators
        for num, osc in enumerate(self.oscillators, 1):
            section = "oscillator_"+str(num)
            cf.add_section(section)
            for name, var in vars(osc).items():
                if name.startswith("input_"):
                    cf[section][name] = str(var.get())
        # adsr envelopes
        for num, flter in enumerate(self.envelope_filter_guis, 1):
            section = "envelope_"+str(num)
            cf.add_section(section)
            for name, var in vars(flter).items():
                if name.startswith("input_"):
                    cf[section][name] = str(var.get())
        # echo
        cf.add_section("echo")
        for name, var in vars(self.echo_filter_gui).items():
            if name.startswith("input_"):
                cf["echo"][name] = str(var.get())
        # tremolo
        cf.add_section("tremolo")
        for name, var in vars(self.tremolo_filter_gui).items():
            if name.startswith("input_"):
                cf["tremolo"][name] = str(var.get())
        # arpeggio
        cf.add_section("arpeggio")
        for name, var in vars(self.arp_filter_gui).items():
            if name.startswith("input_"):
                cf["arpeggio"][name] = str(var.get())
        cf.write(file)
        file.close()
class Repl(cmd.Cmd):
    """
    Interactive command line interface to load/record/save and play samples, patterns and whole tracks.
    Currently it has no way of defining and loading samples manually. This means you need to initialize
    it with a track file containing at least the instruments (samples) that you will be using.
    """
    def __init__(self, discard_unused_instruments=False):
        self.song = Song()
        self.discard_unused_instruments = discard_unused_instruments
        self.out = Output(mixing="sequential", queue_size=1)
        super(Repl, self).__init__()

    def do_quit(self, args):
        """quits the session"""
        print("Bye.", args)
        self.out.close()
        return True

    def do_bpm(self, bpm):
        """set the playback BPM (such as 174 for some drum'n'bass)"""
        try:
            self.song.bpm = int(bpm)
        except ValueError as x:
            print("ERROR:", x)

    def do_ticks(self, ticks):
        """set the number of pattern ticks per beat (usually 4 or 8)"""
        try:
            self.song.ticks = int(ticks)
        except ValueError as x:
            print("ERROR:", x)

    def do_samples(self, args):
        """show the loaded samples"""
        print("Samples:")
        print(", ".join(self.song.instruments))

    def do_patterns(self, args):
        """show the loaded patterns"""
        print("Patterns:")
        for name, pattern in sorted(self.song.patterns.items()):
            self.print_pattern(name, pattern)

    def print_pattern(self, name, pattern):
        print("PATTERN {:s}".format(name))
        for instrument, bars in pattern.items():
            print(" {:>15s} = {:s}".format(instrument, bars))

    def do_pattern(self, names):
        """play the pattern with the given name(s)"""
        names = names.split()
        for name in sorted(set(names)):
            try:
                pat = self.song.patterns[name]
                self.print_pattern(name, pat)
            except KeyError:
                print("no such pattern '{:s}'".format(name))
                return
        patterns = [self.song.patterns[name] for name in names]
        try:
            m = Mixer(patterns, self.song.bpm, self.song.ticks, self.song.instruments)
            result = m.mix(verbose=len(patterns) > 1).make_16bit()
            self.out.play_sample(result)
            self.out.wait_all_played()
        except ValueError as x:
            print("ERROR:", x)

    def do_play(self, args):
        """play a single sample by giving its name, add a bar (xx..x.. etc) to play it in a bar"""
        if ' ' in args:
            instrument, pattern = args.split(maxsplit=1)
            pattern = pattern.replace(' ', '')
        else:
            instrument = args
            pattern = None
        instrument = instrument.strip()
        try:
            sample = self.song.instruments[instrument]
        except KeyError:
            print("unknown sample")
            return
        if pattern:
            self.play_single_bar(sample, pattern)
        else:
            sample = sample.copy().make_16bit()
            self.out.play_sample(sample)
            self.out.wait_all_played()

    def play_single_bar(self, sample, pattern):
        try:
            m = Mixer([{"sample": pattern}], self.song.bpm, self.song.ticks, {"sample": sample})
            result = m.mix(verbose=False).make_16bit()
            self.out.play_sample(result)
            self.out.wait_all_played()
        except ValueError as x:
            print("ERROR:", x)

    def do_mix(self, args):
        """mix and play all patterns of the song"""
        if not self.song.pattern_sequence:
            print("Nothing to be mixed.")
            return
        output = "__temp_mix.wav"
        self.song.mix(output)
        mix = Sample(wave_file=output)
        print("Playing sound...")
        self.out.play_sample(mix)
        os.remove(output)

    def do_stream(self, args):
        """
        mix all patterns of the song and stream the output to your speakers in real-time,
        or to an output file if you give a filename argument.
        This is the fastest and most efficient way of generating the output mix because
        it uses very little memory and avoids large buffer copying.
        """
        if not self.song.pattern_sequence:
            print("Nothing to be mixed.")
            return
        if args:
            filename = args.strip()
            print("Mixing and streaming to output file '{0}'...".format(filename))
            self.out.stream_to_file(filename, self.song.mix_generator())
            print("\r ")
            return
        print("Mixing and streaming to speakers...")
        try:
            samples = self.out.normalized_samples(self.song.mix_generator())
            for sample in samples:
                self.out.play_sample(sample)
            print("\r ")
            self.out.wait_all_played()
        except KeyboardInterrupt:
            print("Stopped.")

    def do_rec(self, args):
        """Record (or overwrite) a new sample (instrument) bar in a pattern.
        Args: [pattern name] [sample] [bar(s)].
        Omit bars to remove the sample from the pattern.
        If a pattern with that name doesn't exist yet, it will be added."""
        args = args.split(maxsplit=2)
        if len(args) not in (2, 3):
            print("Wrong arguments. Use: patternname sample bar(s)")
            return
        if len(args) == 2:
            args.append(None)   # no bars
        pattern_name, instrument, bars = args
        if instrument not in self.song.instruments:
            print("Unknown sample '{:s}'.".format(instrument))
            return
        if pattern_name not in self.song.patterns:
            self.song.patterns[pattern_name] = {}
        pattern = self.song.patterns[pattern_name]
        if bars:
            bars = bars.replace(' ', '')
            if len(bars) % self.song.ticks != 0:
                print("Bar length must be multiple of the number of ticks.")
                return
            pattern[instrument] = bars
        else:
            if instrument in pattern:
                del pattern[instrument]
        if pattern_name in self.song.patterns:
            if not self.song.patterns[pattern_name]:
                del self.song.patterns[pattern_name]
                print("Pattern was empty and has been removed.")
            else:
                self.print_pattern(pattern_name, self.song.patterns[pattern_name])

    def do_seq(self, names):
        """
        Print the sequence of patterns that form the current track,
        or if you give a list of names: use that as the new pattern sequence.
        """
        if not names:
            print(" ".join(self.song.pattern_sequence))
            return
        names = names.split()
        for name in names:
            if name not in self.song.patterns:
                print("Unknown pattern '{:s}'.".format(name))
                return
        self.song.pattern_sequence = names

    def do_load(self, filename):
        """Load a new song file"""
        song = Song()
        try:
            song.read(filename, self.discard_unused_instruments)
            self.song = song
        except IOError as x:
            print("ERROR:", x)

    def do_save(self, filename):
        """Save current song to file"""
        if not filename:
            print("Give filename to save song to.")
            return
        if not filename.endswith(".ini"):
            filename += ".ini"
        if os.path.exists(filename):
            if input("File exists: '{:s}'. Overwrite y/n? ".format(filename)) not in ('y', 'yes'):
                return
        self.song.write(filename)