def main(args):
    if len(args) < 1:
        raise SystemExit("Mixes one or more audio files. Arguments: inputfile...")
    hqresample = AudiofileToWavStream.supports_hq_resample()
    if not hqresample:
        print("WARNING: ffmpeg isn't compiled with libsoxr, so hq resampling is not supported.")
    wav_streams = [AudiofileToWavStream(filename, hqresample=hqresample) for filename in args]
    with StreamMixer(wav_streams, endless=True) as mixer:
        mixed_samples = iter(mixer)
        with Output(mixer.samplerate, mixer.samplewidth, mixer.nchannels) as output:
            levelmeter = LevelMeter(rms_mode=False, lowest=-50)
            temp_stream = AudiofileToWavStream("samples/909_crash.wav", hqresample=hqresample)
            for timestamp, sample in mixed_samples:
                levelmeter.update(sample)
                output.play_sample(sample)
                time.sleep(sample.duration * 0.4)
                levelmeter.print(bar_width=60)
                if 5.0 <= timestamp <= 5.1:
                    mixer.add_stream(temp_stream)
                if 10.0 <= timestamp <= 10.1:
                    sample = Sample("samples/909_crash.wav").normalize()
                    mixer.add_sample(sample)
    print("done.")

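# Usage sketch (an assumption, not part of the snippet above): one plausible way to
# invoke main() from the command line; the sys import and the __main__ guard are
# added here purely for illustration.
import sys

if __name__ == "__main__":
    main(sys.argv[1:])   # every command line argument is an input audio file to mix
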
def _start_from_thread():
    # start loading the track from a thread to avoid gui stutters when loading takes a bit of time
    tf.stream = AudiofileToWavStream(filename, hqresample=hqresample)
    self.mixer.add_stream(tf.stream, [tf.volumefilter])
    tf.playback_started = datetime.datetime.now()
    tf.state = TrackFrame.state_playing
    tf.volume = volume

def main(args):
    if len(args) < 1:
        raise SystemExit("Mixes one or more audio files. Arguments: inputfile...")
    hqresample = AudiofileToWavStream.supports_hq_resample()
    if not hqresample:
        print("WARNING: ffmpeg isn't compiled with libsoxr, so hq resampling is not supported.")
    wav_streams = [AudiofileToWavStream(filename, hqresample=hqresample) for filename in args]
    with StreamMixer(wav_streams, endless=True) as mixer:
        mixed_samples = iter(mixer)
        with Output(mixer.samplerate, mixer.samplewidth, mixer.nchannels) as output:
            if not output.supports_streaming:
                raise RuntimeError("need api that supports streaming")
            levelmeter = LevelMeter(rms_mode=False, lowest=-50)
            output.register_notify_played(levelmeter.update)
            for timestamp, sample in mixed_samples:
                output.play_sample(sample)
                levelmeter.print(bar_width=60)
    print("done.")

def set_effect(self, effect_nr, filename):
    try:
        with AudiofileToWavStream(filename, hqresample=hqresample) as wav:
            sample = Sample(wav)
            self.effects[effect_nr] = sample
    except IOError as x:
        print("Can't load effect sample:", x)
    else:
        for button in self.buttons:
            if button.effect_nr == effect_nr:
                button["state"] = tk.NORMAL
                button["text"] = os.path.splitext(os.path.basename(filename))[0]
                break

def main(args):
    if len(args) < 1:
        raise SystemExit("Mixes one or more audio files. Arguments: inputfile...")
    hqresample = AudiofileToWavStream.supports_hq_resample()
    if not hqresample:
        print("WARNING: ffmpeg isn't compiled with libsoxr, so hq resampling is not supported.")
    wav_streams = [AudiofileToWavStream(filename, hqresample=hqresample) for filename in args]
    with StreamMixer(wav_streams, endless=True) as mixer:
        mixed_samples = iter(mixer)
        with Output(mixer.samplerate, mixer.samplewidth, mixer.nchannels) as output:
            levelmeter = LevelMeter(rms_mode=False, lowest=-50)
            for timestamp, sample in mixed_samples:
                levelmeter.update(sample)
                output.play_sample(sample)
                time.sleep(sample.duration * 0.4)
                levelmeter.print(bar_width=60)
    print("done.")

def do_button_release(self, event):
    if event.state & 0x0100 == 0:
        return  # no left mouse button event
    shift = event.state & 0x0001
    if shift:
        filename = tkinter.filedialog.askopenfilename()
        if filename:
            with AudiofileToWavStream(filename, hqresample=hqresample) as wav:
                sample = Sample(wav)
                self.jingles[event.widget.jingle_nr] = sample
                event.widget["state"] = tk.NORMAL
                event.widget["text"] = os.path.splitext(os.path.basename(filename))[0]
    else:
        sample = self.jingles[event.widget.jingle_nr]
        if sample:
            self.app.play_sample(sample)

self.pb_left.configure(style="yellow.Vertical.TProgressbar") else: self.pb_left.configure(style="green.Vertical.TProgressbar") if right > -3: self.pb_right.configure(style="red.Vertical.TProgressbar") elif right > -6: self.pb_right.configure(style="yellow.Vertical.TProgressbar") else: self.pb_right.configure(style="green.Vertical.TProgressbar") self.after(self.update_rate, self.update) def play_gui(file_or_stream): root = tk.Tk() app = LevelGUI(file_or_stream, master=root) app.mainloop() if __name__ == "__main__": if len(sys.argv) != 2: raise SystemExit("give audio file to play as an argument.") hqresample = AudiofileToWavStream.supports_hq_resample() with AudiofileToWavStream(sys.argv[1], hqresample=hqresample) as stream: print(stream.format_probe) if stream.conversion_required and not hqresample: print( "WARNING: ffmpeg isn't compiled with libsoxr, so hq resampling is not supported." ) play_gui(stream) print("Done.")
import tkinter as tk
import tkinter.ttk as ttk
import tkinter.font
import tkinter.messagebox
import tkinter.filedialog
from .backend import BACKEND_PORT
from synthesizer.streaming import AudiofileToWavStream, StreamMixer, VolumeFilter
from synthesizer.sample import Sample, Output, LevelMeter
import appdirs
import Pyro4
import Pyro4.errors
import Pyro4.futures

StreamMixer.buffer_size = 4096   # a larger buffer means fewer skips and lower cpu usage, but more latency and slower level meters

try:
    hqresample = AudiofileToWavStream.supports_hq_resample()
    if hqresample:
        print("Great, ffmpeg supports high quality resampling.")
    else:
        print("WARNING: ffmpeg isn't compiled with libsoxr, high quality resampling is not supported.")
except IOError:
    raise SystemExit("Cannot find the ffmpeg and ffprobe executables. They have to be installed on the search path.")


class Player:
    update_rate = 50          # 50 ms = 20 updates/sec
    levelmeter_lowest = -40   # dB
    xfade_duration = 7
    async_buffers = 2

    def __init__(self, app, trackframes):

class TrackFrame(ttk.LabelFrame):
    state_idle = 1
    state_warning = 2
    state_playing = 3
    crossfade_time = 6

    def __init__(self, app, master, title):
        self.title = title
        self.playing = False
        super().__init__(master, text=title, padding=4)
        self.app = app
        self.current_track = None
        self.current_track_filename = None
        self.current_track_duration = None
        self.stream = None
        self.stream_started = 0
        self.stream_opened = False
        self.volumeVar = tk.DoubleVar(value=100)
        self.volumefilter = VolumeFilter()
        self.fadeout = None
        self.fadein = None
        ttk.Label(self, text="title / artist / album").pack()
        self.titleLabel = ttk.Label(self, relief=tk.GROOVE, width=22, anchor=tk.W)
        self.titleLabel.pack()
        self.artistLabel = ttk.Label(self, relief=tk.GROOVE, width=22, anchor=tk.W)
        self.artistLabel.pack()
        self.albumlabel = ttk.Label(self, relief=tk.GROOVE, width=22, anchor=tk.W)
        self.albumlabel.pack()
        f = ttk.Frame(self)
        ttk.Label(f, text="time left: ").pack(side=tk.LEFT)
        self.timeleftLabel = ttk.Label(f, relief=tk.GROOVE, anchor=tk.CENTER)
        self.timeleftLabel.pack(side=tk.RIGHT, fill=tk.X, expand=True)
        f.pack(fill=tk.X)
        f = ttk.Frame(self)
        ttk.Label(f, text="V: ").pack(side=tk.LEFT)
        scale = ttk.Scale(f, from_=0, to=150, length=120, variable=self.volumeVar, command=self.on_volumechange)
        scale.bind("<Double-1>", lambda event: self.volumereset(100))
        scale.pack(side=tk.LEFT)
        self.volumeLabel = ttk.Label(f, text="???%")
        self.volumeLabel.pack(side=tk.RIGHT)
        f.pack(fill=tk.X)
        ttk.Button(self, text="Skip", command=self.skip).pack(pady=4)
        self.volumereset()
        self.stateLabel = tk.Label(self, text="STATE", relief=tk.SUNKEN, border=1)
        self.stateLabel.pack()

    def play(self, playing=True):
        self.playing = playing
        self.volumereset()

    def skip(self):
        if self.playing:
            self.app.switch_player()
        self.close_stream()
        self.titleLabel["text"] = ""
        self.artistLabel["text"] = ""
        self.albumlabel["text"] = ""
        self.timeleftLabel["text"] = "(next track...)"

    def tick(self, mixer):
        # if we don't have a track, try to get the next one from the playlist
        if self.current_track is None:
            track = self.app.upcoming_track_hash()
            if track:
                self.next_track(track)
                self.set_state(self.state_idle)
            else:
                self.set_state(self.state_warning)
        if self.playing and self.current_track:
            if self.stream_opened:
                # update the duration timer
                stream_time = time.time() - self.stream_started
                remaining = self.current_track_duration - stream_time
                self.timeleftLabel["text"] = datetime.timedelta(seconds=int(remaining))
                dotsl = '«' * (int(stream_time * 4) % 6)
                dotsr = '»' * len(dotsl)
                if self.fadein:
                    status = " FADE IN "
                elif self.fadeout:
                    status = " FADE OUT "
                else:
                    status = " PLAYING "
                self.stateLabel["text"] = dotsl + status + dotsr
                if self.fadeout:
                    self.volumeVar.set(self.fadeout * remaining / self.crossfade_time)
                    self.on_volumechange(self.volumeVar.get())
                elif remaining <= self.crossfade_time < self.current_track_duration:
                    self.fadeout = self.volumeVar.get()
                    self.app.start_playing_other()
                if self.fadein:
                    if stream_time >= self.crossfade_time:
                        self.fadein = None
                    else:
                        self.volumeVar.set(100 * stream_time / self.crossfade_time)
                        self.on_volumechange(self.volumeVar.get())
                if self.stream.closed:
                    # Stream is closed, probably exhausted. Skip to the other track.
                    self.skip()
                    return
            # when it is time, load the track and add its stream to the mixer
            if not self.stream:
                self.stream = object()   # placeholder
                Pyro4.futures.Future(self.start_stream)(mixer)

    def start_stream(self, mixer):
        self.stream = AudiofileToWavStream(self.current_track_filename, hqresample=hqresample)
        self.stream_started = time.time()
        self.after_idle(lambda s=self: s.set_state(s.state_playing))
        mixer.add_stream(self.stream, [self.volumefilter])
        if self.stream.format_probe and self.stream.format_probe.duration and not self.current_track_duration:
            # get the duration from the stream itself
            self.current_track_duration = self.stream.format_probe.duration
        self.stream_opened = True

    def close_stream(self):
        self.current_track = None
        self.fadein = None
        self.fadeout = None
        if self.stream_opened:
            self.stream.close()
            self.stream = None
            self.stream_opened = False

    def next_track(self, hashcode):
        if self.stream_opened:
            self.stream.close()
            self.stream = None
            self.stream_opened = False
        self.current_track = hashcode
        track = self.app.backend.track(hashcode=self.current_track)
        self.titleLabel["text"] = track["title"] or "-"
        self.artistLabel["text"] = track["artist"] or "-"
        self.albumlabel["text"] = track["album"] or "-"
        self.timeleftLabel["text"] = datetime.timedelta(seconds=int(track["duration"]))
        self.current_track_filename = track["location"]
        self.current_track_duration = track["duration"]
        self.volumereset()

    def on_volumechange(self, value):
        self.volumefilter.volume = self.volumeVar.get() / 100.0
        self.volumeLabel["text"] = "{:.0f}%".format(self.volumeVar.get())

    def volumereset(self, volume=100):
        self.volumeVar.set(volume)
        self.fadeout = None
        self.fadein = None
        self.on_volumechange(volume)

    def start_fadein(self):
        if self.current_track_duration <= self.crossfade_time:
            return
        self.volumereset(0)
        self.fadein = self.crossfade_time
        self.playing = True

    def set_state(self, state):
        if state == self.state_idle:
            self.stateLabel.configure(text=" Waiting ", bg="white", fg="black")
        elif state == self.state_playing:
            self.stateLabel.configure(text=" Playing ", bg="light green", fg="black")
        elif state == self.state_warning:
            self.stateLabel.configure(text=" Needs Track ", bg="red", fg="white")

import subprocess
import datetime
import tkinter as tk
import tkinter.ttk as ttk
import tkinter.font
import tkinter.messagebox
import tkinter.filedialog
from .backend import BACKEND_PORT
from synthesizer.streaming import AudiofileToWavStream, StreamMixer, VolumeFilter
from synthesizer.sample import Sample, Output, LevelMeter
import Pyro4
import Pyro4.errors
import Pyro4.futures

StreamMixer.buffer_size = 4096   # a larger buffer means fewer skips and lower cpu usage, but more latency and slower level meters

hqresample = AudiofileToWavStream.supports_hq_resample()
if not hqresample:
    print("WARNING: ffmpeg isn't compiled with libsoxr, so hq resampling is not supported.")


class Player:
    async_queue_size = 3      # a larger queue means less chance of skips, but more latency
    update_rate = 40          # a larger interval means lower cpu usage, but more chance of skips
    levelmeter_lowest = -40   # dB

    def __init__(self, app):
        self.app = app
        self.app.after(self.update_rate, self.tick)
        self.app.firstTrackFrame.play()