Esempio n. 1
0
 def __init__(self, app):
     """Set up the stream mixer, audio output and level meter, and start the GUI tick."""
     self.app = app
     self.stopping = False
     # schedule the periodic update callback and start the first track
     self.app.after(self.update_rate, self.tick)
     self.app.firstTrackFrame.play()
     # a single endless mixer feeds a single audio output device
     mixer = StreamMixer([], endless=True)
     self.mixer = mixer
     self.output = Output(mixer.samplerate, mixer.samplewidth, mixer.nchannels,
                          queuesize=self.async_queue_size)
     self.mixed_samples = iter(mixer)
     self.levelmeter = LevelMeter(rms_mode=False, lowest=self.levelmeter_lowest)
Esempio n. 2
0
 def open_audio_file(self, filename_or_stream):
     """Open a wav source, create the level meter and audio output, and show source info."""
     wav = wave.open(filename_or_stream, 'r')
     self.wave = wav
     self.samplewidth = wav.getsampwidth()
     self.samplerate = wav.getframerate()
     self.nchannels = wav.getnchannels()
     self.levelmeter = LevelMeter(rms_mode=False, lowest=self.lowest_level)
     self.audio_out = Output(self.samplerate, self.samplewidth, self.nchannels,
                             int(self.update_rate / 4))
     # show "<stream>" when the source is a file object rather than a path
     if isinstance(filename_or_stream, str):
         filename = filename_or_stream
     else:
         filename = "<stream>"
     info = "Source:\n{}\n\nRate: {:g} Khz\nBits: {}\nChannels: {}".format(
         filename, self.samplerate / 1000, 8 * self.samplewidth, self.nchannels)
     self.info.configure(text=info)
Esempio n. 3
0
def main(args):
    """Mix one or more audio files together and play the mix on the speakers.

    args: list of input file names; at least one is required.
    Raises SystemExit with a usage message when no input files are given.
    """
    if not args:  # idiomatic emptiness test instead of len(args) < 1
        raise SystemExit("Mixes one or more audio files. Arguments: inputfile...")
    hqresample = AudiofileToWavStream.supports_hq_resample()
    if not hqresample:
        print("WARNING: ffmpeg isn't compiled with libsoxr, so hq resampling is not supported.")
    wav_streams = [AudiofileToWavStream(filename, hqresample=hqresample) for filename in args]
    with StreamMixer(wav_streams, endless=True) as mixer:
        mixed_samples = iter(mixer)
        with Output(mixer.samplerate, mixer.samplewidth, mixer.nchannels) as output:
            levelmeter = LevelMeter(rms_mode=False, lowest=-50)
            for timestamp, sample in mixed_samples:
                levelmeter.update(sample)
                output.play_sample(sample)
                # sleep until roughly halfway through the sample so the meter
                # is printed while the sample is still audible
                time.sleep(sample.duration * 0.4)
                levelmeter.print(bar_width=60)
    print("done.")
Esempio n. 4
0
def main(args):
    """Mix one or more audio files and play them through a streaming audio output.

    args: list of input file names; at least one is required.
    Raises SystemExit with a usage message when no input files are given.
    Raises RuntimeError when the audio API does not support streaming.
    """
    if not args:  # idiomatic emptiness test instead of len(args) < 1
        raise SystemExit("Mixes one or more audio files. Arguments: inputfile...")
    hqresample = AudiofileToWavStream.supports_hq_resample()
    if not hqresample:
        print("WARNING: ffmpeg isn't compiled with libsoxr, so hq resampling is not supported.")
    wav_streams = [AudiofileToWavStream(filename, hqresample=hqresample) for filename in args]
    with StreamMixer(wav_streams, endless=True) as mixer:
        mixed_samples = iter(mixer)
        with Output(mixer.samplerate, mixer.samplewidth, mixer.nchannels) as output:
            if not output.supports_streaming:
                raise RuntimeError("need api that supports streaming")
            levelmeter = LevelMeter(rms_mode=False, lowest=-50)
            # the output notifies the level meter for each sample actually played,
            # so no manual sleep/update bookkeeping is needed here
            output.register_notify_played(levelmeter.update)
            for timestamp, sample in mixed_samples:
                output.play_sample(sample)
                levelmeter.print(bar_width=60)
    print("done.")
Esempio n. 5
0
 def open_audio_file(self, filename_or_stream):
     """Open the wav source, prepare level meter and audio output, and display info."""
     self.wave = wave.open(filename_or_stream, 'r')
     self.samplewidth = self.wave.getsampwidth()
     self.samplerate = self.wave.getframerate()
     self.nchannels = self.wave.getnchannels()
     self.levelmeter = LevelMeter(rms_mode=False, lowest=self.lowest_level)
     self.audio_out = Output(self.samplerate, self.samplewidth,
                             self.nchannels, int(self.update_rate / 4))
     # file objects have no useful name to display
     filename = filename_or_stream if isinstance(filename_or_stream, str) else "<stream>"
     info = "Source:\n{}\n\nRate: {:g} Khz\nBits: {}\nChannels: {}".format(
         filename,
         self.samplerate / 1000,
         8 * self.samplewidth,
         self.nchannels)
     self.info.configure(text=info)
Esempio n. 6
0
 def __init__(self, app, trackframes):
     """Wire up the mixer, audio output, level meter and the background player thread."""
     self.app = app
     self.trackframes = trackframes
     self.stopping = False
     # schedule the periodic GUI-side update
     self.app.after(self.update_rate, self.tick)
     self.mixer = StreamMixer([], endless=True)
     self.output = Output(self.mixer.samplerate, self.mixer.samplewidth,
                          self.mixer.nchannels, queuesize=self.async_buffers)
     self.mixed_samples = iter(self.mixer)
     self.levelmeter = LevelMeter(rms_mode=False, lowest=self.levelmeter_lowest)
     # the output device feeds every played sample into the level meter
     self.output.register_notify_played(self.levelmeter.update)
     for frame in self.trackframes:
         frame.player = self
     worker = Thread(target=self._play_sample_in_thread, name="jukebox_sampleplayer")
     worker.daemon = True
     worker.start()
Esempio n. 7
0
def play_console(filename_or_stream):
    """Play a wav file on the console while printing a peak level meter.

    NOTE(review): play_sample(..., async=True) uses 'async' as a keyword
    argument; 'async' is a reserved word since Python 3.7, so this code only
    parses on Python <= 3.6 (or needs the library's renamed parameter).
    """
    with wave.open(filename_or_stream, 'r') as wav:
        samplewidth = wav.getsampwidth()
        samplerate = wav.getframerate()
        nchannels = wav.getnchannels()
        bar_width = 60
        update_rate = 20   # lower this if you hear the sound crackle!
        levelmeter = LevelMeter(rms_mode=False, lowest=-50.0)
        with Output(samplerate, samplewidth, nchannels, int(update_rate/4)) as out:
            while True:
                # read roughly update_rate chunks of audio per second
                frames = wav.readframes(samplerate//update_rate)
                if not frames:
                    break
                sample = Sample.from_raw_frames(frames, wav.getsampwidth(), wav.getframerate(), wav.getnchannels())
                out.play_sample(sample, async=True)
                levelmeter.update(sample)
                time.sleep(sample.duration*0.4)   # print the peak meter more or less halfway during the sample
                levelmeter.print(bar_width)
    print("\ndone")
    input("Enter to exit:")
Esempio n. 8
0
def play_console(filename_or_stream):
    """Play a wav file on the console while printing a peak level meter.

    NOTE(review): play_sample(..., async=True) uses 'async' as a keyword
    argument; 'async' is a reserved word since Python 3.7, so this code only
    parses on Python <= 3.6 (or needs the library's renamed parameter).
    """
    with wave.open(filename_or_stream, 'r') as wav:
        samplewidth = wav.getsampwidth()
        samplerate = wav.getframerate()
        nchannels = wav.getnchannels()
        bar_width = 60
        update_rate = 20  # lower this if you hear the sound crackle!
        levelmeter = LevelMeter(rms_mode=False, lowest=-50.0)
        with Output(samplerate, samplewidth, nchannels,
                    int(update_rate / 4)) as out:
            while True:
                # read roughly update_rate chunks of audio per second
                frames = wav.readframes(samplerate // update_rate)
                if not frames:
                    break
                sample = Sample.from_raw_frames(frames, wav.getsampwidth(),
                                                wav.getframerate(),
                                                wav.getnchannels())
                out.play_sample(sample, async=True)
                levelmeter.update(sample)
                time.sleep(
                    sample.duration * 0.4
                )  # print the peak meter more or less halfway during the sample
                levelmeter.print(bar_width)
    print("\ndone")
    input("Enter to exit:")
Esempio n. 9
0
def main(args):
    """Mix the given audio files and play them, injecting extra one-shot samples.

    args: list of input file names; at least one is required.
    At ~5s a pre-opened extra stream is mixed in; at ~10s a normalized
    one-shot sample is mixed in.
    Raises SystemExit with a usage message when no input files are given.
    """
    if not args:  # idiomatic emptiness test instead of len(args) < 1
        raise SystemExit(
            "Mixes one or more audio files. Arguments: inputfile...")
    hqresample = AudiofileToWavStream.supports_hq_resample()
    if not hqresample:
        print(
            "WARNING: ffmpeg isn't compiled with libsoxr, so hq resampling is not supported."
        )
    wav_streams = [
        AudiofileToWavStream(filename, hqresample=hqresample)
        for filename in args
    ]
    with StreamMixer(wav_streams, endless=True) as mixer:
        mixed_samples = iter(mixer)
        with Output(mixer.samplerate, mixer.samplewidth,
                    mixer.nchannels) as output:
            levelmeter = LevelMeter(rms_mode=False, lowest=-50)
            # pre-open the extra stream so it can be mixed in instantly later
            temp_stream = AudiofileToWavStream("samples/909_crash.wav",
                                               hqresample=hqresample)
            for timestamp, sample in mixed_samples:
                levelmeter.update(sample)
                output.play_sample(sample)
                # sleep until roughly halfway through the sample, then print the meter
                time.sleep(sample.duration * 0.4)
                levelmeter.print(bar_width=60)
                if 5.0 <= timestamp <= 5.1:
                    mixer.add_stream(temp_stream)
                if 10.0 <= timestamp <= 10.1:
                    sample = Sample("samples/909_crash.wav").normalize()
                    mixer.add_sample(sample)
    print("done.")
Esempio n. 10
0
def play_console(filename_or_stream):
    """Play a wav file via a streaming audio output, printing a level meter bar."""
    bar_width = 60
    update_rate = 20   # lower this if you hear the sound crackle!
    levelmeter = LevelMeter(rms_mode=False, lowest=-50.0)
    with wave.open(filename_or_stream, 'r') as wav:
        samplewidth = wav.getsampwidth()
        samplerate = wav.getframerate()
        nchannels = wav.getnchannels()
        with Output(samplerate, samplewidth, nchannels) as out:
            print("Audio API used:", out.audio_api)
            if not out.supports_streaming:
                raise RuntimeError("need api that supports streaming")
            # the output updates the meter for each sample it actually plays
            out.register_notify_played(levelmeter.update)
            while True:
                frames = wav.readframes(samplerate // update_rate)
                if not frames:
                    break
                sample = Sample.from_raw_frames(frames, samplewidth,
                                                samplerate, nchannels)
                out.play_sample(sample)
                levelmeter.print(bar_width)
    print("\ndone")
    input("Enter to exit:")
Esempio n. 11
0
 def __init__(self, app, trackframes):
     """Create the stream mixer/audio output pair and start the player thread."""
     self.app = app
     self.trackframes = trackframes
     self.stopping = False
     # schedule the periodic GUI-side update
     self.app.after(self.update_rate, self.tick)
     mixer = StreamMixer([], endless=True)
     self.mixer = mixer
     self.output = Output(mixer.samplerate, mixer.samplewidth, mixer.nchannels,
                          queuesize=self.async_buffers)
     self.mixed_samples = iter(mixer)
     self.levelmeter = LevelMeter(rms_mode=False, lowest=self.levelmeter_lowest)
     for frame in self.trackframes:
         frame.player = self
     background = Thread(target=self._play_sample_in_thread,
                         name="jukebox_sampleplayer")
     background.daemon = True
     background.start()
Esempio n. 12
0
 def open_audio_file(self, filename_or_stream):
     """Open the wav source, set up a streaming audio output and display source info."""
     wav = wave.open(filename_or_stream, 'r')
     self.wave = wav
     self.samplewidth = wav.getsampwidth()
     self.samplerate = wav.getframerate()
     self.nchannels = wav.getnchannels()
     self.levelmeter = LevelMeter(rms_mode=False, lowest=self.lowest_level)
     self.audio_out = Output(self.samplerate, self.samplewidth, self.nchannels)
     print("Audio API used:", self.audio_out.audio_api)
     if not self.audio_out.supports_streaming:
         raise RuntimeError("need api that supports streaming")
     # the output pushes every played sample into the level meter
     self.audio_out.register_notify_played(self.levelmeter.update)
     if isinstance(filename_or_stream, str):
         filename = filename_or_stream
     else:
         filename = "<stream>"
     info = "Source:\n{}\n\nRate: {:g} Khz\nBits: {}\nChannels: {}".format(
         filename, self.samplerate / 1000, 8 * self.samplewidth, self.nchannels)
     self.info.configure(text=info)
Esempio n. 13
0
class Player:
    """Jukebox player backend: mixes per-trackframe streams into one audio output.

    The GUI-side state machine runs in tick() (scheduled via tk's after());
    actual sample decoding/playback runs in a background thread so GUI
    interactions don't interrupt audio.

    NOTE(review): _play_sample_in_thread uses play_sample(..., async=True);
    'async' is a reserved word since Python 3.7, so this class only parses on
    Python <= 3.6 (or needs the library's renamed parameter).
    """
    update_rate = 50    # 50 ms = 20 updates/sec
    levelmeter_lowest = -40  # dB
    xfade_duration = 7  # seconds; crossfade starts this long before track end
    async_buffers = 2  # audio output queue size

    def __init__(self, app, trackframes):
        """Set up mixer, audio output, level meter and start the player thread."""
        self.app = app
        self.trackframes = trackframes
        self.app.after(self.update_rate, self.tick)
        self.stopping = False
        self.mixer = StreamMixer([], endless=True)
        self.output = Output(self.mixer.samplerate, self.mixer.samplewidth, self.mixer.nchannels, queuesize=self.async_buffers)
        self.mixed_samples = iter(self.mixer)
        self.levelmeter = LevelMeter(rms_mode=False, lowest=self.levelmeter_lowest)
        for tf in self.trackframes:
            tf.player = self
        player_thread = Thread(target=self._play_sample_in_thread, name="jukebox_sampleplayer")
        player_thread.daemon = True
        player_thread.start()

    def skip(self, trackframe):
        """Abort the given frame's current track and flag it to switch to the next."""
        if trackframe.state != TrackFrame.state_needtrack and trackframe.stream:
            trackframe.stream.close()
            trackframe.stream = None
        trackframe.display_track(None, None, None, "(next track...)")
        trackframe.state = TrackFrame.state_switching

    def stop(self):
        """Stop playback: close all track streams, then the mixer and output."""
        self.stopping = True
        for tf in self.trackframes:
            if tf.stream:
                tf.stream.close()
                tf.stream = None
            tf.state = TrackFrame.state_needtrack
        self.mixer.close()
        self.output.close()

    def tick(self):
        """Periodic GUI-side update; reschedules itself until stopping is set."""
        # the actual decoding and sound playing is done in a background thread
        self._levelmeter()
        self._load_song()
        self._play_song()
        self._crossfade()
        if not self.stopping:
            self.app.after(self.update_rate, self.tick)

    def _play_sample_in_thread(self):
        """
        This is run in a background thread to avoid GUI interactions interfering with audio output.
        """
        while True:
            if self.stopping:
                break
            _, sample = next(self.mixed_samples)
            if sample and sample.duration > 0:
                # NOTE(review): 'async' is reserved since Python 3.7 -- see class docstring
                self.output.play_sample(sample, async=True)
                self.levelmeter.update(sample)  # will be updated from the gui thread
            else:
                self.levelmeter.reset()
                time.sleep(self.update_rate/1000*2)   # avoid hogging the cpu while no samples are played

    def _levelmeter(self):
        """Push the current left/right meter levels to the GUI."""
        self.app.update_levels(self.levelmeter.level_left, self.levelmeter.level_right)

    def _load_song(self):
        """Assign the next playlist track to any frame waiting for one."""
        if self.stopping:
            return   # make sure we don't load new songs when the player is shutting down
        for tf in self.trackframes:
            if tf.state == TrackFrame.state_needtrack:
                track = self.app.pop_playlist_track()
                if track:
                    tf.track = track
                    tf.state = TrackFrame.state_idle

    def _play_song(self):
        """Advance each frame's playback state; start streams for idle frames."""
        def start_stream(tf, filename, volume):
            # open the stream asynchronously and flip the frame to 'playing' when ready
            def _start_from_thread():
                # start loading the track from a thread to avoid gui stutters when loading takes a bit of time
                tf.stream = AudiofileToWavStream(filename, hqresample=hqresample)
                self.mixer.add_stream(tf.stream, [tf.volumefilter])
                tf.playback_started = datetime.datetime.now()
                tf.state = TrackFrame.state_playing
                tf.volume = volume
            tf.state = TrackFrame.state_loading
            Thread(target=_start_from_thread, name="stream_loader").start()
        for tf in self.trackframes:
            if tf.state == TrackFrame.state_playing:
                remaining = tf.track_duration - (datetime.datetime.now() - tf.playback_started)
                remaining = remaining.total_seconds()
                tf.time = datetime.timedelta(seconds=math.ceil(remaining))
                if tf.stream.closed and tf.time.total_seconds() <= 0:
                    self.skip(tf)  # stream ended!
            elif tf.state == TrackFrame.state_idle:
                if tf.xfade_state == TrackFrame.state_xfade_fadingin:
                    # if we're set to fading in, regardless of other tracks, we start playing as well
                    start_stream(tf, tf.track["location"], 0)
                # NOTE(review): the generator's 'tf' below shadows the loop variable;
                # it works (any() uses its own scope) but is confusing to read
                elif not any(tf for tf in self.trackframes if tf.state in (TrackFrame.state_playing, TrackFrame.state_loading)):
                    # if there is no other track currently playing (or loading), it's our turn!
                    start_stream(tf, tf.track["location"], 100)
            elif tf.state == TrackFrame.state_switching:
                tf.state = TrackFrame.state_needtrack

    def _crossfade(self):
        """Start a fade-out near each track's end and step all running fades."""
        for tf in self.trackframes:
            # nearing the end of the track? then start a fade out
            if tf.state == TrackFrame.state_playing \
                    and tf.xfade_state == TrackFrame.state_xfade_nofade \
                    and tf.time.total_seconds() <= self.xfade_duration:
                tf.xfade_state = TrackFrame.state_xfade_fadingout
                tf.xfade_started = datetime.datetime.now()
                tf.xfade_start_volume = tf.volume
                # fade in the first other track that is currently idle
                for other_tf in self.trackframes:
                    if tf is not other_tf and other_tf.state == TrackFrame.state_idle:
                        other_tf.xfade_state = TrackFrame.state_xfade_fadingin
                        other_tf.xfade_started = datetime.datetime.now()
                        other_tf.xfade_start_volume = 0
                        other_tf.volume = 0
                        break
        for tf in self.trackframes:
            if tf.xfade_state == TrackFrame.state_xfade_fadingin:
                # fading in, slide volume up from 0 to 100%
                volume = 100 * (datetime.datetime.now() - tf.xfade_started).total_seconds() / self.xfade_duration
                tf.volume = min(volume, 100)
                if volume >= 100:
                    tf.xfade_state = TrackFrame.state_xfade_nofade  # fade reached the end
            elif tf.xfade_state == TrackFrame.state_xfade_fadingout:
                # fading out, slide volume down from what it was at to 0%
                fade_progress = (datetime.datetime.now() - tf.xfade_started)
                fade_progress = (self.xfade_duration - fade_progress.total_seconds()) / self.xfade_duration
                volume = max(0, tf.xfade_start_volume * fade_progress)
                tf.volume = max(volume, 0)
                if volume <= 0:
                    tf.xfade_state = TrackFrame.state_xfade_nofade   # fade reached the end

    def play_sample(self, sample):
        """Mix a one-shot sample into the output, ducking the playing track meanwhile."""
        def unmute(trf, vol):
            # restore the track's volume once the one-shot sample has finished
            if trf:
                trf.volume=vol
        if sample and sample.duration > 0:
            for tf in self.trackframes:
                if tf.state == TrackFrame.state_playing:
                    old_volume = tf.mute_volume(40)
                    self.mixer.add_sample(sample, lambda mtf=tf, vol=old_volume: unmute(mtf, vol))
                    break
            else:
                # no track is playing; just mix the sample in as-is
                self.mixer.add_sample(sample)
Esempio n. 14
0
class LevelGUI(tk.Frame):
    """Tk GUI that plays a wav source and shows left/right peak levels in dB.

    NOTE(review): update() uses play_sample(..., async=True); 'async' is a
    reserved word since Python 3.7, so this class only parses on Python <= 3.6
    (or needs the library's renamed parameter).
    """
    def __init__(self, audio_source, master=None):
        """Build the level-bar widgets, open the audio source and start updating."""
        self.lowest_level = -50
        super().__init__(master)
        self.master.title("Levels")

        self.pbvar_left = tk.IntVar()
        self.pbvar_right = tk.IntVar()
        pbstyle = ttk.Style()
        pbstyle.theme_use("classic")
        # one progressbar style per level zone: green/yellow/red(orange)
        pbstyle.configure("green.Vertical.TProgressbar", troughcolor="gray", background="light green")
        pbstyle.configure("yellow.Vertical.TProgressbar", troughcolor="gray", background="yellow")
        pbstyle.configure("red.Vertical.TProgressbar", troughcolor="gray", background="orange")

        frame = tk.LabelFrame(self, text="Left")
        frame.pack(side=tk.LEFT)
        tk.Label(frame, text="dB").pack()
        self.pb_left = ttk.Progressbar(frame, orient=tk.VERTICAL, length=300, maximum=-self.lowest_level, variable=self.pbvar_left, mode='determinate', style='yellow.Vertical.TProgressbar')
        self.pb_left.pack()

        frame = tk.LabelFrame(self, text="Right")
        frame.pack(side=tk.LEFT)
        tk.Label(frame, text="dB").pack()
        self.pb_right = ttk.Progressbar(frame, orient=tk.VERTICAL, length=300, maximum=-self.lowest_level, variable=self.pbvar_right, mode='determinate', style='yellow.Vertical.TProgressbar')
        self.pb_right.pack()

        frame = tk.LabelFrame(self, text="Info")
        self.info = tk.Label(frame, text="", justify=tk.LEFT)
        frame.pack()
        self.info.pack(side=tk.TOP)
        self.pack()
        self.update_rate = 19   # lower this if you hear the sound crackle!
        self.open_audio_file(audio_source)
        self.after_idle(self.update)

    def open_audio_file(self, filename_or_stream):
        """Open the wav source, create the level meter/audio output and show info."""
        self.wave = wave.open(filename_or_stream, 'r')
        self.samplewidth = self.wave.getsampwidth()
        self.samplerate = self.wave.getframerate()
        self.nchannels = self.wave.getnchannels()
        self.levelmeter = LevelMeter(rms_mode=False, lowest=self.lowest_level)
        self.audio_out = Output(self.samplerate, self.samplewidth, self.nchannels, int(self.update_rate/4))
        filename = filename_or_stream if isinstance(filename_or_stream, str) else "<stream>"
        info = "Source:\n{}\n\nRate: {:g} Khz\nBits: {}\nChannels: {}".format(filename, self.samplerate/1000, 8*self.samplewidth, self.nchannels)
        self.info.configure(text=info)

    def update(self, *args, **kwargs):
        """Play the next chunk of audio and refresh both level bars; reschedules itself."""
        frames = self.wave.readframes(self.samplerate//self.update_rate)
        if not frames:
            self.pbvar_left.set(0)
            self.pbvar_right.set(0)
            print("done!")
            return
        sample = Sample.from_raw_frames(frames, self.samplewidth, self.samplerate, self.nchannels)
        # NOTE(review): 'async' is reserved since Python 3.7 -- see class docstring;
        # also note that time.sleep() here blocks the Tk event loop for its duration
        self.audio_out.play_sample(sample, async=True)
        time.sleep(sample.duration/3)   # print the peak meter more or less halfway during the sample
        left, peak_l, right, peak_r = self.levelmeter.update(sample)
        # shift dB readings so the progressbar range is 0..-lowest_level
        self.pbvar_left.set(left-self.lowest_level)
        self.pbvar_right.set(right-self.lowest_level)
        if left > -3:
            self.pb_left.configure(style="red.Vertical.TProgressbar")
        elif left > -6:
            self.pb_left.configure(style="yellow.Vertical.TProgressbar")
        else:
            self.pb_left.configure(style="green.Vertical.TProgressbar")
        if right > -3:
            self.pb_right.configure(style="red.Vertical.TProgressbar")
        elif right > -6:
            self.pb_right.configure(style="yellow.Vertical.TProgressbar")
        else:
            self.pb_right.configure(style="green.Vertical.TProgressbar")
        self.after(self.update_rate, self.update)
Esempio n. 15
0
class LevelGUI(tk.Frame):
    """Tk GUI that plays a wav source and shows left/right peak levels in dB.

    NOTE(review): update() uses play_sample(..., async=True); 'async' is a
    reserved word since Python 3.7, so this class only parses on Python <= 3.6
    (or needs the library's renamed parameter).
    """
    def __init__(self, audio_source, master=None):
        """Build the level-bar widgets, open the audio source and start updating."""
        self.lowest_level = -50
        super().__init__(master)
        self.master.title("Levels")

        self.pbvar_left = tk.IntVar()
        self.pbvar_right = tk.IntVar()
        pbstyle = ttk.Style()
        pbstyle.theme_use("classic")
        # one progressbar style per level zone: green/yellow/red(orange)
        pbstyle.configure("green.Vertical.TProgressbar",
                          troughcolor="gray",
                          background="light green")
        pbstyle.configure("yellow.Vertical.TProgressbar",
                          troughcolor="gray",
                          background="yellow")
        pbstyle.configure("red.Vertical.TProgressbar",
                          troughcolor="gray",
                          background="orange")

        frame = tk.LabelFrame(self, text="Left")
        frame.pack(side=tk.LEFT)
        tk.Label(frame, text="dB").pack()
        self.pb_left = ttk.Progressbar(frame,
                                       orient=tk.VERTICAL,
                                       length=300,
                                       maximum=-self.lowest_level,
                                       variable=self.pbvar_left,
                                       mode='determinate',
                                       style='yellow.Vertical.TProgressbar')
        self.pb_left.pack()

        frame = tk.LabelFrame(self, text="Right")
        frame.pack(side=tk.LEFT)
        tk.Label(frame, text="dB").pack()
        self.pb_right = ttk.Progressbar(frame,
                                        orient=tk.VERTICAL,
                                        length=300,
                                        maximum=-self.lowest_level,
                                        variable=self.pbvar_right,
                                        mode='determinate',
                                        style='yellow.Vertical.TProgressbar')
        self.pb_right.pack()

        frame = tk.LabelFrame(self, text="Info")
        self.info = tk.Label(frame, text="", justify=tk.LEFT)
        frame.pack()
        self.info.pack(side=tk.TOP)
        self.pack()
        self.update_rate = 19  # lower this if you hear the sound crackle!
        self.open_audio_file(audio_source)
        self.after_idle(self.update)

    def open_audio_file(self, filename_or_stream):
        """Open the wav source, create the level meter/audio output and show info."""
        self.wave = wave.open(filename_or_stream, 'r')
        self.samplewidth = self.wave.getsampwidth()
        self.samplerate = self.wave.getframerate()
        self.nchannels = self.wave.getnchannels()
        self.levelmeter = LevelMeter(rms_mode=False, lowest=self.lowest_level)
        self.audio_out = Output(self.samplerate, self.samplewidth,
                                self.nchannels, int(self.update_rate / 4))
        filename = filename_or_stream if isinstance(filename_or_stream,
                                                    str) else "<stream>"
        info = "Source:\n{}\n\nRate: {:g} Khz\nBits: {}\nChannels: {}".format(
            filename, self.samplerate / 1000, 8 * self.samplewidth,
            self.nchannels)
        self.info.configure(text=info)

    def update(self, *args, **kwargs):
        """Play the next chunk of audio and refresh both level bars; reschedules itself."""
        frames = self.wave.readframes(self.samplerate // self.update_rate)
        if not frames:
            self.pbvar_left.set(0)
            self.pbvar_right.set(0)
            print("done!")
            return
        sample = Sample.from_raw_frames(frames, self.samplewidth,
                                        self.samplerate, self.nchannels)
        # NOTE(review): 'async' is reserved since Python 3.7 -- see class docstring;
        # also note that time.sleep() here blocks the Tk event loop for its duration
        self.audio_out.play_sample(sample, async=True)
        time.sleep(
            sample.duration /
            3)  # print the peak meter more or less halfway during the sample
        left, peak_l, right, peak_r = self.levelmeter.update(sample)
        # shift dB readings so the progressbar range is 0..-lowest_level
        self.pbvar_left.set(left - self.lowest_level)
        self.pbvar_right.set(right - self.lowest_level)
        if left > -3:
            self.pb_left.configure(style="red.Vertical.TProgressbar")
        elif left > -6:
            self.pb_left.configure(style="yellow.Vertical.TProgressbar")
        else:
            self.pb_left.configure(style="green.Vertical.TProgressbar")
        if right > -3:
            self.pb_right.configure(style="red.Vertical.TProgressbar")
        elif right > -6:
            self.pb_right.configure(style="yellow.Vertical.TProgressbar")
        else:
            self.pb_right.configure(style="green.Vertical.TProgressbar")
        self.after(self.update_rate, self.update)
Esempio n. 16
0
class Player:
    """Two-track player backend: mixes both track frames into one audio output.

    All work happens in tick(), scheduled via tk's after(); there is no
    separate playback thread in this variant.

    NOTE(review): tick() uses play_sample(..., async=True); 'async' is a
    reserved word since Python 3.7, so this class only parses on Python <= 3.6
    (or needs the library's renamed parameter).
    """
    async_queue_size = 3  # larger is less chance of getting skips, but latency increases
    update_rate = 40  # larger is less cpu usage but more chance of getting skips
    levelmeter_lowest = -40  # dB

    def __init__(self, app):
        """Set up mixer, audio output and level meter, and schedule the tick."""
        self.app = app
        self.app.after(self.update_rate, self.tick)
        self.app.firstTrackFrame.play()
        self.stopping = False
        self.mixer = StreamMixer([], endless=True)
        self.output = Output(self.mixer.samplerate,
                             self.mixer.samplewidth,
                             self.mixer.nchannels,
                             queuesize=self.async_queue_size)
        self.mixed_samples = iter(self.mixer)
        self.levelmeter = LevelMeter(rms_mode=False,
                                     lowest=self.levelmeter_lowest)

    def stop(self):
        """Stop playback and close both track streams, the mixer and the output."""
        self.stopping = True
        self.app.firstTrackFrame.close_stream()
        self.app.secondTrackFrame.close_stream()
        self.mixer.close()
        self.output.close()

    def switch_player(self):
        """
        The actual switching of the main track player. Note that it can be playing
        already because of the fade-in mixing.
        """
        first_is_playing = self.app.firstTrackFrame.playing
        self.app.firstTrackFrame.play(not first_is_playing)
        self.app.secondTrackFrame.play(first_is_playing)

    def tick(self):
        """Feed the mixer/output when the queue runs low; reschedules itself."""
        # only produce more audio when the output queue is at most half full
        if self.output.queue_size() <= self.async_queue_size / 2:
            self.app.firstTrackFrame.tick(self.mixer)
            self.app.secondTrackFrame.tick(self.mixer)
            _, sample = next(self.mixed_samples)
            if sample and sample.duration > 0:
                # NOTE(review): 'async' is reserved since Python 3.7 -- see class docstring
                self.output.play_sample(sample, async=True)
                left, _, right, _ = self.levelmeter.update(sample)
                self.app.update_levels(left, right)
            else:
                self.levelmeter.reset()
                self.app.update_levels(self.levelmeter.level_left,
                                       self.levelmeter.level_right)
        if not self.stopping:
            self.app.after(self.update_rate, self.tick)

    def play_sample(self, sample):
        """Mix a one-shot sample into the output (ignored when empty)."""
        if sample and sample.duration > 0:
            self.mixer.add_sample(sample)

    def start_play_other(self):
        """Start fading in whichever track frame is not currently playing."""
        # @todo fix the track switching and fadein/fadeout, it's a bit of a mess
        if self.app.firstTrackFrame.playing:
            other_track = self.app.secondTrackFrame
        else:
            other_track = self.app.firstTrackFrame
        other_track.start_fadein()
Esempio n. 17
0
class Player:
    """Jukebox player: mixes the tracks held by a set of track frames and crossfades between them."""
    update_rate = 50  # 50 ms = 20 updates/sec
    levelmeter_lowest = -40  # dB
    xfade_duration = 7  # crossfade length in seconds (compared against remaining track time)
    async_buffers = 4  # number of buffered chunks, passed to Output as queuesize

    def __init__(self, app, trackframes):
        """
        Wire up the mixer, audio output, level metering and the background
        playback thread, and register this player on every track frame.
        """
        self.app = app
        self.trackframes = trackframes
        self.app.after(self.update_rate, self.tick)
        self.stopping = False
        self.mixer = StreamMixer([], endless=True)
        self.output = Output(self.mixer.samplerate, self.mixer.samplewidth,
                             self.mixer.nchannels, queuesize=self.async_buffers)
        self.mixed_samples = iter(self.mixer)
        self.levelmeter = LevelMeter(rms_mode=False, lowest=self.levelmeter_lowest)
        # the output notifies the level meter for every sample it has played
        self.output.register_notify_played(self.levelmeter.update)
        for frame in self.trackframes:
            frame.player = self
        worker = Thread(target=self._play_sample_in_thread,
                        name="jukebox_sampleplayer", daemon=True)
        worker.start()

    def skip(self, trackframe):
        """Abort the track playing in the given frame and mark it to receive a new one."""
        stream = trackframe.stream
        if trackframe.state != TrackFrame.state_needtrack and stream:
            stream.close()
            trackframe.stream = None
        trackframe.display_track(None, None, None, "(next track...)")
        trackframe.state = TrackFrame.state_switching

    def stop(self):
        """Stop playback completely: close all track streams, the mixer and the output."""
        self.stopping = True
        for frame in self.trackframes:
            stream = frame.stream
            if stream:
                stream.close()
                frame.stream = None
            frame.state = TrackFrame.state_needtrack
        self.mixer.close()
        self.output.close()

    def tick(self):
        # the actual decoding and sound playing is done in a background thread
        self._levelmeter()
        self._load_song()
        self._play_song()
        self._crossfade()
        if not self.stopping:
            self.app.after(self.update_rate, self.tick)

    def _play_sample_in_thread(self):
        """
        This is run in a background thread to avoid GUI interactions interfering with audio output.
        """
        while True:
            if self.stopping:
                break
            _, sample = next(self.mixed_samples)
            if sample and sample.duration > 0:
                self.output.play_sample(sample)
            else:
                self.levelmeter.reset()
                time.sleep(
                    self.update_rate / 1000 *
                    2)  # avoid hogging the cpu while no samples are played

    def _levelmeter(self):
        self.app.update_levels(self.levelmeter.level_left,
                               self.levelmeter.level_right)

    def _load_song(self):
        """Hand the next playlist track to every frame that is waiting for one."""
        if self.stopping:
            # make sure we don't load new songs when the player is shutting down
            return
        for frame in self.trackframes:
            if frame.state != TrackFrame.state_needtrack:
                continue
            track = self.app.pop_playlist_track()
            if track:
                frame.track = track
                frame.state = TrackFrame.state_idle

    def _play_song(self):
        """
        Advance the per-frame playback state machine:
        - playing frames get their remaining-time display updated and are
          recycled (skip) once their stream has closed and time ran out;
        - idle frames start streaming when they are fading in, or when no
          other frame is playing/loading;
        - frames marked 'switching' go back to needing a track.
        """
        def start_stream(tf, filename, volume):
            # Begin streaming 'filename' into the mixer at the given volume.
            def _start_from_thread():
                # start loading the track from a thread to avoid gui stutters when loading takes a bit of time
                # NOTE(review): 'hqresample' is not defined in this method —
                # presumably a module-level flag set at startup; confirm.
                tf.stream = AudiofileToWavStream(filename,
                                                 hqresample=hqresample)
                self.mixer.add_stream(tf.stream, [tf.volumefilter])
                tf.playback_started = datetime.datetime.now()
                tf.state = TrackFrame.state_playing
                tf.volume = volume

            tf.state = TrackFrame.state_loading
            Thread(target=_start_from_thread, name="stream_loader").start()

        for tf in self.trackframes:
            if tf.state == TrackFrame.state_playing:
                # update the remaining-time display, rounded up to whole seconds
                remaining = tf.track_duration - (datetime.datetime.now() -
                                                 tf.playback_started)
                remaining = remaining.total_seconds()
                tf.time = datetime.timedelta(seconds=math.ceil(remaining))
                if tf.stream.closed and tf.time.total_seconds() <= 0:
                    self.skip(tf)  # stream ended!
            elif tf.state == TrackFrame.state_idle:
                if tf.xfade_state == TrackFrame.state_xfade_fadingin:
                    # if we're set to fading in, regardless of other tracks, we start playing as well
                    start_stream(tf, tf.track["location"], 0)
                # NOTE(review): the generator variable below shadows the outer
                # 'tf'; harmless (generator scope) but confusing to read.
                elif not any(tf for tf in self.trackframes
                             if tf.state in (TrackFrame.state_playing,
                                             TrackFrame.state_loading)):
                    # if there is no other track currently playing (or loading), it's our turn!
                    start_stream(tf, tf.track["location"], 100)
            elif tf.state == TrackFrame.state_switching:
                tf.state = TrackFrame.state_needtrack

    def _crossfade(self):
        """
        Crossfade handling, run on every tick, in two phases:
        1) when a playing, not-yet-fading track has xfade_duration seconds or
           less left, start fading it out and pick the first idle track to
           fade in;
        2) advance the volume of every track that is currently fading, based
           on wall-clock time elapsed since the fade started.
        """
        for tf in self.trackframes:
            # nearing the end of the track? then start a fade out
            if tf.state == TrackFrame.state_playing \
                    and tf.xfade_state == TrackFrame.state_xfade_nofade \
                    and tf.time.total_seconds() <= self.xfade_duration:
                tf.xfade_state = TrackFrame.state_xfade_fadingout
                tf.xfade_started = datetime.datetime.now()
                tf.xfade_start_volume = tf.volume
                # fade in the first other track that is currently idle
                for other_tf in self.trackframes:
                    if tf is not other_tf and other_tf.state == TrackFrame.state_idle:
                        other_tf.xfade_state = TrackFrame.state_xfade_fadingin
                        other_tf.xfade_started = datetime.datetime.now()
                        other_tf.xfade_start_volume = 0
                        other_tf.volume = 0
                        break
        for tf in self.trackframes:
            if tf.xfade_state == TrackFrame.state_xfade_fadingin:
                # fading in, slide volume up from 0 to 100%
                volume = 100 * (datetime.datetime.now() - tf.xfade_started
                                ).total_seconds() / self.xfade_duration
                tf.volume = min(volume, 100)
                if volume >= 100:
                    tf.xfade_state = TrackFrame.state_xfade_nofade  # fade reached the end
            elif tf.xfade_state == TrackFrame.state_xfade_fadingout:
                # fading out, slide volume down from what it was at to 0%
                fade_progress = (datetime.datetime.now() - tf.xfade_started)
                fade_progress = (
                    self.xfade_duration -
                    fade_progress.total_seconds()) / self.xfade_duration
                volume = max(0, tf.xfade_start_volume * fade_progress)
                tf.volume = max(volume, 0)
                if volume <= 0:
                    tf.xfade_state = TrackFrame.state_xfade_nofade  # fade reached the end

    def play_sample(self, sample):
        """
        Mix a one-shot sample into the output.  If a track is currently
        playing, lower its volume via mute_volume(40) and pass the mixer a
        callback that restores the previous volume (presumably invoked when
        the sample finishes — confirm against StreamMixer.add_sample).
        """
        if not sample or sample.duration <= 0:
            return
        playing = next((tf for tf in self.trackframes
                        if tf.state == TrackFrame.state_playing), None)
        if playing is None:
            self.mixer.add_sample(sample)
            return
        previous_volume = playing.mute_volume(40)

        def restore(trf=playing, vol=previous_volume):
            # defaults bind the frame/volume now, avoiding late-binding issues
            if trf:
                trf.volume = vol

        self.mixer.add_sample(sample, restore)