Exemplo n.º 1
0
class SoundEngine:
    """Owns the audio Output and fills the module-level sample cache.

    ``samples_to_load`` maps a sample name to a tuple of
    (filename-or-ready-made-Sample, max-simultaneous-plays).  Filenames are
    resolved against the package's bundled ``sounds/`` directory.
    """

    def __init__(
            self, samples_to_load: Dict[str, Tuple[Union[str, Sample],
                                                   int]]) -> None:
        global samples
        samples.clear()
        self.output = Output(mixing="mix")
        # only announce loading when at least one entry needs decoding from disk
        if any(isinstance(src, str) for src, _ in samples_to_load.values()):
            print("Loading sound files...")
        for name, (source, max_simultaneously) in samples_to_load.items():
            if isinstance(source, Sample):
                samples[name] = source
            else:
                samples[name] = self._load_packaged_sound(name, source)
            self.output.set_sample_play_limit(name, max_simultaneously)
        print("Sound API initialized:", self.output.audio_api)

    @staticmethod
    def _load_packaged_sound(name: str, filename: str) -> Sample:
        # Decode a sound file shipped inside the package.  The raw bytes are
        # written to a temp file so the audio-file streamer can open it; the
        # temp file is always removed afterwards.
        data = pkgutil.get_data(__name__, "sounds/" + filename)
        if not data:
            raise SystemExit("corrupt package; sound data is missing")
        tmp = tempfile.NamedTemporaryFile(delete=False,
                                          suffix=".ogg")
        try:
            tmp.write(data)
            tmp.close()
            return Sample(
                streaming.AudiofileToWavStream(tmp.name),
                name).stereo()
        finally:
            os.remove(tmp.name)

    def play_sample(self, samplename, repeat=False, after=0.0):
        """Play a previously loaded sample by name (optionally repeating / delayed)."""
        self.output.play_sample(samples[samplename], repeat, after)

    def silence(self, sid_or_name=None):
        """Stop one playing sample (by sid or name), or everything when no argument given."""
        if sid_or_name:
            self.output.stop_sample(sid_or_name)
        else:
            self.output.silence()

    def close(self):
        """Shut down the audio output."""
        self.output.close()
Exemplo n.º 2
0
class SynthGUI(tk.Frame):
    """Main application frame of the software FM/PWM synthesizer.

    Hosts the oscillator panels, envelope/arpeggio/tremolo/echo filter panels,
    a piano keyboard, speaker routing, and preset load/save.  Audio is produced
    by a WaveSynth and played through a mixing Output (see create_synth).
    """
    def __init__(self, master=None):
        """Build the complete GUI and create the initial synth + audio output."""
        super().__init__(master)
        self.master.title("Software FM/PWM Synthesizer   |   synthplayer lib v" + synthplayer.__version__)
        self.waveform_area = tk.Frame(self)
        self.osc_frame = tk.Frame(self)
        self.oscillators = []
        self.piano_frame = tk.Frame(self)
        self.pianokeys_gui = PianoKeyboardGUI(self.piano_frame, self)
        self.pianokeys_gui.pack(side=tk.BOTTOM)
        filter_frame = tk.LabelFrame(self, text="Filters etc.", padx=10, pady=10)
        # three independent ADSR envelope panels, each assignable to an oscillator
        self.envelope_filter_guis = [
            EnvelopeFilterGUI(filter_frame, "1", self),
            EnvelopeFilterGUI(filter_frame, "2", self),
            EnvelopeFilterGUI(filter_frame, "3", self)]
        self.echo_filter_gui = EchoFilterGUI(filter_frame, self)
        for ev in self.envelope_filter_guis:
            ev.pack(side=tk.LEFT, anchor=tk.N)
        self.arp_filter_gui = ArpeggioFilterGUI(filter_frame, self)
        self.arp_filter_gui.pack(side=tk.LEFT, anchor=tk.N)
        f = tk.Frame(filter_frame)
        self.tremolo_filter_gui = TremoloFilterGUI(f, self)
        self.tremolo_filter_gui.pack(side=tk.TOP)
        lf = tk.LabelFrame(f, text="A4 tuning")
        lf.pack(pady=(4, 0))
        # NOTE(review): the "A4 tuning" LabelFrame above is packed empty and its
        # variable immediately reassigned; the real A4 radio buttons are built in
        # misc_frame further down.  Looks like leftover dead UI code.
        lf = tk.LabelFrame(f, text="Performance")
        self.samplerate_choice = tk.IntVar()
        self.samplerate_choice.set(22050)
        tk.Label(lf, text="Samplerate:").pack(anchor=tk.W)
        subf = tk.Frame(lf)
        tk.Radiobutton(subf, variable=self.samplerate_choice, value=44100, text="44.1 kHz",
                       fg=lf.cget('fg'), selectcolor=lf.cget('bg'), pady=0, command=self.create_synth).pack(side=tk.LEFT)
        tk.Radiobutton(subf, variable=self.samplerate_choice, value=22050, text="22 kHz",
                       fg=lf.cget('fg'), selectcolor=lf.cget('bg'), pady=0, command=self.create_synth).pack(side=tk.LEFT)
        subf.pack()
        tk.Label(lf, text="Piano key response:").pack(anchor=tk.W)
        subf = tk.Frame(lf)
        # "realtime" streams the note while it plays; "render" pre-computes a sample
        self.rendering_choice = tk.StringVar()
        self.rendering_choice.set("realtime")
        tk.Radiobutton(subf, variable=self.rendering_choice, value="realtime", text="realtime", pady=0,
                       fg=lf.cget('fg'), selectcolor=lf.cget('bg'),).pack(side=tk.LEFT)
        tk.Radiobutton(subf, variable=self.rendering_choice, value="render", text="render", pady=0,
                       fg=lf.cget('fg'), selectcolor=lf.cget('bg'),).pack(side=tk.LEFT)
        subf.pack()
        lf.pack(pady=(4, 0))
        f.pack(side=tk.LEFT, anchor=tk.N)
        self.echo_filter_gui.pack(side=tk.LEFT, anchor=tk.N)
        misc_frame = tk.Frame(filter_frame, padx=10)
        tk.Label(misc_frame, text="To Speaker:").pack(pady=(5, 0))
        # multi-select listbox: which oscillators are routed to the audio output
        self.to_speaker_lb = tk.Listbox(misc_frame, width=8, height=5, selectmode=tk.MULTIPLE, exportselection=0)
        self.to_speaker_lb.pack()
        lf = tk.LabelFrame(misc_frame, text="A4 tuning")
        self.a4_choice = tk.IntVar()
        self.a4_choice.set(440)
        tk.Radiobutton(lf, variable=self.a4_choice, value=440, text="440 Hz", pady=0, fg=lf.cget('fg'), selectcolor=lf.cget('bg')).pack()
        tk.Radiobutton(lf, variable=self.a4_choice, value=432, text="432 Hz", pady=0, fg=lf.cget('fg'), selectcolor=lf.cget('bg')).pack()
        lf.pack(pady=(4, 0))
        tk.Button(misc_frame, text="Load preset", command=self.load_preset).pack()
        tk.Button(misc_frame, text="Save preset", command=self.save_preset).pack()
        for _ in range(5):
            self.add_osc_to_gui()
        # route oscillator 5 (index 4) to the speakers by default
        self.to_speaker_lb.select_set(4)
        self.waveform_area.pack(side=tk.TOP)
        self.osc_frame.pack(side=tk.TOP, padx=10)
        filter_frame.pack(side=tk.TOP)
        misc_frame.pack(side=tk.RIGHT, anchor=tk.N)
        self.piano_frame.pack(side=tk.TOP, padx=10, pady=10)
        self.statusbar = tk.Label(self, text="<status>", relief=tk.RIDGE)
        self.statusbar.pack(side=tk.BOTTOM, fill=tk.X)
        self.pack()
        self.synth = self.output = None
        self.create_synth()
        self.echos_ending_time = 0
        self.currently_playing = {}     # (note, octave) -> sid
        self.arp_after_id = 0           # tkinter after() id of the running arpeggio cycle (0 = none)
        showwarning("garbled sound output", "When using miniaudio 1.20+, the audio could be garbled (not always the case). I haven't had time yet to debug and fix this. Sorry for any inconvenience.")

    def bind_keypress(self, key, note, octave):
        """Bind a keyboard key to press/release of the given piano note."""
        def kbpress(event):
            self.pressed_keyboard(note, octave, False)

        def kbrelease(event):
            self.pressed_keyboard(note, octave, True)

        self.master.bind(key, kbpress)
        # the release binding needs the X11 keysym name, not the literal bracket char
        if key == '[':
            key = "bracketleft"
        if key == ']':
            key = "bracketright"
        self.master.bind("<KeyRelease-%s>" % key, kbrelease)

    def create_synth(self):
        """(Re)create the WaveSynth and audio Output for the selected samplerate."""
        samplerate = self.samplerate_choice.get()
        self.synth = WaveSynth(samplewidth=2, samplerate=samplerate)
        if self.output is not None:
            self.output.close()
        self.output = Output(self.synth.samplerate, self.synth.samplewidth, 1, mixing="mix")

    def add_osc_to_gui(self):
        """Append one oscillator panel to the GUI and list it in the speaker routing box."""
        osc_nr = len(self.oscillators)
        # earlier oscillators are selectable as FM / PWM modulation sources
        fm_sources = ["osc "+str(n+1) for n in range(osc_nr)]
        osc_pane = OscillatorGUI(self.osc_frame, self, "Oscillator "+str(osc_nr+1), fm_sources=fm_sources, pwm_sources=fm_sources)
        osc_pane.pack(side=tk.LEFT, anchor=tk.N, padx=10, pady=10)
        self.oscillators.append(osc_pane)
        self.to_speaker_lb.insert(tk.END, "osc "+str(osc_nr+1))

    def create_osc(self, note, octave, freq, from_gui, all_oscillators, is_audio=False):
        """Build the oscillator graph for one note from an oscillator panel's settings.

        Recursively creates FM/PWM modulator oscillators, optionally expands the
        note into a major chord (chord input mode), and wraps the result in any
        envelope filters assigned to this oscillator.
        """
        def create_unfiltered_osc():
            def create_chord_osc(clazz, **arguments):
                # in chord mode an audio oscillator becomes a mix of one osc per chord note
                if is_audio and self.arp_filter_gui.input_mode.get().startswith("chords"):
                    chord_keys = major_chord_keys(note, octave)
                    if self.arp_filter_gui.input_mode.get() == "chords3":
                        chord_keys = list(chord_keys)[:-1]
                    a4freq = self.a4_choice.get()
                    chord_freqs = [note_freq(n, o, a4freq) for n, o in chord_keys]
                    self.statusbar["text"] = "major chord: "+" ".join(n for n, o in chord_keys)
                    oscillators = []
                    # split the amplitude so the mixed chord doesn't clip
                    arguments["amplitude"] /= len(chord_freqs)
                    for f in chord_freqs:
                        arguments["frequency"] = f
                        oscillators.append(clazz(**arguments))
                    return MixingFilter(*oscillators)
                else:
                    # no chord (or an LFO instead of audio output oscillator), return one osc for only the given frequency
                    return clazz(**arguments)

            waveform = from_gui.input_waveformtype.get()
            amp = from_gui.input_amp.get()
            bias = from_gui.input_bias.get()
            if waveform == "noise":
                return WhiteNoise(freq, amplitude=amp, bias=bias, samplerate=self.synth.samplerate)
            elif waveform == "linear":
                startlevel = from_gui.input_lin_start.get()
                increment = from_gui.input_lin_increment.get()
                minvalue = from_gui.input_lin_min.get()
                maxvalue = from_gui.input_lin_max.get()
                return Linear(startlevel, increment, minvalue, maxvalue)
            else:
                phase = from_gui.input_phase.get()
                pw = from_gui.input_pw.get()
                fm_choice = from_gui.input_fm.get()
                pwm_choice = from_gui.input_pwm.get()
                if fm_choice in (None, "", "<none>"):
                    fm = None
                elif fm_choice.startswith("osc"):
                    # recursively build the modulating oscillator (as an LFO, is_audio=False)
                    osc_num = int(fm_choice.split()[1])
                    osc = all_oscillators[osc_num - 1]
                    fm = self.create_osc(note, octave, osc.input_freq.get(), all_oscillators[osc_num-1], all_oscillators)
                else:
                    raise ValueError("invalid fm choice")
                if pwm_choice in (None, "", "<none>"):
                    pwm = None
                elif pwm_choice.startswith("osc"):
                    osc_num = int(pwm_choice.split()[1])
                    osc = all_oscillators[osc_num-1]
                    pwm = self.create_osc(note, octave, osc.input_freq.get(), osc, all_oscillators)
                else:
                    # NOTE(review): message says "fm" but this is the pwm branch --
                    # looks like a copy-paste slip ("invalid pwm choice" intended).
                    raise ValueError("invalid fm choice")
                if waveform == "pulse":
                    return create_chord_osc(Pulse, frequency=freq, amplitude=amp, phase=phase,
                                            bias=bias, pulsewidth=pw, fm_lfo=fm, pwm_lfo=pwm,
                                            samplerate=self.synth.samplerate)
                elif waveform == "harmonics":
                    harmonics = self.parse_harmonics(from_gui.harmonics_text.get(1.0, tk.END))
                    return create_chord_osc(Harmonics, frequency=freq, harmonics=harmonics,
                                            amplitude=amp, phase=phase, bias=bias, fm_lfo=fm,
                                            samplerate=self.synth.samplerate)
                else:
                    # simple waveforms: dispatch table from waveform name to oscillator class
                    o = {
                        "sine": Sine,
                        "triangle": Triangle,
                        "sawtooth": Sawtooth,
                        "sawtooth_h": SawtoothH,
                        "square": Square,
                        "square_h": SquareH,
                        "semicircle": Semicircle,
                        "pointy": Pointy,
                    }[waveform]
                    return create_chord_osc(o, frequency=freq, amplitude=amp, phase=phase,
                                            bias=bias, fm_lfo=fm, samplerate=self.synth.samplerate)

        def envelope(osc, envelope_gui):
            # wrap osc in this envelope filter, but only if the envelope's
            # configured source oscillator is the panel we're building for
            adsr_src = envelope_gui.input_source.get()
            if adsr_src not in (None, "", "<none>"):
                osc_num = int(adsr_src.split()[1])
                if from_gui is self.oscillators[osc_num-1]:
                    return envelope_gui.filter(osc)
            return osc

        osc = create_unfiltered_osc()
        for ev in self.envelope_filter_guis:
            osc = envelope(osc, ev)
        return osc

    def parse_harmonics(self, harmonics):
        """Parse whitespace-separated 'num,fraction' pairs (fraction may be 'a/b')
        into a list of (int, float) tuples."""
        parsed = []
        for harmonic in harmonics.split():
            num, frac = harmonic.split(",")
            num = int(num)
            if '/' in frac:
                numerator, denominator = frac.split("/")
            else:
                numerator, denominator = frac, 1
            frac = float(numerator)/float(denominator)
            parsed.append((num, frac))
        return parsed

    def do_play(self, osc):
        """Render one second of the given oscillator panel's sound and play it."""
        if osc.input_waveformtype.get() == "linear":
            self.statusbar["text"] = "cannot output linear osc to speakers"
            return
        duration = 1.0
        osc.set_title_status("TO SPEAKER")
        self.update()
        osc.after(int(duration*1000), lambda: osc.set_title_status(None))
        o = self.create_osc(None, None, osc.input_freq.get(), osc, all_oscillators=self.oscillators, is_audio=True)
        o = self.apply_filters(o)
        sample = self.generate_sample(o, duration)
        if sample.samplewidth != self.synth.samplewidth:
            print("16 bit overflow!")  # XXX
            sample = sample.make_16bit()
        self.output.play_sample(sample)
        # NOTE(review): a second title-status reset; the osc.after(...) above
        # already schedules one (with None instead of "") -- confirm which is wanted.
        self.after(1000, lambda: osc.set_title_status(""))

    def do_close_waveform(self):
        """Remove the waveform plot (and its close button) from the GUI."""
        for child in self.waveform_area.winfo_children():
            child.destroy()

    def do_plot(self, osc):
        """Plot one second of the oscillator's waveform with matplotlib embedded in tk."""
        if not matplotlib:
            self.statusbar["text"] = "Cannot plot! To plot things, you need to have matplotlib installed!"
            return
        o = self.create_osc(None, None, osc.input_freq.get(), osc, all_oscillators=self.oscillators).blocks()
        # take one second's worth of oscillator blocks
        blocks = list(itertools.islice(o, self.synth.samplerate//params.norm_osc_blocksize))
        # integrating matplotlib in tikinter, see http://matplotlib.org/examples/user_interfaces/embedding_in_tk2.html
        fig = Figure(figsize=(8, 2), dpi=100)
        axis = fig.add_subplot(111)
        axis.plot(sum(blocks, []))
        axis.set_title("Waveform")
        self.do_close_waveform()
        canvas = FigureCanvasTkAgg(fig, master=self.waveform_area)
        canvas.get_tk_widget().pack(side=tk.LEFT, fill=tk.BOTH, expand=1)
        canvas.draw()
        close_waveform = tk.Button(self.waveform_area, text="Close waveform", command=self.do_close_waveform)
        close_waveform.pack(side=tk.RIGHT)

    def generate_sample(self, oscillator: Oscillator, duration: float, use_fade: bool = False) -> Optional[Sample]:
        """Render `duration` seconds of the oscillator into a Sample.

        Returns None if the oscillator is exhausted before enough blocks are produced.
        """
        # scale normalized float frames (-1..1) to the synth's integer sample range
        scale = 2**(8*self.synth.samplewidth-1)
        blocks = oscillator.blocks()
        try:
            # NOTE(review): under PEP 479 (Python 3.7+) a StopIteration raised by
            # next() inside this generator expression surfaces as RuntimeError,
            # so the except clause below may never trigger -- worth verifying.
            sample_blocks = list(next(blocks) for _ in range(int(self.synth.samplerate*duration/params.norm_osc_blocksize)))
            float_frames = sum(sample_blocks, [])
            frames = [int(v*scale) for v in float_frames]
        except StopIteration:
            return None
        else:
            sample = Sample.from_array(frames, self.synth.samplerate, 1)
            if use_fade:
                sample.fadein(0.05).fadeout(0.1)
            return sample

    def render_and_play_note(self, oscillator: Oscillator, max_duration: float = 4) -> None:
        """Pre-render a note (duration taken from the longest envelope) and play it."""
        duration = 0
        for ev in self.envelope_filter_guis:
            duration = max(duration, ev.duration)
        if duration == 0:
            duration = 1
        duration = min(duration, max_duration)
        sample = self.generate_sample(oscillator, duration)
        if sample:
            sample.fadein(0.05).fadeout(0.05)
            if sample.samplewidth != self.synth.samplewidth:
                print("16 bit overflow!")  # XXX
                # NOTE(review): return value discarded here while do_play() reassigns
                # it -- confirm whether make_16bit() mutates in place.
                sample.make_16bit()
            self.output.play_sample(sample)

    keypresses = collections.defaultdict(float)         # (note, octave) -> timestamp
    keyrelease_counts = collections.defaultdict(int)    # (note, octave) -> int

    def _key_release(self, note, octave):
        # mechanism to filter out key repeats
        self.keyrelease_counts[(note, octave)] -= 1
        if self.keyrelease_counts[(note, octave)] <= 0:
            self.pressed(note, octave, True)

    def pressed_keyboard(self, note, octave, released=False):
        """Handle a computer-keyboard note event, debouncing OS key auto-repeat."""
        if released:
            # defer the actual release so a quickly-following auto-repeat press can cancel it
            self.keyrelease_counts[(note, octave)] += 1
            self.after(400, lambda n=note, o=octave: self._key_release(n, o))
        else:
            time_since_previous = time.time() - self.keypresses[(note, octave)]
            self.keypresses[(note, octave)] = time.time()
            if time_since_previous < 0.8:
                # assume auto-repeat, and do nothing
                return
            self.pressed(note, octave)

    def pressed(self, note, octave, released=False):
        """Handle a note press/release, expanding to an arpeggio when that mode is active."""
        if self.arp_filter_gui.input_mode.get().startswith("arp"):
            if released:
                if self.arp_after_id:
                    self.after_cancel(self.arp_after_id)   # stop the arp cycle
                    self.statusbar["text"] = "ok"
                    self.arp_after_id = 0
                return
            chord_keys = major_chord_keys(note, octave)
            if self.arp_filter_gui.input_mode.get() == "arpeggio3":
                chord_keys = list(chord_keys)[:-1]
            self.statusbar["text"] = "arpeggio: "+" ".join(note for note, octave in chord_keys)
            self.play_note(chord_keys)
        else:
            self.statusbar["text"] = "ok"
            self.play_note([(note, octave)], released)

    def play_note(self, list_of_notes, released=False):
        # list of notes to play (length 1 = just one note, more elements = arpeggiator list)
        to_speaker = [self.oscillators[i] for i in self.to_speaker_lb.curselection()]
        if not to_speaker:
            self.statusbar["text"] = "No oscillators connected to speaker output!"
            return
        if released:
            for note, octave in list_of_notes:
                if (note, octave) in self.currently_playing:
                    # stop the note
                    sid = self.currently_playing[(note, octave)]
                    self.output.stop_sample(sid)
            return

        first_note, first_octave = list_of_notes[0]
        first_freq = note_freq(first_note, first_octave, self.a4_choice.get())
        # oscillators tracking the keyboard get their frequency set from the played note
        for osc in self.oscillators:
            if osc.input_freq_keys.get():
                osc.input_freq.set(first_freq*osc.input_freq_keys_ratio.get())
        for osc in to_speaker:
            if osc.input_waveformtype.get() == "linear":
                self.statusbar["text"] = "cannot output linear osc to speakers"
                return
            else:
                osc.set_title_status("TO SPEAKER")

        oscs_to_play = []
        for note, octave in list_of_notes:
            freq = note_freq(note, octave, self.a4_choice.get())
            oscs = [self.create_osc(note, octave, freq * osc.input_freq_keys_ratio.get(), osc,
                                    self.oscillators, is_audio=True) for osc in to_speaker]
            mixed_osc = MixingFilter(*oscs) if len(oscs) > 1 else oscs[0]
            self.echos_ending_time = 0
            if len(list_of_notes) <= 1:
                # you can't use filters and echo when using arpeggio for now
                mixed_osc = self.apply_filters(mixed_osc)
                current_echos_duration = getattr(mixed_osc, "echo_duration", 0)
                if current_echos_duration > 0:
                    self.echos_ending_time = time.time() + current_echos_duration
            oscs_to_play.append(mixed_osc)

        if len(list_of_notes) > 1:
            rate = self.arp_filter_gui.input_rate.get()
            duration = rate * self.arp_filter_gui.input_ratio.get() / 100.0
            self.statusbar["text"] = "playing ARP ({0}) from note {1} {2}".format(len(oscs_to_play), first_note, first_octave)
            for index, (note, octave) in enumerate(list_of_notes):
                sample = StreamingOscSample(oscs_to_play[index], self.synth.samplerate, duration)
                sid = self.output.play_sample(sample, delay=rate*index)
                self.currently_playing[(note, octave)] = sid
            self.arp_after_id = self.after(int(rate * len(list_of_notes) * 1000), lambda: self.play_note(list_of_notes))   # repeat arp!
        else:
            # normal, single note
            if self.rendering_choice.get() == "render":
                self.statusbar["text"] = "rendering note sample..."
                # mixed_osc is the loop variable from above; with one note it is that note's osc
                self.after_idle(lambda: self.render_and_play_note(mixed_osc))
            else:
                self.statusbar["text"] = "playing note {0} {1}".format(first_note, first_octave)
                sample = StreamingOscSample(oscs_to_play[0], self.synth.samplerate)
                sid = self.output.play_sample(sample)
                self.currently_playing[(first_note, first_octave)] = sid

        def reset_osc_title_status():
            for osc in to_speaker:
                osc.set_title_status("")
        self.after(1000, reset_osc_title_status)

    def apply_filters(self, output_oscillator):
        """Wrap the output oscillator in the tremolo and echo filters (GUI-configured)."""
        output_oscillator = self.tremolo_filter_gui.filter(output_oscillator)
        output_oscillator = self.echo_filter_gui.filter(output_oscillator)
        return output_oscillator

    def load_preset(self):
        """Load all synth settings from an .ini preset file chosen by the user."""
        # NOTE(review): askopenfile returns None when the dialog is cancelled,
        # which would make read_file() raise -- confirm intended behavior.
        file = askopenfile(filetypes=[("Synth presets", "*.ini")])
        cf = ConfigParser()
        cf.read_file(file)
        file.close()
        # general settings
        self.samplerate_choice.set(cf["settings"]["samplerate"])
        self.rendering_choice.set(cf["settings"]["rendering"])
        self.a4_choice.set(cf["settings"]["a4tuning"])
        self.to_speaker_lb.selection_clear(0, tk.END)
        to_speaker = cf["settings"]["to_speaker"]
        to_speaker = tuple(to_speaker.split(','))
        for o in to_speaker:
            self.to_speaker_lb.selection_set(int(o)-1)
        # per-section settings are applied by setting the matching tk variable attributes
        for section in cf.sections():
            if section.startswith("oscillator"):
                num = int(section.split('_')[1])-1
                osc = self.oscillators[num]
                for name, value in cf[section].items():
                    getattr(osc, name).set(value)
                osc.waveform_selected()
            elif section.startswith("envelope"):
                num = int(section.split('_')[1])-1
                env = self.envelope_filter_guis[num]
                for name, value in cf[section].items():
                    getattr(env, name).set(value)
            elif section == "arpeggio":
                for name, value in cf[section].items():
                    getattr(self.arp_filter_gui, name).set(value)
            elif section == "tremolo":
                for name, value in cf[section].items():
                    getattr(self.tremolo_filter_gui, name).set(value)
            elif section == "echo":
                for name, value in cf[section].items():
                    getattr(self.echo_filter_gui, name).set(value)
        self.statusbar["text"] = "preset loaded."

    def save_preset(self):
        """Save all synth settings (every 'input_*' tk variable) to an .ini preset file."""
        # NOTE(review): asksaveasfile returns None when cancelled -- cf.write would raise.
        file = asksaveasfile(filetypes=[("Synth presets", "*.ini")])
        cf = ConfigParser(dict_type=collections.OrderedDict)
        # general settings
        cf.add_section("settings")
        cf["settings"]["samplerate"] = str(self.samplerate_choice.get())
        cf["settings"]["rendering"] = self.rendering_choice.get()
        cf["settings"]["to_speaker"] = ",".join(str(v+1) for v in self.to_speaker_lb.curselection())
        cf["settings"]["a4tuning"] = str(self.a4_choice.get())
        # oscillators
        for num, osc in enumerate(self.oscillators, 1):
            section = "oscillator_"+str(num)
            cf.add_section(section)
            for name, var in vars(osc).items():
                if name.startswith("input_"):
                    cf[section][name] = str(var.get())
        # adsr envelopes
        for num, flter in enumerate(self.envelope_filter_guis, 1):
            section = "envelope_"+str(num)
            cf.add_section(section)
            for name, var in vars(flter).items():
                if name.startswith("input_"):
                    cf[section][name] = str(var.get())
        # echo
        cf.add_section("echo")
        for name, var in vars(self.echo_filter_gui).items():
            if name.startswith("input_"):
                cf["echo"][name] = str(var.get())
        # tremolo
        cf.add_section("tremolo")
        for name, var in vars(self.tremolo_filter_gui).items():
            if name.startswith("input_"):
                cf["tremolo"][name] = str(var.get())
        # arpeggio
        cf.add_section("arpeggio")
        for name, var in vars(self.arp_filter_gui).items():
            if name.startswith("input_"):
                cf["arpeggio"][name] = str(var.get())

        cf.write(file)
        file.close()
Exemplo n.º 3
0
class Player:
    """Jukebox playback engine.

    Pulls tracks from the app's playlist into track frames, streams them through
    a StreamMixer, plays the mixed audio from a background thread, updates the
    level meter, and crossfades between tracks near the end of each song.
    """
    update_rate = 50    # 50 ms = 20 updates/sec
    levelmeter_lowest = -40  # dB
    xfade_duration = 7  # seconds: crossfade window near the end of a track

    def __init__(self, app, trackframes):
        """Wire up mixer, output and level meter, and start the playback thread."""
        self.app = app
        self.trackframes = trackframes
        self.app.after(self.update_rate, self.tick)
        self.stopping = False
        self.mixer = StreamMixer([], endless=True)
        self.output = Output(self.mixer.samplerate, self.mixer.samplewidth, self.mixer.nchannels, mixing="sequential", queue_size=2)
        self.mixed_samples = iter(self.mixer)
        self.levelmeter = LevelMeter(rms_mode=False, lowest=self.levelmeter_lowest)
        # drive the level meter from samples as they are actually played
        self.output.register_notify_played(self.levelmeter.update)
        for tf in self.trackframes:
            tf.player = self
        player_thread = Thread(target=self._play_sample_in_thread, name="jukebox_sampleplayer")
        player_thread.daemon = True
        player_thread.start()

    def skip(self, trackframe):
        """Abort the track in this frame and mark the frame to fetch the next track."""
        if trackframe.state != TrackFrame.state_needtrack and trackframe.stream:
            trackframe.stream.close()
            trackframe.stream = None
        trackframe.display_track(None, None, None, "(next track...)")
        trackframe.state = TrackFrame.state_switching

    def stop(self):
        """Shut the player down: close all streams, the mixer and the audio output."""
        self.stopping = True
        for tf in self.trackframes:
            if tf.stream:
                tf.stream.close()
                tf.stream = None
            tf.state = TrackFrame.state_needtrack
        self.mixer.close()
        self.output.close()

    def tick(self):
        # the actual decoding and sound playing is done in a background thread
        self._levelmeter()
        self._load_song()
        self._play_song()
        self._crossfade()
        if not self.stopping:
            # reschedule ourselves on the GUI event loop
            self.app.after(self.update_rate, self.tick)

    def _play_sample_in_thread(self):
        """
        This is run in a background thread to avoid GUI interactions interfering with audio output.
        """
        while True:
            if self.stopping:
                break
            _, sample = next(self.mixed_samples)
            if sample and sample.duration > 0:
                self.output.play_sample(sample)
            else:
                self.levelmeter.reset()
                time.sleep(self.update_rate/1000*2)   # avoid hogging the cpu while no samples are played

    def _levelmeter(self):
        # push the current left/right levels to the GUI
        self.app.update_levels(self.levelmeter.level_left, self.levelmeter.level_right)

    def _load_song(self):
        if self.stopping:
            return   # make sure we don't load new songs when the player is shutting down
        # hand the next playlist track to any frame that is waiting for one
        for tf in self.trackframes:
            if tf.state == TrackFrame.state_needtrack:
                track = self.app.pop_playlist_track()
                if track:
                    tf.track = track
                    tf.state = TrackFrame.state_idle

    def _play_song(self):
        """Advance each track frame's playback state machine by one step."""
        def start_stream(tf, filename, volume):
            def _start_from_thread():
                # start loading the track from a thread to avoid gui stutters when loading takes a bit of time
                tf.stream = AudiofileToWavStream(filename, hqresample=hqresample)
                self.mixer.add_stream(tf.stream, [tf.volumefilter])
                tf.playback_started = datetime.datetime.now()
                tf.state = TrackFrame.state_playing
                tf.volume = volume
            tf.state = TrackFrame.state_loading
            Thread(target=_start_from_thread, name="stream_loader").start()
        for tf in self.trackframes:
            if tf.state == TrackFrame.state_playing:
                # update the remaining-time display; detect the end of the stream
                remaining = tf.track_duration - (datetime.datetime.now() - tf.playback_started)
                remaining = remaining.total_seconds()
                tf.time = datetime.timedelta(seconds=math.ceil(remaining))
                if tf.stream.closed and tf.time.total_seconds() <= 0:
                    self.skip(tf)  # stream ended!
            elif tf.state == TrackFrame.state_idle:
                if tf.xfade_state == TrackFrame.state_xfade_fadingin:
                    # if we're set to fading in, regardless of other tracks, we start playing as well
                    start_stream(tf, tf.track["location"], 0)
                elif not any(tf for tf in self.trackframes if tf.state in (TrackFrame.state_playing, TrackFrame.state_loading)):
                    # if there is no other track currently playing (or loading), it's our turn!
                    start_stream(tf, tf.track["location"], 100)
            elif tf.state == TrackFrame.state_switching:
                tf.state = TrackFrame.state_needtrack

    def _crossfade(self):
        """Start fade-outs near track end, pair them with a fade-in, and step all fades."""
        for tf in self.trackframes:
            # nearing the end of the track? then start a fade out
            if tf.state == TrackFrame.state_playing \
                    and tf.xfade_state == TrackFrame.state_xfade_nofade \
                    and tf.time.total_seconds() <= self.xfade_duration:
                tf.xfade_state = TrackFrame.state_xfade_fadingout
                tf.xfade_started = datetime.datetime.now()
                tf.xfade_start_volume = tf.volume
                # fade in the first other track that is currently idle
                for other_tf in self.trackframes:
                    if tf is not other_tf and other_tf.state == TrackFrame.state_idle:
                        other_tf.xfade_state = TrackFrame.state_xfade_fadingin
                        other_tf.xfade_started = datetime.datetime.now()
                        other_tf.xfade_start_volume = 0
                        other_tf.volume = 0
                        break
        for tf in self.trackframes:
            if tf.xfade_state == TrackFrame.state_xfade_fadingin:
                # fading in, slide volume up from 0 to 100%
                volume = 100 * (datetime.datetime.now() - tf.xfade_started).total_seconds() / self.xfade_duration
                tf.volume = min(volume, 100)
                if volume >= 100:
                    tf.xfade_state = TrackFrame.state_xfade_nofade  # fade reached the end
            elif tf.xfade_state == TrackFrame.state_xfade_fadingout:
                # fading out, slide volume down from what it was at to 0%
                fade_progress = (datetime.datetime.now() - tf.xfade_started)
                fade_progress = (self.xfade_duration - fade_progress.total_seconds()) / self.xfade_duration
                volume = max(0, tf.xfade_start_volume * fade_progress)
                tf.volume = max(volume, 0)
                if volume <= 0:
                    tf.xfade_state = TrackFrame.state_xfade_nofade   # fade reached the end

    def play_sample(self, sample):
        """Play a one-shot sample over the music, ducking the current track while it plays.

        mute_volume(40) presumably lowers the track to 40% and returns the old
        volume, which is restored via the completion callback -- TODO confirm.
        """
        def unmute(trf, vol):
            if trf:
                trf.volume = vol
        if sample and sample.duration > 0:
            for tf in self.trackframes:
                if tf.state == TrackFrame.state_playing:
                    old_volume = tf.mute_volume(40)
                    self.mixer.add_sample(sample, lambda mtf=tf, vol=old_volume: unmute(mtf, vol))
                    break
            else:
                # no track currently playing: just play the sample on its own
                self.mixer.add_sample(sample)
Exemplo n.º 4
0
class Repl(cmd.Cmd):
    """
    Interactive command line interface to load/record/save and play samples, patterns and whole tracks.
    Currently it has no way of defining and loading samples manually. This means you need to initialize
    it with a track file containing at least the instruments (samples) that you will be using.

    Note: the do_* docstrings double as the interactive help text printed by cmd.Cmd.
    """
    def __init__(self, discard_unused_instruments=False):
        # Start with an empty song; do_load() replaces it with one read from a track file.
        self.song = Song()
        self.discard_unused_instruments = discard_unused_instruments
        # Sequential mixing: queued samples are played one after another, not layered.
        self.out = Output(mixing="sequential", queue_size=1)
        super(Repl, self).__init__()

    def do_quit(self, args):
        """quits the session"""
        print("Bye.", args)
        self.out.close()
        # Returning True tells cmd.Cmd's command loop to terminate.
        return True

    def do_bpm(self, bpm):
        """set the playback BPM (such as 174 for some drum'n'bass)"""
        try:
            self.song.bpm = int(bpm)
        except ValueError as x:
            print("ERROR:", x)

    def do_ticks(self, ticks):
        """set the number of pattern ticks per beat (usually 4 or 8)"""
        try:
            self.song.ticks = int(ticks)
        except ValueError as x:
            print("ERROR:", x)

    def do_samples(self, args):
        """show the loaded samples"""
        print("Samples:")
        print(",  ".join(self.song.instruments))

    def do_patterns(self, args):
        """show the loaded patterns"""
        print("Patterns:")
        for name, pattern in sorted(self.song.patterns.items()):
            self.print_pattern(name, pattern)

    def print_pattern(self, name, pattern):
        # pattern maps instrument name -> bar string (e.g. "x..x.x..").
        print("PATTERN {:s}".format(name))
        for instrument, bars in pattern.items():
            print("   {:>15s} = {:s}".format(instrument, bars))

    def do_pattern(self, names):
        """play the pattern with the given name(s)"""
        names = names.split()
        # Validate (and print) each distinct pattern before attempting to mix.
        for name in sorted(set(names)):
            try:
                pat = self.song.patterns[name]
                self.print_pattern(name, pat)
            except KeyError:
                print("no such pattern '{:s}'".format(name))
                return
        # Duplicates in 'names' are kept here on purpose: naming a pattern
        # twice layers it twice in the mix.
        patterns = [self.song.patterns[name] for name in names]
        try:
            m = Mixer(patterns, self.song.bpm, self.song.ticks,
                      self.song.instruments)
            result = m.mix(verbose=len(patterns) > 1).make_16bit()
            self.out.play_sample(result)
            self.out.wait_all_played()
        except ValueError as x:
            print("ERROR:", x)

    def do_play(self, args):
        """play a single sample by giving its name, add a bar (xx..x.. etc) to play it in a bar"""
        if ' ' in args:
            # "<instrument> <bar...>": bar may contain spaces, strip them out.
            instrument, pattern = args.split(maxsplit=1)
            pattern = pattern.replace(' ', '')
        else:
            instrument = args
            pattern = None
        instrument = instrument.strip()
        try:
            sample = self.song.instruments[instrument]
        except KeyError:
            print("unknown sample")
            return
        if pattern:
            self.play_single_bar(sample, pattern)
        else:
            # Copy before converting so the stored instrument stays untouched.
            sample = sample.copy().make_16bit()
            self.out.play_sample(sample)
            self.out.wait_all_played()

    def play_single_bar(self, sample, pattern):
        # Build a throwaway one-instrument pattern and mix just that bar.
        try:
            m = Mixer([{
                "sample": pattern
            }], self.song.bpm, self.song.ticks, {"sample": sample})
            result = m.mix(verbose=False).make_16bit()
            self.out.play_sample(result)
            self.out.wait_all_played()
        except ValueError as x:
            print("ERROR:", x)

    def do_mix(self, args):
        """mix and play all patterns of the song"""
        if not self.song.pattern_sequence:
            print("Nothing to be mixed.")
            return
        # Mix to a temporary wav file, load it back as a Sample, then delete
        # the file (the sample data is fully in memory at that point).
        output = "__temp_mix.wav"
        self.song.mix(output)
        mix = Sample(wave_file=output)
        print("Playing sound...")
        self.out.play_sample(mix)
        os.remove(output)

    def do_stream(self, args):
        """
        mix all patterns of the song and stream the output to your speakers in real-time,
        or to an output file if you give a filename argument.
        This is the fastest and most efficient way of generating the output mix because
        it uses very little memory and avoids large buffer copying.
        """
        if not self.song.pattern_sequence:
            print("Nothing to be mixed.")
            return
        if args:
            filename = args.strip()
            print("Mixing and streaming to output file '{0}'...".format(
                filename))
            self.out.stream_to_file(filename, self.song.mix_generator())
            # Blank line overwrites any progress output left on the console.
            print("\r                          ")
            return
        print("Mixing and streaming to speakers...")
        try:
            samples = self.out.normalized_samples(self.song.mix_generator())
            for sample in samples:
                self.out.play_sample(sample)
            print("\r                          ")
            self.out.wait_all_played()
        except KeyboardInterrupt:
            print("Stopped.")

    def do_rec(self, args):
        """Record (or overwrite) a new sample (instrument) bar in a pattern.
Args: [pattern name] [sample] [bar(s)].
Omit bars to remove the sample from the pattern.
If a pattern with the name doesn't exist yet it will be added."""
        args = args.split(maxsplit=2)
        if len(args) not in (2, 3):
            print("Wrong arguments. Use: patternname sample bar(s)")
            return
        if len(args) == 2:
            args.append(None)  # no bars
        pattern_name, instrument, bars = args
        if instrument not in self.song.instruments:
            print("Unknown sample '{:s}'.".format(instrument))
            return
        if pattern_name not in self.song.patterns:
            self.song.patterns[pattern_name] = {}
        pattern = self.song.patterns[pattern_name]
        if bars:
            bars = bars.replace(' ', '')
            if len(bars) % self.song.ticks != 0:
                print("Bar length must be multiple of the number of ticks.")
                return
            pattern[instrument] = bars
        else:
            # No bars given: remove the instrument from the pattern instead.
            if instrument in pattern:
                del pattern[instrument]
        if pattern_name in self.song.patterns:
            # Drop the pattern entirely if removing the instrument emptied it.
            if not self.song.patterns[pattern_name]:
                del self.song.patterns[pattern_name]
                print("Pattern was empty and has been removed.")
            else:
                self.print_pattern(pattern_name,
                                   self.song.patterns[pattern_name])

    def do_seq(self, names):
        """
        Print the sequence of patterns that form the current track,
        or if you give a list of names: use that as the new pattern sequence.
        """
        if not names:
            print("  ".join(self.song.pattern_sequence))
            return
        names = names.split()
        # Reject the whole sequence if any single name is unknown.
        for name in names:
            if name not in self.song.patterns:
                print("Unknown pattern '{:s}'.".format(name))
                return
        self.song.pattern_sequence = names

    def do_load(self, filename):
        """Load a new song file"""
        # Read into a fresh Song first so the current song survives a failed load.
        song = Song()
        try:
            song.read(filename, self.discard_unused_instruments)
            self.song = song
        except IOError as x:
            print("ERROR:", x)

    def do_save(self, filename):
        """Save current song to file"""
        if not filename:
            print("Give filename to save song to.")
            return
        if not filename.endswith(".ini"):
            filename += ".ini"
        # Ask before clobbering an existing file.
        if os.path.exists(filename):
            if input("File exists: '{:s}'. Overwrite y/n? ".format(
                    filename)) not in ('y', 'yes'):
                return
        self.song.write(filename)
# Exemplo n.º 5 (scraper artifact — "Example no. 5"; commented out to keep the file parseable)
# 0
class LevelGUI(tk.Frame):
    """Tkinter window showing left/right audio levels (in dB) while a wav source plays."""
    def __init__(self, audio_source, master=None):
        # Meter floor in dB; levels below this render as an empty bar.
        self.lowest_level = -50
        super().__init__(master)
        self.master.title("Levels")

        self.pbvar_left = tk.IntVar()
        self.pbvar_right = tk.IntVar()
        # Three named progressbar styles; update() switches between them
        # depending on how close the level is to clipping.
        pbstyle = ttk.Style()
        pbstyle.theme_use("classic")
        pbstyle.configure("green.Vertical.TProgressbar", troughcolor="gray", background="light green")
        pbstyle.configure("yellow.Vertical.TProgressbar", troughcolor="gray", background="yellow")
        pbstyle.configure("red.Vertical.TProgressbar", troughcolor="gray", background="orange")

        frame = tk.LabelFrame(self, text="Left")
        frame.pack(side=tk.LEFT)
        tk.Label(frame, text="dB").pack()
        self.pb_left = ttk.Progressbar(frame, orient=tk.VERTICAL, length=300,
                                       maximum=-self.lowest_level, variable=self.pbvar_left,
                                       mode='determinate', style='yellow.Vertical.TProgressbar')
        self.pb_left.pack()

        frame = tk.LabelFrame(self, text="Right")
        frame.pack(side=tk.LEFT)
        tk.Label(frame, text="dB").pack()
        self.pb_right = ttk.Progressbar(frame, orient=tk.VERTICAL, length=300,
                                        maximum=-self.lowest_level, variable=self.pbvar_right,
                                        mode='determinate', style='yellow.Vertical.TProgressbar')
        self.pb_right.pack()

        frame = tk.LabelFrame(self, text="Info")
        self.info = tk.Label(frame, text="", justify=tk.LEFT)
        frame.pack()
        self.info.pack(side=tk.TOP)
        self.pack()
        self.open_audio_file(audio_source)
        # Kick off the two periodic loops: meter redraw and audio feeding.
        self.after_idle(self.update)
        self.after_idle(self.stream_audio)

    def open_audio_file(self, filename_or_stream):
        # Accepts either a filename or a file-like object (wave.open handles both).
        wav = wave.open(filename_or_stream, 'r')
        self.samplewidth = wav.getsampwidth()
        self.samplerate = wav.getframerate()
        self.nchannels = wav.getnchannels()
        # Chunks of samplerate/10 frames = roughly 100 ms of audio per sample.
        self.samplestream = iter(SampleStream(wav, self.samplerate // 10))
        self.levelmeter = LevelMeter(rms_mode=False, lowest=self.lowest_level)
        self.audio_out = Output(self.samplerate, self.samplewidth, self.nchannels, mixing="sequential", queue_size=3)
        print("Audio API used:", self.audio_out.audio_api)
        if not self.audio_out.supports_streaming:
            raise RuntimeError("need api that supports streaming")
        # Let the level meter track what has actually been played, not what was queued.
        self.audio_out.register_notify_played(self.levelmeter.update)
        filename = filename_or_stream if isinstance(filename_or_stream, str) else "<stream>"
        info = "Source:\n{}\n\nRate: {:g} Khz\nBits: {}\nChannels: {}"\
            .format(filename, self.samplerate/1000, 8*self.samplewidth, self.nchannels)
        self.info.configure(text=info)

    def stream_audio(self):
        # Feed the next ~100 ms chunk to the output, rescheduling until the
        # stream is exhausted (EAFP: StopIteration ends the loop).
        try:
            sample = next(self.samplestream)
            self.audio_out.play_sample(sample)
            self.after(20, self.stream_audio)
        except StopIteration:
            self.audio_out.close()

    # NOTE(review): this overrides tkinter.Misc.update(); it works here because
    # it is only invoked via the after() scheduling below — confirm intentional.
    def update(self):
        if not self.audio_out.still_playing():
            # Playback finished: zero the meters and stop rescheduling.
            self.pbvar_left.set(0)
            self.pbvar_right.set(0)
            print("done!")
            return
        left, peak_l = self.levelmeter.level_left, self.levelmeter.peak_left
        right, peak_r = self.levelmeter.level_right, self.levelmeter.peak_right
        # Shift dB levels (lowest_level..0) into the progressbar's 0..-lowest_level range.
        self.pbvar_left.set(left-self.lowest_level)
        self.pbvar_right.set(right-self.lowest_level)
        # Color the bars by headroom: red above -3 dB, yellow above -6 dB, else green.
        if left > -3:
            self.pb_left.configure(style="red.Vertical.TProgressbar")
        elif left > -6:
            self.pb_left.configure(style="yellow.Vertical.TProgressbar")
        else:
            self.pb_left.configure(style="green.Vertical.TProgressbar")
        if right > -3:
            self.pb_right.configure(style="red.Vertical.TProgressbar")
        elif right > -6:
            self.pb_right.configure(style="yellow.Vertical.TProgressbar")
        else:
            self.pb_right.configure(style="green.Vertical.TProgressbar")
        # update_rate is a module-level constant defined elsewhere in this file.
        self.after(1000//update_rate, self.update)