Example #1
def stereo_pan():
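    # Demo: pan a clipped stereo sample, and then a generated FM triangle wave, left/right with a slow sine LFO.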
    synth = WaveSynth()
    # panning a stereo source:
    wave = Sample("samples/SOS 020.wav").clip(6, 12).normalize().fadein(0.5).fadeout(0.5).lock()
    osc = Sine(0.4)
    panning = wave.copy().pan(lfo=osc).fadeout(0.2)
    with Output.for_sample(panning) as out:
        out.play_sample(panning)
    # panning a generated mono source:
    fm = Sine(0.5, 0.1999, bias=0.2)
    wave = synth.triangle(220, 5, fm_lfo=fm).lock()
    osc = Sine(0.4)
    panning = wave.copy().pan(lfo=osc).fadeout(0.2)
    with Output.for_sample(panning) as out:
        out.play_sample(panning)
Example #2
 def update(self, *args, **kwargs):
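     # Read the next chunk of audio frames and update the left/right level-meter progress bars;
     # when the wave file is exhausted, reset the bars and stop rescheduling this update.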
     frames = self.wave.readframes(self.samplerate//self.update_rate)
     if not frames:
         self.pbvar_left.set(0)
         self.pbvar_right.set(0)
         print("done!")
         return
     sample = Sample.from_raw_frames(frames, self.samplewidth, self.samplerate, self.nchannels)
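     # "async" became a reserved keyword in Python 3.7, so this call assumes an older Python/library version.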
     self.audio_out.play_sample(sample, async=True)
     time.sleep(sample.duration/3)   # print the peak meter more or less halfway during the sample
     left, peak_l, right, peak_r = self.levelmeter.update(sample)
     self.pbvar_left.set(left-self.lowest_level)
     self.pbvar_right.set(right-self.lowest_level)
     if left > -3:
         self.pb_left.configure(style="red.Vertical.TProgressbar")
     elif left > -6:
         self.pb_left.configure(style="yellow.Vertical.TProgressbar")
     else:
         self.pb_left.configure(style="green.Vertical.TProgressbar")
     if right > -3:
         self.pb_right.configure(style="red.Vertical.TProgressbar")
     elif right > -6:
         self.pb_right.configure(style="yellow.Vertical.TProgressbar")
     else:
         self.pb_right.configure(style="green.Vertical.TProgressbar")
     self.after(self.update_rate, self.update)
Example #3
 def update(self, *args, **kwargs):
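     # Variant of the meter update loop: plays the chunk and reads the current levels directly
     # from the level meter's level/peak properties instead of calling update() here.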
     frames = self.wave.readframes(self.samplerate//self.update_rate)
     if not frames:
         self.pbvar_left.set(0)
         self.pbvar_right.set(0)
         print("done!")
         return
     sample = Sample.from_raw_frames(frames, self.samplewidth, self.samplerate, self.nchannels)
     self.audio_out.play_sample(sample)
     left, peak_l = self.levelmeter.level_left, self.levelmeter.peak_left
     right, peak_r = self.levelmeter.level_right, self.levelmeter.peak_right
     self.pbvar_left.set(left-self.lowest_level)
     self.pbvar_right.set(right-self.lowest_level)
     if left > -3:
         self.pb_left.configure(style="red.Vertical.TProgressbar")
     elif left > -6:
         self.pb_left.configure(style="yellow.Vertical.TProgressbar")
     else:
         self.pb_left.configure(style="green.Vertical.TProgressbar")
     if right > -3:
         self.pb_right.configure(style="red.Vertical.TProgressbar")
     elif right > -6:
         self.pb_right.configure(style="yellow.Vertical.TProgressbar")
     else:
         self.pb_right.configure(style="green.Vertical.TProgressbar")
     self.after(self.update_rate, self.update)
Example #4
def main(args):
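    # Mix the given audio files into one endless stream, print a console level meter,
    # and inject an extra crash sample into the mix at roughly 5 and 10 seconds.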
    if len(args) < 1:
        raise SystemExit(
            "Mixes one or more audio files. Arguments: inputfile...")
    hqresample = AudiofileToWavStream.supports_hq_resample()
    if not hqresample:
        print(
            "WARNING: ffmpeg isn't compiled with libsoxr, so hq resampling is not supported."
        )
    wav_streams = [
        AudiofileToWavStream(filename, hqresample=hqresample)
        for filename in args
    ]
    with StreamMixer(wav_streams, endless=True) as mixer:
        mixed_samples = iter(mixer)
        with Output(mixer.samplerate, mixer.samplewidth,
                    mixer.nchannels) as output:
            levelmeter = LevelMeter(rms_mode=False, lowest=-50)
            temp_stream = AudiofileToWavStream("samples/909_crash.wav",
                                               hqresample=hqresample)
            for timestamp, sample in mixed_samples:
                levelmeter.update(sample)
                output.play_sample(sample)
                time.sleep(sample.duration * 0.4)
                levelmeter.print(bar_width=60)
                if 5.0 <= timestamp <= 5.1:
                    mixer.add_stream(temp_stream)
                if 10.0 <= timestamp <= 10.1:
                    sample = Sample("samples/909_crash.wav").normalize()
                    mixer.add_sample(sample)
    print("done.")
Example #5
def play_console(filename_or_stream):
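    # Play the wav file in small chunks while printing a text VU meter.
    # Note: the "async" playback keyword below assumes Python older than 3.7, where it was not yet reserved.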
    with wave.open(filename_or_stream, 'r') as wav:
        samplewidth = wav.getsampwidth()
        samplerate = wav.getframerate()
        nchannels = wav.getnchannels()
        bar_width = 60
        update_rate = 20  # lower this if you hear the sound crackle!
        levelmeter = LevelMeter(rms_mode=False, lowest=-50.0)
        with Output(samplerate, samplewidth, nchannels,
                    int(update_rate / 4)) as out:
            while True:
                frames = wav.readframes(samplerate // update_rate)
                if not frames:
                    break
                sample = Sample.from_raw_frames(frames, wav.getsampwidth(),
                                                wav.getframerate(),
                                                wav.getnchannels())
                out.play_sample(sample, async=True)
                levelmeter.update(sample)
                time.sleep(
                    sample.duration * 0.4
                )  # print the peak meter more or less halfway during the sample
                levelmeter.print(bar_width)
    print("\ndone")
    input("Enter to exit:")
Example #6
 def __next__(self):
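     # Read one buffer of raw frames, run the raw-frame filters, then wrap the result
     # in a Sample and run the sample-level filters on it.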
     frames = self.source.readframes(self.buffer_size)
     for filter in self.frames_filters:
         frames = filter(frames)
     if not frames:
         return None
     sample = Sample.from_raw_frames(frames, self.samplewidth, self.samplerate, self.nchannels)
     for filter in self.filters:
         sample = filter(sample)
     return sample
Example #7
 def generate_sample(self, oscillator, duration, use_fade=False):
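     # Pull 'duration' seconds of oscillator values, scale them to the synth's sample width,
     # and build a mono Sample; returns None if the oscillator runs out early.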
     o = oscillator  # iter(oscillator)
     scale = 2**(8*self.synth.samplewidth-1)
     try:
         frames = [int(next(o)*scale) for _ in range(int(self.synth.samplerate*duration))]
     except StopIteration:
         return None
     else:
         sample = Sample.from_array(frames, self.synth.samplerate, 1)
         if use_fade:
             sample.fadein(0.05).fadeout(0.1)
         return sample
Example #8
def echo_lfo():
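    # Shape a 440 Hz sine with an envelope, add echoes, clip the result,
    # then plot the waveform and play it.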
    synth = WaveSynth(22050)
    s = Sine(440, amplitude=25000, samplerate=synth.samplerate)
    s = EnvelopeFilter(s, .2, .2, 0, 0, 1.5, stop_at_end=True)
    s = EchoFilter(s, .15, 5, 0.3, 0.6)
    s = ClipFilter(s, -32000, 32000)
    frames = [int(v) for v in s]
    import matplotlib.pyplot as plot
    plot.plot(frames)
    plot.show()
    samp = Sample.from_array(frames, synth.samplerate, 1)
    with Output.for_sample(samp) as out:
        out.play_sample(samp)
Example #9
 def set_effect(self, effect_nr, filename):
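     # Load the audio file (converted to wav) as the sample for this effect slot and enable its button.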
     try:
         with AudiofileToWavStream(filename, hqresample=hqresample) as wav:
             sample = Sample(wav)
             self.effects[effect_nr] = sample
     except IOError as x:
         print("Can't load effect sample:", x)
     else:
         for button in self.buttons:
             if button.effect_nr == effect_nr:
                 button["state"] = tk.NORMAL
                 button["text"] = os.path.splitext(
                     os.path.basename(filename))[0]
                 break
Example #10
 def do_button_release(self, event):
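     # event.state bit 0x0100 is the left mouse button mask, bit 0x0001 is the Shift modifier.
     # Shift-click loads a new jingle into the button; a plain click plays the assigned jingle.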
     if event.state & 0x0100 == 0:
         return  # no left mouse button event
     shift = event.state & 0x0001
     if shift:
         filename = tkinter.filedialog.askopenfilename()
         if filename:
             with AudiofileToWavStream(filename,
                                       hqresample=hqresample) as wav:
                 sample = Sample(wav)
                 self.jingles[event.widget.jingle_nr] = sample
             event.widget["state"] = tk.NORMAL
             event.widget["text"] = os.path.splitext(
                 os.path.basename(filename))[0]
     else:
         sample = self.jingles[event.widget.jingle_nr]
         if sample:
             self.app.play_sample(sample)
Example #11
 def __iter__(self):
     """
     Yields (timestamp, Sample) tuples that represent the mixed audio streams.
     """
     while True:
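         # Start from an empty sample and mix each active stream's next chunk into it;
         # streams that fail or run out are removed from the mix.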
         mixed_sample = Sample.from_raw_frames(b"", self.samplewidth, self.samplerate, self.nchannels)
         for sample_stream in self.sample_streams:
             try:
                 sample = next(sample_stream)
             except (os.error, ValueError):
                 # Problem reading from stream. Assume stream closed.
                 sample = None
             if sample:
                 mixed_sample.mix(sample)
             else:
                 self.remove_stream(sample_stream)
         yield self.timestamp, mixed_sample
         self.timestamp += mixed_sample.duration
Example #12
def play_console(filename_or_stream):
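    # Streaming variant: the output notifies the level meter of every chunk it has played,
    # so no manual sleep/update bookkeeping is needed here.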
    with wave.open(filename_or_stream, 'r') as wav:
        samplewidth = wav.getsampwidth()
        samplerate = wav.getframerate()
        nchannels = wav.getnchannels()
        bar_width = 60
        update_rate = 20   # lower this if you hear the sound crackle!
        levelmeter = LevelMeter(rms_mode=False, lowest=-50.0)
        with Output(samplerate, samplewidth, nchannels) as out:
            print("Audio API used:", out.audio_api)
            if not out.supports_streaming:
                raise RuntimeError("need api that supports streaming")
            out.register_notify_played(levelmeter.update)
            while True:
                frames = wav.readframes(samplerate//update_rate)
                if not frames:
                    break
                sample = Sample.from_raw_frames(frames, wav.getsampwidth(), wav.getframerate(), wav.getnchannels())
                out.play_sample(sample)
                levelmeter.print(bar_width)
    print("\ndone")
    input("Enter to exit:")
Example #13
def main(track_file, outputfile=None, interactive=False):
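    # Either start an interactive session on the track file, or mix the whole song and stream it
    # to the speakers, falling back to mixing into a wav file and playing that.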
    discard_unused = not interactive
    if interactive:
        repl = Repl(discard_unused_instruments=discard_unused)
        repl.do_load(track_file)
        repl.cmdloop("Interactive Samplebox session. Type 'help' for help on commands.")
    else:
        song = Song()
        song.read(track_file, discard_unused_instruments=discard_unused)
        with Output() as out:
            if out.supports_streaming:
                # mix and stream output in real time
                print("Mixing and streaming to speakers...")
                out.play_samples(song.mix_generator(), False)
                print("\r                          ")
            else:
                # output can't stream, fallback on mixing everything to a wav
                print("(Sorry, streaming audio is not possible, perhaps because you don't have pyaudio installed?)")
                song.mix(outputfile)
                mix = Sample(wave_file=outputfile)
                print("Playing sound...")
                out.play_sample(mix)