Example #1
    def load_samples(self):

        self.samples[KICK] = Sample(
            wave_file='jam/samples/kick.wav').normalize().make_32bit(
                scale_amplitude=True).lock()
        self.samples[SNARE] = Sample(
            wave_file='jam/samples/snare.wav').normalize().make_32bit(
                scale_amplitude=True).lock()
        self.samples[CLAP] = Sample(
            wave_file='jam/samples/clap.wav').normalize().make_32bit(
                scale_amplitude=True).lock()

        for note in scale:
            if note == C3:
                path = 'jam/samples/Lead C3.wav'
            elif note.islower():
                path = f'jam/samples/Lead {note.upper()}1.wav'
            else:
                path = f'jam/samples/Lead {note}2.wav'

            self.samples[f'lead_{note}'] = Sample(
                wave_file=path).normalize().make_32bit(
                    scale_amplitude=True).lock()

            # B and E (and the special C3 note) have no sharp variant
            if note not in 'beBE' + C3:
                # insert '#' before the octave digit: 'Lead A1.wav' -> 'Lead A#1.wav'
                path = path[:-5] + '#' + path[-5:]
                self.samples[f'lead_{note}#'] = Sample(
                    wave_file=path).normalize().make_32bit(
                        scale_amplitude=True).lock()
Example #2
    def chunked_frame_data(self, chunksize: int, repeat: bool=False,
                           stopcondition: Callable[[], bool]=lambda: False) -> Generator[memoryview, None, None]:
        notes = itertools.cycle(self.title_music) if repeat else iter(self.title_music)
        attack, decay, sustain, release = self.adsr_times

        num_frames = chunksize // synth_params.norm_samplewidth // synth_params.norm_nchannels
        sample_residue = Sample(None, nchannels=2)
        for v1, v2 in notes:
            if stopcondition():
                break
            vf1 = self.music_freq_table[v1]
            vf2 = self.music_freq_table[v2]
            osc1 = FastTriangle(vf1 * _sidfreq, amplitude=0.5)
            osc2 = FastTriangle(vf2 * _sidfreq, amplitude=0.5)
            f1 = EnvelopeFilter(osc1, attack, decay, sustain, 1.0, release, stop_at_end=True)
            f2 = EnvelopeFilter(osc2, attack, decay, sustain, 1.0, release, stop_at_end=True)
            sample1 = Sample.from_oscillator(f1, 1, synth_params.norm_samplerate)       # length is max. 1 second
            sample2 = Sample.from_oscillator(f2, 1, synth_params.norm_samplerate)       # length is max. 1 second
            sample_residue.join(sample1.stereo_mix(sample2, "R"))
            while len(sample_residue) >= num_frames:
                # TODO optimize this a bit by not using Samples but instead by looping over a memoryview of the frames (just like the super class does)
                yield sample_residue.view_frame_data()[:chunksize]
                sample_residue = Sample.from_raw_frames(sample_residue.view_frame_data()[chunksize:],
                                                        sample_residue.samplewidth, sample_residue.samplerate, sample_residue.nchannels)
        if len(sample_residue):
            yield sample_residue.view_frame_data()
Example #3
 def mix(self, verbose=True):
     """
     Mix all the patterns into a single result sample.
     """
     if not self.patterns:
         if verbose:
             print("No patterns to mix, output is empty.")
         return Sample()
     total_seconds = 0.0
     for p in self.patterns:
         bar = next(iter(p.values()))
         total_seconds += len(bar) * 60.0 / self.bpm / self.ticks
     if verbose:
         print("Mixing {:d} patterns...".format(len(self.patterns)))
     mixed = Sample().make_32bit()
     for index, timestamp, sample in self.mixed_samples(tracker=False):
         if verbose:
             print("\r{:3.0f} % ".format(timestamp / total_seconds * 100),
                   end="")
         mixed.mix_at(timestamp, sample)
     # chop/extend to get to the precise total duration (in case of silence in the last bars etc)
     missing = total_seconds - mixed.duration
     if missing > 0:
         mixed.add_silence(missing)
     elif missing < 0:
         mixed.clip(0, total_seconds)
     if verbose:
         print("\rMix done.")
     return mixed
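
For context, a minimal usage sketch of this mix() (hedged: the Song loading call mirrors Example #5, the track file name is hypothetical, and write_wav is assumed to be Sample's method for saving to disk):

song = Song()
song.read("demo_track.ini")          # hypothetical track file
result = song.mix(verbose=False)
result.write_wav("demo_mix.wav")     # assumed Sample API for writing a wav file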
Example #4
 def chunked_frame_data(self, chunksize: int, repeat: bool=False,
                        stopcondition: Callable[[], bool]=lambda: False) -> Generator[memoryview, None, None]:
     assert not repeat
     sample_residue = Sample(nchannels=2)
     for n in range(0, 180):
         if stopcondition():
             break
         freq = 0x8000 - n * 180
         osc = FastTriangle(freq * _sidfreq, amplitude=0.8)
         filtered = EnvelopeFilter(osc, 0.002, 0.004, 0.0, 0.6, 0.02, stop_at_end=True)
         sample_residue = yield from self.render_samples(filtered, sample_residue, chunksize, return_residue=True)
     if len(sample_residue) > 0:
         yield sample_residue.view_frame_data()
Example #5
def main(track_file, outputfile=None, interactive=False):
    discard_unused = not interactive
    if interactive:
        repl = Repl(discard_unused_instruments=discard_unused)
        repl.do_load(track_file)
        repl.cmdloop(
            "Interactive Samplebox session. Type 'help' for help on commands.")
    else:
        song = Song()
        song.read(track_file, discard_unused_instruments=discard_unused)
        with Output(mixing="sequential", queue_size=1) as out:
            if out.supports_streaming:
                # mix and stream output in real time
                print("Mixing and streaming to speakers...")
                samples = out.normalized_samples(song.mix_generator())
                for s in samples:
                    out.play_sample(s)
                out.wait_all_played()
                print("\r                          ")
            else:
                # output can't stream, fallback on mixing everything to a wav
                print(
                    "(Sorry, streaming audio is not possible; install one of the audio libraries that support it)"
                )
                song.mix(outputfile)
                mix = Sample(wave_file=outputfile)
                print("Playing sound...")
                out.play_sample(mix)
                out.wait_all_played()
Example #6
 def read_samples(self, instruments, samples_path):
     """Reads the sample files for the instruments."""
     self.instruments = {}
     for name, file in sorted(instruments.items()):
         self.instruments[name] = Sample(wave_file=os.path.join(
             samples_path, file)).normalize().make_32bit(
                 scale_amplitude=False).lock()
Example #7
    def _audio_playback(self, pcm_stream):
        # thread 3: audio playback
        levelmeter = LevelMeter()

        def played(sample):
            if self.client.stream_title != self.stream_title:
                self.stream_title = self.client.stream_title
                if self.song_title_callback:
                    self.song_title_callback(self.stream_title)
                else:
                    print("\n\nNew Song:", self.stream_title, "\n")
            levelmeter.update(sample)
            if self.update_ui:
                self.update_ui(levelmeter, None)
            else:
                levelmeter.print(60, True)

        with Output(mixing="sequential", frames_per_chunk=44100//4) as output:
            output.register_notify_played(played)
            while not self._stop_playback:
                try:
                    audio = pcm_stream.read(44100 * 2 * 2 // 20)    # ~1/20 second of 16-bit stereo audio
                    if not audio:
                        break
                except (IOError, ValueError):
                    break
                else:
                    if not self._stop_playback:
                        sample = Sample.from_raw_frames(audio, 2, 44100, 2)
                        output.play_sample(sample)
Example #8
 def __init__(
         self, samples_to_load: Dict[str, Tuple[Union[str, Sample],
                                                int]]) -> None:
     global samples
     samples.clear()
     self.output = Output(mixing="mix")
     if any(isinstance(smp, str) for smp, _ in samples_to_load.values()):
         print("Loading sound files...")
     for name, (filename, max_simultaneously) in samples_to_load.items():
         if isinstance(filename, Sample):
             samples[name] = filename
         else:
             data = pkgutil.get_data(__name__, "sounds/" + filename)
             if data:
                 tmp = tempfile.NamedTemporaryFile(delete=False,
                                                   suffix=".ogg")
                 try:
                     tmp.write(data)
                     tmp.close()
                     samples[name] = Sample(
                         streaming.AudiofileToWavStream(tmp.name),
                         name).stereo()
                 finally:
                     os.remove(tmp.name)
             else:
                 raise SystemExit("corrupt package; sound data is missing")
         self.output.set_sample_play_limit(name, max_simultaneously)
     print("Sound API initialized:", self.output.audio_api)
Example #9
def play_console(filename_or_stream):
    with wave.open(filename_or_stream, 'r') as wav:
        samplewidth = wav.getsampwidth()
        samplerate = wav.getframerate()
        nchannels = wav.getnchannels()
        bar_width = 60
        levelmeter = LevelMeter(rms_mode=False, lowest=-50.0)
        with Output(samplerate, samplewidth, nchannels, mixing="sequential") as out:
            print("Audio API used:", out.audio_api)
            if not out.supports_streaming:
                raise RuntimeError("need api that supports streaming")
            out.register_notify_played(levelmeter.update)
            while True:
                frames = wav.readframes(samplerate//update_rate)
                if not frames:
                    break
                sample = Sample.from_raw_frames(frames, samplewidth, samplerate, nchannels)
                out.play_sample(sample)
                levelmeter.print(bar_width)
            while out.still_playing():
                time.sleep(1/update_rate)
                levelmeter.print(bar_width)
            out.wait_all_played()
    print("\nDone. Enter to exit:")
    input()
Example #10
def demo_song():
    synth = WaveSynth()
    notes = {note: key_freq(49+i) for i, note in enumerate(['A', 'A#', 'B', 'C', 'C#', 'D', 'D#', 'E', 'F', 'F#', 'G', 'G#'])}
    tempo = 0.3

    def synth_sample(freq, duration):
        harmonics = [(1, 1), (2, 1/2), (4, 1/4), (6, 1/6)]
        a = synth.harmonics(freq, duration, harmonics)
        return a.envelope(0.05, 0.2, 0.8, 0.5)

    silence = Sample.from_array([0]*int(synth.samplerate*tempo*2), synth.samplerate, numchannels=1)
    song = "A A B. A D. C#.. ;  A A B. A E. D.. ;  A A A. F#.. D C#.. B ;  G G F#.. D E D ; ; "\
        "A A B. A D C#.. ; A A B. A E D. ; A A A. F#.. D C#.. B ; G G F#.. D E D ; ; "
    with Output(synth.samplerate, synth.samplewidth, 1, mixing="sequential", queue_size=50) as out:
        for note in song.split():
            if note == ";":
                print()
                out.play_sample(silence)
                continue
            print(note, end="  ", flush=True)
            if note.endswith(".."):
                sample = synth_sample(notes[note[:-2]], tempo*4)
            elif note.endswith("."):
                sample = synth_sample(notes[note[:-1]], tempo*2)
            else:
                sample = synth_sample(notes[note], tempo)
            out.play_sample(sample)
        print()
        out.wait_all_played()
Example #11
 def region(self, line: str) -> Optional[Region]:
     region = Region()
     pairs = line.split()
     while pairs:
         variable, value = pairs[0].split("=")
         del pairs[0]
         if variable == "seq_position":
             region.seq = int(value)
         elif variable == "sample":
             if "\\" in value:
                 value = value.replace("\\", os.path.sep)
             if value:
                 filename = os.path.join(self._samples_location, value)
                 if not os.path.isfile(filename):
                     print("Warning: sample not found:", filename, file=sys.stderr)
                     return None
                 region.sample = Sample(filename, value)
                 region.sample.amplify(0.7)    # adjust base volume down to avoid clipping issues when mixing
                 region.sample.normalize()
                 self.total_sample_memory += len(region.sample) * region.sample.samplewidth * region.sample.nchannels
         elif variable == "lorand":
             if value.endswith("s"):
                 value = value[:-1]
             region.lo_rand = float(value)
         elif variable == "hirand":
             if value.endswith("s"):
                 value = value[:-1]
             region.hi_rand = float(value)
         else:
             raise IOError("invalid variable in region: "+variable)
     return region
Example #12
def stereo_pan():
    synth = WaveSynth()
    # panning a stereo source:
    wave = Sample("samples/SOS 020.wav").clip(6, 12).normalize().fadein(0.5).fadeout(0.5).lock()
    osc = Sine(0.4)
    panning = wave.copy().pan(lfo=osc).fadeout(0.2)
    with Output.for_sample(panning) as out:
        out.play_sample(panning)
        out.wait_all_played()
    # panning a generated mono source:
    fm = Sine(0.5, 0.1999, bias=0.2)
    wave = synth.triangle(220, 5, fm_lfo=fm).lock()
    osc = Sine(0.4)
    panning = wave.copy().pan(lfo=osc).fadeout(0.2)
    with Output.for_sample(panning) as out:
        out.play_sample(panning)
        out.wait_all_played()
Example #13
 def chunked_frame_data(self, chunksize: int, repeat: bool=False,
                        stopcondition: Callable[[], bool]=lambda: False) -> Generator[memoryview, None, None]:
     assert repeat, "cover is a repeating sound"
     sample_residue = Sample(nchannels=2)
     while not stopcondition():
         freq = random.randint(0x6000, 0xd800)
         osc = FastTriangle(freq * _sidfreq, amplitude=0.7)
         filtered = EnvelopeFilter(osc, 0.002, 0.02, 0.0, 0.5, 0.02, stop_at_end=True)
         sample_residue = yield from self.render_samples(filtered, sample_residue, chunksize, return_residue=True)
Example #14
 def chunked_frame_data(self, chunksize: int, repeat: bool=False,
                        stopcondition: Callable[[], bool]=lambda: False) -> Generator[memoryview, None, None]:
      # generate a new random diamond sound every time this is played
     freq = random.randint(0x8600, 0xfeff)
     freq &= 0b0111100011111111
     freq |= 0b1000011000000000
     osc = FastTriangle(freq * _sidfreq, amplitude=0.7)
     filtered = EnvelopeFilter(osc, 0.002, 0.006, 0.0, 0.7, 0.6, stop_at_end=True)
     sample_residue = Sample(nchannels=2)
     yield from self.render_samples(filtered, sample_residue, chunksize, stopcondition=stopcondition)
Example #15
 def chunked_frame_data(self, chunksize: int, repeat: bool=False,
                        stopcondition: Callable[[], bool]=lambda: False) -> Generator[memoryview, None, None]:
     assert not repeat
     fm = Linear(0, -2.3e-5)
     osc = Triangle(1567.98174, fm_lfo=fm)
     filtered = EnvelopeFilter(osc, 0.1, 0.3, 1.5, 1.0, 0.07, stop_at_end=True)
     ampmod = SquareH(10, 9, amplitude=0.5, bias=0.5)
     modulated = AmpModulationFilter(filtered, ampmod)
     sample_residue = Sample(nchannels=2)
     yield from self.render_samples(modulated, sample_residue, chunksize, stopcondition=stopcondition)
Example #16
def sample_serializer(s: sample.Sample) -> Dict[str, Any]:
    return {
        "__class__": "synthplayer.sample.Sample",
        "samplerate": s.samplerate,
        "samplewidth": s.samplewidth,
        "duration": s.duration,
        "nchannels": s.nchannels,
        "name": s.name,
        "frames": s.view_frame_data()
    }
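
As a counterpart, a sketch of a matching deserializer (an assumption, not part of the source; it rebuilds the Sample with Sample.from_raw_frames, the constructor already used in Examples #2, #7 and #9):

def sample_deserializer(d: Dict[str, Any]) -> sample.Sample:
    # reverse of sample_serializer: rebuild the Sample from the raw frame bytes
    # and the stored audio parameters (assumes the frame bytes arrived intact)
    s = sample.Sample.from_raw_frames(d["frames"], d["samplewidth"],
                                      d["samplerate"], d["nchannels"])
    s.name = d["name"]    # assumed writable attribute; the serializer reads s.name
    return s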
Example #17
 def generate_sample(self, oscillator, duration, use_fade=False):
     scale = 2**(8*self.synth.samplewidth-1)
     try:
         frames = [int(v*scale) for v in itertools.islice(oscillator, int(self.synth.samplerate*duration))]
     except StopIteration:
         return None
     else:
         sample = Sample.from_array(frames, self.synth.samplerate, 1)
         if use_fade:
             sample.fadein(0.05).fadeout(0.1)
         return sample
Example #18
 def do_mix(self, args):
     """mix and play all patterns of the song"""
     if not self.song.pattern_sequence:
         print("Nothing to be mixed.")
         return
     output = "__temp_mix.wav"
     self.song.mix(output)
     mix = Sample(wave_file=output)
     print("Playing sound...")
     self.out.play_sample(mix)
     os.remove(output)
Example #19
 def chunked_frame_data(self, chunksize, repeat=False, stopcondition=lambda: False):
     played_duration = 0.0
     num_frames = chunksize // self.samplewidth // self.nchannels
     scale = 2 ** (8 * self.samplewidth - 1)
     while played_duration < self.max_play_duration:
         try:
             frames = [int(v * scale) for v in itertools.islice(self.oscillator, num_frames)]
         except StopIteration:
             break
         else:
             sample = Sample.from_array(frames, self.samplerate, 1)
             yield sample.view_frame_data()
         played_duration += num_frames / self.samplerate
Example #20
 def set_effect(self, effect_nr, filename):
     try:
         with AudiofileToWavStream(filename, hqresample=hqresample) as wav:
             sample = Sample(wav)
             self.effects[effect_nr] = sample
     except IOError as x:
         print("Can't load effect sample:", x)
     else:
         for button in self.buttons:
             if button.effect_nr == effect_nr:
                 button["state"] = tk.NORMAL
                 button["text"] = os.path.splitext(os.path.basename(filename))[0]
                 break
Example #21
def echo_lfo():
    synth = WaveSynth(22050)
    s = Sine(440, amplitude=25000, samplerate=synth.samplerate)
    s = EnvelopeFilter(s, .2, .2, 0, 0, 1.5, stop_at_end=True)
    s = EchoFilter(s, .15, 5, 0.3, 0.6)
    s = ClipFilter(s, -32000, 32000)
    frames = [int(v) for v in s]
    import matplotlib.pyplot as plot
    plot.plot(frames)
    plot.show()
    samp = Sample.from_array(frames, synth.samplerate, 1)
    with Output.for_sample(samp) as out:
        out.play_sample(samp)
        out.wait_all_played()
Example #22
 def generate_sample(self, oscillator: Oscillator, duration: float, use_fade: bool = False) -> Optional[Sample]:
     scale = 2**(8*self.synth.samplewidth-1)
     blocks = oscillator.blocks()
     try:
          # a list comprehension is used instead of list(generator expression): under
          # PEP 479 a genexp would turn the StopIteration from next() into a
          # RuntimeError, which would bypass the except clause below
          sample_blocks = [next(blocks) for _ in range(int(self.synth.samplerate*duration/params.norm_osc_blocksize))]
          float_frames = sum(sample_blocks, [])
          frames = [int(v*scale) for v in float_frames]
     except StopIteration:
         return None
     else:
         sample = Sample.from_array(frames, self.synth.samplerate, 1)
         if use_fade:
             sample.fadein(0.05).fadeout(0.1)
         return sample
Example #23
 def chunked_frame_data(self, chunksize, repeat=False, stopcondition=lambda: False):
     num_frames = chunksize // self.samplewidth // self.nchannels
     if num_frames != params.norm_osc_blocksize:
         raise ValueError("streaming osc num_frames must be equal to the oscillator blocksize")
     played_duration = 0.0
     scale = 2 ** (8 * self.samplewidth - 1)
     while played_duration < self.max_play_duration:
         try:
             frames = [int(v * scale) for v in next(self.blocks)]
         except StopIteration:
             break
         else:
             sample = Sample.from_array(frames, self.samplerate, 1)
             yield sample.view_frame_data()
         played_duration += num_frames / self.samplerate
Example #24
 def render_samples(self, osc: Oscillator, sample_residue: Sample,
                    sample_chunksize: int, stopcondition: Callable[[], bool] = lambda: False,
                    return_residue: bool = False) -> Generator[memoryview, None, Sample]:
     num_frames = sample_chunksize // synth_params.norm_samplewidth // synth_params.norm_nchannels
     blocks = osc.blocks()
     while not stopcondition():
         try:
             block = next(blocks)
         except StopIteration:
             break
         sample = Sample.from_osc_block(block, osc.samplerate, 2 ** (8 * synth_params.norm_samplewidth - 1)).stereo()
         sample_residue.join(sample)
         while len(sample_residue) >= num_frames:
             yield sample_residue.view_frame_data()[:sample_chunksize]
             sample_residue = Sample.from_raw_frames(sample_residue.view_frame_data()[sample_chunksize:],
                                                     sample_residue.samplewidth, sample_residue.samplerate, sample_residue.nchannels)
     if return_residue:
         return sample_residue
     yield sample_residue.view_frame_data()
     return sample_residue
Example #25
def demo_song(profiling=False):
    synth = WaveSynth()
    notes = {note: key_freq(49+i) for i, note in enumerate(['A', 'A#', 'B', 'C', 'C#', 'D', 'D#', 'E', 'F', 'F#', 'G', 'G#'])}
    tempo = 0.3

    def instrument(freq, duration):
        harmonics = [(1, 1), (2, 1/2), (4, 1/4), (6, 1/6)]
        a = synth.harmonics(freq, duration, harmonics)
        return a.envelope(0.05, 0.2, 0.8, 0.5)

    print("Synthesizing tones...")
    perf_c = time.perf_counter()
    quarter_notes = {note: instrument(notes[note], tempo) for note in notes}
    half_notes = {note: instrument(notes[note], tempo*2) for note in notes}
    full_notes = {note: instrument(notes[note], tempo*4) for note in notes}
    silence = Sample.from_array([0]*int(synth.samplerate*tempo*2), synth.samplerate, numchannels=1)
    if profiling:
        print(time.perf_counter()-perf_c)
    else:
        song = "A A B. A D. C#.. ;  A A B. A E. D.. ;  A A A. F#.. D C#.. B ;  G G F#.. D E D ; ; "\
            "A A B. A D C#.. ; A A B. A E D. ; A A A. F#.. D C#.. B ; G G F#.. D E D ; ; "
        with Output(synth.samplerate, synth.samplewidth, 1, mixing="sequential") as out:
            for note in song.split():
                if note == ";":
                    print()
                    out.play_sample(silence)
                    continue
                print(note, end="  ", flush=True)
                if note.endswith(".."):
                    sample = full_notes[note[:-2]]
                elif note.endswith("."):
                    sample = half_notes[note[:-1]]
                else:
                    sample = quarter_notes[note]
                out.play_sample(sample)
            print()
            out.wait_all_played()
Example #26
def sample_from_osc(osc: Oscillator) -> Sample:
    s = Sample.from_oscillator(osc, 5)     # 5 seconds is the maximum it will be.
    return s.stereo()
Example #27
 def mix_generator(self):
     """
     Returns a generator that produces samples that are the chronological
     chunks of the final output mix. This avoids having to mix it into one big
     output mix sample.
     """
     if not self.patterns:
         yield Sample()
         return
     total_seconds = 0.0
     for p in self.patterns:
         bar = next(iter(p.values()))
         total_seconds += len(bar) * 60.0 / self.bpm / self.ticks
     mixed_duration = 0.0
     samples = self.mixed_samples()
     # get the first sample
     index, previous_timestamp, sample = next(samples)
     mixed = Sample().make_32bit()
     mixed.mix_at(previous_timestamp, sample)
     # continue mixing the following samples
     for index, timestamp, sample in samples:
         trigger_duration = timestamp - previous_timestamp
         overflow = None
         if mixed.duration < trigger_duration:
             # fill with some silence to reach the next sample position
             mixed.add_silence(trigger_duration - mixed.duration)
         elif mixed.duration > trigger_duration:
             # chop off the sound that extends into the next sample position
             # keep this overflow and mix it later!
             overflow = mixed.split(trigger_duration)
         mixed_duration += mixed.duration
         yield mixed
         mixed = overflow if overflow else Sample().make_32bit()
         mixed.mix(sample)
         previous_timestamp = timestamp
     # output the last remaining sample and extend it to the end of the duration if needed
     timestamp = total_seconds
     trigger_duration = timestamp - previous_timestamp
     if mixed.duration < trigger_duration:
         mixed.add_silence(trigger_duration - mixed.duration)
     elif mixed.duration > trigger_duration:
         mixed.clip(0, trigger_duration)
     mixed_duration += mixed.duration
     yield mixed
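
A minimal way to consume this generator is the streaming path already shown in Example #5 (assuming a loaded Song instance):

with Output(mixing="sequential", queue_size=1) as out:
    for chunk in out.normalized_samples(song.mix_generator()):
        out.play_sample(chunk)
    out.wait_all_played()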
Example #28
    def mix(self, song):

        mixed = Sample().make_32bit()
        time_per_index = 60.0 / song.bpm / song.ticks

        notes = max(len(song.melody), len(song.drums))

        for i in range(notes):

            timestamp = time_per_index * i

            try:
                chord = song.melody[i]
            except (IndexError, TypeError):
                pass
            else:
                if chord != REST_NOTE:
                    if isinstance(chord, list):
                        group = chord[:5]
                    else:
                        group = [chord]
                    for note in group:
                        sample = self.samples[f'lead_{note}']
                        if len(group) > 1:
                            # lower the volume when several notes sound at once
                            volume = [1, 0.9, 0.65, 0.6, 0.5][len(group) - 1]
                            mixed.mix_at(timestamp, sample.at_volume(volume))
                        else:
                            mixed.mix_at(timestamp, sample)

            try:
                beat = song.drums[i]
            except (IndexError, TypeError):
                pass
            else:
                if beat != REST_NOTE:
                    for instrument in beat[:5]:
                        mixed.mix_at(timestamp, self.samples[instrument])

        return mixed
Example #29
# Playing a long sample or music file doesn't map nicely to this pattern.
# Instead, just play it _as a single huge sample_ where the sample itself
# takes care of dynamically producing its audio data chunks.
print("Streaming mp3 using realtime mixer...")
counter = 1


def played_callback(sample):
    global counter
    print(" played sound chunk", counter, end="\r")
    counter += 1


with AudiofileToWavStream("example_mixes/track3.mp3") as wavstream:
    sample = StreamingSample(wavstream, wavstream.name)
    hihat = Sample("samples/909_hihat_closed.wav").normalize()
    with Output(mixing="mix", frames_per_chunk=afmt.rate//10) as out:
        out.register_notify_played(played_callback)
        # as an example, we show the capability of real time mixing by adding some other samples in the timeline
        out.play_sample(hihat, delay=0.0)
        out.play_sample(hihat, delay=0.5)
        out.play_sample(hihat, delay=1.0)
        out.play_sample(hihat, delay=1.5)
        out.play_sample(sample, delay=2.0)
        out.wait_all_played()    # the mixer itself takes care of grabbing new data as needed

# ** Streaming a large mp3 file using the sequential mixing output **
# This is more efficient for just playing large music files,
# and can be done by simply playing sample chunks one after another.
print("Streaming mp3 using sequential mixer...")
with AudiofileToWavStream("example_mixes/track3.mp3") as wavstream:
Example #30
"""
Plays a couple of samples each at a certain delayed time.
Written by Irmen de Jong ([email protected]) - License: GNU LGPL 3.
"""

import os
import time
from synthplayer.sample import Sample
from synthplayer.playback import Output


s1 = Sample("samples/909_clap.wav").normalize()
s2 = Sample("samples/909_hi_tom.wav").normalize()
s3 = Sample("samples/909_ride.wav").normalize()
s4 = Sample("samples/Drop the bass now.wav").normalize()
s5 = Sample("samples/909_snare_drum.wav").normalize()
s6 = Sample("samples/909_hihat_closed.wav").normalize()
s6_soft = s6.copy().amplify(0.2)

with Output(mixing="sequential", queue_size=3) as out:
    print("\nPlaying samples with sequential mixing mode.")
    print("This takes care of playing samples only if the previous one finished,")
    print("but you cannot mix any sounds. It's ideal for playback of a single sound source,")
    print("such as an audio clip or audio stream that comes in chunks.")
    out.play_sample(s1)
    out.play_sample(s2)
    out.play_sample(s3)
    out.play_sample(s4)
    out.play_sample(s5)
    out.play_sample(s5)
    out.play_sample(s5)
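    out.wait_all_played()    # assumed final line (the listing is cut off); the other Output blocks in these examples all end by waiting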