Example 1
    def load_samples(self):

        self.samples[KICK] = Sample(
            wave_file='jam/samples/kick.wav').normalize().make_32bit(
                scale_amplitude=True).lock()
        self.samples[SNARE] = Sample(
            wave_file='jam/samples/snare.wav').normalize().make_32bit(
                scale_amplitude=True).lock()
        self.samples[CLAP] = Sample(
            wave_file='jam/samples/clap.wav').normalize().make_32bit(
                scale_amplitude=True).lock()

        for note in scale:
            if note == C3:
                path = 'jam/samples/Lead C3.wav'
            elif note.islower():
                path = f'jam/samples/Lead {note.upper()}1.wav'
            else:
                path = f'jam/samples/Lead {note}2.wav'

            self.samples[f'lead_{note}'] = Sample(
                wave_file=path).normalize().make_32bit(
                    scale_amplitude=True).lock()

            if note not in 'beBE' + C3:
                # B and E have no sharp; for the other notes, also load the
                # sharp variant by inserting '#' before the octave digit
                path = path[:-5] + '#' + path[-5:]
                self.samples[f'lead_{note}#'] = Sample(
                    wave_file=path).normalize().make_32bit(
                        scale_amplitude=True).lock()
Example 2
 def mix(self, verbose=True):
     """
     Mix all the patterns into a single result sample.
     """
     if not self.patterns:
         if verbose:
             print("No patterns to mix, output is empty.")
         return Sample()
     total_seconds = 0.0
     for p in self.patterns:
         bar = next(iter(p.values()))
         total_seconds += len(bar) * 60.0 / self.bpm / self.ticks
     if verbose:
         print("Mixing {:d} patterns...".format(len(self.patterns)))
     mixed = Sample().make_32bit()
     for index, timestamp, sample in self.mixed_samples(tracker=False):
         if verbose:
             print("\r{:3.0f} % ".format(timestamp / total_seconds * 100),
                   end="")
         mixed.mix_at(timestamp, sample)
      # chop/extend to get the precise total duration (in case of silence in the last bars, etc.)
     missing = total_seconds - mixed.duration
     if missing > 0:
         mixed.add_silence(missing)
     elif missing < 0:
         mixed.clip(0, total_seconds)
     if verbose:
         print("\rMix done.")
     return mixed
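For reference, a worked instance of the timing formula above (the bpm and ticks values are chosen arbitrarily for illustration):

# at bpm=120 with ticks=4 ticks per beat, one tick lasts:
seconds_per_tick = 60.0 / 120 / 4     # = 0.125 s
# so a 16-tick bar contributes 16 * 0.125 = 2.0 seconds to total_seconds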
Example 3
 def read_samples(self, instruments, samples_path):
     """Reads the sample files for the instruments."""
     self.instruments = {}
     for name, file in sorted(instruments.items()):
         self.instruments[name] = Sample(wave_file=os.path.join(
             samples_path, file)).normalize().make_32bit(
                 scale_amplitude=False).lock()
Example 4
 def region(self, line: str) -> Optional[Region]:
     region = Region()
     pairs = line.split()
     while pairs:
         variable, value = pairs[0].split("=")
         del pairs[0]
         if variable == "seq_position":
             region.seq = int(value)
         elif variable == "sample":
             if "\\" in value:
                 value = value.replace("\\", os.path.sep)
             if value:
                 filename = os.path.join(self._samples_location, value)
                 if not os.path.isfile(filename):
                     print("Warning: sample not found:", filename, file=sys.stderr)
                     return None
                 region.sample = Sample(filename, value)
                 region.sample.amplify(0.7)    # adjust base volume down to avoid clipping issues when mixing
                 region.sample.normalize()
                 self.total_sample_memory += len(region.sample) * region.sample.samplewidth * region.sample.nchannels
         elif variable == "lorand":
             if value.endswith("s"):
                 value = value[:-1]
             region.lo_rand = float(value)
         elif variable == "hirand":
             if value.endswith("s"):
                 value = value[:-1]
             region.hi_rand = float(value)
         else:
             raise IOError("invalid variable in region: "+variable)
     return region
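For reference, a sketch of the whitespace-separated variable=value line format this parser accepts (the file name and values below are invented for illustration):

# hypothetical region line; only the four variables handled above are valid
line = "seq_position=1 sample=drums\\kick_01.wav lorand=0.0 hirand=0.5s"
# backslashes in the sample path become the platform separator, and a
# trailing "s" on lorand/hirand is stripped before float conversion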
Example 5
def main(track_file, outputfile=None, interactive=False):
    discard_unused = not interactive
    if interactive:
        repl = Repl(discard_unused_instruments=discard_unused)
        repl.do_load(track_file)
        repl.cmdloop(
            "Interactive Samplebox session. Type 'help' for help on commands.")
    else:
        song = Song()
        song.read(track_file, discard_unused_instruments=discard_unused)
        with Output(mixing="sequential", queue_size=1) as out:
            if out.supports_streaming:
                # mix and stream output in real time
                print("Mixing and streaming to speakers...")
                samples = out.normalized_samples(song.mix_generator())
                for s in samples:
                    out.play_sample(s)
                out.wait_all_played()
                print("\r                          ")
            else:
                # output can't stream; fall back to mixing everything into a wav
                print(
                    "(Sorry, streaming audio is not possible, install one of the audio libraries that supports that)"
                )
                song.mix(outputfile)
                mix = Sample(wave_file=outputfile)
                print("Playing sound...")
                out.play_sample(mix)
                out.wait_all_played()
Example 6
    def chunked_frame_data(self, chunksize: int, repeat: bool=False,
                           stopcondition: Callable[[], bool]=lambda: False) -> Generator[memoryview, None, None]:
        notes = itertools.cycle(self.title_music) if repeat else iter(self.title_music)
        attack, decay, sustain, release = self.adsr_times

        num_frames = chunksize // synth_params.norm_samplewidth // synth_params.norm_nchannels
        sample_residue = Sample(None, nchannels=2)
        for v1, v2 in notes:
            if stopcondition():
                break
            vf1 = self.music_freq_table[v1]
            vf2 = self.music_freq_table[v2]
            osc1 = FastTriangle(vf1 * _sidfreq, amplitude=0.5)
            osc2 = FastTriangle(vf2 * _sidfreq, amplitude=0.5)
            f1 = EnvelopeFilter(osc1, attack, decay, sustain, 1.0, release, stop_at_end=True)
            f2 = EnvelopeFilter(osc2, attack, decay, sustain, 1.0, release, stop_at_end=True)
            sample1 = Sample.from_oscillator(f1, 1, synth_params.norm_samplerate)       # length is max. 1 second
            sample2 = Sample.from_oscillator(f2, 1, synth_params.norm_samplerate)       # length is max. 1 second
            sample_residue.join(sample1.stereo_mix(sample2, "R"))
            while len(sample_residue) >= num_frames:
                # TODO optimize this a bit by not using Samples but instead by looping over a memoryview of the frames (just like the super class does)
                yield sample_residue.view_frame_data()[:chunksize]
                sample_residue = Sample.from_raw_frames(sample_residue.view_frame_data()[chunksize:],
                                                        sample_residue.samplewidth, sample_residue.samplerate, sample_residue.nchannels)
        if len(sample_residue):
            yield sample_residue.view_frame_data()
Example 7
 def __init__(
         self, samples_to_load: Dict[str, Tuple[Union[str, Sample],
                                                int]]) -> None:
     global samples
     samples.clear()
     self.output = Output(mixing="mix")
     if any(isinstance(smp, str) for smp, _ in samples_to_load.values()):
         print("Loading sound files...")
     for name, (filename, max_simultaneously) in samples_to_load.items():
         if isinstance(filename, Sample):
             samples[name] = filename
         else:
             data = pkgutil.get_data(__name__, "sounds/" + filename)
             if data:
                 tmp = tempfile.NamedTemporaryFile(delete=False,
                                                   suffix=".ogg")
                 try:
                     tmp.write(data)
                     tmp.close()
                     samples[name] = Sample(
                         streaming.AudiofileToWavStream(tmp.name),
                         name).stereo()
                 finally:
                     os.remove(tmp.name)
             else:
                 raise SystemExit("corrupt package; sound data is missing")
         self.output.set_sample_play_limit(name, max_simultaneously)
     print("Sound API initialized:", self.output.audio_api)
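A hedged usage sketch of the initializer above; the enclosing class name (SoundApi here) and the sound file names are invented, since the listing does not show them:

# hypothetical: assumes the __init__ above belongs to a class named SoundApi
api = SoundApi({
    "explosion": ("explosion.ogg", 4),    # at most 4 playing at once
    "jingle": ("jingle.ogg", 1),
})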
Example 8
 def chunked_frame_data(self, chunksize: int, repeat: bool=False,
                        stopcondition: Callable[[], bool]=lambda: False) -> Generator[memoryview, None, None]:
     assert repeat, "cover is a repeating sound"
     sample_residue = Sample(nchannels=2)
     while not stopcondition():
         freq = random.randint(0x6000, 0xd800)
         osc = FastTriangle(freq * _sidfreq, amplitude=0.7)
         filtered = EnvelopeFilter(osc, 0.002, 0.02, 0.0, 0.5, 0.02, stop_at_end=True)
         sample_residue = yield from self.render_samples(filtered, sample_residue, chunksize, return_residue=True)
Example 9
 def mix_generator(self):
     """
     Returns a generator that produces samples that are the chronological
     chunks of the final output mix. This avoids having to mix it into one big
     output mix sample.
     """
     if not self.patterns:
         yield Sample()
         return
     total_seconds = 0.0
     for p in self.patterns:
         bar = next(iter(p.values()))
         total_seconds += len(bar) * 60.0 / self.bpm / self.ticks
     mixed_duration = 0.0
     samples = self.mixed_samples()
     # get the first sample
     index, previous_timestamp, sample = next(samples)
     mixed = Sample().make_32bit()
     mixed.mix_at(previous_timestamp, sample)
     # continue mixing the following samples
     for index, timestamp, sample in samples:
         trigger_duration = timestamp - previous_timestamp
         overflow = None
         if mixed.duration < trigger_duration:
             # fill with some silence to reach the next sample position
             mixed.add_silence(trigger_duration - mixed.duration)
         elif mixed.duration > trigger_duration:
             # chop off the sound that extends into the next sample position
             # keep this overflow and mix it later!
             overflow = mixed.split(trigger_duration)
         mixed_duration += mixed.duration
         yield mixed
         mixed = overflow if overflow else Sample().make_32bit()
         mixed.mix(sample)
         previous_timestamp = timestamp
     # output the last remaining sample and extend it to the end of the duration if needed
     timestamp = total_seconds
     trigger_duration = timestamp - previous_timestamp
     if mixed.duration < trigger_duration:
         mixed.add_silence(trigger_duration - mixed.duration)
     elif mixed.duration > trigger_duration:
         mixed.clip(0, trigger_duration)
     mixed_duration += mixed.duration
     yield mixed
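The generator is intended to be consumed chunk by chunk rather than collected into one big sample; a sketch of that pattern, mirroring the streaming branch of Example 5:

# stream the mix chunks straight to the speakers as they are produced
with Output(mixing="sequential", queue_size=1) as out:
    for chunk in out.normalized_samples(song.mix_generator()):
        out.play_sample(chunk)
    out.wait_all_played()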
Example 10
 def chunked_frame_data(self, chunksize: int, repeat: bool=False,
                        stopcondition: Callable[[], bool]=lambda: False) -> Generator[memoryview, None, None]:
     assert not repeat
     fm = Linear(0, -2.3e-5)
     osc = Triangle(1567.98174, fm_lfo=fm)
     filtered = EnvelopeFilter(osc, 0.1, 0.3, 1.5, 1.0, 0.07, stop_at_end=True)
     ampmod = SquareH(10, 9, amplitude=0.5, bias=0.5)
     modulated = AmpModulationFilter(filtered, ampmod)
     sample_residue = Sample(nchannels=2)
     yield from self.render_samples(modulated, sample_residue, chunksize, stopcondition=stopcondition)
Example 11
 def chunked_frame_data(self, chunksize: int, repeat: bool=False,
                        stopcondition: Callable[[], bool]=lambda: False) -> Generator[memoryview, None, None]:
      # generate a new random diamond sound every time this is played
      freq = random.randint(0x8600, 0xfeff)
      freq &= 0b0111100011111111    # keep the random low byte and bits 11-14...
      freq |= 0b1000011000000000    # ...but force bits 9, 10 and 15 on (and bit 8 off)
     osc = FastTriangle(freq * _sidfreq, amplitude=0.7)
     filtered = EnvelopeFilter(osc, 0.002, 0.006, 0.0, 0.7, 0.6, stop_at_end=True)
     sample_residue = Sample(nchannels=2)
     yield from self.render_samples(filtered, sample_residue, chunksize, stopcondition=stopcondition)
Example 12
 def do_mix(self, args):
     """mix and play all patterns of the song"""
     if not self.song.pattern_sequence:
         print("Nothing to be mixed.")
         return
     output = "__temp_mix.wav"
     self.song.mix(output)
     mix = Sample(wave_file=output)
     print("Playing sound...")
     self.out.play_sample(mix)
     os.remove(output)
Example 13
 def set_effect(self, effect_nr, filename):
     try:
         with AudiofileToWavStream(filename, hqresample=hqresample) as wav:
             sample = Sample(wav)
             self.effects[effect_nr] = sample
     except IOError as x:
         print("Can't load effect sample:", x)
     else:
         for button in self.buttons:
             if button.effect_nr == effect_nr:
                 button["state"] = tk.NORMAL
                 button["text"] = os.path.splitext(os.path.basename(filename))[0]
                 break
Example 14
 def chunked_frame_data(self, chunksize: int, repeat: bool=False,
                        stopcondition: Callable[[], bool]=lambda: False) -> Generator[memoryview, None, None]:
     assert not repeat
     sample_residue = Sample(nchannels=2)
     for n in range(0, 180):
         if stopcondition():
             break
         freq = 0x8000 - n * 180
         osc = FastTriangle(freq * _sidfreq, amplitude=0.8)
         filtered = EnvelopeFilter(osc, 0.002, 0.004, 0.0, 0.6, 0.02, stop_at_end=True)
         sample_residue = yield from self.render_samples(filtered, sample_residue, chunksize, return_residue=True)
     if len(sample_residue) > 0:
         yield sample_residue.view_frame_data()
Example 15
def stereo_pan():
    synth = WaveSynth()
    # panning a stereo source:
    wave = Sample("samples/SOS 020.wav").clip(6, 12).normalize().fadein(0.5).fadeout(0.5).lock()
    osc = Sine(0.4)
    panning = wave.copy().pan(lfo=osc).fadeout(0.2)
    with Output.for_sample(panning) as out:
        out.play_sample(panning)
        out.wait_all_played()
    # panning a generated mono source:
    fm = Sine(0.5, 0.1999, bias=0.2)
    wave = synth.triangle(220, 5, fm_lfo=fm).lock()
    osc = Sine(0.4)
    panning = wave.copy().pan(lfo=osc).fadeout(0.2)
    with Output.for_sample(panning) as out:
        out.play_sample(panning)
        out.wait_all_played()
Example 16
    def mix(self, song):

        mixed = Sample().make_32bit()
        time_per_index = 60.0 / song.bpm / song.ticks

        notes = max(len(song.melody), len(song.drums))

        for i in range(notes):

            timestamp = time_per_index * i

            try:
                chord = song.melody[i]
            except (IndexError, TypeError):
                pass
            else:
                if chord != REST_NOTE:
                    if isinstance(chord, list):
                        group = chord[:5]    # play at most 5 notes of a chord
                    else:
                        group = [chord]
                    for note in group:
                        sample = self.samples[f'lead_{note}']
                        if len(group) > 1:
                            # lower the volume as more notes sound at once
                            volume = [1, 0.9, 0.65, 0.6, 0.5][len(group) - 1]
                            mixed.mix_at(timestamp, sample.at_volume(volume))
                        else:
                            mixed.mix_at(timestamp, sample)

            try:
                beat = song.drums[i]
            except (IndexError, TypeError):
                pass
            else:
                if beat != REST_NOTE:
                    for instrument in beat[:5]:
                        mixed.mix_at(timestamp, self.samples[instrument])

        return mixed
Example 17
# Playing a long sample or music file doesn't map nicely to this pattern.
# Instead, just play it _as a single huge sample_ where the sample itself
# takes care of dynamically producing its audio data chunks.
print("Streaming mp3 using realtime mixer...")
counter = 1


def played_callback(sample):
    global counter
    print(" played sound chunk", counter, end="\r")
    counter += 1


with AudiofileToWavStream("example_mixes/track3.mp3") as wavstream:
    sample = StreamingSample(wavstream, wavstream.name)
    hihat = Sample("samples/909_hihat_closed.wav").normalize()
    with Output(mixing="mix", frames_per_chunk=afmt.rate//10) as out:
        out.register_notify_played(played_callback)
        # as an example, we show the capability of real time mixing by adding some other samples in the timeline
        out.play_sample(hihat, delay=0.0)
        out.play_sample(hihat, delay=0.5)
        out.play_sample(hihat, delay=1.0)
        out.play_sample(hihat, delay=1.5)
        out.play_sample(sample, delay=2.0)
        out.wait_all_played()    # the mixer itself takes care of grabbing new data as needed

# ** Streaming a large mp3 file using the sequential mixing output **
# This is more efficient for just playing large music files,
# and can be done by simply playing sample chunks one after another.
print("Streaming mp3 using sequential mixer...")
with AudiofileToWavStream("example_mixes/track3.mp3") as wavstream:
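    # (the listing is truncated here; a minimal sketch of the likely body,
    #  reusing only the StreamingSample / Output calls shown earlier above)
    sample = StreamingSample(wavstream, wavstream.name)
    with Output(mixing="sequential", queue_size=2) as out:
        out.play_sample(sample)
        out.wait_all_played()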
Example 18
"""
Plays a couple of samples each at a certain delayed time.
Written by Irmen de Jong ([email protected]) - License: GNU LGPL 3.
"""

import os
import time
from synthplayer.sample import Sample
from synthplayer.playback import Output


s1 = Sample("samples/909_clap.wav").normalize()
s2 = Sample("samples/909_hi_tom.wav").normalize()
s3 = Sample("samples/909_ride.wav").normalize()
s4 = Sample("samples/Drop the bass now.wav").normalize()
s5 = Sample("samples/909_snare_drum.wav").normalize()
s6 = Sample("samples/909_hihat_closed.wav").normalize()
s6_soft = s6.copy().amplify(0.2)

with Output(mixing="sequential", queue_size=3) as out:
    print("\nPlaying samples with sequential mixing mode.")
    print("This takes care of playing samples only if the previous one finished,")
    print("but you cannot mix any sounds. It's ideal for playback of a single sound source,")
    print("such as an audio clip or audio stream that comes in chunks.")
    out.play_sample(s1)
    out.play_sample(s2)
    out.play_sample(s3)
    out.play_sample(s4)
    out.play_sample(s5)
    out.play_sample(s5)
    out.play_sample(s5)