Example #1
    def test_sub_soundbuffers(self):
        snd = dsp.buffer([1, 2, 3])
        self.assertEqual(len(snd), 3)
        self.assertEqual(snd - 2, dsp.buffer([-1, 0, 1]))
        self.assertEqual(snd, dsp.buffer([1, 2, 3]))

        self.assertEqual(snd - dsp.buffer([1, 3, 5]), dsp.buffer([0, -1, -2]))
        self.assertEqual(snd, dsp.buffer([1, 2, 3]))

        self.assertEqual(dsp.buffer([1, 3, 5]) - snd, dsp.buffer([0, 1, 2]))
        self.assertEqual(snd, dsp.buffer([1, 2, 3]))

        snd -= 2
        self.assertEqual(snd, dsp.buffer([-1, 0, 1]))
Example #2
    def test_add_soundbuffers(self):
        snd = dsp.buffer([1,2,3])
        self.assertEqual(len(snd), 3)
        self.assertEqual(snd + 2, dsp.buffer([3,4,5]))
        self.assertEqual(snd, dsp.buffer([1,2,3]))

        self.assertEqual(snd + dsp.buffer([1,3,5]), dsp.buffer([1,2,3,1,3,5]))
        self.assertEqual(snd, dsp.buffer([1,2,3]))

        self.assertEqual(dsp.buffer([1,3,5]) + snd, dsp.buffer([1,3,5,1,2,3]))
        self.assertEqual(snd, dsp.buffer([1,2,3]))

        snd += 2
        self.assertEqual(snd, dsp.buffer([3,4,5]))
Example #3
    def test_mul_soundbuffers(self):
        snd = dsp.buffer([1, 2, 3])
        self.assertEqual(len(snd), 3)
        self.assertEqual(snd * 2, dsp.buffer([2, 4, 6]))
        self.assertEqual(snd, dsp.buffer([1, 2, 3]))

        self.assertEqual(snd * dsp.buffer([1, 3, 5]), dsp.buffer([1, 6, 15]))
        self.assertEqual(snd, dsp.buffer([1, 2, 3]))

        self.assertEqual(dsp.buffer([1, 3, 5]) * snd, dsp.buffer([1, 6, 15]))
        self.assertEqual(snd, dsp.buffer([1, 2, 3]))

        snd *= 2
        self.assertEqual(snd, dsp.buffer([2, 4, 6]))
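
Read together, Examples #1-#3 sketch the arithmetic behaviour of `dsp.buffer`: a scalar operand is applied element-wise, `+` between two buffers concatenates them, `-` and `*` between buffers operate element-wise, and none of the plain operators mutate the original buffer. A minimal sketch restating those assertions outside the test harness (assuming `pippi` is installed and that `dsp.buffer` accepts a plain list, as the tests do):

from pippi import dsp

snd = dsp.buffer([1, 2, 3])

assert snd + 2 == dsp.buffer([3, 4, 5])                               # scalar ops are element-wise
assert snd + dsp.buffer([1, 3, 5]) == dsp.buffer([1, 2, 3, 1, 3, 5])  # buffer + buffer concatenates
assert snd - dsp.buffer([1, 3, 5]) == dsp.buffer([0, -1, -2])         # buffer - buffer is element-wise
assert snd * dsp.buffer([1, 3, 5]) == dsp.buffer([1, 6, 15])          # buffer * buffer is element-wise
assert snd == dsp.buffer([1, 2, 3])                                   # the original buffer is unchanged

snd += 2  # augmented assignment rebinds the name to the new buffer
assert snd == dsp.buffer([3, 4, 5])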
Example #4
def rank_score(r):
    params = rToParams(r)
    out = dsp.buffer(length=1, channels=1)
    s = Synth(params)
    out.dub(s.buff, params.start)
    a = memToAud(out)
    try:
        im = audToImage(a, 128)
    except Exception:
        return [], False
    z = librosa.util.normalize(im)

    z = (((z - z.min()) / (z.max() - z.min())) * 253).astype(np.uint8)
    zi = Image.fromarray(z)
    z = t(zi)
    images = z.reshape([1, 1, 128, 128])
    dimg = images.to(device)
    outputs = cnn(dimg)
    _, predicted = torch.max(outputs, 1)
    o = outputs.cpu().detach().numpy()[0]
    o_norm = o - min(o)
    o_norm = o_norm / sum(o_norm)
    score_dict = dict(zip(classes, o_norm))
    #ranks based on score
    ranks = 1 + len(classes_ranked) - ss.rankdata(o_norm)
    rank_dict = dict(zip(classes_ranked, ranks))
    df = pd.concat([
        pd.DataFrame.from_dict([rank_dict]),
        pd.DataFrame.from_dict([score_dict]),
        paramToDF([params])
    ], axis=1)

    return df.iloc[0], True
Example #5
    def make_melody(self, sequence, name):
        out = dsp.buffer()

        pos = 0
        beat = 0.05
        pause = 0.5

        for keys in sequence:

            notes = self.keys_to_notes(keys=keys)
            print("Playing the following notes: {}".format(notes))

            frequencies = [
                self.get_frequency(key=tone[:-1], octave=int(tone[-1]))
                for tone in notes
            ]
            print("Corresponding to the frequencies: {}".format(frequencies))

            speeds = self.get_speeds(frequencies)

            for speed in speeds:
                # Create a pitch-shifted copy of the original guitar
                tone = self.base_tone.speed(speed)

                # Dub it into the output buffer at the current position in seconds
                out.dub(tone, pos)

                # Now move the write position forward <beat> seconds
                pos += beat
            pos += pause

        # Save this output buffer
        out.write('../output/harmonic/renders/melody_{}.wav'.format(name))
Example #6
def rush(snd):
    out = dsp.buffer()
    length = random.randint(4410, 44100 * 3)
    numbeats = random.randint(16, 64)
    reverse = random.choice([True, False])
    wintype = random.choice(
        ['sine', 'tri', 'kaiser', 'hann', 'blackman', None])
    wavetable = None if wintype is not None else [
        random.random() for _ in range(random.randint(3, 10))
    ]
    pattern = rhythm.curve(numbeats=numbeats,
                           wintype=wintype,
                           length=length,
                           reverse=reverse,
                           wavetable=wavetable)
    minspeed = random.triangular(0.15, 2)

    pan = random.random()

    for onset in pattern:
        hit = snd * random.triangular(0, 0.5)
        hit = hit.speed(random.triangular(minspeed, minspeed + 0.08))
        hit = hit.pan(pan)
        out.dub(hit, onset)

    return out
Example #7
def dub2(songid1, songid2, dist_value, posi, var):
    out = dsp.buffer()
    dubhead = 0
    #filename = Song.query.filter_by(id=songid1).first().filename
    #audio = dsp.read(os.path.join(app.instance_path, filename))
    labels2 = [i.note for i in Beat.query.filter_by(song_id=songid1)]
    ar = dist(dist_value, posi)

    for e, i in enumerate(labels2):
        while dubhead < 60:
            rstart = [s.start for s in Beat.query.filter_by(note=i)]
            rend = [s.end for s in Beat.query.filter_by(note=i)]
            source = [s.song_id for s in Beat.query.filter_by(note=i)]
            rpool = [(rstart[i], rend[i], source[i])
                     for i in range(0, len(rstart))]

            sl = random.choice(rpool)
            bl = int(sl[1] - sl[0])
            l = (sl[1] + (bl * np.random.choice(16, p=ar)))
            filename = Song.query.filter_by(id=sl[2]).first().filename
            audio = dsp.read(os.path.join(app.instance_path, filename))
            a = audio[sl[0]:l]
            stime = librosa.samples_to_time(len(a), sr=44100)
            #var = 0.5
            a = a.taper((stime / 2) * var)
            out.dub(a, dubhead)
            dubhead += stime - ((stime / 2) * var)
    return out
Example #8
def makelayer(speeds):
    snd = dsp.read('harps/harp_006.wav')
    length = int(60 * 1000 * sr) * 2
    out = dsp.buffer(length=1)
    speed = random.choice(speeds) * random.choice([0.125, 0.25, 0.5, 1])
    s = snd.speed(speed)
    panpos = random.random()
    pulselength = int(random.triangular(1, 80) * sr)
    numpulses = length // pulselength
    tablesize = numpulses // 100
    ptable = interpolation.linear([ random.random() for _ in range(tablesize) ], numpulses)
    ftable = wavetables.window('line', numpulses)

    print('rendering speed: %s pan: %s len: %s num: %s' % (speed, panpos, pulselength, numpulses))
    osc = oscs.Osc(wavetable='tri')

    for i in range(numpulses-1):

        start = ptable[i] * len(s) - pulselength
        bit1 = s.cut(start if start > 0 else 0, pulselength)

        #bit2 = osc.play(length=pulselength, freq=100 * (ftable[i] + 1), amp=0.5)
        #bit1 = bit1 * bit2

        bit1 = bit1.env('sine')

        out.dub(bit1.pan(panpos) * 0.5, i * (pulselength//2))

    print('done      speed: %s pan: %s len: %s num: %s' % (speed, panpos, pulselength, numpulses))

    return out
Example #9
    def offline_render(self):
        print('BEGIN RENDER')
        self.rendering = True
        notes = []
        maxlength = 0
        for notelane in self.lanes.notes:
            for noteindex, note in enumerate(notelane.notes):
                notes += [(note.onset, note.length, note.freq)]
                maxlength = max(maxlength, note.onset + note.length)

        out = dsp.buffer(length=maxlength)

        # load instrument
        manager = mp.Manager()
        bus = manager.Namespace()
        bus.stop_all = manager.Event() # voices
        bus.shutdown_flag = manager.Event() # render & analysis processes
        bus.stop_listening = manager.Event() # midi listeners
        instrument = orc.load_instrument('default', 'orc/pianotone.py', bus)

        for note in notes:
            params = {'length': float(note[1]), 'freq': float(note[2])}
            ctx = instrument.create_ctx(params)
            generator = instrument.renderer.play(ctx)
            for snd in generator:
                out.dub(snd, float(note[0]))

        # render
        out.write('pianoroll_render.wav')
        self.sndfile = SoundLoader.load('pianoroll_render.wav')
        if self.sndfile:
            self.sndfile.play()
        self.playhead_clock = Clock.schedule_interval(self.update_playhead, 0.01)
        self.rendering = False
        print('DONE RENDERING')
Example #10
    def test_create_bar(self):
        length = 1
        out = dsp.buffer(length=length)

        params = [(0.21, 1, 0), (0.3, 0.9, 0.5), (0.22, 0.8, 1)]

        for beat, inc, pan in params:
            pos = 0
            stiffness = 280
            while pos < length:
                duration = dsp.rand(1, 4)
                decay = dsp.rand(0.1, 10)
                velocity = dsp.rand(500, 2000)
                barpos = dsp.rand(0.2, 0.8)
                width = dsp.rand(0.1, 0.8)
                stiffness = max(1, stiffness)

                note = Bar(decay=decay, stiffness=stiffness, velocity=velocity, barpos=barpos, width=width).play(duration).env('hannout').pan(pan)
                out.dub(note, pos)
                pos += beat
                stiffness -= inc

        out = fx.norm(out, 1)

        out.write('tests/renders/osc_bar.wav')
Example #11
def mix(ingredients, length):
    out = dsp.buffer(length=length)
    for ingredient in ingredients:
        pos = dsp.rand(0, length - ingredient.dur)
        out.dub(ingredient, pos)

    out = fx.norm(out, 0.5)

    return out
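
Example #11 is the smallest complete version of the workflow most of the longer snippets follow: allocate a fixed-length buffer with `dsp.buffer(length=...)`, dub each source at a position given in seconds, then normalize with `fx.norm`. A hedged usage sketch for this `mix` function (the input sounds and the 10-second length are illustrative, not from the example):

from pippi import dsp

# Illustrative source material; any SoundBuffers with a .dur attribute will do
ingredients = [
    dsp.read('sounds/snare.wav'),
    dsp.read('sounds/snare.wav').speed(0.5),
]

bed = mix(ingredients, 10)   # scatter each ingredient somewhere inside a 10 second buffer
bed.write('mix.wav')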
Example #12
def clap(amp, length):
    if amp == 0:
        return dsp.buffer(length=length)

    # Two layers of noise: lowmid and high
    out = dsp.mix([ bln(int(length * 0.2), 600, 1200), bln(int(length * 0.2), 7000, 9000) ])
    out = out.env('phasor').pad(end=length - out.dur)

    return out
Example #13
def makesnares():
    out = dsp.buffer(length=1)
    beat = length
    numbeats = 16 * 4 * lenmult
    pat = rhythm.topattern('..x...x...x...xx..x...x...x....x')
    onsets = rhythm.onsets(pat, beat, numbeats)

    for i, pos in enumerate(onsets):
        if random.random() > 0.75:
            s = dsp.buffer(length=1)
            p = rhythm.curve(numbeats=random.randint(4, 10), wintype='random', length=beat*random.randint(2,3))
            for o in p:
                s.dub(snare.speed(random.triangular(0.9,1.1)) * random.random(), o)

            out.dub(s, pos)

        out.dub(snare, pos)

    return out
Example #14
def make(drum, pat, lengths):
    events = [ [pat[i], lengths[i]] for i in range(len(pat)) ]

    if len(events) > 0:
        out = dsp.join([ drum(event[0] * 0.3, event[1]) for event in events ])
    else: 
        print(lengths, pat)
        out = dsp.buffer(length=sum(lengths))

    return out
Example #15
    def test_dub_overflow(self):
        sound = dsp.read('tests/sounds/guitar1s.wav')
        out = dsp.buffer()

        numdubs = 3
        maxpos = 4
        for _ in range(numdubs):
            pos = random.triangular(0, maxpos)
            out.dub(sound, pos)

        self.assertTrue(len(out) <= (maxpos * out.samplerate) + len(sound))
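
Example #15 implies that `dub` may write past the current end of an empty `dsp.buffer()`: the buffer grows to fit, and the test only bounds the final length by the last dub position plus the length of the source. A minimal sketch relying on that behaviour (the 2.5-second offset is illustrative; the sound path is the one used in the test):

from pippi import dsp

sound = dsp.read('tests/sounds/guitar1s.wav')

out = dsp.buffer()      # start from an empty buffer
out.dub(sound, 2.5)     # dubbing 2.5 seconds in expands the buffer to fit
out.dub(sound, 0)       # earlier dubs mix into the audio already there

# Roughly 2.5 seconds plus the length of the source
print(len(out) / out.samplerate)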
Example #16
def makekicks():
    out = dsp.buffer(length=1)
    beat = length
    numbeats = 16 * 4 * lenmult
    pat = rhythm.topattern('x...')
    onsets = rhythm.onsets(pat, beat, numbeats)

    for i, pos in enumerate(onsets):
        out.dub(kick.speed(random.triangular(0.8,1)) * 1.4, pos)

    return out
Example #17
def snare(amp, length):
    if amp == 0:
        return dsp.buffer(length=length)

    # Two layers of noise: lowmid and high
    out = dsp.mix([ bln(int(length * 0.2), 700, 3200, 'impulse'), bln(int(length * 0.01), 7000, 9000) ])
    
    out = out.env('phasor')
    out = out.pad(end=length - out.dur)

    return out
Example #18
    def test_create_mono_buffer_from_wavetable(self):
        wt = dsp.wt('sine', wtsize=4096)
        self.assertTrue(len(wt) == 4096)

        snd = dsp.buffer(wt)
        self.assertTrue(len(snd) == 4096)
        self.assertTrue(snd[100][0] != 0)

        snd = SoundBuffer(wt)
        self.assertTrue(len(snd) == 4096)
        self.assertTrue(snd[100][0] != 0)
Example #19
    def test_create_tukey(self):
        length = 10
        shape = dsp.win(shapes.win('sine', length=3), 0, 0.5)
        chord = tune.chord('i9', octave=2)
        out = dsp.buffer(length=length)
        for freq in chord:
            freq = dsp.wt('sinc', freq, freq*4)
            l = Tukey(freq=freq, shape=shape).play(length)
            l = l.pan(dsp.rand())
            out.dub(l)
        out = fx.norm(out, 0.8)
        out.write('tests/renders/osc_tukey.wav')
Example #20
    def bass(amp, length, oct=2):
        if amp == 0:
            return dsp.buffer(length=length)

        bass_note = dsp.choice(scale) * 0.25

        stack = Waveset(rmx, limit=dsp.randint(5, 20), offset=dsp.randint(0, 100))
        stack.normalize()
        out = oscs.Pulsar2d(stack, windows=['sine'], freq=bass_note).play(length) * dsp.rand(0.02, 0.2)
        out = fx.lpf(out, bass_note*2)

        return out.env('hannout').taper(dsp.MS*10)
Example #21
def makehats():
    out = dsp.buffer(length=1)
    beat = length // 2
    numbeats = 16 * 8 * lenmult
    pat = rhythm.topattern('x xxx xx')
    onsets = rhythm.onsets(pat, beat, numbeats)
    onsets = rhythm.swing(onsets, 0.5, beat)

    for i, pos in enumerate(onsets):
        out.dub(hat.speed(random.triangular(1.5, 2)) * random.triangular(0.35, 0.45), pos)

    return out
Example #22
def render(ttfeatures, ttfit, db, freq=1.0):
    out = dsp.buffer(length=512 * ttfeatures.shape[0] / sr, samplerate=sr)
    skip = sr // int((1 / freq) * 512)
    steps = ttfit.shape[0] // skip
    for i in range(steps):
        j = i * skip
        index = int(ttfit[j])
        s = get_samples(db["filenames"][index])
        t = j * 512 / sr
        # print(t,index)
        out.dub(s, t)
    return out
Example #23
def arp(i):
    cluster = dsp.buffer()
    length = random.randint(44100, 44100 + 22050)
    numnotes = random.randint(3, 12)
    onsets = rhythm.curve(numnotes, dsp.RND, length)
    chord = chords[i % len(chords)]
    freqs = tune.chord(chord, octave=random.randint(1, 3))
    for i, onset in enumerate(onsets):
        freq = freqs[i % len(freqs)]
        note = samp.play(freq)
        note = note.pan(random.random())
        note *= random.triangular(0, 0.125)
        cluster.dub(note, onset / cluster.samplerate)

    return cluster
Example #24
def blips(length, pos, total_length):
    print('BLIPS', length, pos)
    notes = [ rhodes.rhodes(dsp.rand(4, 7), freq, 0.3) for freq in scale ]

    the_blip = notes[0].speed(2.0 * dsp.randint(1, 3)) * 0.4
    blip = dsp.mix([ the_blip, notes[0].speed(1.5 * dsp.randint(1, 4)) * 0.4 ])

    out = dsp.buffer(length=length)
    for _ in range(dsp.randint(2, 6)):
        ba = blip.cut(dsp.rand(0, blip.dur / 4), length / 2).pad(dsp.rand(0, length))
        bb = blip.cut(dsp.rand(0, blip.dur / 4), length / 2).pad(dsp.rand(0, length))
        b = dsp.mix([ba.pan(dsp.rand()), bb.pan(dsp.rand())]).taper(0.01)
        b = fx.crush(b)
        b = mixdrift(b)
        out.dub(b)

    return out
Example #25
def dub(df1, df2, audio):
    out = dsp.buffer()
    dubhead = 0
    while dubhead < 210:
        for e, i in enumerate(df2.labels[:1200]):
            rpool = list(df1['startstop'][df1['labels'] == i])
            try:
                sl = random.choice(rpool)
            except IndexError:
                print(i)
                continue
            if audio[sl[0]:sl[1]+int((sl[1]-sl[0])/2)]:
                a = audio[sl[0]:sl[1]+int((sl[1]-sl[0])/2)]
            else:
                a = audio[sl[0]:sl[1]]
            out.dub(a, dubhead)
            dubhead += librosa.samples_to_time((sl[1]-sl[0]), sr=44100)
            print("done dubbing number " + str(e) + " of " + str(len(df2.labels)))
    return out
Example #26
def makesynths():
    out = dsp.buffer(length=1)
    beat = length // 2
    numbeats = 16 * 8 * lenmult
    pat = rhythm.topattern('x..x....')
    onsets = rhythm.onsets(pat, beat, numbeats)

    for i, pos in enumerate(onsets):
        chord = chords[i % len(chords)]
        freqs = tune.chord(chord)

        for freq in freqs:
            lfo = random.random() * i * 0.5 
            note = makenote(random.randint(length // 4, length * 3), freq * 0.25 * 2**random.randint(0, 4), lfo, factor=random.randint(1, 10))
            note = note.env('phasor')
            note = note.pan(random.random())
            out.dub(note * 0.65, pos)

    return out
Example #27
def makearps():
    out = dsp.buffer(length=1)
    beat = length // 2
    numbeats = 16 * 8 * lenmult
    pat = rhythm.topattern('xxxxxxxx')
    onsets = rhythm.onsets(pat, beat, numbeats)

    osc = oscs.Osc('tri')

    for i, pos in enumerate(onsets):
        chord = chords[i//4 % len(chords)]
        freqs = tune.chord(chord)
        freq = freqs[i % len(freqs)]
        amp = random.triangular(0.1, 0.2)
        pw = random.triangular(0.15, 1)

        note = osc.play(beat, freq * 2, amp * 0.25, pw)
        note = note.env('phasor')
        out.dub(note, pos)

    return out
Example #28
def makeblips():
    out = dsp.buffer(length=1)
    beat = length // 2
    numbeats = 16 * 8 * lenmult
    pat = rhythm.topattern('x..x..x.')
    onsets = rhythm.onsets(pat, beat, numbeats)

    for i, pos in enumerate(onsets):
        chord = chords[i*2 % len(chords)]
        freqs = tune.chord(chord)

        lfo = random.random() * 2
        freq = freqs[0] * 0.5
        if i % 8 == 0:
            freq *= 2

        note = makenote(length, freq, lfo, factor=random.randint(1, 10))
        note = note.env('phasor')
        out.dub(note * 2, pos)

    return out
Example #29
def hihat(amp, length):
    if amp == 0:
        return dsp.buffer(length=length)

    def hat(length):
        lowf = dsp.rand(6000, 11000)
        highf = dsp.rand(11000, 17000)

        if dsp.rand() > 0.5:
            length *= 0.05
        
        out = bln(length, lowf, highf)
        out = out.env(dsp.choice(['rsaw', 'phasor', 'hannout']))

        return out

    if dsp.rand() > 0.5:
        out = dsp.join([ hat(length / 2), hat(length / 2) ])
    else:
        out = hat(length)

    return out
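
Examples #12, #17, #20 and #29 all use the same guard: when `amp == 0` they return `dsp.buffer(length=length)`, i.e. a silent buffer of the requested length, so the caller can dub whatever comes back without special-casing muted events. A small sketch of the idiom, borrowing the band-limited noise generator that Example #32 uses (the `hit` helper, its frequencies and the pattern below are illustrative):

from pippi import dsp, noise

def hit(amp, length):
    if amp == 0:
        # Muted: still return a buffer of the right length so callers can dub it unconditionally
        return dsp.buffer(length=length)
    return noise.bln('sine', length, 3000, 9000).env('phasor') * amp

out = dsp.buffer(length=2)
for i, amp in enumerate([1, 0, 0.5, 0]):
    out.dub(hit(amp, 0.2), i * 0.5)

out.write('hit_pattern.wav')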
Example #30
    def test_fft_transform(self):
        snd = dsp.read('tests/sounds/guitar1s.wav')
        snd2 = dsp.read('tests/sounds/LittleTikes-B1.wav').cut(0, 1)
        mod = dsp.buffer(dsp.win('sine', wtsize=len(snd))).remix(1).remix(2)

        # Transform
        real1, imag1 = fft.transform(snd)
        real2, imag2 = fft.transform(snd2)

        # Do stuff
        imag = real1 * real2
        real = imag1 * imag2

        mag, arg = fft.to_polar(real, imag)

        #mag = fx.lpf(mag, 100)
        real, imag = fft.to_xy(mag, arg)

        # Inverse Transform
        out = fft.itransform(real, imag)
        out = fx.norm(out, 1)
        out.write('tests/renders/fft_transform.wav')
Example #31
    def test_onsets(self):
        snd = dsp.read('tests/sounds/rain.wav')

        onsets = mir.onsets(snd, 'specflux')
        self.assertEqual(len(onsets), 7)

        onsets = mir.onsets(snd, 'specflux', seconds=False)
        print(onsets)
        self.assertEqual(len(onsets), 7)

        segments = mir.segments(snd, 'specflux')
        self.assertEqual(len(segments), 7)

        out = dsp.buffer(length=7)
        pos = 0
        count = 1
        for segment in segments:
            segment = fx.norm(segment.env('pluckout').taper(0.05), 1)
            segment.write('tests/renders/mir_segment%02d.wav' % count)
            out.dub(segment, pos)
            pos += 1
            count += 1
        out.write('tests/renders/mir_segments.wav')
Example #32
    pluckout
) * 0.5  # Also multiply by 0.5 to reduce the amplitude of the signal by half
hat.write('docs/tutorials/renders/002-plucked-hat.flac')


def makehat(length=dsp.MS * 80):
    lowhz = dsp.win('rnd', 9000, 11000)
    highhz = dsp.win('rnd', 12000, 14000)
    return noise.bln('sine', length, lowhz, highhz).env(pluckout) * 0.5


lfo = dsp.win('sinc', 0.1, 1)  # Hat lengths between 100ms and 1s over a sinc window
lfo.graph('docs/tutorials/figures/002-sinc-win.png', label='sinc window')

out = dsp.buffer(length=30)

elapsed = 0
while elapsed < 30:
    pos = elapsed / 30  # position in the buffer between 0 and 1

    # Sample the current interpolated position in the curve to get the hat length
    hatlength = lfo.interp(pos)
    hat = makehat(hatlength)

    # Dub the hat into the output buffer at the current time,
    # then move the position forward a half second so we can do it all again
    out.dub(hat, elapsed)
    elapsed += 0.5

out.write('docs/tutorials/renders/002-hats-on-ice.flac')
Example #33
    return osc.play(length)

def makebass(length, freq, lfo=0.5, amp=0.05, factor=10):
    wavetable = 'square'
    wtsize = 4096
    ftable = [ v * factor + 1 for v in wavetables.wavetable('random', wtsize) ]
    factors = [ v * ftable[i] for i, v in enumerate(wavetables.wavetable('sine', wtsize)) ]
    osc = oscs.Fold(wavetable, factors, freq, lfo, amp)
    return osc.play(length)


chords = []
for c in 'I I6 IV6 IV69 I I6 IV6 IV69 iii vi ii7 V11'.split(' '):
    chords += [ c ] * 4

out = dsp.buffer(length=1)
length = 44100 // 4

lenmult = 4

hat = dsp.read('manys/many_300.wav')
snare = dsp.read('manys/many_400.wav')
kick = dsp.read('manys/many_500.wav')
kick = kick.speed(0.8)

def makesynths():
    out = dsp.buffer(length=1)
    beat = length // 2
    numbeats = 16 * 8 * lenmult
    pat = rhythm.topattern('x..x....')
    onsets = rhythm.onsets(pat, beat, numbeats)
Example #34
import random
import time
from pippi import dsp, sampler, tune, rhythm
import os

PATH = os.path.dirname(os.path.realpath(__file__))
print(__file__)
start_time = time.time()

samp = sampler.Sampler('%s/sounds/harpc2.wav' % PATH, 'c2')

out = dsp.buffer(length=32)
chords = ['iii', 'vi', 'ii', 'V']


def arp(i):
    cluster = dsp.buffer()
    length = random.randint(44100, 44100 + 22050)
    numnotes = random.randint(3, 12)
    onsets = rhythm.curve(numnotes, dsp.RND, length)
    chord = chords[i % len(chords)]
    freqs = tune.chord(chord, octave=random.randint(1, 3))
    for i, onset in enumerate(onsets):
        freq = freqs[i % len(freqs)]
        note = samp.play(freq)
        note = note.pan(random.random())
        note *= random.triangular(0, 0.125)
        cluster.dub(note, onset / cluster.samplerate)

    return cluster
Example #35
from pippi import dsp
import mml2music
import os

with open('input.mml', 'r') as file:
    mml = file.read().lower()

parser = mml2music.MMLParser()

track = parser.get_notes(mml, max_length=-1, max_notes=200)

print(f'Parsed {len(track.notes)} notes.\nTotal length: {track.position}')

track.reverse()

out = dsp.buffer(channels=1, samplerate=48000)

writer = mml2music.Writer(
    f'{os.path.dirname(os.path.abspath(__file__))}/sounds/flute', out)

writer.compose(track)

writer.export('renders/output48_reversed.wav')

print('Done!')
Example #36
import random
import time
from pippi import dsp, oscs, tune, wavetables
import os

PATH = os.path.dirname(os.path.realpath(__file__))
print(__file__)
start_time = time.time()

tlength = 20
out = dsp.buffer(length=tlength)
pos = 0
count = 0
count2 = 0

def make_note(freq, amp, length):
    lfo = dsp.SINE
    lfo_freq = random.triangular(0.001, 15)

    # Frequency modulation wavetable
    mod = wavetables.randline(random.randint(10, 30))

    # Frequency of the freq mod wavetable
    mod_freq = random.triangular(0.001, 2)

    # Range / depth of the freq mod wavetable
    mod_range = random.triangular(0, 0.025)

    pulsewidth = random.random()

    # Fill up a stack of wavetables to give to the Osc.
Example #37
    A = dsp.rand(minval, maxval)
    B = dsp.rand(minval, maxval)

    numbranches = numgrains // grainsperbranch

    if A > B:
        A, B = B, A

    trunk = dsp.win('rsaw', A, B)
    trunk.graph('trunk-%s-rsaw.png' % numgrains, y=(0,1))

    branches = []
    for _ in range(numbranches):
        bD = dsp.rand(0.001, 0.999) # delta
        bA = dsp.rand(max(A - (bD/2), 0), min(A + (bD/2), 1))
        branches += [ dsp.buffer(dsp.win('rsaw', bA, B), channels=1) ]

    branches = dsp.stack(branches)
    branches.graph('branches-%s-rsaw.png' % numgrains, y=(0,1))

    curve = shapes.win('hann', length=0.1)
    trunk = dsp.win(curve, A, B)
    trunk.graph('trunk-%s-randhann.png' % numgrains, y=(0,1))

    branches = []
    for _ in range(numbranches):
        bD = dsp.rand(0.001, 0.999) # delta
        bA = dsp.rand(max(A - (bD/2), 0), min(A + (bD/2), 1))
        branches += [ dsp.buffer(dsp.win(curve, bA, B), channels=1) ]

    branches = dsp.stack(branches)
Example #38
from pippi import dsp, rhythm
import random

# Create an empty buffer to dub sounds into
out = dsp.buffer()

# Load a snare drum sample from the `sounds` directory
snare = dsp.read('sounds/snare.wav')

# Make a random number of passes dubbing into the
# output buffer. On each pass...
numpasses = random.randint(4, 8)

for _ in range(numpasses):
    # Set the length of the rhythm curve in frames
    length = random.randint(44100, 44100 * 30)

    # Pick a random number of beats / events
    numbeats = random.randint(16, 64)

    # If `rhythm.curve` gets reverse=True, the window function will be
    # read in reverse -- high to low.
    reverse = random.choice([True, False])

    # Randomly choose a window function for `rhythm.curve`
    wintype = random.choice(
        ['sine', 'tri', 'kaiser', 'hann', 'blackman', None])

    # If wintype is None, generate a random list of values to use as a wavetable
    wavetable = None if wintype is not None else [
        random.random() for _ in range(random.randint(3, 10))
Example #39
    1, 
    math.sqrt(5) * 0.5,
    math.sqrt(6) * 0.5, 
    math.sqrt(7) * 0.5, 
    math.sqrt(2), 
    math.sqrt(9) * 0.5, 
    math.sqrt(10) * 0.5, 
    math.sqrt(11) * 0.5, 
    math.sqrt(3), 
    math.sqrt(13) * 0.5, 
    math.sqrt(14) * 0.5, 
    math.sqrt(15) * 0.5, 
    2, 
]


out = dsp.buffer()
osc = oscs.Osc()
osc.amp = 0.5
length = 44100//10
pos = 0

for freq in scale * 8:
    osc.freq = freq * 330
    note = osc.play(length)
    note = note.env('phasor')
    out.dub(note, pos)
    pos += int(length * 0.8)

out.write('howdoesit.wav')