def bell(length=22050, freq=220, amp=0.5):
    ding = dsp.read('/home/hecanjog/sounds/vibesc1.wav').data
    ding = dsp.amp(ding, dsp.rand(0.5, 0.8))

    bell = dsp.read('/home/hecanjog/sounds/tones/bellc.wav').data
    bell = dsp.amp(bell, dsp.rand(10, 50))
    bell = dsp.amp(bell, 0.3)

    rhodes = dsp.read('/home/hecanjog/sounds/tones/rhodes.wav').data
    rhodes = dsp.transpose(rhodes, 1.2)
    rhodes = dsp.pan(rhodes, dsp.rand())

    glade = dsp.read('/home/hecanjog/sounds/glade.wav').data

    numgs = dsp.randint(2, 6)
    gs = []
    for _ in range(numgs):
        g = dsp.rcut(glade, dsp.mstf(100, 500))
        g = dsp.amp(g, dsp.rand(0.2, 0.5))
        g = dsp.pan(g, dsp.rand())
        g = dsp.transpose(g, dsp.rand(0.15, 0.75))
        gs += [ g ]

    gs = dsp.mix(gs)
    gs = dsp.env(gs, 'phasor')

    clump = dsp.mix([ ding, gs, bell, rhodes ])
    clump = dsp.transpose(clump, freq / tune.ntf('c', octave=4))
    clump = dsp.fill(clump, length, silence=True)
    clump = dsp.env(clump, 'phasor')
    clump = dsp.amp(clump, amp)

    return clump
def test_sandwich_board(self):
    l = dsp.read('tests/sounds/linux.wav')
    g = dsp.read('tests/sounds/guitar1s.wav')

    f = fx.crossover(l, dsp.win('phasor', 0, 1), dsp.rand(0.1, 0.3), dsp.win('rnd', 0, 1)).graph(fontsize=50, label='Weird FX')
    ws = Waveset(g).substitute('sine').graph(fontsize=50, label='Waveset Manipulation')
    ps = oscs.Pulsar2d(freq=dsp.rand(10, 80), pulsewidth=shapes.win('hann')).play(2).graph(
            fontsize=50, label='Pulsar Synthesis')

    wt = shapes.win('hann', length=0.4) * shapes.win('sine') * shapes.win('rnd')
    wt.graph(
        'tests/renders/graph_sandwich_board.png',
        insets=[ps, ws, f],
        width=900,
        height=340,
        label='Pippi: Computer Music With Python',
        stroke=30,
        fontsize=30,
    )

    # For the readme
    shutil.copy('tests/renders/graph_sandwich_board.png', 'banner.png')
def test_wconvolve(self):
    sound = dsp.read('tests/sounds/guitar10s.wav')
    impulse = dsp.read('tests/sounds/LittleTikes-A1.wav')
    out = fx.wconvolve(sound, impulse)
    out.write('tests/renders/fx_wconvolve_guitar_littletikes-0.02.wav')
def test_crush(self):
    snd = dsp.read('tests/sounds/linux.wav')
    out = fx.crush(snd)
    out.write('tests/renders/fx_crush_linux.wav')

    snd = dsp.read('tests/sounds/guitar1s.wav')
    out = fx.crush(snd)
    out.write('tests/renders/fx_crush_guitar.wav')
def test_vspeed(self):
    g = dsp.read('tests/sounds/guitar10s.wav')
    lfo = dsp.win('sine', 4096)
    snd = fx.vspeed(g, lfo, 0.5, 1)
    snd = fx.norm(snd, 1)
    g = dsp.read('tests/sounds/guitar10s.wav')
    snd = snd + g
    snd.write('tests/renders/fx_vspeed.wav')
def test_smear(self):
    dsp.seed()
    g = dsp.read('tests/sounds/guitar10s.wav').rcut(1)
    out = multiband.smear(g)
    out.write('tests/renders/multiband_smear-guitar-0.01.wav')

    dsp.seed()
    g = dsp.read('tests/sounds/living.wav')
    out = multiband.smear(g)
    out.write('tests/renders/multiband_smear-living-0.01.wav')
def test_convolve(self):
    sound = dsp.read('tests/sounds/guitar1s.wav')
    impulse = dsp.read('tests/sounds/LittleTikes-A1.wav')
    out = fx.convolve(sound, impulse)
    out.write('tests/renders/fx_convolve_guitar_littletikes.wav')

    impulse = dsp.win('sinc')
    out = fx.convolve(sound, impulse)
    out.write('tests/renders/fx_convolve_guitar_sinc.wav')
def test_render_image(self):
    length = 30
    degrees = list(range(1, 65))
    freqs = tune.degrees(degrees, octave=2)

    out = hyperupic.sineosc('tests/images/louis.jpg', freqs, length) * 0.6
    out.write('tests/renders/hyperupic-sineosc-render.wav')

    snd = dsp.read('tests/sounds/living.wav')
    out = hyperupic.bandpass('tests/images/louis.jpg', freqs, snd)
    out.write('tests/renders/hyperupic-bandpass-render.wav')

    snd = dsp.read('tests/sounds/guitar1s.wav')
    out = hyperupic.pulsar('tests/images/louis.jpg', freqs, snd, length)
    out.write('tests/renders/hyperupic-pulsar-render.wav')
def test_vdelay(self):
    snd = dsp.read('tests/sounds/guitar10s.wav')
    tlfo = dsp.randline(3, 30, 0, 1)
    lfo = dsp.randline(30, 4096, 0, 1)
    snd = fx.vdelay(snd, lfo, 0.1, 0.75, 0.5)
    snd = fx.norm(snd, 1)
    snd.write('tests/renders/fx_vdelay.wav')
def test_mdelay(self):
    snd = dsp.read('tests/sounds/guitar10s.wav')
    ndelays = 20
    snd = fx.mdelay(snd, [random.triangular(0, 8) for _ in range(ndelays)], 0.7)
    snd = fx.norm(snd, 1)
    snd.write('tests/renders/fx_mdelay.wav')
def makelayer(speeds):
    snd = dsp.read('harps/harp_006.wav')
    length = int(60 * 1000 * sr) * 2
    out = dsp.buffer(length=1)

    speed = random.choice(speeds) * random.choice([0.125, 0.25, 0.5, 1])
    s = snd.speed(speed)
    panpos = random.random()

    pulselength = int(random.triangular(1, 80) * sr)
    numpulses = length // pulselength
    tablesize = numpulses // 100
    ptable = interpolation.linear([ random.random() for _ in range(tablesize) ], numpulses)
    ftable = wavetables.window('line', numpulses)

    print('rendering speed: %s pan: %s len: %s num: %s' % (speed, panpos, pulselength, numpulses))

    osc = oscs.Osc(wavetable='tri')

    for i in range(numpulses-1):
        start = ptable[i] * len(s) - pulselength
        bit1 = s.cut(start if start > 0 else 0, pulselength)
        #bit2 = osc.play(length=pulselength, freq=100 * (ftable[i] + 1), amp=0.5)
        #bit1 = bit1 * bit2
        bit1 = bit1.env('sine')
        out.dub(bit1.pan(panpos) * 0.5, i * (pulselength//2))

    print('done speed: %s pan: %s len: %s num: %s' % (speed, panpos, pulselength, numpulses))

    return out
def play(voice_id):
    bpm = config('bpm')
    beat = dsp.bpm2frames(bpm)
    root = config('key')
    quality = getattr(tune, config('quality'))
    ratios = getattr(tune, config('tune'))

    bar = beat * dsp.randchoose([8, 16, 32])

    groot = tune.ntf('c')
    scale = tune.fromdegrees([1,3,5,6,8,9], root=root, octave=2, ratios=ratios)

    v = dsp.read('sounds/vibesc1.wav').data

    out = ''

    # lens = [ bar / 5, bar / 8, bar / 12 ]
    lens = [ bar / 6, bar / 8, bar / 16 ]

    maxbend = 2
    maxbend = 0.02

    layers = []
    for nlen in lens:
        layer = ''
        nlen /= 2

        note = dsp.transpose(v, (dsp.randchoose(scale) * 2**dsp.randint(0, 3)) / groot)
        note = dsp.fill(note, nlen)
        note = dsp.env(note, 'phasor')
        note = dsp.amp(note, 0.125)

        nbeats = bar / nlen

        for b in range(nbeats):
            b = dsp.pan(note, dsp.rand())
            b = dsp.drift(b, dsp.rand(0, dsp.rand(0.01, maxbend)))

            if dsp.flen(b) < nlen:
                b = dsp.pad(b, 0, nlen - dsp.flen(b))

            # if dsp.rand() > 0.5:
            #     b = dsp.vsplit(b, dsp.flen(b) / 3, dsp.flen(b) / 2)
            #     b = dsp.randshuffle(b)
            #     b = [ dsp.amp(bb, dsp.rand(0.5, 2)) for bb in b ]
            #     b = ''.join(b)

            layer += b

        # layer = dsp.fill(layer, bar)
        layers += [ layer ]

    out = dsp.mix(layers)
    out = dsp.fill(out, bar)

    return out
def play(ctl): param = ctl.get("param") pc = ctl.get("midi").get("pc") pc.setOffset(111) lpd = ctl.get("midi").get("lpd") amp = pc.get(7) snds = ["sounds/melodica.wav", "sounds/rhodes.wav", "sounds/chime.wav", "sounds/bell.wav", "sounds/lap.wav"] # snds = ['sounds/rhodes.wav'] m = dsp.read(dsp.randchoose(snds)).data m = dsp.transpose(m, 0.125) m = dsp.transpose(m, dsp.randchoose([1, 1.5, 2, 3]) * 2 ** dsp.randint(0, 3)) m = dsp.fill(m, dsp.stf(pc.get(15, low=0.125, high=2))) reverse = dsp.randchoose([True, False]) numlayers = dsp.randint(10, 20) numgrains = dsp.randint(pc.geti(8, low=3, high=10), pc.geti(8, low=10, high=20)) minlen = dsp.rand(10, 100) # lenranges = (dsp.rand(10, 20), dsp.rand(50, 1000)) lenranges = (pc.get(15, low=10, high=50), pc.get(15, low=50, high=500)) env = dsp.randchoose(["sine", "hann", "tri", "vary"]) # out = m out = fx.spider(m, numlayers, numgrains, minlen, lenranges, reverse) out = dsp.amp(out, amp) # out = dsp.env(out, 'sine') # out = dsp.alias(out) return out
def __init__(self, audio):
    self.audio = dsp.read(audio)
    self.y, self.sr = librosa.load(audio, sr=44100)
    self._, self.beats = librosa.beat.beat_track(y=self.y, sr=self.sr, units='samples')
    self.beatpairs = [i for i in pairwise(self.beats)]
    self.halfbeats = halftime(self.beats)
    self.halfbeatpairs = [i for i in pairwise(self.halfbeats)]
def play(voice_id):
    bpm = config('bpm')
    key = config('key')
    quality = getattr(tune, config('quality'))
    ratios = getattr(tune, config('tune'))
    beat = dsp.bpm2frames(bpm)

    nlen = beat / dsp.randchoose([4,5,6,7,8,9,10])

    root = 340.0
    target = tune.ntf(key)

    n = dsp.read('sounds/mike.wav').data
    n = dsp.transpose(n, target / root)
    n = dsp.amp(n, 0.4)

    length = dsp.randint(16, 64) * beat
    ngrains = length / nlen

    n = dsp.transpose(n, dsp.randchoose([0.5, 1, 2, 2, 4, 4]))
    n = dsp.split(n, nlen)

    snd = dsp.randchoose(n)
    snd = dsp.env(snd, 'sine')

    grains = [ snd for i in range(ngrains) ]
    grains = [ dsp.pan(grain, dsp.rand()) for grain in grains ]

    out = ''.join(grains)
    out = dsp.env(out, 'sine')
    # out = dsp.pad(out, 0, dsp.stf(dsp.randint(0.5, 3)))

    return out
def dub2(songid1, songid2, dist_value, posi, var):
    out = dsp.buffer()
    dubhead = 0
    #filename = Song.query.filter_by(id=songid1).first().filename
    #audio = dsp.read(os.path.join(app.instance_path, filename))
    labels2 = [i.note for i in Beat.query.filter_by(song_id=songid1)]
    ar = dist(dist_value, posi)

    for e, i in enumerate(labels2):
        while dubhead < 60:
            rstart = [s.start for s in Beat.query.filter_by(note=i)]
            rend = [s.end for s in Beat.query.filter_by(note=i)]
            source = [s.song_id for s in Beat.query.filter_by(note=i)]
            rpool = [(rstart[i], rend[i], source[i]) for i in range(0, len(rstart))]

            sl = random.choice(rpool)
            bl = int(sl[1] - sl[0])
            l = (sl[1] + (bl * np.random.choice(16, p=ar)))

            filename = Song.query.filter_by(id=sl[2]).first().filename
            audio = dsp.read(os.path.join(app.instance_path, filename))

            a = audio[sl[0]:l]
            stime = librosa.samples_to_time(len(a), sr=44100)
            #var = 0.5
            a = a.taper((stime / 2) * var)

            out.dub(a, dubhead)
            dubhead += stime - ((stime / 2) * var)

    return out
def __init__(self, instrument='guitar', octave_offset=-0):
    """
    Initialize class
    :param instrument:
    :param octave_offset:
    """
    self.instrument = instrument
    self.octave_offset = octave_offset

    if self.instrument == 'guitar':
        self.base_tone = dsp.read(
            '/home/nik/Projects/pippi/tests/sounds/guitar1s.wav')  # Tone A
        self.original_freq = tune.ntf('A{}'.format(4 - octave_offset))
        print("Frequency of base tone: {} Hz ".format(self.original_freq))

    self.keys = {
        'A0': 0, 'Bb': 1, 'B': 2, 'C': 3, 'Db': 4, 'D': 5, 'Eb': 6,
        'E': 7, 'F': 8, 'Gb': 9, 'G': 10, 'Ab': 11, 'A': 12
    }
    self.keys_inverted = {value: key for key, value in self.keys.items()}
def play(voice_id):
    tel = bot.getTel()

    b = dsp.read('sounds/birds.wav').data
    b = dsp.split(b, dsp.randint(100, 1000))
    b = b[:dsp.randint(len(b) / 10, len(b))]

    blen = len(b)

    pans = dsp.breakpoint([ dsp.rand() for i in range(dsp.randint(10, 100)) ], blen)
    speeds = dsp.breakpoint([ dsp.rand(0.25, 1.5) for i in range(dsp.randint(10, 100)) ], blen)
    amps = dsp.breakpoint([ dsp.rand(0.05, 1.5) for i in range(dsp.randint(10, 100)) ], blen)

    b = [ dsp.pan(b[i], pans[i]) for i in range(blen) ]
    b = [ dsp.transpose(b[i], speeds[i]) for i in range(blen) ]
    b = [ dsp.amp(b[i], amps[i]) for i in range(blen) ]

    b = dsp.packet_shuffle(b, dsp.randint(4, 30))

    for i, bb in enumerate(b):
        if dsp.rand(0, 100) > 60:
            b[i] = bb * dsp.randint(2, 10)

    out = ''.join(b)

    dsp.log('')
    dsp.log('birds')
    dsp.log(blen)
    dsp.log('%s length: %.2f' % (voice_id, dsp.fts(dsp.flen(out))))

    bot.show_telemetry(tel)

    return out
def play(ctl): param = ctl.get("param") # lpd = ctl.get('midi').get('lpd') # pc = ctl.get('midi').get('pc') # pc.setOffset(111) # r = dsp.read('sounds/roll.wav').data r = dsp.read("sounds/pills.wav").data r = dsp.fill(r, dsp.stf(3)) # tr = pc.get(10, low=0.125, high=4) tr = dsp.rand(0.125, 4) r = dsp.transpose(r, dsp.rand(tr * 0.25, tr * 2)) # r = dsp.amp(r, pc.get(1, low=0, high=10)) r = dsp.amp(r, dsp.rand(0, 10)) reverse = dsp.randchoose([True, False]) # numgrains = pc.geti(2, low=5, high=20) numgrains = dsp.randint(5, 20) # numlayers = pc.geti(3, low=5, high=50) numlayers = dsp.randint(5, 50) # minlen = lpd.get(9, low=10, high=100) minlen = dsp.rand(10, 100) # lenranges = (lpd.get(9, low=10, high=50), lpd.get(9, low=50, high=500)) lenranges = (dsp.rand(10, 50), dsp.rand(50, 500)) out = fx.spider(r, numlayers, numgrains, minlen, lenranges, reverse) # out = dsp.mix([out, dsp.env(r, 'sine')]) return out
def iowa(inst, freq=440, wildcard=''):
    midi_note = tune.ftomi(freq)
    files = getMatches(inst, midi_note, wildcard)
    filename = dsp.randchoose(files)
    return dsp.read(filename).data
def test_waveset_pulsar2d(self):
    rain = dsp.read('tests/sounds/rain.wav').cut(0, 10)
    ws = Waveset(rain)
    ws.normalize()
    osc = Pulsar2d(ws, windows=['sine'], freq=200.0, amp=0.2)
    out = osc.play(60)
    out.write('tests/renders/osc_waveset_pulsar2d.wav')
def test_split(self):
    g = dsp.read('tests/sounds/guitar1s.wav')
    bands = multiband.split(g, 3)

    for i, b in enumerate(bands):
        b.write('tests/renders/multiband_split-band%02d.wav' % i)

    out = dsp.mix(bands)
    out = fx.norm(out, 1)
    out.write('tests/renders/multiband_split-reconstruct.wav')
def test_saturator_dc(self):
    snd = dsp.read('tests/sounds/guitar1s.wav')
    drive = 10
    dcoffset = 0.1
    dcblock = True
    out = fx.saturator(snd, drive, dcoffset, dcblock)
    out.write('tests/renders/fx_saturator_dc.wav')
def test_paulstretch(self):
    snd = dsp.read('tests/sounds/guitar1s.wav')
    windowsize = 1
    stretch = 10
    snd.frames = soundpipe.paulstretch(snd.frames, windowsize, stretch, snd.samplerate)
    snd.write('tests/renders/soundpipe_paulstretch.wav')
def test_mincer(self):
    snd = dsp.read('tests/sounds/linux.wav')
    length = 20
    position = dsp.randline(10, 0.5, 2)
    pitch = dsp.randline(10)
    out = fx.mincer(snd, length, position, pitch)
    out.write('tests/renders/fx_mincer.wav')
def test_saturator_dc(self):
    snd = dsp.read('tests/sounds/guitar1s.wav')
    drive = 10
    dcoffset = 0.1
    dcblock = True
    snd.frames = soundpipe.saturator(snd.frames, drive, dcoffset, dcblock)
    snd.write('tests/renders/soundpipe_saturator_dc.wav')
def snares(ctx):
    speed = ctx.p.freq / 1000.0
    snare = random.choice(ctx.before.get('snares'))
    snare = dsp.read(snare)
    snare = snare * random.triangular(0.75, 0.85)
    snare = snare.speed(random.triangular(1.2, 1.4) * speed)
    yield snare
def test_create_stereo_buffer_from_soundfile(self):
    sound = SoundBuffer(filename='tests/sounds/guitar1s.wav')
    self.assertEqual(len(sound), 44100)
    self.assertTrue(sound.samplerate == 44100)

    sound = dsp.read('tests/sounds/guitar1s.wav')
    self.assertEqual(len(sound), 44100)
    self.assertTrue(sound.samplerate == 44100)
def play(voice_id):
    bpm = C('bpm')
    beat = dsp.bpm2frames(bpm)
    volume = P(voice_id, 'volume', default=1.0)

    crinkle = dsp.read('sounds/s/crinkle.wav').data
    glass1 = dsp.read('sounds/s/glass1.wav').data
    glass2 = dsp.read('sounds/s/glass2.wav').data
    toys = dsp.read('sounds/s/rolling.wav').data

    c = dsp.vsplit(crinkle, dsp.mstf(10), dsp.stf(3))
    c = dsp.randshuffle(c)
    c = c[:40]
    c = [dsp.pan(cc, dsp.rand()) for cc in c]
    c = [dsp.env(cc, 'sine') for cc in c]
    c = [dsp.transpose(cc, dsp.rand(0.25, 0.5)) for cc in c]

    t = dsp.vsplit(toys, dsp.mstf(10), dsp.stf(1))
    t = dsp.randshuffle(t)
    t = t[:40]
    t = [dsp.amp(tt, dsp.rand(0.1, 0.8)) for tt in t]
    t = [dsp.pan(tt, dsp.rand(0, 1)) for tt in t]
    t = [dsp.env(tt, 'sine') for tt in t]
    t = [dsp.transpose(tt, 0.5) for tt in t]

    g = dsp.vsplit(glass2, dsp.mstf(1), dsp.mstf(100))
    g = dsp.randshuffle(g)
    g = g[:40]
    g = [dsp.amp(gg, dsp.rand(0.35, 0.95)) for gg in g]
    g = [dsp.transpose(gg, dsp.rand(0.5, 1.75)) for gg in g]
    g = [gg * dsp.randint(1, 8) for gg in g]

    things = [c, t, g]

    out = [ dsp.mix([ dsp.randchoose(dsp.randchoose(things)) for l in range(dsp.randint(2, 4)) ]) for i in range(4) ]
    out = ''.join(out)

    dsp.log('voice %s length: %.2f' % (voice_id, dsp.fts(dsp.flen(out))))

    return out
def play(ctl):
    mpk = ctl.get('midi').get('mpk')
    nk = ctl.get('midi').get('nk')

    amp = mpk.get(4, low=0, high=1, default=0)

    kick = dsp.read('/home/hecanjog/sounds/drums/Junglebd.wav').data
    klength = dsp.mstf(mpk.get(1, low=60, high=1500, default=100))
    k = dsp.fill(kick, klength, silence=True)
    kamp = nk.get(0, low=0, high=1, default=1)
    k = dsp.amp(k, kamp)
    kpitch = nk.get(16, low=0.25, high=1, default=1)
    k = dsp.transpose(k, kpitch)

    snare = dsp.read('/home/hecanjog/sounds/drums/Hipclap1.wav').data
    slength = dsp.mstf(mpk.get(2, low=60, high=500, default=100))
    s = dsp.fill(snare, slength, silence=True)
    soffset = dsp.mstf(mpk.get(6, low=0, high=500, default=0))
    s = dsp.pad(s, soffset, 0)
    samp = nk.get(1, low=0, high=1, default=1)
    s = dsp.amp(s, samp)
    spitch = nk.get(17, low=0.25, high=2, default=1)
    s = dsp.transpose(s, spitch)

    hat = dsp.read('/home/hecanjog/sounds/drums/78ch.wav').data
    hlength = dsp.mstf(mpk.get(3, low=60, high=500, default=100))
    h = dsp.fill(hat, hlength, silence=True)
    hoffset = dsp.mstf(mpk.get(7, low=0, high=500, default=0))
    h = dsp.pad(h, hoffset, 0)
    hamp = nk.get(2, low=0, high=1, default=1)
    h = dsp.amp(h, hamp)
    hpitch = nk.get(18, low=0.25, high=2, default=1)
    h = dsp.transpose(h, hpitch)

    longest = max([ dsp.flen(k), dsp.flen(h), dsp.flen(s) ])

    k = dsp.fill(k, longest)
    h = dsp.fill(h, longest)
    s = dsp.fill(s, longest)

    out = dsp.mix([k, s, h])
    out = dsp.amp(out, amp)

    return out
def text2wave(lyrics):
    path = os.getcwd() + '/bag.wav'
    cmd = "echo '%s' | /usr/bin/text2wave -o %s" % (lyrics, path)
    ret = subprocess.call(cmd, shell=True)
    words = dsp.read('bag.wav').data
    return words
def play(ctl):
    snd = dsp.read('/home/hecanjog/sounds/guitarpluck.wav').data
    #out = fx.rb(snd, interval=dsp.randint(0, 8) * 2, length=dsp.stf(0.25), formant=False)
    snd = dsp.transpose(snd, 0.968)
    out = dsp.stretch(snd, length=dsp.stf(dsp.rand(12, 15)), grain_size=120)
    out = dsp.env(out, 'hann')
    return out
def test_saturator_nodc(self):
    snd = dsp.read('tests/sounds/guitar1s.wav')
    drive = 10
    dcoffset = 0
    dcblock = False
    out = fx.saturator(snd, drive, dcoffset, dcblock)
    out = fx.norm(out, 1)
    out.write('tests/renders/fx_saturator_nodc.wav')
def test_widen(self):
    snd = dsp.read('tests/sounds/linux.wav')
    out = fx.widen(snd, dsp.win('phasor', 0, 1))
    out.write('tests/renders/fx_widen_linux.wav')

    osc = oscs.Osc('sine', amp=0.2)
    out = osc.play(snd.dur)
    out = fx.widen(out, dsp.win('phasor', 0, 1))
    out.write('tests/renders/fx_widen_sine.wav')
def test_vspeed2(self):
    quality = 20
    sample = dsp.read('tests/sounds/linux.wav')

    snd = fx.vspeed2(sample, [1, 2, 1], quality)
    snd.write('tests/renders/fx_vspeed2.wav')

    snd = fx.vspeed2(sample, -1, 20)
    snd.write('tests/renders/fx_vspeed2_rev.wav')

    snd = fx.vspeed2(sample, [-1, 2, 1], 20)
    snd.write('tests/renders/fx_vspeed2_bipolar.wav')
def test_crush(self):
    snd = dsp.read('tests/sounds/linux.wav')
    out = fx.crush(snd)
    out.write('tests/renders/fx_crush_linux.wav')

    snd = dsp.read('tests/sounds/guitar10s.wav')
    out = fx.crush(snd)
    out.write('tests/renders/fx_crush_guitar.wav')

    out = fx.crush(snd, dsp.win('sine', 2, 16), 44100)
    out.write('tests/renders/fx_crush_guitar_vbitdepth.wav')

    out = fx.crush(snd, 16, dsp.win('sine', 200, 44100))
    out.write('tests/renders/fx_crush_guitar_vsamplerate.wav')

    out = fx.crush(snd, dsp.win('hannin', 2, 16), dsp.win('sine', 200, 44100))
    out.write('tests/renders/fx_crush_guitar_vboth.wav')
def test_compressor(self):
    snd = dsp.read('tests/sounds/guitar1s.wav')
    ratio = 4
    threshold = -10
    attack = 0.2
    release = 0.2
    out = fx.compressor(snd, ratio, threshold, attack, release)
    out.write('tests/renders/fx_compressor.wav')
def text2wave(lyrics):
    stamp = str(time.time())
    filename = 'bag-' + stamp + '.wav'
    path = os.getcwd() + '/tmp/' + filename
    cmd = "echo '%s' | /usr/bin/text2wave -o %s" % (lyrics, path)
    ret = subprocess.call(cmd, shell=True)
    words = dsp.read('tmp/' + filename).data
    return words
def test_dub_overflow(self):
    sound = dsp.read('tests/sounds/guitar1s.wav')
    out = dsp.buffer()
    numdubs = 3
    maxpos = 4

    for _ in range(numdubs):
        pos = random.triangular(0, maxpos)
        out.dub(sound, pos)

    self.assertTrue(len(out) <= (maxpos * out.samplerate) + len(sound))
def test_insets(self):
    wt1 = wavetables.seesaw('rnd', 4096, dsp.rand(0, 1))
    wt1_graph = wt1.graph(stroke=10)
    wt2 = wavetables.seesaw('rnd', 4096, dsp.rand(0, 1))
    wt2_graph = wt2.graph(stroke=10)
    wt3 = wavetables.seesaw('rnd', 4096, dsp.rand(0, 1))
    wt3_graph = wt3.graph(stroke=10)

    snd = dsp.read('tests/sounds/linux.wav')
    snd.graph('tests/renders/graph_insets.png',
            insets=[wt1_graph, wt2_graph, wt3_graph],
            stroke=3,
            width=1200,
            height=500)
def play(voice_id):
    bpm = C('bpm')
    beat = dsp.bpm2frames(bpm)
    volume = P(voice_id, 'volume', default=1.0)

    chord = dsp.read('sounds/sag1.wav').data
    bass = dsp.read('sounds/sag2.wav').data

    out = ''

    for i in range(32):
        b = dsp.amp(bass, 0.5)
        b = dsp.transpose(b, 2)
        b *= 2

        bar = dsp.flen(b) / 2

        blayers = []
        for blayer in range(3):
            blayer = dsp.split(b, bar / dsp.randchoose([6, 12]))
            blayer = dsp.randshuffle(blayer)
            blayer = blayer[:2]
            blayer = ''.join(blayer)
            blayer = dsp.pan(blayer, dsp.rand())
            # blayer *= dsp.randint(2, 8)
            blayers += [ blayer ]

        b = dsp.mix(blayers)

        c = dsp.amp(chord, 0.5)
        c = dsp.fill(c, bar / dsp.randchoose([8, 16, 12, 24]))
        c = dsp.fill(c, dsp.flen(b))

        out += dsp.mix([ b, c ])

    dsp.log('voice %s length: %.2f' % (voice_id, dsp.fts(dsp.flen(out))))

    return out
def sox(cmd, sound):
    path = os.getcwd()
    filename_in = '/proc-in'
    filename_out = '/proc-out.wav'

    dsp.write(sound, filename_in)

    cmd = cmd % (path + filename_in + '.wav', path + filename_out)
    subprocess.call(cmd, shell=True)

    sound = dsp.read(path + filename_out).data
    return sound
def sox(cmd, sound):
    stamp = str(time.time())
    path = os.getcwd()
    filename_in = "/tmp/proc-in" + stamp
    filename_out = "/tmp/proc-out" + stamp + ".wav"

    dsp.write(sound, filename_in)

    cmd = cmd % (path + filename_in + ".wav", path + filename_out)
    subprocess.call(cmd, shell=True)

    sound = dsp.read(path + filename_out).data
    return sound
def clap1(beat):
    c = dsp.read('sounds/mikeclap.wav').data
    c = dsp.transpose(c, dsp.rand(1, 2.5))
    c = dsp.fill(c, dsp.mstf(dsp.rand(10, 100)))
    c = dsp.env(c, 'phasor')
    c = dsp.amp(c, dsp.rand(1, 3))
    c = dsp.pad(c, 0, beat - dsp.flen(c))

    blen = beat / dsp.randchoose([1,2])

    c = dsp.pad(c, blen, 0)
    c *= 4

    return c
def play(ctl):
    bell = dsp.read('sounds/bell.wav').data
    bell = dsp.transpose(bell, dsp.randchoose([0.125, 0.25, 0.5, 1, 2]))

    chime = dsp.read('sounds/chime.wav').data
    chime = dsp.transpose(chime, dsp.randchoose([0.125, 0.25, 0.5, 1, 2]))

    note = dsp.mix([ bell, chime ])

    def makeNote(length, note, degree=1):
        speed = tune.terry[tune.major[degree - 1]]
        speed = speed[0] / speed[1]

        note = dsp.transpose(note, speed)
        note = dsp.fill(note, length, silence=True)
        note = dsp.taper(note, dsp.mstf(10))

        return note

    scale = [ dsp.randchoose([1,5,6]) for s in range(4) ] * 3

    out = ''.join([ makeNote(dsp.mstf(dsp.rand(10, 500)), note, d) for d in scale ])

    return out
def play(voice_id):
    bpm = config('bpm')
    key = config('key')
    quality = getattr(tune, config('quality'))
    ratios = getattr(tune, config('tune'))
    beat = dsp.bpm2frames(bpm)
    beat = beat / 4

    glitch = False
    alias = False

    nbeats = P(voice_id, 'multiple', dsp.randchoose([8, 16]))

    gs = ['gC1', 'gC2']
    g = dsp.randchoose(gs)

    n = dsp.read('sounds/%s.wav' % g).data

    # speeds = [1, 1.25, 1.5, 1.666, 2, 4]
    speeds = [1, 1.25, 1.5, 2, 3, 4, 6, 8, 16]

    root = tune.ntf('c')
    target = tune.ntf(key)

    n = dsp.transpose(n, target / root)
    n = dsp.fill(n, dsp.stf(20))
    n = dsp.transpose(n, dsp.randchoose(speeds))
    n = dsp.split(n, beat)
    n = dsp.randshuffle(n)
    n = n[:nbeats + 1]

    if alias:
        n = [ dsp.alias(nn) for nn in n ]

    n = [ dsp.amp(nn, dsp.rand(0.1, 0.75)) for nn in n ]
    n = [ dsp.pan(nn, dsp.rand()) for nn in n ]
    n = ''.join(n)

    out = n

    if glitch:
        out = dsp.vsplit(out, dsp.mstf(dsp.rand(80, 140)), dsp.mstf(500))
        out = dsp.randshuffle(out)
        out = ''.join(out)

    return out
def play(ctl):
    lpd = ctl.get('midi').get('lpd')

    def rain(snd, freqs):
        layers = []
        for freq in freqs:
            #layer = dsp.pine(snd, dsp.flen(snd) * 16, freq)
            layer = dsp.pan(snd, dsp.rand())
            layer = dsp.amp(layer, 0.5)
            layer = dsp.alias(layer)
            layers += [ layer ]

        return dsp.mix(layers)

    wf = dsp.breakpoint([0] + [ dsp.rand(-1,1) for w in range(lpd.geti(7, low=4, high=200, default=0)) ] + [0], 512)
    win = dsp.wavetable('sine', 512)
    mod = [ dsp.rand(0, 1) for m in range(512) ]
    modr = lpd.get(5, low=0.01, high=1, default=1)
    modf = dsp.rand(0.5, 2)
    amp = lpd.get(3, low=0, high=0.5, default=0)
    length = dsp.mstf(lpd.get(2, low=150, high=500))
    pw = lpd.get(1, low=0.1, high=1, default=1)

    freqs = tune.fromdegrees([1,3,5], octave=2, root='c')
    freq = dsp.randchoose(freqs) / 4.0

    #o = dsp.pulsar(freq, length, pw, wf, win, mod, modr, modf, amp)
    #o = dsp.read('/home/hecanjog/sounds/guitarpluck.wav').data
    o = dsp.read('sounds/rhodes.wav').data
    o = dsp.transpose(o, dsp.randchoose([0.5, 1, 2, 1.5, 3]))
    o = dsp.fill(o, dsp.stf(dsp.rand(0.1, 2)))

    out = rain(o, freqs)
    #out = dsp.env(out, 'random')

    return out
def rb(snd, length=None, speed=None, hz=None, interval=None, ratios=None, crisp=0, formant=False):
    pid = os.getpid()

    cmd = ['rubberband']

    # Time stretching
    if length is not None and dsp.flen(snd) != length and length > 0:
        cmd += [ '--duration %s' % dsp.fts(length) ]

    # crisp setting
    cmd += [ '--crisp %s' % dsp.cap(crisp, 6, 0) ]

    # preserve formants
    if formant:
        cmd += [ '--formant' ]

    # pitch shift by speed
    if speed is not None:
        cmd += [ '--frequency %s' % speed ]

    # pitch shift by semitones
    if interval is not None:
        # TODO use pippi.tune ratios and calc frequency args
        cmd += [ '--pitch %s' % interval ]

    vpid = pid + random.randint(1, 10000)

    cmd = ' '.join(cmd) + ' /tmp/infile%s.wav /tmp/outfile%s.wav' % (vpid, vpid)

    dsp.write(snd, '/tmp/infile%s' % vpid, cwd=False)

    with open(os.devnull, 'w') as devnull:
        p = subprocess.Popen(cmd, stdout=devnull, stderr=devnull, shell=True)
        p.wait()

    out = dsp.read('/tmp/outfile%s.wav' % vpid).data

    os.remove('/tmp/outfile%s.wav' % vpid)
    os.remove('/tmp/infile%s.wav' % vpid)

    return out
from pippi import dsp
from pippi import tune
import audioop

thirty = dsp.read('thirty.wav').data
wesley = dsp.read('wesley.wav').data

snds = [thirty, wesley]

## 01
out = ''

t = thirty * 30
t = dsp.pan(t, 0)

tt = dsp.cut(thirty, 0, dsp.flen(thirty) - dsp.mstf(30)) * 30
tt = dsp.pan(tt, 1)

out = dsp.mix([ t, tt ])

dsp.write(out, 'wesley_thirty_01')

## 02
out = ''

t = dsp.split(thirty, dsp.mstf(40))
t = [ dsp.env(tt, 'sine') for tt in t ]
t = [ tt * 4 for tt in t ]

out = ''.join(t)

dsp.write(out, 'wesley_thirty_02')

## 03
out = ''
from pippi import dsp, tune
from hcj import fx
import math
import orc.wes

guitars = [ dsp.read('samples/guitar%s.wav' % (i + 1)).data for i in range(5) ]

# Intro
##########

def makeShape():
    shape = []
    num_shapelets = dsp.randint(3, 8)

    for _ in range(num_shapelets):
        shapelet_size = dsp.randint(20, 100)
        num_points = dsp.randint(4, shapelet_size / dsp.randint(3, 4))
        shapelet = dsp.breakpoint([ dsp.rand() for _ in range(num_points) ], shapelet_size)
        shape += shapelet

    return shape

def makeGrains():
    guitar = dsp.randchoose(guitars)
    guitar = dsp.transpose(guitar, dsp.randchoose([1, 2, 3, 4, 8]))

    max_grain_length = dsp.mstf(dsp.rand(10, 500))

    positions = [ math.floor(pos * (dsp.flen(guitar) - max_grain_length)) for pos in makeShape() ]
def load(snd):
    """ Return a sound string given a relative path to a sound """
    return dsp.read(path(snd)).data
def play(ctl):
    mpk = ctl.get('midi').get('mpk')

    ccs = [ i + 48 for i in range(24) ]
    notes = []
    for cc in ccs:
        if mpk.get(cc) < 1:
            notes += [ cc ]

    ssnd = dsp.read('/home/hecanjog/sounds/drums/78sd.wav').data
    ssnd = dsp.read('jesssnare.wav').data
    hsnd = dsp.read('/home/hecanjog/sounds/drums/Shaker.wav').data
    ksnd = dsp.read('/home/hecanjog/sounds/drums/Drybd2.wav').data
    #ksnd = dsp.read('jesskick.wav').data

    beat = dsp.bpm2frames(90)
    #beat = dsp.mstf(290 * 2)
    length = beat * 4

    hat = 'xxx '
    kick = 'x '
    snare = ' x '
    #snare = ' x xx'
    #snare = ' '

    def makeHat(length, i, amp):
        h = hsnd
        h = dsp.env(h, 'phasor')
        h = dsp.pad(h, 0, length - dsp.flen(h))
        return h

    def makeKick(length, i, amp):
        k = dsp.mix([ ksnd, drums.sinekick(length, i, amp) ])
        #k = dsp.env(ksnd, 'phasor')
        k = dsp.fill(k, length, silence=True)
        k = dsp.amp(k, 1)
        return k

    def makeSnare(length, i, amp):
        s = ssnd
        s = dsp.amp(s, 1.2)
        s = dsp.transpose(s, dsp.rand(1.5, 3))
        s = dsp.fill(s, length, silence=True)
        #ss = dsp.drift(s, dsp.rand(0.001, 0.1))
        #s = dsp.mix([s, ss])
        return s

    #hats = drums.parsebeat(hat, 16, beat, length, makeHat, 25)
    hats = drums.parsebeat(hat, 16, beat, length, makeHat, 0)
    kicks = drums.parsebeat(kick, 16, beat, length, makeKick, 0)
    snares = drums.parsebeat(snare, 8, beat, length, makeSnare, 0)

    out = dsp.mix([hats,kicks,snares])

    shuf = True
    shuf = False
    if shuf:
        out = dsp.split(out, beat)
        out = dsp.randshuffle(out)
        out = ''.join(out)

    out = dsp.amp(out, 2)

    cuts = True if dsp.rand() > 0.5 else False
    cuts = True
    #cuts = False
    if cuts:
        o = dsp.split(out, beat / 2)
        o = dsp.randshuffle(o)
        o = [ dsp.amp(oo, dsp.rand(0, 2.5)) for oo in o ]
        o = [ dsp.env(oo, 'random') for oo in o ]
        out = dsp.mix([ ''.join(o), out ])

    dsp.log(notes)

    synthy = False
    #synthy = True
    if synthy == True:
        s = ''
        for ii in range(dsp.flen(out) / (beat/2)):
            layers = []
            if len(notes) > 0:
                scale = [ n - 47 for n in notes ]
                scale = [1,5,8,12]
                scale = tune.fromdegrees(scale, octave=3, root='d')
                p = ''.join([ keys.pulsar(scale[ii % len(scale)], pulsewidth=dsp.rand(0.1, 1), amp=0.5, length=(beat/2) / 3) for _ in range(3) ])
                layers += [ p ]
            else:
                layers += [ dsp.pad('', beat / 2, 0) ]

            s += dsp.mix(layers)

        out = dsp.mix([ s, out ])

    #out = dsp.alias(out)
    #out = dsp.drift(out, dsp.rand(0.5, 2))

    return out
from pippi import dsp, tune
from hcj import fx, keys, snds, drums, Sampler
import ctl

dloop2 = dsp.read('samples/jess/loop2.wav').data
dloop1 = dsp.read('samples/jess/loop1.wav').data
dloop1 = dsp.fill(dloop1, dsp.flen(dloop2))

kicksoft = dsp.read('samples/jess/kickshuffle.wav').data
kickhard = dsp.read('samples/jess/kickcym.wav').data

rimshot = dsp.read('samples/jess/rimshot.wav').data
rimshot = dsp.amp(rimshot, 4)

snare = dsp.read('samples/jess/snare.wav').data
snare = dsp.amp(snare, 3)

snare2 = snds.load('hits/hisnarespa.wav')
snare2 = dsp.amp(snare2, 0.25)

clap = snds.load('hits/tapeclap.wav')
clap = dsp.amp(clap, 0.25)

flam = dsp.read('samples/jess/snareflam.wav').data
flam = dsp.amp(flam, 3)

smash = dsp.read('samples/jess/smash.wav').data
skitter = dsp.read('samples/jess/skitter.wav').data
paper = snds.load('hits/papersnap.wav')
sock = snds.load('hits/detroitkick1.wav')
hat = snds.load('hits/keyshihat.wav')

section_choices = {
from pippi import dsp, tune
from hcj import keys, fx, drums

kick = dsp.read('snds/kick.wav').data
bigkick = dsp.read('snds/kick606.wav').data

snare = dsp.read('snds/snare.wav').data
snare = dsp.amp(snare, 6)
snare = dsp.env(snare, 'phasor')

snarex = dsp.split(snare, 0, 1)

key = 'a'

hatp = 'xxxx'
snarep = '..x...x...'
kickp = 'x...-.....x..x...'
pulsep = 'x..'

# tempo path
def tempoPath(nsegs):
    maxms = dsp.rand(100, 400)
    minms = dsp.rand(1, 100)

    wavetypes = ['hann', 'sine', 'vary']

    out = []
    for _ in range(nsegs):
        seglen = dsp.randint(20, 200)
        seg = dsp.wavetable(dsp.randchoose(wavetypes), seglen)

        # pull out a randomly selected subsegment of the curve
def play(params):
    """ Usage: shine.py [length] [volume] """
    length = params.get('length', dsp.stf(dsp.rand(0.1, 1)))
    volume = params.get('volume', 100.0)
    volume = volume / 100.0 # TODO: move into param filter
    octave = params.get('octave', 2) + 1 # Add one to compensate for an old error for now
    note = params.get('note', ['c'])
    note = note[0]
    quality = params.get('quality', tune.major)
    glitch = params.get('glitch', False)
    superglitch = params.get('superglitch', False)
    pinecone = params.get('pinecone', False)
    glitchpad = params.get('glitch-padding', 0)
    glitchenv = params.get('glitch-envelope', False)
    env = params.get('envelope', False)
    ratios = params.get('ratios', tune.terry)
    pad = params.get('padding', False)
    bend = params.get('bend', False)
    bpm = params.get('bpm', 75.0)
    width = params.get('width', False)
    wform = params.get('waveform', False)
    instrument = params.get('instrument', 'r')
    scale = params.get('scale', [1,6,5,4,8])
    shuffle = params.get('shuffle', False) # Reorganize input scale
    reps = params.get('repeats', len(scale))
    alias = params.get('alias', False)
    phase = params.get('phase', False)
    pi = params.get('pi', False)
    wild = params.get('wii', False)
    root = params.get('root', 27.5)
    trigger_id = params.get('trigger_id', 0)

    tune.a0 = float(root)

    try:
        # Available input samples
        if instrument == 'r':
            instrument = 'rhodes'
            tone = dsp.read('sounds/synthrhodes.wav').data
        elif instrument == 's':
            instrument = 'synthrhodes'
            tone = dsp.read('sounds/220rhodes.wav').data
        elif instrument == 'c':
            instrument = 'clarinet'
            tone = dsp.read('sounds/clarinet.wav').data
        elif instrument == 'v':
            instrument = 'vibes'
            tone = dsp.read('sounds/glock220.wav').data
        elif instrument == 't':
            instrument = 'tape triangle'
            tone = dsp.read('sounds/tape220.wav').data
        elif instrument == 'g':
            instrument = 'glade'
            tone = dsp.read('sounds/glade.wav').data
        elif instrument == 'p':
            instrument = 'paperclips'
            tone = dsp.read('sounds/paperclips.wav').data
        elif instrument == 'i':
            instrument = 'input'
            tone = dsp.capture(dsp.stf(1))
    except:
        instrument = None
        tone = None

    out = ''

    # Shuffle the order of pitches
    if shuffle is not False:
        scale = dsp.randshuffle(scale)

    # Translate the list of scale degrees into a list of frequencies
    freqs = tune.fromdegrees(scale, octave, note, quality, ratios)
    freqs = [ freq / 4.0 for freq in freqs ]

    # Format is: [ [ path, offset, id, value ] ]
    # Offset for video
    osc_messages = [ ['/dac', float(dsp.fts(length)), 1, tune.fts(osc_freq)] for osc_freq in freqs ]

    # Phase randomly chooses note lengths from a
    # set of ratios derived from the current bpm
    if phase is not False:
        ldivs = [0.5, 0.75, 2, 3, 4]
        ldiv = dsp.randchoose(ldivs)
        length = dsp.bpm2ms(bpm) / ldiv
        length = dsp.mstf(length)
        reps = ldiv if ldiv > 1 else 4

    # Construct a sequence of notes
    for i in range(reps):
        # Get the freqency
        freq = freqs[i % len(freqs)]

        # Transpose the input sample or synthesize tone
        if wform is False and tone is not None:
            # Determine the pitch shift required
            # to arrive at target frequency based on
            # the pitch of the original samples.
            if instrument == 'clarinet':
                diff = freq / 293.7
            elif instrument == 'vibes':
                diff = freq / 740.0
            else:
                diff = freq / 440.0

            clang = dsp.transpose(tone, diff)
        elif wform == 'super':
            clang = dsp.tone(length, freq, 'phasor', 0.5)
            clang = [ dsp.drift(clang, dsp.rand(0, 0.02)) for s in range(7) ]
            clang = dsp.mix(clang)
        elif wform is False and tone is None:
            clang = dsp.tone(length, freq, 'sine2pi', 0.75)
            clang = dsp.amp(clang, 0.6)
        else:
            clang = dsp.tone(length, freq, wform, 0.75)
            clang = dsp.amp(clang, 0.6)

        # Stupidly copy the note enough or
        # trim it to meet the target length
        clang = dsp.fill(clang, length)

        # Give synth tones simple env (can override)
        if wform is not False and env is False:
            clang = dsp.env(clang, 'phasor')

        # Apply an optional amplitude envelope
        if env is not False:
            clang = dsp.env(clang, env)

        # Add optional padding between notes
        if pad != False:
            clang = dsp.pad(clang, 0, pad)

        # Add to the final note sequence
        out += clang

    # Add optional aliasing (crude bitcrushing)
    if alias is not False:
        out = dsp.alias(out)

    # Cut sound into chunks of variable length (between 5 & 300 ms)
    # Pan each chunk to a random position
    # Apply a sine amplitude envelope to each chunk
    # Finally, add variable silence between each chunk and shuffle the
    # order of the chunks before joining.
    if glitch is not False:
        out = dsp.vsplit(out, dsp.mstf(5), dsp.mstf(300))
        out = [dsp.pan(o, dsp.rand()) for o in out]
        out = [dsp.env(o, 'sine') for o in out]
        out = [dsp.pad(o, 0, dsp.mstf(dsp.rand(0, glitchpad))) for o in out]
        out = ''.join(dsp.randshuffle(out))

    # Detune between 1.01 and 0.99 times original speed
    # as a sine curve whose length equals the total output length
    if bend is not False:
        out = dsp.split(out, 441)
        freqs = dsp.wavetable('sine', len(out), 1.01, 0.99)
        out = [ dsp.transpose(out[i], freqs[i]) for i in range(len(out)) ]
        out = ''.join(out)

    if wild is not False:
        #out = dsp.vsplit(out, 400, 10000)
        out = dsp.split(out, 3000)
        out = [ dsp.amp(dsp.amp(o, dsp.rand(10, 50)), 0.5) for o in out ]
        #out = [ o * dsp.randint(1, 5) for o in out ]

        for index, o in enumerate(out):
            if dsp.randint(0, 1) == 0:
                out[index] = dsp.env(dsp.cut(o, 0, dsp.flen(o) / 4), 'gauss') * 4

            if dsp.randint(0, 6) == 0:
                out[index] = dsp.transpose(o, 8)

        out = [ dsp.env(o, 'gauss') for o in out ]

        freqs = dsp.wavetable('sine', len(out), 1.02, 0.98)
        out = [ dsp.transpose(out[i], freqs[i]) for i in range(len(out)) ]
        out = ''.join(out)

    if pinecone == True:
        out = dsp.pine(out, int(length * dsp.rand(0.5, 8.0)), dsp.randchoose(freqs) * dsp.rand(0.5, 4.0))

    # Adjust output amplitude as needed and return audio + OSC
    if pi:
        return (dsp.amp(out, volume), {'osc': osc_messages})
    else:
        return dsp.amp(out, volume)