Code example #1
File: chop.py  Project: stymy/bsides
def chop(src, nsecs=300):
    "Return a list of output files that have been written to disk"
    # XXX: May be nice to split on silence, instead of seconds.
    acc = np.zeros((nsecs*R,2), dtype=np.int16)  # R: the module-level sample rate (frames/sec)
    idx = 0
    count = 0
    paths = []
    for chunk in numm.sound_chunks(src):
        if idx + len(chunk) <= len(acc):
            acc[idx:idx+len(chunk)] = chunk
            idx += len(chunk)
        else:
            nframes = len(acc) - idx
            acc[idx:] = chunk[:nframes]
            path = src + '.%06d.wav' % (count)
            numm.np2sound(acc, path)
            count += 1
            paths.append(path)

            acc[:] = 0
            acc[:len(chunk)-nframes] = chunk[nframes:]

            idx = len(chunk)-nframes

    if idx < len(acc):
        remainder = acc[:idx]
        path = src + '.%06d.wav' % (count)
        numm.np2sound(remainder, path)
        paths.append(path)
        
    return paths
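
A minimal usage sketch (hypothetical file name; chop() relies on numm, numpy, and a module-level sample rate R):

import numm
import numpy as np

R = 44100  # frames per second, matching the acc buffer above

paths = chop('session.wav', nsecs=60)   # write 60-second slices to disk
print(paths)  # ['session.wav.000000.wav', 'session.wav.000001.wav', ...]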
Code example #2
File: player.py  Project: strob/connectomusic
    def toggle_recording(self):
        if self._recording:
            # save recording
            numm.np2sound(np.concatenate(self._out), 'out.wav')
            print 'recording saved to out.wav'

            # save parameters
            params = {'target': self._target,
                      'flipped': self._flipped,
                      'sounds': self.graph.version,
                      'actions': self._actions,
                      'nummap': self._nummap,
                      'mouse': self._mouse,
                      'speed': self._speed}
            with open('out.params.json', 'w') as f:
                json.dump(params, f)
            with open('out.samples.txt', 'w') as f:
                f.write('\n'.join(self._samples))

        else:
            print 'start recording'
        self._out = []
        self._actions = []
        self._mouse = []
        self._nummap = []
        self._samples = []
        self._recording = not self._recording
        return self._recording
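
A quick sketch of reading that session data back (assuming the out.params.json and out.samples.txt files written above):

import json
with open('out.params.json') as f:
    params = json.load(f)
with open('out.samples.txt') as f:
    samples = f.read().split('\n')
print(len(params['actions']), len(samples))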
Code example #3
File: cluster.py  Project: stymy/bsides
def wave(src_np, clusters, outpattern, R=44100):
    for idx, segs in clusters.items():
        segchunks = [src_np[int(R * st) : int(R * (st + dur))] for (st, dur, _idx) in segs]
        if len(segchunks) == 0:
            print "zero-length cluster", idx
            continue
        segchunks = np.concatenate(segchunks)
        numm.np2sound(segchunks, outpattern % (int(idx)))
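
A hypothetical call, assuming clusters maps a cluster id to a list of (start_sec, duration_sec, segment_idx) tuples over a decoded source array:

import numm
src = numm.sound2np('input.wav')
clusters = {0: [(0.0, 0.5, 0), (2.0, 0.25, 3)],
            1: [(1.0, 1.0, 1)]}
wave(src, clusters, 'cluster-%03d.wav')  # writes cluster-000.wav, cluster-001.wav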
Code example #4
File: convoflute2.py  Project: cosmc/convoflute
def keyboard_in(event_type, key):
	# Quit on Q. Save current output chunk on space. Shift playback rectangle with WASD.
	# TODO: Write a helper function to make the WASD cases less repetitive.
	global runner, freshOutTL, freshOutBR, rectStale
	print "KEY DOWN: " + key
	if key == ' ':
		outFile = fileName1 + "--" + fileName2 + "--" + str(chunkSize) + "_" + str(outX) + "_" + str(outY) + ".wav"
		numm.np2sound(currentOutChunk, outFile)
		print "Rendered " + outFile
	elif key == 'q':
		runner.quit()
	elif (key == 'a' and outTL[0] > 0.01):
		freshOutTL = (freshOutTL[0]-0.01,freshOutTL[1])
		freshOutBR = (freshOutBR[0]-0.01,freshOutBR[1])
		rectStale = True
		try:
			thread.start_new_thread(updateLoop, (audio1, audio2, (freshOutTL[0], freshOutBR[0]), (freshOutTL[1], freshOutBR[1]),))
		except:
			print "Unable to start loop update thread."
	elif (key == 'd' and outBR[0] < 0.99):
		freshOutTL = (freshOutTL[0]+0.01,freshOutTL[1])
		freshOutBR = (freshOutBR[0]+0.01,freshOutBR[1])
		rectStale = True
		try:
			thread.start_new_thread(updateLoop, (audio1, audio2, (freshOutTL[0], freshOutBR[0]), (freshOutTL[1], freshOutBR[1]),))
		except:
			print "Unable to start loop update thread."
	elif (key == 'w' and outTL[1] > 0.01):
		freshOutTL = (freshOutTL[0],freshOutTL[1]-0.01)
		freshOutBR = (freshOutBR[0],freshOutBR[1]-0.01)
		rectStale = True
		try:
			thread.start_new_thread(updateLoop, (audio1, audio2, (freshOutTL[0], freshOutBR[0]), (freshOutTL[1], freshOutBR[1]),))
		except:
			print "Unable to start loop update thread."
	elif (key == 's' and outBR[1] < 0.99):
		freshOutTL = (freshOutTL[0],freshOutTL[1]+0.01)
		freshOutBR = (freshOutBR[0],freshOutBR[1]+0.01)
		rectStale = True
		try:
			thread.start_new_thread(updateLoop, (audio1, audio2, (freshOutTL[0], freshOutBR[0]), (freshOutTL[1], freshOutBR[1]),))
		except:
			print "Unable to start loop update thread."
	print "Updated freshOutTL: " + str(freshOutTL) + "  freshOutBR: " + str(freshOutBR)
Code example #5
File: convoflute.py  Project: cosmc/convoflute
def keyboard_in(event_type, key):
	# Quit on Q. Save current output chunk on space. Navigate playback grid with WASD.
	global runner, outX, outY
	print "KEY DOWN: " + key
	if key == ' ':
		outFile = fileName1 + "--" + fileName2 + "--" + str(chunkSize) + "_" + str(outX) + "_" + str(outY) + ".wav"
		numm.np2sound(currentOutChunk, outFile)
		print "Rendered " + outFile
	elif key == 'q':
		runner.quit()
	elif (key == 'a' and outX > 0.01):
		outX -= 0.01
	elif (key == 'd' and outX < 0.99):
		outX += 0.01
	elif (key == 'w' and outY > 0.01):
		outY -= 0.01
	elif (key == 's' and outY < 0.99):
		outY += 0.01
	print "outX: " + str(outX) + "  outY: " + str(outY)
Code example #6
File: ferry.py  Project: csv/sparse-data-sonification
import numm
import pandas
import numpy
import math
tau = 2 * math.pi


def wave(row):
    # (2**15 - 1) parenthesized so the peak stays within the int16 range
    return math.sin(tau * row['Date'] * 440) * (2**15 - 1)
    # Unreachable alternative from the original, commented out here; note that
    # (3/2) is integer division in Python 2 (== 1), so the intended ratio
    # would need (3.0/2).
    # power = 1  # round(math.log(row['Downtown.Passengers'])+16)
    # return math.sin(tau / 8 * row['Date'] * 440 * (3.0/2)**power) * (2**15 - 1)


ferry = pandas.read_csv('ferry.smooth.csv')
ferry['Date'] = range(ferry.shape[0])
ferry['Downtown.Passengers.Wave'] = ferry.apply(wave, axis=1)
numm.np2sound(ferry['Downtown.Passengers.Wave'].values, '/tmp/a.wav')
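
Note that with the integer sample index in row['Date'], sin(tau * n * 440) advances a whole number of cycles per sample and aliases to (nearly) zero. A vectorized sketch of an audible 440 Hz tone, assuming a 44100 Hz sample rate:

n = numpy.arange(ferry.shape[0])
tone = numpy.sin(tau * 440.0 * n / 44100.0) * (2**15 - 1)
numm.np2sound(tone.astype(numpy.int16), '/tmp/a440.wav')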
Code example #7
File: ferry.py  Project: csv/codame-dataviz
import numm
import pandas
import numpy
import math
tau = 2 * math.pi

def wave(row):
    # (2**15 - 1) parenthesized so the peak stays within the int16 range
    return math.sin(tau * row['Date'] * 440) * (2**15 - 1)
    # Unreachable alternative from the original, commented out here; (3/2) is
    # integer division in Python 2 (== 1), so the intended ratio needs (3.0/2).
    # power = 1 # round(math.log(row['Downtown.Passengers'])+16)
    # return math.sin(tau/8 * row['Date'] * 440 * (3.0/2)**power) * (2**15 - 1)

ferry = pandas.read_csv('ferry.smooth.csv')
ferry['Date'] = range(ferry.shape[0])
ferry['Downtown.Passengers.Wave'] = ferry.apply(wave, axis = 1)
numm.np2sound(ferry['Downtown.Passengers.Wave'].values, '/tmp/a.wav')
Code example #8
File: player.py  Project: strob/connectomusic
    g = graph.load_graph(sys.argv[1])
    graph.connect_to_samples(g, sys.argv[2:])

    p = Player(g, speed=50)

    # # Trigger everything
    # for n in g.get_nodes():
    #     if n.frames is not None:
    #         p.trigger(n, 1.0)

    # Trigger *something*
    p.trigger(g.get_nodes()[50], 1.0)

    out = []
    v_out = cv2.VideoWriter()
    fr = p._get_base_frame()
    if not v_out.open('out.avi', cv2.cv.CV_FOURCC(*'MJPG'), int(R/2048), (fr.shape[1], fr.shape[0]), True):
        raise RuntimeError('could not open out.avi for writing')

    while p.active() and len(out) < 21*60:  # at R/2048 ~= 21 chunks/sec, caps output near one minute
        v_out.write(p.draw())
        out.append(p.next())

        print len(out)

    out = np.concatenate(out)

    # out /= out.max() / float(2**15-1)

    numm.np2sound(out, 'out.wav')
Code example #9
File: wolftones.py  Project: stymy/bsides
                    # # Defer frames to avoid pitch-shifting
                    # if len(buffers) > 1:
                    #     buffers[buf_idx] = np.roll(buffers[buf_idx], -amnt*len(buffers), axis=0)
                if len(buffers) == 0:
                    return out
                buf_idx = (buf_idx + 1) % len(buffers)


if __name__ == '__main__':
    import numm
    import sys
    buffers = [numm.sound2np(X) for X in sys.argv[1:]]

    nframes = sum([len(X) for X in buffers])

    # freqrot = [440, 800, 1500, 300]
    freqrot = [4400, 1800, 1500, 2222]

    comp = []
    f_idx = 0
    w_len = R / 4  # quarter-second windows at sample rate R
    while nframes > 0:
        amnt = min(nframes, w_len)
        comp.append((freqrot[f_idx], amnt))
        f_idx = (f_idx + 1) % len(freqrot)
        nframes -= amnt

    out = wolfcut(comp, buffers)

    numm.np2sound(out, 'wolf.wav')
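
For illustration, assuming R = 44100 (R is defined elsewhere in wolftones.py), each window is 11025 frames, so comp steps through freqrot in quarter-second windows:

# comp begins [(4400, 11025), (1800, 11025), (1500, 11025), (2222, 11025),
#              (4400, 11025), ...], with the final entry truncated so the
#              window lengths sum to nframes.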
Code example #10
File: bsides.py  Project: strob/bsides
def structure_keys(type, button):
    global zoom_idx, rhythm_square, structure_rhythm_idx

    r_idx = 0
    nsquares = len(composition.rhythms)
    if nsquares > 0:
        N = int(np.ceil(np.sqrt(nsquares)))
        r_idx = min(len(composition.rhythms)-1, int(mousex * N) + int(mousey * N) * N)

    print 'sk on', r_idx, len(composition.rhythms)

    if type == 'key-press':
        if button == 'n':
            rhythm_square = Square()
            rhythm_init()
            composition.append(rhythm_square)
            zoom_idx = ZOOM_LEVELS.index('rhythm')
        elif button == 'e':
            print 'export'
            out = np.concatenate([X.getArrangement().getArray(tape) for X in composition.rhythms])
            numm.np2sound(out, 'export.wav')
        elif button == 'x':
            print 'delete', r_idx
            if len(composition.rhythms) == 0:
                return
            rhy = composition.rhythms.pop(r_idx)
            for i,g in enumerate(rhy.groups):
                rhy.remove(i)
                for s in g:
                    tape.unuse(s)
            if rhy == rhythm_square:
                rhythm_square = None
                audio_advance()
        elif button == 'c':
            rhythm_square = composition.rhythms[r_idx]
            rhythm_init()
            zoom_idx = ZOOM_LEVELS.index('rhythm')
        elif button == 'Left':
            rhy = composition.rhythms.pop(r_idx)
            n_idx = max(0, r_idx-1)
            composition.rhythms.insert(n_idx, rhy)
        elif button == 'Right':
            rhy = composition.rhythms.pop(r_idx)
            n_idx = min(len(composition.rhythms), r_idx+1)  # min, not max: clamp to the list end
            composition.rhythms.insert(n_idx, rhy)
        elif button == 'd':
            print 'DUPLICATE'
            rhy = composition.rhythms[r_idx]
            dupe = Square()
            dupe._fills = rhy._fills.copy()
            dupe._tones = rhy._tones.copy()
            dupe.theta = rhy.theta

            for g in rhy.groups:
                newg = []
                for s in g:
                    news = tape.getNearUnsed(s)
                    tape.use(news)
                    newg.append(news)
                dupe.append(newg)

            composition.append(dupe)
        elif button == 'space':
            structure_rhythm_idx = r_idx - 1
            audio_advance()
Code example #11
File: mix.py  Project: strob/connectomusic
import numm
import numpy as np

def mix(l, r):
    "Mix two stereo tracks"
    out = np.zeros((max(len(l), len(r)), 2), int)
    out[:len(l),0] = l[:,0]        # l at full level in the left channel...
    out[:len(l),1] = l[:,1]/2      # ...and halved in the right
    out[:len(r),0] += r[:,0]/2     # r halved in the left channel...
    out[:len(r),1] += r[:,1]       # ...and at full level in the right
    # Peak-normalize so the loudest sample hits the int16 maximum.
    return (out / (out.max() / float(2**15-1))).astype(np.int16)

if __name__=='__main__':
    import sys
    res = mix(numm.sound2np(sys.argv[1]),
              numm.sound2np(sys.argv[2]))
    numm.np2sound(res, 'mixed.wav')
Code example #12
def structure_keys(type, button):
    global zoom_idx, rhythm_square, structure_rhythm_idx

    r_idx = 0
    nsquares = len(composition.rhythms)
    if nsquares > 0:
        N = int(np.ceil(np.sqrt(nsquares)))
        r_idx = min(
            len(composition.rhythms) - 1,
            int(mousex * N) + int(mousey * N) * N)

    print 'sk on', r_idx, len(composition.rhythms)

    if type == 'key-press':
        if button == 'n':
            rhythm_square = Square()
            rhythm_init()
            composition.append(rhythm_square)
            zoom_idx = ZOOM_LEVELS.index('rhythm')
        elif button == 'e':
            print 'export'
            out = np.concatenate([
                X.getArrangement().getSequencePreview().getArray(tape)
                for X in composition.rhythms
            ])
            numm.np2sound(out, 'export.wav')
        elif button == 'x':
            print 'delete', r_idx
            if len(composition.rhythms) == 0:
                return
            rhy = composition.rhythms.pop(r_idx)
            for i, g in enumerate(rhy.groups):
                rhy.remove(i)
                for s in g:
                    tape.unuse(s)
            if rhy == rhythm_square:
                rhythm_square = None
                audio_advance()
        elif button == 'c':
            rhythm_square = composition.rhythms[r_idx]
            rhythm_init()
            zoom_idx = ZOOM_LEVELS.index('rhythm')
        elif button == 'Left':
            rhy = composition.rhythms.pop(r_idx)
            n_idx = max(0, r_idx - 1)
            composition.rhythms.insert(n_idx, rhy)
        elif button == 'Right':
            rhy = composition.rhythms.pop(r_idx)
            n_idx = min(len(composition.rhythms), r_idx + 1)  # min, not max: clamp to the list end
            composition.rhythms.insert(n_idx, rhy)
        elif button == 'd':
            print 'DUPLICATE'
            rhy = composition.rhythms[r_idx]
            dupe = Square()
            dupe._fills = rhy._fills.copy()
            dupe._tones = rhy._tones.copy()
            dupe.theta = rhy.theta

            for g in rhy.groups:
                newg = []
                for s in g:
                    news = tape.getNearUnsed(s)
                    tape.use(news)
                    newg.append(news)
                dupe.append(newg)

            composition.append(dupe)
        elif button == 'space':
            structure_rhythm_idx = r_idx - 1
            audio_advance()
Code example #13
File: wolftones.py  Project: strob/bsides
                    buffers[buf_idx] = buffers[buf_idx][amnt:]
                    # # Defer frames to avoid pitch-shifting
                    # if len(buffers) > 1:
                    #     buffers[buf_idx] = np.roll(buffers[buf_idx], -amnt*len(buffers), axis=0)
                if len(buffers) == 0:
                    return out
                buf_idx = (buf_idx + 1) % len(buffers)

if __name__=='__main__':
    import numm
    import sys
    buffers = [numm.sound2np(X) for X in sys.argv[1:]]

    nframes = sum([len(X) for X in buffers])

    # freqrot = [440, 800, 1500, 300]
    freqrot = [4400, 1800, 1500, 2222]

    comp = []
    f_idx = 0
    w_len = R/4  # quarter-second windows at sample rate R
    while nframes > 0:
        amnt = min(nframes, w_len)
        comp.append((freqrot[f_idx], amnt))
        f_idx = (f_idx + 1) % len(freqrot)
        nframes -= amnt
        
    out = wolfcut(comp, buffers)

    numm.np2sound(out, 'wolf.wav')