コード例 #1
0
ファイル: PyPlotVoice.py プロジェクト: staltitia/PyPlotVoice
def readAudio( filename ):
	"""Read an audio file; returns list-like [ samplingRate, values ].

	.wav files are handed to wavfile.read (which already returns
	(rate, data)).  Anything else is decoded with numm, which defaults
	to 44.1 kHz stereo; only the left channel is kept and it is then
	mean-downsampled by 5 (a concession to a slow dev machine, kept
	from the original code).
	"""
	import os
	ext = os.path.splitext(filename)[1]

	if ext == '.wav':	#reading a .wav
		return wavfile.read(filename)
	else:
		#using numm to read a .webm
		values = numm.sound2np(filename)
		#keep just the left channel, as a flat 1-D array
		values = values[:, 0]

		#Pad with zeros so the length is a multiple of 5.  The number
		#of samples needed is (-size) % 5 -- the original computed
		#size % 5, which almost never lands on a multiple of 5 -- and
		#numpy.concatenate takes a *tuple* of arrays, not two arrays.
		pad = (-values.size) % 5
		if pad:
			values = numpy.concatenate((values, numpy.zeros(pad, dtype=values.dtype)))
		#average every 5 consecutive samples; // keeps the reshape
		#argument integral under Python 3 as well
		reshaped = values.reshape((values.size // 5, 5))
		values = numpy.mean(reshaped, axis=1)
		samplingRate = 44100 // 5
		return [ samplingRate, values ]
コード例 #2
0
ファイル: compose.py プロジェクト: strob/bsides
 def _get_array(self):
     """Decode the sound file at ``self.path`` into a numpy sample array."""
     source = self.path
     return numm.sound2np(source)
コード例 #3
0
 def _get_array(self):
     """Decode the sound file at ``self.path`` into a numpy sample array."""
     source = self.path
     return numm.sound2np(source)
コード例 #4
0
ファイル: convoflute.py プロジェクト: cosmc/convoflute
	elif (key == 's' and outY < 0.99):
		outY += 0.01
	print "outX: " + str(outX) + "  outY: " + str(outY)


if (__name__ == "__main__"):

	# The three command-line arguments are the audio chunk size (in samples)
	# and the filenames of the two files to analyze.
	chunkSize = int(sys.argv[1])
	fileName1 = sys.argv[2]
	fileName2 = sys.argv[3]

	# Extract the files to sound!
	print "Extracting " + fileName1 + " and " + fileName2 + "..."
	audio1 = numm.sound2np(fileName1)
	audio2 = numm.sound2np(fileName2)

	# Chunkulate the nparrays.
	# chunkulate() is defined elsewhere in this file; judging from the
	# shape prints below it splits the (frames, 2) arrays into fixed-size
	# chunks of chunkSize frames each.
	print "Chunkulating..."
	chunks1 = chunkulate(audio1, chunkSize)
	chunks2 = chunkulate(audio2, chunkSize)
	print "Chunked " + fileName1 + " into " + str(chunks1.shape) + " chunks."
	print "Chunked " + fileName2 + " into " + str(chunks2.shape) + " chunks."

	# Window the chunks.
	# Build a per-chunk Hamming-window stack, duplicated for both stereo
	# channels, and taper every chunk elementwise before further processing.
	print "Windowing the chunks..."
	windowed1 = chunks1 * np.array(  [np.array( [np.hamming(chunkSize),]*2 ).T,] * chunks1.shape[0]  )
	windowed2 = chunks2 * np.array(  [np.array( [np.hamming(chunkSize),]*2 ).T,] * chunks2.shape[0]  )

	# Run the thing!
コード例 #5
0
ファイル: wolftones.py プロジェクト: stymy/bsides
                    #done with this buffer
                    buffers.pop(buf_idx)
                else:
                    buffers[buf_idx] = buffers[buf_idx][amnt:]
                    # # Defer frames to avoid pitch-shifting
                    # if len(buffers) > 1:
                    #     buffers[buf_idx] = np.roll(buffers[buf_idx], -amnt*len(buffers), axis=0)
                if len(buffers) == 0:
                    return out
                buf_idx = (buf_idx + 1) % len(buffers)


if __name__ == '__main__':
    import numm
    import sys
    # Decode every file named on the command line into a numpy sample buffer.
    buffers = [numm.sound2np(X) for X in sys.argv[1:]]

    # Total frame count across all inputs.
    nframes = sum([len(X) for X in buffers])

    # freqrot = [440, 800, 1500, 300]
    # Frequencies (Hz) cycled through, one per composition window.
    freqrot = [4400, 1800, 1500, 2222]

    # comp: list of (frequency, frame-count) windows that together cover
    # every input frame.
    comp = []
    f_idx = 0
    # NOTE(review): R is defined elsewhere in the file -- presumably the
    # 44100 Hz sample rate, making each window a quarter second; confirm.
    w_len = R / 4
    while nframes > 0:
        amnt = min(nframes, w_len)
        comp.append((freqrot[f_idx], amnt))
        f_idx = (f_idx + 1) % len(freqrot)
        nframes -= amnt
コード例 #6
0
            src_np[int(R * st):int(R * (st + dur))] for (st, dur, _idx) in segs
        ]
        if len(segchunks) == 0:
            print 'zero-length cluster', idx
            continue
        segchunks = np.concatenate(segchunks)
        numm.np2sound(segchunks, outpattern % (int(idx)))


if __name__ == '__main__':
    import sys

    # Feature key(s) to cluster on; alternatives kept for reference.
    # keys = ["AvgTonalCentroid(6)", "AvgMFCC(13)", "AvgChroma(12)"]
    keys = ["AvgMFCC(13)"]
    nclusters = 36

    # Cluster each source file named on the command line and write one
    # wav per cluster via wave().
    for src in sys.argv[1:]:
        # Audio is decoded lazily, only if some key still needs work.
        src_np = None

        for key in keys:
            jsonfile = "%s-%s-%d.json" % (src, key, nclusters)
            # NOTE(review): skips a key when its json file already exists,
            # yet clusters are recomputed fresh below -- verify the intent
            # of this cache check.
            if os.path.exists(jsonfile):
                continue

            if src_np is None:
                src_np = numm.sound2np(src)

            clusters = cluster(src, key=key, nbins=nclusters)
            wave(src_np, clusters,
                 "%s-%s-%d-%%06d.wav" % (src, key, nclusters))
コード例 #7
0
ファイル: dither.py プロジェクト: strob/bsides
import numm
import numpy as np

from zerocrossings import zerocrossings, next_zerocrossing

# The two sources dithered between.
a = numm.sound2np('a.wav')
b = numm.sound2np('b.wav')

# Synthetic 440/640 Hz test tones, kept for debugging:
# a = np.int16(2**14 * np.sin(np.linspace(0, 2*np.pi*440, 44100)))
# b = np.int16(2**14 * np.sin(np.linspace(0, 2*np.pi*640, 44100)))

# Frames between dither (source-flip) points.
DITHER_FRAMES = 1024
next_ditherpt = 1024

# dit_idx: frames consumed toward the next dither point;
# cur_snd: whichever of a/b is currently being played.
dit_idx = 0
cur_snd = a

def flip():
    """Swap the active playback buffer between sources a and b."""
    global cur_snd
    # Identity test: the original compared cur_snd.data == a.data, i.e.
    # the raw buffer *contents* (memoryview equality), which wrongly
    # matches whenever a and b happen to hold equal samples.  The flip
    # only cares about which object is current, so compare identity.
    if cur_snd is a:
        cur_snd = b
    else:
        cur_snd = a

def audio_out(out):
    global dit_idx, next_ditherpt
    nrem = len(out)
    while nrem > 0:
        amnt = min(next_ditherpt-dit_idx, nrem)

        out_st = len(out)-nrem
コード例 #8
0
ファイル: mix.py プロジェクト: strob/connectomusic
import numm
import numpy as np

def mix(l, r):
    """Mix two stereo tracks.

    l dominates the left channel and r the right (the other track is
    halved on that channel); the longer track sets the output length.
    Returns the mix peak-normalized to the int16 range.
    """
    out = np.zeros((max(len(l), len(r)), 2), int)
    # // keeps the halving integral: with plain / the in-place += below
    # would try to add a float array into the int accumulator and fail
    # under Python 3 / modern numpy (same_kind cast rule).
    out[:len(l), 0] = l[:, 0]
    out[:len(r), 0] += r[:, 0] // 2
    out[:len(l), 1] = l[:, 1] // 2
    out[:len(r), 1] += r[:, 1]
    peak = out.max()
    if peak == 0:
        # All-silent mix: nothing to normalize (avoids division by zero).
        return out.astype(np.int16)
    return (out / (peak / float(2**15 - 1))).astype(np.int16)

if __name__=='__main__':
    import sys
    # Mix the two files given on the command line and write mixed.wav.
    res = mix(numm.sound2np(sys.argv[1]),
              numm.sound2np(sys.argv[2]))
    numm.np2sound(res, 'mixed.wav')
コード例 #9
0
ファイル: cluster.py プロジェクト: stymy/bsides
def wave(src_np, clusters, outpattern, R=44100):
    for idx, segs in clusters.items():
        segchunks = [src_np[int(R * st) : int(R * (st + dur))] for (st, dur, _idx) in segs]
        if len(segchunks) == 0:
            print "zero-length cluster", idx
            continue
        segchunks = np.concatenate(segchunks)
        numm.np2sound(segchunks, outpattern % (int(idx)))


if __name__ == "__main__":
    import sys

    # Feature key(s) to cluster on; alternatives kept for reference.
    # keys = ["AvgTonalCentroid(6)", "AvgMFCC(13)", "AvgChroma(12)"]
    keys = ["AvgMFCC(13)"]
    nclusters = 36

    # Cluster each source file named on the command line and write one
    # wav per cluster via wave().
    for src in sys.argv[1:]:
        # Audio is decoded lazily, only if some key still needs work.
        src_np = None

        for key in keys:
            jsonfile = "%s-%s-%d.json" % (src, key, nclusters)
            # NOTE(review): skips a key when its json file already exists,
            # yet clusters are recomputed fresh below -- verify the intent
            # of this cache check.
            if os.path.exists(jsonfile):
                continue

            if src_np is None:
                src_np = numm.sound2np(src)

            clusters = cluster(src, key=key, nbins=nclusters)
            wave(src_np, clusters, "%s-%s-%d-%%06d.wav" % (src, key, nclusters))
コード例 #10
0
import numm
import numpy as np

from zerocrossings import zerocrossings, next_zerocrossing

# The two sources dithered between.
a = numm.sound2np('a.wav')
b = numm.sound2np('b.wav')

# Synthetic 440/640 Hz test tones, kept for debugging:
# a = np.int16(2**14 * np.sin(np.linspace(0, 2*np.pi*440, 44100)))
# b = np.int16(2**14 * np.sin(np.linspace(0, 2*np.pi*640, 44100)))

# Frames between dither (source-flip) points.
DITHER_FRAMES = 1024
next_ditherpt = 1024

# dit_idx: frames consumed toward the next dither point;
# cur_snd: whichever of a/b is currently being played.
dit_idx = 0
cur_snd = a


def flip():
    """Swap the active playback buffer between sources a and b."""
    global cur_snd
    # Identity test: the original compared cur_snd.data == a.data, i.e.
    # the raw buffer *contents* (memoryview equality), which wrongly
    # matches whenever a and b happen to hold equal samples.  The flip
    # only cares about which object is current, so compare identity.
    if cur_snd is a:
        cur_snd = b
    else:
        cur_snd = a


def audio_out(out):
    global dit_idx, next_ditherpt
    nrem = len(out)
    while nrem > 0:
        amnt = min(next_ditherpt - dit_idx, nrem)
コード例 #11
0
ファイル: clusterplayer.py プロジェクト: stymy/bsides
import cluster
import numm
import random

# Sampling rate, frames per second.
R = 44100
# PADDING = R / 4                 # frames between segments
# SOURCE = 'snd/Dance_A.wav'
SOURCE = 'snd/Duran_A.wav'
NBINS = 50

# Playback state: which cluster is sounding, which segment within it,
# whether output is paused, and the frame offset into the segment.
cur_cluster = 0
cluster_idx = 0
paused = False
frame_idx = 0

audio = numm.sound2np(SOURCE)
clusters = cluster.cluster(SOURCE, NBINS)

# Shuffle the segment order within each cluster so playback varies per run.
for c in clusters.values():
    random.shuffle(c)


def get_segment(cluster, idx):
    """Return the audio slice for segment *idx* of *cluster* (wrapping)."""
    segs = clusters[cluster]
    start, duration = segs[idx % len(segs)]
    lo = int(R * start)
    hi = int(R * (start + duration))
    return audio[lo:hi]


def audio_out(a):
    global frame_idx, cluster_idx, paused
コード例 #12
0
ファイル: sndToNpy.py プロジェクト: strob/connectomusic
import numm
import numpy as np
import sys

# Convert every sound file named on the command line into a .npy array
# of its left channel, saved next to the original as <name>.npy.
for path in sys.argv[1:]:
    audio = numm.sound2np(path)
    np.save(path + '.npy', audio[:, 0])
コード例 #13
0
ファイル: clusterplayer.py プロジェクト: strob/bsides
import cluster
import numm
import random

# Sampling rate, frames per second.
R = 44100
# PADDING = R / 4                 # frames between segments
# SOURCE = 'snd/Dance_A.wav'
SOURCE = 'snd/Duran_A.wav'
NBINS = 50

# Playback state: which cluster is sounding, which segment within it,
# whether output is paused, and the frame offset into the segment.
cur_cluster = 0
cluster_idx = 0
paused = False
frame_idx = 0

audio = numm.sound2np(SOURCE)
clusters = cluster.cluster(SOURCE, NBINS)

# Shuffle the segment order within each cluster so playback varies per run.
for c in clusters.values():
    random.shuffle(c)

def get_segment(cluster, idx):
    """Return the audio slice for segment *idx* of *cluster* (wrapping)."""
    segs = clusters[cluster]
    start, duration = segs[idx % len(segs)]
    lo = int(R * start)
    hi = int(R * (start + duration))
    return audio[lo:hi]

def audio_out(a):
    global frame_idx, cluster_idx, paused

    if paused:
        paused = False
コード例 #14
0
ファイル: wolftones.py プロジェクト: strob/bsides
                if amnt == len(buffers[buf_idx]):
                    #done with this buffer
                    buffers.pop(buf_idx)
                else:
                    buffers[buf_idx] = buffers[buf_idx][amnt:]
                    # # Defer frames to avoid pitch-shifting
                    # if len(buffers) > 1:
                    #     buffers[buf_idx] = np.roll(buffers[buf_idx], -amnt*len(buffers), axis=0)
                if len(buffers) == 0:
                    return out
                buf_idx = (buf_idx + 1) % len(buffers)

if __name__=='__main__':
    import numm
    import sys
    # Decode every file named on the command line into a numpy sample buffer.
    buffers = [numm.sound2np(X) for X in sys.argv[1:]]

    # Total frame count across all inputs.
    nframes = sum([len(X) for X in buffers])

    # freqrot = [440, 800, 1500, 300]
    # Frequencies (Hz) cycled through, one per composition window.
    freqrot = [4400, 1800, 1500, 2222]

    # comp: list of (frequency, frame-count) windows that together cover
    # every input frame.
    comp = []
    f_idx = 0
    # NOTE(review): R is defined elsewhere in the file -- presumably the
    # 44100 Hz sample rate, making each window a quarter second; confirm.
    w_len = R/4
    while nframes > 0:
        amnt = min(nframes, w_len)
        comp.append((freqrot[f_idx], amnt))
        f_idx = (f_idx + 1) % len(freqrot)
        nframes -= amnt