def play(canvas, audio_file, branches, threshold):
    """Play the song beat by beat, randomly taking branches between beats.

    canvas     -- Tk canvas used to visualize the current beat and branch
    audio_file -- echonest.remix LocalAudioFile being played
    branches   -- dict mapping beat index -> list of (target_beat, distance)
    threshold  -- maximum branch distance that may be taken
    """
    player = Player(audio_file)
    beats = audio_file.analysis.beats
    lastBranch = max(branches.keys())  # the final branch is always taken
    # Horizontal pixels per beat on a 1440-wide canvas (xpad is a module global).
    width = (1440 - (xpad * 2)) / float(len(beats))
    cur = drawCurrentBeat(canvas, width, 0)
    canvas.update()
    player.play(beats[0], True) #play the first beat with extra frames for intro
    bran = None
    i = 1
    while i < len(beats):
        randomInt = random.randint(0,1)  # coin flip: take an available branch?
        if bran is not None:
            canvas.delete(bran)
        if cur is not None:
            canvas.delete(cur)
        # `i in branches` replaces the Python-2-only dict.has_key(i).
        if i == lastBranch or (i in branches and randomInt == 1):
            branchTo = random.choice(branches[i])
            # Only branch to targets before the last branch and within the
            # distance threshold; the last branch is taken unconditionally.
            if i == lastBranch or (branchTo[0] < lastBranch and branchTo[1] <= threshold):
                bran = drawCurrentBranch(canvas, width, i, branchTo[0])
                i = branchTo[0]
        cur = drawCurrentBeat(canvas, width, i)
        canvas.update()
        player.play(beats[i])
        i+=1
def main():
    """Play file1 beat by beat while gradually speeding up the tempo.

    NOTE(review): `file1` is not defined in this function -- presumably a
    module-level global set elsewhere in the original script; confirm
    before running.
    """
    audiofile = audio.LocalAudioFile(file1)
    player = Player(audiofile)
    beats = audiofile.analysis.beats
    for beat in beats:
        # Ratio ramps linearly from 1.25 at the start down to 0.75 at the end;
        # dirac.timeScale with ratio < 1 shortens the beat (faster playback).
        ratio = 1.25 - ((float(beat.absolute_context()[0]) / float(len(beats))) * .5)
        print ratio
        beat_audio = beat.render()
        scaled_beat = dirac.timeScale(beat_audio.data, ratio)
        # Wrap the stretched samples back into an AudioData for playback.
        ts = audio.AudioData(ndarray=scaled_beat, shape=scaled_beat.shape,
                                     sampleRate=audiofile.sampleRate, numChannels=scaled_beat.shape[1])
        player.play_from_AudioData(ts)
    player.close_stream()
def play(canvas, audio_file, branches, threshold):
    """Play the song beat by beat, randomly taking branches between beats.

    canvas     -- Tk canvas used to visualize the current beat and branch
    audio_file -- echonest.remix LocalAudioFile being played
    branches   -- dict mapping beat index -> list of (target_beat, distance)
    threshold  -- maximum branch distance that may be taken
    """
    player = Player(audio_file)
    beats = audio_file.analysis.beats
    lastBranch = max(branches.keys())  # the final branch must always fire
    # Horizontal pixels per beat on a 1440-wide canvas (xpad is a module global).
    width = (1440 - (xpad * 2)) / float(len(beats))
    cur = drawCurrentBeat(canvas, width, 0)
    canvas.update()
    player.play(beats[0],
                True)  #play the first beat with extra frames for intro
    bran = None
    i = 1
    while i < len(beats):
        randomInt = random.randint(0, 1)  # coin flip for taking a branch
        # PEP 8: compare to None with `is not`, not `!=`.
        if bran is not None:
            canvas.delete(bran)
        if cur is not None:
            canvas.delete(cur)
        # `i in branches` replaces the Python-2-only dict.has_key(i).
        if i == lastBranch or (i in branches and randomInt == 1):
            branchTo = random.choice(branches[i])
            # Only take branches that land before the last branch and are
            # within the distance threshold; the last branch is unconditional.
            if i == lastBranch or (branchTo[0] < lastBranch
                                   and branchTo[1] <= threshold):
                bran = drawCurrentBranch(canvas, width, i, branchTo[0])
                i = branchTo[0]
        cur = drawCurrentBeat(canvas, width, i)
        canvas.update()
        player.play(beats[i])
        i += 1
Exemple #4
0
def main():
    """Play '15 Sir Duke' beat by beat, linearly ramping the tempo ratio
    from 1.25 at the start of the track down to 0.75 at the end."""
    song_path = "Mp3Songs/15 Sir Duke.m4a"
    local_audio = audio.LocalAudioFile(song_path)
    player = Player()
    all_beats = local_audio.analysis.beats
    total = float(len(all_beats))
    for beat in all_beats:
        # Fractional position of this beat within the whole track.
        position = float(beat.absolute_context()[0]) / total
        player.shift_tempo_and_play(beat, 1.25 - position * .5)
    player.close_stream()
def infinite_out_of_core(curr_md5):
    audio_file = PLAYLIST_DIR + os.sep + curr_md5 + '.mp3'
    curr_local_audio = {}
    curr_aq_player = Player()
    thread = None
    try:
        curr_local_audio = get_local_audio([audio_file])
        curr_edges = get_all_edges(curr_local_audio)
        curr_start_secs = {
            curr_md5: 0,
            'total': len(curr_local_audio[curr_md5].analysis.sections)
        }
        s = get_adjacency_matrix(curr_edges, curr_start_secs, THRESHOLD)
        fig = figure()
        ax = fig.add_subplot(111)
        sim = ax.spy(s, markersize=1)
        x = sorted(curr_start_secs.values() * 2)[1:]
        y = sorted(curr_start_secs.values() * 2)[:-1]
        boundaries = [0, 0]
        boundaries[0], = plot(x, y, marker='None', linestyle='-', color='gray')
        boundaries[1], = plot(y, x, marker='None', linestyle='-', color='gray')
        branch_cursor, = plot([], [],
                              color='cyan',
                              marker='s',
                              markersize=5,
                              linestyle='-')
        last_branch_cursor, = plot([], [],
                                   color='green',
                                   marker='s',
                                   markersize=5)
        cursor, = plot([], [],
                       color='magenta',
                       marker='s',
                       markersize=5,
                       linestyle='None')
        thread = DataLoadingThread(sim, boundaries, curr_edges,
                                   curr_local_audio, curr_aq_player,
                                   curr_start_secs, curr_md5)
        dt = 0.001
        playback = Playback(curr_edges,
                            curr_local_audio,
                            curr_aq_player,
                            curr_start_secs,
                            thread=thread)
        timer = fig.canvas.new_timer(interval=dt * 1000.0)
        timer.add_callback(playback.update, cursor, branch_cursor,
                           last_branch_cursor)
        timer.start()
        thread.start()
        show()
    finally:
        print "cleaning up"
        if thread is not None:
            thread.stop()
            if thread.isAlive():
                thread.join()
        print "closing aq_player stream"
        aq_player.close_stream()
        for laf in curr_local_audio.values():
            print "unloading local audio"
            laf.unload()
def infinite_playlist(playlist_name, playlist_directory=None):
    """Play an endless, self-similar playlist (section-level variant).

    Loads (or computes and pickles) cross-song section edges, renders a
    self-similarity scatter plot, and drives playback from a matplotlib
    timer callback.  Blocks in show() until the window is closed, then
    releases the player stream and all loaded audio.
    """
    # Cached edge file: <PLAYLIST_DIR>/<playlist_name>.play.pkl
    all_edges_file = PLAYLIST_DIR + "/" + playlist_name + ".play.pkl"
    all_edges = None
    if os.path.isfile(all_edges_file):
        print "loading playlist edges"
        with open(all_edges_file, 'rb') as input_:
            all_edges = pickle.load(input_)
        # Reconstruct the song paths from the md5 keys of the cached edges.
        all_songs = [
            PLAYLIST_DIR + os.sep + md5 + '.mp3' for md5 in all_edges.keys()
        ]
    else:
        all_songs = get_all_songs(playlist_directory)
    print len(all_songs), "songs"
    aq_player = Player()
    local_audio = {}
    try:
        local_audio = get_local_audio(all_songs)
        start_secs = get_start_secs(local_audio)
        print start_secs['total'], "total sections"
        if not os.path.isfile(all_edges_file):
            # First run for this playlist: compute and cache the edges.
            all_edges = get_all_edges(local_audio)
            with open(all_edges_file, 'wb') as output:
                pickle.dump(all_edges, output)
        # Count the edges (logging only).
        total_edges = 0
        for song_i in all_edges.keys():
            song_i_edges = all_edges[song_i]
            for sec_i in song_i_edges.keys():
                song_i_sec_i_edges = song_i_edges[sec_i]
                for _, song_j, sec_j, ts_j, ps_j in song_i_sec_i_edges:
                    total_edges += 1
        print total_edges, "total edges"
        s = get_adjacency_matrix(all_edges, start_secs, THRESHOLD)
        fig = figure()
        ax = fig.add_subplot(111)
        ax.spy(s, markersize=1)
        # Gray lines mark song boundaries on the similarity plot.
        x = sorted(start_secs.values() * 2)[1:]
        y = sorted(start_secs.values() * 2)[:-1]
        boundaries = [0, 0]
        boundaries[0], = plot(x, y, marker='None', linestyle='-', color='gray')
        boundaries[1], = plot(y, x, marker='None', linestyle='-', color='gray')
        branch_cursor, = plot([], [],
                              color='cyan',
                              marker='s',
                              markersize=5,
                              linestyle='-')
        last_branch_cursor, = plot([], [],
                                   color='green',
                                   marker='s',
                                   markersize=5)
        cursor, = plot([], [],
                       color='magenta',
                       marker='s',
                       markersize=5,
                       linestyle='None')
        dt = 0.001
        playback = Playback(all_edges, local_audio, aq_player, start_secs)
        timer = fig.canvas.new_timer(interval=dt * 1000.0)
        timer.add_callback(playback.update, cursor, branch_cursor,
                           last_branch_cursor)
        timer.start()
        show()
    finally:
        # Always release audio resources, even if show() raises.
        print "cleaning up"
        print "closing aq_player stream"
        aq_player.close_stream()
        for laf in local_audio.values():
            print "unloading local audio"
            laf.unload()
def play(lafs, branches, canvases):
    """
    Determines the path the music takes by choosing songs, branches and beats.
    Most of the code is just updating variables:
        curr_song - current song being played (md5)
        curr_laf - current LocalAudioFile being used
        curr_beat - current beat being played
        curr_canvas - current canvas for the current song
        curr_width - current width of blue cursor
        next_branch - beat index for next transition to take place
        new_song - next song to be played (determined by the next branch to be taken)
        new_beat - where the next transition goes to in the next song
        cursor - blue bar that is updated on the canvas to show the song's current position
        bran - blue arc that is drawn on canvas to indicate a branch being taken
    These variables must be updated every time a transition is taken. Some must be updated with every beat.
    One day I may consolidate multiple dictionaries into one, using tuples to make the code neater.
    """
    player = Player()
    # Pick a random starting song and show its canvas.
    curr_song = random.choice(branches.keys())
    curr_canvas = canvases[curr_song]
    curr_canvas.pack()
    curr_canvas.update()
    curr_laf = lafs[curr_song][0]
    # Pixels per beat for the cursor on a 1440-wide canvas (xpad is a global).
    curr_width = (1440 - (xpad * 2)) / float(len(curr_laf.analysis.beats))
    curr_beat = 0
    next_branch = -1  # -1 means no branch currently scheduled
    new_song = -1
    new_beat = -1
    cursor = None
    bran = None
    while True:
        try:
            # deletes last cursor / branch if necessary
            if cursor is not None:
                curr_canvas.delete(cursor)
            if bran is not None:
                curr_canvas.delete(bran)

            # looks for next branch if one hasn't been chosen; branches are
            # scheduled LOOK_AHEAD beats early so the tempo can ramp first
            if (curr_beat + LOOK_AHEAD
                ) in branches[curr_song].keys() and next_branch < 0:
                distance, new_song, new_beat = random.choice(
                    branches[curr_song][curr_beat + LOOK_AHEAD])
                if should_branch(
                        new_song, curr_song,
                        curr_laf.analysis.beats[curr_beat +
                                                LOOK_AHEAD].local_context()[0],
                        distance):
                    next_branch = curr_beat + LOOK_AHEAD

            # branches either within or out of the current song
            if curr_beat == next_branch:
                if new_song == curr_song:
                    bran = draw_current_branch(curr_canvas, curr_width,
                                               curr_beat, new_beat)
                    curr_beat = new_beat
                else:
                    # Song change: swap audio, canvas and cursor width.
                    curr_song = new_song
                    curr_beat = new_beat
                    curr_laf = lafs[curr_song][0]
                    curr_width = (1440 - (xpad * 2)) / float(
                        len(curr_laf.analysis.beats))
                    curr_canvas.forget()
                    curr_canvas = canvases[curr_song]
                    curr_canvas.pack()
                    curr_canvas.update()
                next_branch = -1
            cursor = draw_current_beat(curr_canvas, curr_width, curr_beat)
            curr_canvas.update()

            # plays current beat and shifts tempo first if desired
            if SHIFT_TEMP and next_branch > -1 and new_song != curr_song:
                # Gradually blend the current tempo toward the target song's
                # tempo over the LOOK_AHEAD beats preceding the branch.
                curr_tempo = curr_laf.analysis.tempo
                new_tempo = lafs[new_song][0].analysis.tempo
                tempo_diff = (curr_tempo['value'] - new_tempo['value'])
                if tempo_diff > 20:
                    tempo_diff = 20  # anything more seems to speed up/slow down too much
                if tempo_diff < -20:
                    tempo_diff = -20  # anything more seems to speed up/slow down too much
                adjusted_diff = (tempo_diff /
                                 LOOK_AHEAD) * (LOOK_AHEAD -
                                                (next_branch - curr_beat) + 1)
                adjusted_diff *= new_tempo[
                    'confidence']  # I did this because transitions were too extreme
                ratio = curr_tempo['value'] / (curr_tempo['value'] -
                                               adjusted_diff)
                player.shift_tempo_and_play(curr_laf.analysis.beats[curr_beat],
                                            ratio)
            else:
                player.play(curr_laf.analysis.beats[curr_beat])
            curr_beat += 1

        # used for when a song reaches its end and a new song must be chosen
        except IndexError:
            curr_song = random.choice(lafs.keys())
            curr_beat = 0
            curr_laf = lafs[curr_song][0]
            curr_width = (1440 -
                          (xpad * 2)) / float(len(curr_laf.analysis.beats))
            if cursor is not None:
                curr_canvas.delete(cursor)
            curr_canvas.forget()
            curr_canvas = canvases[curr_song]
            curr_canvas.pack()
            curr_canvas.update()
import echonest.remix.audio as audio
from aqplayer import Player

# Demo script: play 'lateralus30.wav' bar by bar through an aqplayer Player.
audio_file = audio.LocalAudioFile("lateralus30.wav")
bars = audio_file.analysis.bars

# creates a Player given an 'echonest.remix.audio.LocalAudioFile'
aq_player = Player(audio_file)

for bar in bars:
    # give play() any 'echonest.remix.audio.AudioQuantum' to be played (section, bar, beat, etc...)
    aq_player.play(bar)

# close the audiostream when done
aq_player.close_stream()
			elif j != len(sections) - 1 and k != len(sections) - 1:
				adjlists[j].append(k)
			k = k + 1
		j = j + 1
	collect = []
	duration = 0
	i = 0
	secdur = sections[i].duration
	while duration + secdur < length:
		collect.append(sections[i])
		duration = duration + secdur
		newi = random.choice(adjlists[i+1])
		while len(adjlists[newi+1]) == 0:
			newi = random.choice(adjlists[i+1])
		i = newi
		secdur = sections[i].duration
	collect.append(sections[2].children()[-1].children()[-1].children()[-1])
	collect.append(sections[3].children()[0].children()[0].children()[0])
	for section in collect:
		aqp.play(section)

if __name__ == '__main__':
	try:
		length = float(sys.argv[1])
	except:
		print usage
		sys.exit(-1)
	fwiw = audio.LocalAudioFile('FWIW.mp3')
	aqp = Player(fwiw)
	main(fwiw, length*60.0, aqp)
	aqp.closeStream()
Exemple #10
0
def main(input1, input2, input3, time):
    """Build and play an 'infinite' remix stitched from three tracks.

    For every section of each track the first sounding segment is located,
    and a feature distance is computed between section pairs (within each
    song and across songs).  Pairs closer than `threshold` become edges in
    an adjacency list, annotated with a tempo ratio and an optional pitch
    shift.  A random walk over those edges then assembles a beat list of
    roughly `time` seconds, which is played with tempo/pitch adjustment.

    input1/input2/input3 -- paths to the three audio files
    time                 -- target duration of the walk, in seconds
                            (name shadows the stdlib `time` module)
    """
    threshold = 250  # maximum feature distance for a usable edge
    track1 = audio.LocalAudioFile(input1)
    track2 = audio.LocalAudioFile(input2)
    track3 = audio.LocalAudioFile(input3)
    segments = [
        track1.analysis.segments, track2.analysis.segments,
        track3.analysis.segments
    ]
    sections = [
        track1.analysis.sections, track2.analysis.sections,
        track3.analysis.sections
    ]
    # fsegs[i][j]: the segment still sounding when section j of song i starts;
    # segments are scanned monotonically per track.
    fsegs = [[], [], []]
    for i in range(len(sections)):
        j = 0
        for section in sections[i]:
            while segments[i][j].start + segments[i][j].duration < section.start:
                j += 1
            fsegs[i].append(segments[i][j])
    # adjlists[i][j]: list of edges [song, section, tempo_ratio, pitch_shift].
    adjlists = [[], [], []]
    for i in range(len(sections)):
        for section in sections[i]:
            adjlists[i].append([])
    # ---- edges within each song ----
    for i in range(len(sections)):
        for j in range(len(sections[i])):
            adjlists[i][j].append([i, j, 1.0, 0])  # trivial self edge
            p1, t1, lb1, lm1, d1 = (fsegs[i][j].pitches, fsegs[i][j].timbre,
                                    fsegs[i][j].loudness_begin,
                                    fsegs[i][j].loudness_max,
                                    fsegs[i][j].duration)
            tp1, k1, m1, ts1, ln1 = (sections[i][j].tempo, sections[i][j].key,
                                     sections[i][j].mode,
                                     sections[i][j].time_signature,
                                     sections[i][j].loudness)
            # Compound-meter detection drives tempo halving/tripling below.
            compoundUp1 = len(sections[i][j].children()[0].children()) % 3 == 0
            compoundDown1 = len(
                sections[i][j].children()[0].children()[0].children()) % 3 == 0
            # Fold the tempo into a comparable band (roughly 60-120 BPM).
            while tp1 >= 120:
                tp1 = tp1 / (3 if compoundUp1 else 2)
            while tp1 < 40:
                tp1 = tp1 * (3 if compoundDown1 else 2)
            if tp1 < 60:
                tp1 = tp1 * 2
            for k in range(j + 1, len(sections[i])):
                p2, t2, lb2, lm2, d2 = (fsegs[i][k].pitches,
                                        fsegs[i][k].timbre,
                                        fsegs[i][k].loudness_begin,
                                        fsegs[i][k].loudness_max,
                                        fsegs[i][k].duration)
                tp2, k2, m2, ts2, ln2 = (sections[i][k].tempo,
                                         sections[i][k].key,
                                         sections[i][k].mode,
                                         sections[i][k].time_signature,
                                         sections[i][k].loudness)
                compoundUp2 = len(
                    sections[i][k].children()[0].children()) % 3 == 0
                compoundDown2 = len(
                    sections[i][k].children()[0].children()[0].children()) % 3 == 0
                while tp2 >= 120:
                    tp2 = tp2 / (3 if compoundUp2 else 2)
                while tp2 < 40:
                    tp2 = tp2 * (3 if compoundDown2 else 2)
                if tp2 < 60:
                    tp2 = tp2 * 2
                dp, dt, db, dm, dd = 0, 0, abs(lb2 - lb1), abs(lm2 - lm1), abs(d2 - d1)
                # NOTE(review): `dm` (loudness_max diff) is immediately
                # overwritten by the mode diff below, and `dist` adds `dm`
                # twice -- author intent unclear, behavior preserved as-is.
                # NOTE(review): in Python 2, 1/fractions.gcd(...) is integer
                # division, so dts is 0 unless the gcd is 1 -- confirm.
                dtp, dk, dm, dts, dln = (abs(tp2 - tp1), k2 - k1,
                                         abs(m2 - m1),
                                         1 / fractions.gcd(ts2, ts1),
                                         abs(ln2 - ln1))
                # Wrap the key difference into a half-octave range.
                if dk < 6:
                    dk += 12
                if dk > 6:
                    dk -= 12
                for l in range(12):
                    dp = dp + (p2[l] - p1[l])**2
                    dt = dt + (t2[l] - t1[l])**2
                dp = dp**0.5
                pshift = False
                if dk != 0:
                    # Retry the chroma distance with p2 rotated by the key
                    # difference; keep it if it matches better.
                    # BUG FIX: the original indexed with the song index `i`
                    # instead of the chroma bin `l`.
                    dps = 0
                    for l in range(12):
                        dps = dps + (p2[(l - dk) % 12] - p1[l])**2
                    dps = dps**0.5
                    if dps < dp:
                        dp = dps
                        pshift = True
                dist = (dp * 10 + dt**0.5 + db + dm + dd * 100 + dtp * 10 +
                        abs(dk) * 10 + dm + dts + dln)
                if dist < threshold:
                    # Symmetric edges with reciprocal tempo ratios.
                    adjlists[i][j].append(
                        [i, k, tp2 / tp1 * 1.0, (dk if pshift else 0)])
                    adjlists[i][k].append(
                        [i, j, tp1 / tp2 * 1.0, (-dk if pshift else 0)])
    # ---- edges across songs ----
    for i1 in range(len(sections)):
        for i2 in range(i1 + 1, len(sections)):
            for j in range(len(sections[i1])):
                p1, t1, lb1, lm1, d1 = (fsegs[i1][j].pitches,
                                        fsegs[i1][j].timbre,
                                        fsegs[i1][j].loudness_begin,
                                        fsegs[i1][j].loudness_max,
                                        fsegs[i1][j].duration)
                tp1, k1, m1, ts1, ln1 = (sections[i1][j].tempo,
                                         sections[i1][j].key,
                                         sections[i1][j].mode,
                                         sections[i1][j].time_signature,
                                         sections[i1][j].loudness)
                # BUG FIX: the compound-meter checks indexed sections[i]
                # (stale loop variable left at 2) instead of sections[i1].
                compoundUp1 = len(
                    sections[i1][j].children()[0].children()) % 3 == 0
                compoundDown1 = len(
                    sections[i1][j].children()[0].children()[0].children()) % 3 == 0
                while tp1 >= 120:
                    tp1 = tp1 / (3 if compoundUp1 else 2)
                while tp1 < 40:
                    tp1 = tp1 * (3 if compoundDown1 else 2)
                if tp1 < 60:
                    tp1 = tp1 * 2
                for k in range(len(sections[i2])):
                    p2, t2, lb2, lm2, d2 = (fsegs[i2][k].pitches,
                                            fsegs[i2][k].timbre,
                                            fsegs[i2][k].loudness_begin,
                                            fsegs[i2][k].loudness_max,
                                            fsegs[i2][k].duration)
                    tp2, k2, m2, ts2, ln2 = (sections[i2][k].tempo,
                                             sections[i2][k].key,
                                             sections[i2][k].mode,
                                             sections[i2][k].time_signature,
                                             sections[i2][k].loudness)
                    # BUG FIX: likewise sections[i] -> sections[i2] here.
                    compoundUp2 = len(
                        sections[i2][k].children()[0].children()) % 3 == 0
                    compoundDown2 = len(
                        sections[i2][k].children()[0].children()[0].children()) % 3 == 0
                    while tp2 >= 120:
                        tp2 = tp2 / (3 if compoundUp2 else 2)
                    while tp2 < 40:
                        tp2 = tp2 * (3 if compoundDown2 else 2)
                    if tp2 < 60:
                        tp2 = tp2 * 2
                    dp, dt, db, dm, dd = 0, 0, abs(lb2 - lb1), abs(lm2 - lm1), abs(d2 - d1)
                    dtp, dk, dm, dts, dln = (abs(tp2 - tp1), k2 - k1,
                                             abs(m2 - m1),
                                             1 / fractions.gcd(ts2, ts1),
                                             abs(ln2 - ln1))
                    if dk < 6:
                        dk += 12
                    if dk > 6:
                        dk -= 12
                    for l in range(12):
                        dp = dp + (p2[l] - p1[l])**2
                        dt = dt + (t2[l] - t1[l])**2
                    dp = dp**0.5
                    pshift = False
                    if dk != 0:
                        # BUG FIX: same `i` vs `l` indexing error as above.
                        dps = 0
                        for l in range(12):
                            dps = dps + (p2[(l - dk) % 12] - p1[l])**2
                        dps = dps**0.5
                        if dps < dp:
                            dp = dps
                            pshift = True
                    dist = (dp * 10 + dt**0.5 + db + dm + dd * 100 +
                            dtp * 10 + abs(dk) * 10 + dm + dts + dln)
                    if dist < threshold:
                        adjlists[i1][j].append(
                            [i2, k, tp2 / tp1 * 1.0, (dk if pshift else 0)])
                        adjlists[i2][k].append(
                            [i1, j, tp1 / tp2 * 1.0, (-dk if pshift else 0)])
    # ---- random walk over the edges, collecting beats to play ----
    collect = []
    duration = 0
    # Walk state: [song index, section index, tempo ratio, pitch shift].
    i = [random.choice([0, 1, 2]), 0, 1.0, 0]
    secdur = sections[i[0]][i[1]].duration
    newi = i
    curr_pshift = 0
    while duration + secdur < time:
        if i[1] + 1 == len(sections[i[0]]):
            # End of the current song: restart at a random song's beginning.
            newi = [random.choice([0, 1, 2]), 0, 1, 0]
            pshift = 0
        else:
            newi = random.choice(adjlists[i[0]][i[1] + 1])
            pshift = newi[3] - curr_pshift
        tshift = newi[2]
        if pshift > 6:
            pshift -= 12
        if pshift < -6:
            pshift += 12
        for bar in sections[i[0]][i[1]].children():
            if bar == sections[i[0]][i[1]].children()[-1]:
                # Ramp tempo across the final bar toward the next section.
                numbeats = len(bar.children())
                stepratio = tshift**(1.0 / numbeats)
                # BUG FIX: the enumerate variable was named `i`, clobbering
                # the walk-state list above; renamed to `b`.
                for b, beat in enumerate(bar.children()):
                    collect.append([beat, stepratio**b, curr_pshift])
            else:
                for beat in bar.children():
                    collect.append([beat, 1.0, curr_pshift])
        duration = duration + secdur
        i = newi
        secdur = sections[i[0]][i[1]].duration
        curr_pshift = pshift
    aqp = Player()
    for beat in collect:
        aqp.shift_and_play(beat[0], beat[1], beat[2])
    aqp.close_stream()
def main(input_one, input_two):
    """Tempo-match the end of track one's opening section to track two.

    The final bar of track one's first section is time-stretched beat by
    beat so its tempo ramps toward the tempo of track two's last section.
    The stretched section is written to '<name>-stretch.mp3', reloaded,
    played in full, and then track two's closing section is played.
    """
    first_track = audio.LocalAudioFile(input_one)
    second_track = audio.LocalAudioFile(input_two)
    opening = first_track.analysis.sections[0]
    closing = second_track.analysis.sections[-1]
    base_tempo = opening.tempo
    bpm_gap = closing.tempo - base_tempo
    opening_bars = opening.children()
    rendered = []
    for current_bar in opening_bars:
        if current_bar == opening_bars[-1]:
            # Last bar: stretch each beat a little more than the previous
            # one so the tempo glides toward the target.
            beats = current_bar.children()
            step = bpm_gap / len(beats)
            for idx, beat in enumerate(beats):
                ratio = (base_tempo + step * (idx + 1)) / (base_tempo + step * idx)
                stretched = dirac.timeScale(beat.render().data, ratio)
                rendered.append(audio.AudioData(
                    ndarray=stretched,
                    shape=stretched.shape,
                    sampleRate=first_track.sampleRate,
                    numChannels=stretched.shape[1]))
            break
        for beat in current_bar.children():
            rendered.append(beat.render())
    out_data_one = audio.assemble(rendered, numChannels=2)
    out_name_one = input_one.split('.')[0] + '-stretch.mp3'
    out_data_one.encode(out_name_one)
    # Reload the encoded result so it can be re-analyzed and played.
    stretched_track = audio.LocalAudioFile(out_name_one)
    first_player = Player(stretched_track)
    second_player = Player(second_track)
    for beat in stretched_track.analysis.beats:
        first_player.play(beat)
    first_player.closeStream()
    second_player.play(closing)
    second_player.closeStream()
Exemple #12
0
def infinite_out_of_core(curr_md5):
    """Start an 'infinite playlist' from a single seed song (beat variant).

    Loads the seed song and its edges, keeps one Player per loaded song,
    plots a self-similarity matrix, and starts a DataLoadingThread that
    pulls in related songs while a matplotlib timer drives playback.
    Blocks in show() until the window closes, then stops the thread and
    releases all players and audio.
    """
    from aqplayer import Player
    audio_file = PLAYLIST_DIR + os.sep + curr_md5 + '.mp3'
    curr_local_audio = {}
    curr_aq_players = {}  # one Player per loaded song, keyed by md5
    thread = None
    try:
        curr_local_audio = get_local_audio([audio_file])
        curr_edges = get_all_edges(curr_local_audio)
        for md5, laf in curr_local_audio.iteritems():
            curr_aq_players[md5] = Player(laf)

        # Beat offsets per song plus the running total beat count.
        curr_start_beats = {
            curr_md5: 0,
            'total': len(curr_local_audio[curr_md5].analysis.beats)
        }

        s = get_adjacency_matrix(curr_edges, curr_start_beats, THRESHOLD)

        from matplotlib.pyplot import figure, plot, show
        fig = figure()
        ax = fig.add_subplot(111)
        sim = ax.spy(s, markersize=1)

        # Gray lines mark song boundaries on the similarity plot.
        x = sorted(curr_start_beats.values() * 2)[1:]
        y = sorted(curr_start_beats.values() * 2)[:-1]
        boundaries = [0, 0]
        boundaries[0], = plot(x, y, marker='None', linestyle='-', color='gray')
        boundaries[1], = plot(y, x, marker='None', linestyle='-', color='gray')
        branch_cursor, = plot([], [],
                              color='cyan',
                              marker='s',
                              markersize=5,
                              linestyle='-')
        last_branch_cursor, = plot([], [],
                                   color='green',
                                   marker='s',
                                   markersize=5)
        cursor, = plot([], [],
                       color='magenta',
                       marker='s',
                       markersize=5,
                       linestyle='None')

        # create a thread to start loading new data
        from re import search, match
        thread = DataLoadingThread(sim, boundaries, curr_edges,
                                   curr_local_audio, curr_aq_players,
                                   curr_start_beats, curr_md5)

        # start playing
        # start_md5 = u'0bda1f637253fdeb3cd8e4fb7a3f3683'
        playback = Playback(curr_edges,
                            curr_local_audio,
                            curr_aq_players,
                            curr_start_beats,
                            thread=thread)
        timer = fig.canvas.new_timer(interval=DT * 1000.0)
        timer.add_callback(playback.update, cursor, branch_cursor,
                           last_branch_cursor)

        timer.start()
        thread.start()

        show()

    finally:
        # Stop the loader thread first, then release players and audio.
        print "cleaning up"
        if thread is not None:
            thread.stop()
            if thread.isAlive():
                thread.join()
        for player in curr_aq_players.values():
            print "closing aq_player stream"
            player.close_stream()
        for laf in curr_local_audio.values():
            print "unloading local audio"
            laf.unload()
Exemple #13
0
    def run(self):
        """Background loader loop for the data-loading thread.

        Repeatedly: find edge files on disk relating the current song to
        others, load each related song and merge its edges, grow the
        start-beat table and the self-similarity plot, then block until a
        song change (eject) or stop is signalled.
        """
        from aqplayer import Player
        from scipy.sparse import find
        from re import match, search
        print "thread is running"
        while True:
            # Edge files whose name contains the current song's md5.
            # NOTE(review): the dots in '.edges.pkl' are unescaped regex
            # metacharacters (match any char) -- confirm intended.
            edge_files = [
                f for f in os.listdir(PLAYLIST_DIR)
                if search(r"" + self.curr_md5 + ".*\.edges.pkl", f) is not None
            ]
            edge_files = edge_files[:50]
            for edge_file in edge_files:
                print "load edge_file:", edge_file
                # The other song's md5 is whichever side of the '_'-joined
                # filename is not the current md5.
                new_md5 = None
                m = match(r"" + self.curr_md5 + "_([a-z0-9]{32})", edge_file)
                if m is not None:
                    new_md5 = m.group(1)
                m = match(r"([a-z0-9]{32})_" + self.curr_md5, edge_file)
                if m is not None:
                    new_md5 = m.group(1)

                # skip if the new_md5 is the current one.
                if new_md5 == self.curr_md5:
                    continue

                audio_file = PLAYLIST_DIR + os.sep + new_md5 + '.mp3'
                self.local_audio[new_md5] = audio.LocalAudioFile(audio_file)

                # Merge edges in all directions: current->new, the new
                # song's self-edges, and new->current.
                new_edges = get_edges(self.local_audio[self.curr_md5],
                                      self.local_audio[new_md5])
                update_all_edges(self.edges, new_edges)

                new_edges = get_edges(self.local_audio[new_md5],
                                      self.local_audio[new_md5])
                update_all_edges(self.edges, new_edges)

                new_edges = get_edges(self.local_audio[new_md5],
                                      self.local_audio[self.curr_md5])
                update_all_edges(self.edges, new_edges)

                # Register the new song: player, beat offset, total count.
                self.aq_players[new_md5] = Player(self.local_audio[new_md5])
                self.start_beats[new_md5] = self.start_beats['total']
                self.start_beats['total'] += len(
                    self.local_audio[new_md5].analysis.beats)
                s = get_adjacency_matrix(self.edges, self.start_beats,
                                         THRESHOLD)
                fs = find(s)

                # update sim
                self.sim.set_data(fs[0], fs[1])
                self.sim.figure.gca().set_xlim([0, self.start_beats['total']])
                self.sim.figure.gca().set_ylim([self.start_beats['total'], 0])

                # update boundaries
                x = sorted(self.start_beats.values() * 2)[1:]
                y = sorted(self.start_beats.values() * 2)[:-1]
                self.boundaries[0].set_xdata(x)
                self.boundaries[0].set_ydata(y)
                self.boundaries[1].set_xdata(y)
                self.boundaries[1].set_ydata(x)

                print "************** REDRAW SELF-SIMILARITY ********************"
                self.sim.figure.canvas.draw()

                # song change, better stop this.
                if self.ejecting() or self.stopping():
                    break

            # wait for a song change
            self._ejecting.wait()

            if self.stopping():
                break

            # update the stuffs.
            self.update()
            self._ejecting.clear()
Exemple #14
0
def infinite_playlist(playlist_name, playlist_directory=None):
    """Play an endless, self-similar playlist (beat-level variant).

    Loads (or computes and pickles) cross-song beat edges, creates one
    Player per song, renders a self-similarity scatter plot, and drives
    playback from a matplotlib timer callback.  Blocks in show() until
    the window is closed, then releases all players and audio.
    """
    # Cached edge file: <PLAYLIST_DIR>/<playlist_name>.play.pkl
    all_edges_file = PLAYLIST_DIR + "/" + playlist_name + ".play.pkl"
    all_edges = None
    if os.path.isfile(all_edges_file):
        print "loading playlist edges"
        with open(all_edges_file, 'rb') as input_:
            all_edges = pickle.load(input_)
        # Reconstruct song paths from the md5 keys of the cached edges.
        all_songs = [
            PLAYLIST_DIR + os.sep + md5 + '.mp3' for md5 in all_edges.keys()
        ]
    else:
        all_songs = get_all_songs(playlist_directory)

    print len(all_songs), "songs"

    aq_players = {}  # one Player per song, keyed by md5
    local_audio = {}
    try:
        local_audio = get_local_audio(all_songs)
        start_beats = get_start_beats(local_audio)
        print start_beats['total'], "total beats"

        if not os.path.isfile(all_edges_file):
            # First run for this playlist: compute and cache the edges.
            all_edges = get_all_edges(local_audio)
            with open(all_edges_file, 'wb') as output:
                pickle.dump(all_edges, output)
        """
        # for debugging
        import json
        with open('all_edges.json', 'w') as output:
            json.dump(all_edges, output)
        """

        # Count the edges (logging only).
        total_edges = 0
        for song_i in all_edges.keys():
            song_i_edges = all_edges[song_i]
            for beat_i in song_i_edges.keys():
                song_i_beat_i_edges = song_i_edges[beat_i]
                for _, song_j, beat_j in song_i_beat_i_edges:
                    total_edges += 1
        print total_edges, "total edges"

        s = get_adjacency_matrix(all_edges, start_beats, THRESHOLD)

        for md5, laf in local_audio.iteritems():
            aq_players[md5] = Player(laf)

        from matplotlib.pyplot import figure, plot, show
        fig = figure()
        ax = fig.add_subplot(111)
        ax.spy(s, markersize=1)
        # plot lines around song boundaries
        x = sorted(start_beats.values() * 2)[1:]
        y = sorted(start_beats.values() * 2)[:-1]
        boundaries = [0, 0]
        boundaries[0], = plot(x, y, marker='None', linestyle='-', color='gray')
        boundaries[1], = plot(y, x, marker='None', linestyle='-', color='gray')

        branch_cursor, = plot([], [],
                              color='cyan',
                              marker='s',
                              markersize=5,
                              linestyle='-')
        last_branch_cursor, = plot([], [],
                                   color='green',
                                   marker='s',
                                   markersize=5)
        cursor, = plot([], [],
                       color='magenta',
                       marker='s',
                       markersize=5,
                       linestyle='None')

        # start playing
        # start_md5 = u'0bda1f637253fdeb3cd8e4fb7a3f3683'
        playback = Playback(all_edges, local_audio, aq_players, start_beats)
        timer = fig.canvas.new_timer(interval=DT * 1000.0)
        timer.add_callback(playback.update, cursor, branch_cursor,
                           last_branch_cursor)
        timer.start()
        show()

    finally:
        # Always release audio resources, even if show() raises.
        print "cleaning up"
        for player in aq_players.values():
            print "closing aq_player stream"
            player.close_stream()

        for laf in local_audio.values():
            print "unloading local audio"
            laf.unload()