Example No. 1
def zepto(length_seconds):	
	print "zeptoify"
	
	section = False
	if section==False:
		total_duration = song.analysis.duration
		segments = song.analysis.segments
	else:
		total_duration = section.duration
		# order by length
		segments = song.analysis.segments.that(are_contained_by(section))
		
	length_order = segments.ordered_by(duration)
	# from every group of 4 segments, drop the one with the highest timbre[6] value
	i = 0

	length = len(segments)
	unused_segs = audio.AudioQuantumList()
	while total_duration > length_seconds:
		print_seg_durations(length_order)

		sorted_segs = audio.AudioQuantumList()

		while i < length:
			j = 0
			four_segs = audio.AudioQuantumList()
			# append the next four segments
			while j < 4: 
				if(j+i < length):
					four_segs.append(length_order[j + i])
					j += 1
				else: 
					break
			# order the four segments by timbre value 6
			timbre_segs = four_segs.ordered_by(timbre_value(6))
			# Remove the worst candidate from each group while the total time still exceeds length_seconds
			for k in range(0, j-1):
				sorted_segs.append(timbre_segs[k])
			unused_segs.append(timbre_segs[j-1])

			deduction = timbre_segs[j-1].duration
			total_duration = total_duration - deduction
			if total_duration < length_seconds:
				sorted_segs.extend(length_order[i:])
				break
			i = i + 4 
		length_order = copy.copy(sorted_segs)
		length = len(length_order)
		print "I think the total duration is " + str(total_duration)
		print "However the toal duration is actually " + str(get_total_duration(length_order))
		i = 0
	fixed_order = length_order.ordered_by(duration)
	fixed_order.reverse()

	#print_seg_durations(fixed_order)
	print "total duration: " + str(total_duration)
	#return [fixed_order, unused_segs]
	return fixed_order.render()
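
Example 1 assumes a module-level song (a remix LocalAudioFile), the copy module, the remix sorting helpers, and two small debugging helpers. Below is a minimal driver sketch under those assumptions; the filenames, the helper bodies, and the import paths (newer remix releases move these under echonest.remix) are guesses, not part of the original.

# Hypothetical setup and driver for zepto(); filenames, helper bodies and import
# paths are assumptions (newer remix releases use echonest.remix.audio, etc.).
import copy
import echonest.audio as audio
from echonest.sorting import duration, timbre_value

def print_seg_durations(segs):
	# debugging helper used by zepto(): dump each segment's length in seconds
	for seg in segs:
		print seg.duration

def get_total_duration(segs):
	# helper used by zepto(): total length of a list of quanta, in seconds
	return sum(seg.duration for seg in segs)

song = audio.LocalAudioFile("input.mp3")
zepto(30.0).encode("zepto_30s.mp3")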
Example No. 2
def get_loops(fileobj, output_name="out.mp3", bars_count=8, bars_start=1):
    print "analyzing"
    audio_file = audio.LocalAudioFile(fileobj.name)
    print "done"
    
    print "%d bars" % len(audio_file.analysis.bars)

    collect = audio.AudioQuantumList()
    
    bars = audio_file.analysis.bars
    repeats = 1
    if len(bars)-bars_start < bars_count:
        bars_count = 4
    if len(bars)-bars_start < bars_count:
        bars_count = 1

    print "actual bar count was %d" % (bars_count)
    for y in xrange(repeats):
        for x in xrange(bars_count):
            collect.append(audio_file.analysis.bars[bars_start+x])
    
    out = audio.getpieces(audio_file, collect)
    output_temp = tempfile.NamedTemporaryFile(mode="w+b", suffix=".mp3")
    out.encode(output_temp.name)
    
    # Do it again
    new_one = audio.LocalAudioFile(output_temp.name)
    analysis = json.loads(urllib.urlopen(new_one.analysis.pyechonest_track.analysis_url).read())
    
    
    return (output_temp, analysis)
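
Example 2 leans on module-level imports (audio, tempfile, json, urllib) and expects an already-open file object. A hedged caller sketch follows; the filenames are placeholders and the import paths are assumptions (newer remix releases use echonest.remix.audio).

# Imports assumed by get_loops(), plus a hypothetical caller (filenames are placeholders).
import json
import shutil
import urllib
import tempfile
import echonest.audio as audio

fileobj = open("input.mp3", "rb")
output_temp, analysis = get_loops(fileobj, bars_count=8, bars_start=1)
print "analysis keys: %s" % sorted(analysis.keys())
# keep a copy while the NamedTemporaryFile still exists (it is deleted when closed)
shutil.copy(output_temp.name, "loop.mp3")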
Example No. 3
def main(input_filename, output_filename):
    audiofile = audio.LocalAudioFile(input_filename)
    bars = audiofile.analysis.bars
    collect = audio.AudioQuantumList()
    for bar in bars:
        collect.append(bar)
        collect.append(bar)
    out = audio.getpieces(audiofile, collect)
    out.encode(output_filename)
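
Example 3 is the classic "play every bar twice" script; a minimal command-line wrapper in the same spirit as the other examples might look like this (the script name is a placeholder, not from the original).

# Hypothetical command-line wrapper for Example 3 (script name is a placeholder).
import sys

if __name__ == '__main__':
    try:
        main(sys.argv[1], sys.argv[2])
    except IndexError:
        print "Usage: python double_bars.py <input.mp3> <output.mp3>"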
Example No. 4
def split_file_into_bars(track_name, bar_list):
    i = 0
    for bars in bar_list:
        four_bar_chunk = audio.AudioQuantumList()
        for bar in bars:
            four_bar_chunk.append(bar)
        
        audiofile = audio.LocalAudioFile("music/tracks/"+track_name+".mp3")
        out = audio.getpieces(audiofile, four_bar_chunk)
        i = i + 1
        out.encode("music/output/"+track_name+"-chunk-"+str(i))
Example No. 5
 def getSamples(self, section, pitch, target="beats"):
     """
         The EchoNest-y workhorse. Finds all beats/bars in a given section, of a given pitch.
     """
     sample_list = audio.AudioQuantumList()
     if target == "beats":
         sample_list.extend(
             [b for x in section.children() for b in x.children()])
     elif target == "bars":
         sample_list.extend(section.children())
     return sample_list.that(
         overlap_ends_of(
             self.original.analysis.segments.that(
                 have_pitch_max(pitch)).that(
                     overlap_starts_of(sample_list))))
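
getSamples() in Example 5 relies on the remix selection filters and the audio module being imported at module level. A sketch of the imports it assumes follows; the exact module paths are an assumption (older remix releases expose these as echonest.audio / echonest.selection, newer ones move them under echonest.remix).

# Imports assumed by getSamples(); module paths differ between remix releases.
import echonest.audio as audio
from echonest.selection import have_pitch_max, overlap_starts_of, overlap_ends_of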
Example No. 6
def beatrepeat(section):
	print "original with beat repeat"

	beats = song.analysis.beats.that(are_contained_by(section))
	tatums = song.analysis.tatums.that(are_contained_by(section))
	br = audio.AudioQuantumList()

	for _ in range(2): 
		br.append(song[beats[0]])
		br.append(song[beats[1]])
		br.append(song[beats[2]])
		br.append(song[beats[3]])
	for _ in range(2): 
		br.append(song[beats[0]])
		br.append(song[beats[1]])
	for _ in range(2): 
		br.append(song[beats[0]])
	for _ in range(2): 
		br.append(song[tatums[0]])

	return br
Example No. 7
def main(input_filename, output_filename):
    # This takes your input track, sends it to the analyzer, and returns the results.
    audiofile = audio.LocalAudioFile(input_filename)

    # This gets a list of every bar in the track.
    # You can manipulate this just like any other Python list!
    bars = audiofile.analysis.bars

    # This makes a new list of "AudioQuantums".
    # Those are just any discrete chunk of audio:  bars, beats, etc.
    collect = audio.AudioQuantumList()

    # This loop puts the first item in the children of each bar into the new list.
    # A bar's children are beats!  Simple as that.
    for bar in bars:
        collect.append(bar.children()[0])

    # This assembles the pieces of audio defined in collect from the analyzed audio file.
    out = audio.getpieces(audiofile, collect)

    # This writes the newly created audio to the given file.
    out.encode(output_filename)
Example No. 8
def beatrepeat_and_tempo_warp(section, bpm):
	print "beatrepeat_and_tempo_warp"
	beats = song.analysis.beats.that(are_contained_by(section))
	tatums = song.analysis.tatums.that(are_contained_by(section))
	br = audio.AudioQuantumList()
	
	new_beat_duration = 60.0/bpm
	beats = song.analysis.beats.that(are_contained_by(section))
	
	new_beats = []
	for beat in beats:
		ratio = beat.duration / new_beat_duration
		new_beat = st.shiftTempo(song[beat], ratio)
		new_beats.append(new_beat)
	out = audio.assemble(new_beats)
	return out
	
	
	# NOTE: this block is unreachable; the function returns above.
	for _ in range(2):
		br.append(song[beats[-4]])
		br.append(song[beats[-3]])
		br.append(song[beats[-2]])
		br.append(song[beats[-1]])
Example No. 9
def main(input_filename, output_filename):
    choices = 0
    song = audio.LocalAudioFile(input_filename)
    meter = song.analysis.time_signature.values()[1]
    meter_conf = song.analysis.time_signature.values()[0]
    tempo_conf = song.analysis.tempo.values()[0]
    sections = song.analysis.sections
    last_segment = song.analysis.segments[-1]
    sl = len(sections)
    print "meter confidence"
    print meter_conf
    print "meter"
    print song.analysis.time_signature
    print "number of sections"
    print sl
    outchunks = audio.AudioQuantumList()
    if (meter_conf > 0.2):
        outchunks = strong_meter(choices, song, meter, sections, sl, outchunks)
    else:
        outchunks = weak_meter(choices, song, sections, sl, outchunks)
    outchunks.append(last_segment)
    out = audio.getpieces(song, outchunks)
    out.encode(output_filename)
Example No. 10
def main(input_filename1, input_filename2, output_filename):
    audiofile1 = audio.LocalAudioFile(input_filename1)
    audiofile2 = audio.LocalAudioFile(input_filename2)

    beats1 = audiofile1.analysis.beats
    beats2 = audiofile2.analysis.beats

    l = min([len(beats1), len(beats2)])

    collect = audio.AudioQuantumList()
    out = None
    offset = 0  # beat offset into the first track; presumably set elsewhere in the original script
    for i in xrange(l):
        if i % 2 == 1:
            beat = beats1[i - offset]
            next = audio.getpieces(audiofile1, [beat])
        else:
            beat = beats2[i]
            next = audio.getpieces(audiofile2, [beat])
        if out is None:
            out = next
        else:
            out.append(next)

    out.encode(output_filename)
Example No. 11
def main(units, inputFile, outputFile):
    # This takes your input track, sends it to the analyzer, and returns the results.
    audiofile = audio.LocalAudioFile(inputFile)

    # This makes a new list of "AudioQuantums".
    # Those are just any discrete chunk of audio:  bars, beats, etc
    collect = audio.AudioQuantumList()

    # If the analysis can't find any bars, stop!
    # (This might happen with really ambient music)
    if not audiofile.analysis.bars:
        print "No bars found in this analysis!"
        print "No output."
        sys.exit(-1)

    # This loop puts all but the last of each bar into the new list!
    for b in audiofile.analysis.bars[0:-1]:
        collect.extend(b.children()[0:-1])

        # If we're using tatums instead of beats, we want all but the last half (round down) of the last beat
        # A tatum is the smallest rhythmic subdivision of a beat -- http://en.wikipedia.org/wiki/Tatum_grid
        if units.startswith("tatum"):
            half = -(len(b.children()[-1].children()) // 2)
            collect.extend(b.children()[-1].children()[0:half])

    # Endings were rough, so leave everything after the start of the final bar intact:
    last = audio.AudioQuantum(
        audiofile.analysis.bars[-1].start,
        audiofile.analysis.duration - audiofile.analysis.bars[-1].start)
    collect.append(last)

    # This assembles the pieces of audio defined in collect from the analyzed audio file.
    out = audio.getpieces(audiofile, collect)

    # This writes the newly created audio to the given file.
    out.encode(outputFile)
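
Example 11's main() takes a units argument ("beats" or "tatums") ahead of the filenames; a hedged command-line wrapper is sketched below (the script name is a placeholder).

# Hypothetical command-line wrapper for Example 11 (script name is a placeholder).
import sys

if __name__ == '__main__':
    try:
        units, input_file, output_file = sys.argv[1], sys.argv[2], sys.argv[3]
    except IndexError:
        print "Usage: python shorten.py <beats|tatums> <input.mp3> <output.mp3>"
        sys.exit(-1)
    main(units, input_file, output_file)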
Example No. 12
	for ci in range(0,len(centroids2)):
		if ci in [li[1] for li in centroid_pairs]:
			continue
		centroid2 = array(centroids2[ci])
		euclidian_distance = norm(centroid1-centroid2)
		if euclidian_distance < min_distance[0]:
			min_distance = [euclidian_distance, ci]
	centroid_pairs.append([c,min_distance[1]])

print centroid_pairs
# now we have a list of paired up cluster indices. Cool.

# Just so we're clear, we're rebuilding the structure of song1 with segments from song2

# prepare song2 clusters
# (a list comprehension is needed here: [audio.AudioQuantumList()]*n would make
# every entry point at the same list object)
segclusters2 = [audio.AudioQuantumList() for _ in range(len(centroids2))]
for s2 in range(0,len(idx2)):
	segment2 = song2.analysis.segments[s2]
	cluster2 = idx2[s2]
	segment2.numpytimbre = array(segment2.timbre)
	segclusters2[cluster2].append(segment2)
	
# for each segment1 in song1, find the timbrally closest segment2 in song2 belonging to the cluster2 with which segment1's cluster1 is paired.
for s in range(0,len(idx)):
	segment1 = song.analysis.segments[s]
	cluster1 = idx[s]
	cluster2 = [li[1] for li in centroid_pairs if li[0]==cluster1][0]

	if(best_match>0):
		# slower, less varied version. Good for b's which are percussion loops
		
Example No. 13
    def compileSection(self, j, section, hats):
        """
            Compiles one "section" of dubstep - that is, one section (verse/chorus) of the original song,
            but appropriately remixed as dubstep.

            Chooses appropriate samples from the section of the original song in three keys (P1, m3, m7)
            then plays them back in order in the generic "dubstep" pattern (all 8th notes):

            |                         |                         :|
            |: 1  1  1  1  1  1  1  1 | m3 m3 m3 m3 m7 m7 m7 m7 :| x2
            |                         |                         :|

            On the first iteration, the dubstep bar is mixed with a "splash" sound - high-passed percussion or whatnot.
            On the second iteration, hats are mixed in on the offbeats and the wubs break on the last beat to let the
            original song's samples shine through for a second, before dropping back down in the next section.

            If samples of one pitch are missing, the searchSamples algorithm tries to find samples
            a fifth away from that pitch that will sound good. (If none exist, it keeps trying, going up the scale in fifths.)
            
            If the song is not 4/4, the resulting remix is sped up or slowed down by the appropriate amount.
            (That can get really wonky, but sounds cool sometimes, and fixes a handful of edge cases.)
        """
        onebar = audio.AudioQuantumList()

        s1 = self.searchSamples(j, self.tonic)
        s2 = self.searchSamples(j, (self.tonic + 3) % 12)
        s3 = self.searchSamples(j, (self.tonic + 9) % 12)

        biggest = max([s1, s2, s3])  #for music that's barely tonal
        if not biggest:
            for i in xrange(0, 12):
                biggest = self.searchSamples(j, self.tonic + i)
                if biggest:
                    break

        if not biggest:
            raise Exception('Missing samples in section %s of the song!' %
                            (j + 1))

        if not s1: s1 = biggest
        if not s2: s2 = biggest
        if not s3: s3 = biggest

        if self.template['target'] == "tatums":
            f = 4
            r = 2
        elif self.template['target'] == "beats":
            f = 2
            r = 2
        elif self.template['target'] == "bars":
            f = 1
            r = 1
        for k in xrange(0, r):
            for i in xrange(0, 4 * f):
                onebar.append(s1[i % len(s1)])
            for i in xrange(4 * f, 6 * f):
                onebar.append(s2[i % len(s2)])
            for i in xrange(6 * f, 8 * f):
                onebar.append(s3[i % len(s3)])
        if self.original.analysis.time_signature == 4:
            orig_bar = self.st.shiftTempo(
                audio.getpieces(self.original, onebar),
                self.template['tempo'] / self.tempo)
        else:
            orig_bar = audio.getpieces(self.original, onebar)
            orig_bar = self.st.shiftTempo(
                orig_bar,
                len(orig_bar) /
                ((44100 * 16 * 2 * 60.0) / self.template['tempo']))
        if orig_bar.numChannels == 1:
            orig_bar = self.mono_to_stereo(orig_bar)
        mixfactor = self.mixfactor(onebar)
        a = self.truncatemix(
            audio.mix(
                audio.AudioData(self.sample_path +
                                self.template['wubs'][self.tonic],
                                sampleRate=44100,
                                numChannels=2,
                                verbose=False),
                audio.AudioData(
                    self.sample_path +
                    self.template['splashes'][(j + 1) %
                                              len(self.template['splashes'])],
                    sampleRate=44100,
                    numChannels=2,
                    verbose=False)), orig_bar, mixfactor)
        b = self.truncatemix(
            audio.mix(
                audio.AudioData(self.sample_path +
                                self.template['wub_breaks'][self.tonic],
                                sampleRate=44100,
                                numChannels=2,
                                verbose=False), hats), orig_bar, mixfactor)
        return (a, b)
Example No. 14
    def compileIntro(self):
        """
            Compiles the dubstep introduction. Returns an AudioData of the first 8 bars.
            (8 bars at 140 bpm = ~13.71 seconds of audio)
            If the song is not 4/4, it tries to even things out by speeding it up by the appropriate amount.

            Pattern:
                first 4 bars of song
                first beat of 1st bar x 4   (quarter notes)
                first beat of 2nd bar x 4   (quarter notes)
                first beat of 3rd bar x 8   (eighth notes)
                first beat of 4th bar x 8   (sixteenth notes)
                third beat of 4th bar x 8   (sixteenth notes)
        """
        out = audio.AudioQuantumList()
        intro = audio.AudioData(self.sample_path + self.template['intro'],
                                sampleRate=44100,
                                numChannels=2,
                                verbose=False)

        #   First 4 bars of song
        custom_bars = []

        if not self.beats or len(self.beats) < 16:
            #   Song is not long or identifiable enough
            #   Take our best shot at making something
            self.tempo = 60.0 * 16.0 / self.original.duration
            for i in xrange(0, 4):
                bar = []
                for j in xrange(0, 4):
                    length = self.original.duration / 16.0
                    start = ((i * 4) + j) * length
                    bar.append(
                        audio.AudioQuantum(start, length, None, 0,
                                           self.original.source))
                custom_bars.append(bar)
        else:
            for i in xrange(0, 4):
                custom_bars.append(self.beats[i * 4:(i * 4) + 4])
        out.extend([x for bar in custom_bars for x in bar])

        #   First beat of first bar x 4
        for i in xrange(0, 4):
            out.append(custom_bars[0][0])

        #   First beat of second bar x 4
        for i in xrange(0, 4):
            out.append(custom_bars[1][0])

        beatone = custom_bars[2][0]
        beattwo = custom_bars[3][0]
        beatthree = custom_bars[3][2]

        #   First beat of third bar x 8
        for x in xrange(0, 8):
            out.append(
                audio.AudioQuantum(beatone.start, beatone.duration / 2, None,
                                   beatone.confidence, beatone.source))

        #   First beat of fourth bar x 8
        for x in xrange(0, 8):
            out.append(
                audio.AudioQuantum(beattwo.start, beattwo.duration / 4, None,
                                   beattwo.confidence, beattwo.source))

        #   Third beat of fourth bar x 8
        for x in xrange(0, 8):
            out.append(
                audio.AudioQuantum(beatthree.start, beatthree.duration / 4,
                                   None, beatthree.confidence,
                                   beatthree.source))

        if self.original.analysis.time_signature == 4:
            shifted = self.st.shiftTempo(audio.getpieces(self.original, out),
                                         self.template['tempo'] / self.tempo)
        else:
            shifted1 = audio.getpieces(self.original, out)
            shifted = self.st.shiftTempo(
                shifted1,
                len(shifted1) /
                ((44100 * 16 * 2 * 60.0) / self.template['tempo']))
            shifted1.unload()
        if shifted.numChannels == 1:
            shifted = self.mono_to_stereo(shifted)
        return self.truncatemix(intro, shifted, self.mixfactor(out))
Example No. 15
def dnbify(randombeat):
	print "dnbify"
	
	dnbfile = "mp3/breaks/RC4_Breakbeat_175 (%i).mp3" % randombeat
	dnbloop = audio.LocalAudioFile(dnbfile)
	
	# how many different groups will we cluster our data into?
	num_clusters = 5

	mix = 1.0
		
	dnbouts = []
	for layer in range(0, 2):
		# best_match = 1  # slower, less varied version. Good for b's which are percussion loops
		# best_match = 0 # faster, more varied version, picks a random segment from that cluster. Good for b's which are sample salads. 
		best_match = layer
		print "layer"
		print layer
		
		song1 = dnbloop
		song2 = song
		
		dnbout = audio.AudioData(shape=out_shape, sampleRate=sample_rate,numChannels=num_channels)
		
		# here we just grab the segments that overlap the section
		sectionsegments = song1.analysis.segments
		#for _ in range(3):
		#	sectionsegments.extend(song1.analysis.segments)
		sectionsegments2 = song2.analysis.segments #.that(overlap(section));
		

		# this is just too easy
		# song.analysis.segments.timbre is a list of all 12-valued timbre vectors. 
		# must be converted to a numpy.array() so that kmeans(data, n) is happy
		data = array(sectionsegments.timbre)
		data2 = array(sectionsegments2.timbre)
		
		"""
		# grab timbre data
		# must be converted to a numpy.array() so that kmeans(data, n) is happy
		data = array(song1.analysis.segments.timbre)
		data2 = array(song2.analysis.segments.timbre)
		"""
		
		
		# computing K-Means with k = num_clusters
		centroids,_ = kmeans(data,num_clusters)
		centroids2,_ = kmeans(data2,num_clusters)
		# assign each sample to a cluster
		idx,_ = vq(data,centroids)
		idx2,_ = vq(data2,centroids2)
		
		collection = []
		for c in range(0, num_clusters):
			ccount = 0
			for i in idx:
				if i==c: 
					ccount += 1
			collection.append([ccount, c])
		collection.sort()
		# collection is now sorted by cluster size, smallest first

		centroid_pairs = []
		for _,c in collection:
			centroid1 = array(centroids[c])
			min_distance = [9999999999,0]
			for ci in range(0,len(centroids2)):
				if ci in [li[1] for li in centroid_pairs]:
					continue
				centroid2 = array(centroids2[ci])
				euclidian_distance = norm(centroid1-centroid2)
				if euclidian_distance < min_distance[0]:
					min_distance = [euclidian_distance, ci]
			centroid_pairs.append([c,min_distance[1]])

		print centroid_pairs
		# now we have a list of paired up cluster indices. Cool.

		# Just so we're clear, we're rebuilding the structure of song1 with segments from song2

		# prepare song2 clusters
		# (a list comprehension is needed here: [audio.AudioQuantumList()]*n would make
		# every entry point at the same list object)
		segclusters2 = [audio.AudioQuantumList() for _ in range(len(centroids2))]
		for s2 in range(0,len(idx2)):
			segment2 = song2.analysis.segments[s2]
			cluster2 = idx2[s2]
			segment2.numpytimbre = array(segment2.timbre)
			segclusters2[cluster2].append(segment2)

		
		# for each segment1 in song1, find the timbrally closest segment2 in song2 belonging to the cluster2 with which segment1's cluster1 is paired.
		for s in range(0,len(idx)):
			segment1 = song1.analysis.segments[s]
			cluster1 = idx[s]
			cluster2 = [li[1] for li in centroid_pairs if li[0]==cluster1][0]

			if(best_match>0):
				# slower, less varied version. Good for b's which are percussion loops
				
				"""
				# there's already a function for this, use that instead: timbre_distance_from
				timbre1 = array(segment1.timbre)
				min_distance = [9999999999999,0]
				for seg in segclusters2[cluster2]:
					timbre2 = seg.numpytimbre
					euclidian_distance = norm(timbre2-timbre1)
					if euclidian_distance < min_distance[0]:
						min_distance = [euclidian_distance, seg]
				bestmatchsegment2 = min_distance[1]
				# we found the segment2 in song2 that best matches segment1
				"""
				
				bestmatches = segclusters2[cluster2].ordered_by(timbre_distance_from(segment1))
				
				if(best_match > 1):
					# if best_match > 1, grab randomly from the top best_match matches
					# (min, not max, so the slice really is limited to the best candidates)
					maxmatches = min(best_match, len(bestmatches))
					bestmatchsegment2 = choice(bestmatches[0:maxmatches])
				else:
					# if best_match == 1, it grabs the exact best match
					bestmatchsegment2 = bestmatches[0]
			else:
				# faster, more varied version, picks a random segment from that cluster. Good for sample salads. 
				bestmatchsegment2 = choice(segclusters2[cluster2])
				
			reference_data = song1[segment1]
			segment_data = song2[bestmatchsegment2]
			
			# what to do when segments lengths aren't equal? (almost always)
			# do we add silence? or do we stretch the samples?
			add_silence = True
			
			# This is the add silence solution:
			if add_silence: 
				if reference_data.endindex > segment_data.endindex:
					# we need to add silence, because segment1 is longer
					if num_channels > 1:
						silence_shape = (reference_data.endindex,num_channels)
					else:
						silence_shape = (reference_data.endindex,)
					new_segment = audio.AudioData(shape=silence_shape,
											sampleRate=out.sampleRate,
											numChannels=segment_data.numChannels)
					new_segment.append(segment_data)
					new_segment.endindex = len(new_segment)
					segment_data = new_segment
				elif reference_data.endindex < segment_data.endindex:
					# we need to cut segment2 shorter, because segment2 is longer than segment1
					index = slice(0, int(reference_data.endindex), 1)
					segment_data = audio.AudioData(None, segment_data.data[index], sampleRate=segment_data.sampleRate)
			else: 		  
				# TODO: stretch samples to fit.
				# haven't written this part yet.
				segment_data = segment_data

			# mix the original and the remix
			mixed_data = audio.mix(segment_data,reference_data,mix=mix)
			dnbout.append(mixed_data)
		
		dnbouts.append(dnbout)
	
	print "YEA"
	mixed_dnbouts = audio.AudioData(shape=out_shape, sampleRate=sample_rate,numChannels=num_channels)
	print len(dnbouts[0])
	print len(dnbouts[1])
	#for s in range(0,len(dnbouts[0])):
	#	print s
	#	print dnbouts[0]
	#	print dnbouts[1]
	mixed_data = audio.mix(dnbouts[0], dnbouts[1], 0.5)
	mixed_dnbouts.append(mixed_data)
	print "woah"
	
	dnbrepeatout = audio.AudioData(shape=out_shape, sampleRate=sample_rate,numChannels=num_channels)
	for _ in range(4):
		dnbrepeatout.append(mixed_dnbouts)
	print "oh okay"
	return dnbrepeatout
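
dnbify() in Example 15 reads a handful of module-level names (song, out, out_shape, sample_rate, num_channels) plus the clustering and matching helpers. Below is a sketch of the setup it assumes; the filename, the buffer size, and the import paths (older remix releases: echonest.audio / echonest.sorting) are assumptions.

# Hypothetical module-level setup assumed by dnbify(); filename, buffer size and
# import paths are assumptions.
from random import choice
from numpy import array
from numpy.linalg import norm
from scipy.cluster.vq import kmeans, vq
import echonest.audio as audio
from echonest.sorting import timbre_distance_from

song = audio.LocalAudioFile("input.mp3")
sample_rate = 44100
num_channels = 2
# dnbify() only reads out.sampleRate; the buffer is sized generously as a placeholder
out_shape = (len(song.data) * 2, num_channels)
out = audio.AudioData(shape=out_shape, sampleRate=sample_rate, numChannels=num_channels)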