Code example #1
File: main.py Project: ironmann250/AutoDJ
def mashability(song1, song2):
    """
	Returns how well song1 transitions into song2 using cosine matrix similarity
	and FFT semitone bin approximation matrices
	"""
    # If the tempo differs by more than thirty then we should never make that transition
    if abs(song1.bpm - song2.bpm) > 30:
        return 1
    sample_length = MIX_LENGTH  #beats per sample
    beats1 = song1.AudioFile.analysis.beats[song1.mix_out:song1.mix_out +
                                            sample_length]
    beats2 = song2.AudioFile.analysis.beats[song1.mix_in:song1.mix_in +
                                            sample_length]
    data1 = audio.getpieces(song1.AudioFile, beats1)
    data2 = audio.getpieces(song2.AudioFile, beats2)
    data1.encode("temp1.mp3")
    data2.encode("temp2.mp3")
    y1, sr1 = librosa.load("temp1.mp3")
    y2, sr2 = librosa.load("temp2.mp3")
    S1 = np.abs(librosa.stft(y1, n_fft=4096))
    chroma1 = librosa.feature.chroma_stft(S=S1, sr=sr1)
    S2 = np.abs(librosa.stft(y2, n_fft=4096))
    chroma2 = librosa.feature.chroma_stft(S=S2, sr=sr2)
    # im = librosa.display.specshow(chroma1,x_axis = "time",y_axis = "chroma")
    # im2 = librosa.display.specshow(chroma2,x_axis = "time",y_axis = "chroma")
    # plt.show()
    orthogonal_arr = []
    for i in range(min(chroma1.shape[1], chroma2.shape[1])):
        orthogonal_arr.append(dst.cosine(chroma1[:, i], chroma2[:, i]))
    return sum(orthogonal_arr) / len(orthogonal_arr)
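
The score above is an average per-frame cosine distance between the two chroma matrices, so lower values mean a smoother transition, and a returned 1 flags a tempo mismatch. As a hedged usage sketch (not part of either AutoDJ fork; the candidate list is hypothetical), it could drive next-track selection like this:

# Hypothetical helper built on mashability() above: choose the candidate whose
# transition from `current` has the lowest average cosine distance.
def pick_next_song(current, candidates):
    best_song, best_score = None, float("inf")
    for candidate in candidates:
        score = mashability(current, candidate)  # a score of 1 marks a tempo gap above 30 BPM
        if score < best_score:
            best_song, best_score = candidate, score
    return best_song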
Code example #2
File: main.py Project: vivjay30/AutoDJ
def renderList(songList, outFile):
    """
	Takes a list of songs and outputs them to outFile
	Has to beatmatch and cross fade
	Assumes songList >= 2
	"""
    mixOutSeg = None
    currAudio = None
    prevSong = None
    for i in range(len(songList)):
        currSong = songList[i]
        currBeats = currSong.AudioFile.analysis.beats
        # This happens on the first iteration, nothing to mix in so just play until mix out
        if not mixOutSeg:
            currAudio = audio.getpieces(currSong.AudioFile, currBeats[currSong.mix_in : currSong.mix_out])
        else:
            mixInSeg = currBeats[currSong.mix_in : currSong.mix_in + MIX_LENGTH]
            transitionSeg = makeTransition(mixInSeg, mixOutSeg, currSong, prevSong)
            transitionSeg.encode("outfiles/transition.mp3")
            mainSeg = audio.getpieces(currSong.AudioFile, currBeats[currSong.mix_in + MIX_LENGTH : currSong.mix_out])
            currAudio = audio.assemble([currAudio, transitionSeg, mainSeg])
        mixOutSeg = currBeats[currSong.mix_out : currSong.mix_out + MIX_LENGTH]
        prevSong = currSong

        # currAudio = audio.assemble([currAudio, mixOutSeg])
    currAudio.encode(outFile)
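
A hedged driver sketch, not taken from either project: it greedily orders a hypothetical song list by the mashability() score from the first example and then hands the result to renderList(). The Song objects and their mix_in/mix_out attributes are assumed to exist exactly as used above.

# Hypothetical greedy ordering followed by rendering (illustrative only; mutates `songs`).
def render_best_order(songs, out_file):
    ordered = [songs.pop(0)]
    while songs:
        next_song = min(songs, key=lambda s: mashability(ordered[-1], s))
        songs.remove(next_song)
        ordered.append(next_song)
    renderList(ordered, out_file)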
Code example #3
File: main.py Project: vivjay30/AutoDJ
def mashability(song1, song2):
    """
	Returns how well song1 transitions into song2 using cosine matrix similarity
	and FFT semitone bin approximation matrices
	"""
    # If the tempo differs by more than thirty then we should never make that transition
    if abs(song1.bpm - song2.bpm) > 30:
        return 1
    sample_length = MIX_LENGTH  # beats per sample
    beats1 = song1.AudioFile.analysis.beats[song1.mix_out : song1.mix_out + sample_length]
    beats2 = song2.AudioFile.analysis.beats[song1.mix_in : song1.mix_in + sample_length]
    data1 = audio.getpieces(song1.AudioFile, beats1)
    data2 = audio.getpieces(song2.AudioFile, beats2)
    data1.encode("temp1.mp3")
    data2.encode("temp2.mp3")
    y1, sr1 = librosa.load("temp1.mp3")
    y2, sr2 = librosa.load("temp2.mp3")
    S1 = np.abs(librosa.stft(y1, n_fft=4096))
    chroma1 = librosa.feature.chroma_stft(S=S1, sr=sr1)
    S2 = np.abs(librosa.stft(y2, n_fft=4096))
    chroma2 = librosa.feature.chroma_stft(S=S2, sr=sr2)
    # im = librosa.display.specshow(chroma1,x_axis = "time",y_axis = "chroma")
    # im2 = librosa.display.specshow(chroma2,x_axis = "time",y_axis = "chroma")
    # plt.show()
    orthogonal_arr = []
    for i in range(min(chroma1.shape[1], chroma2.shape[1])):
        orthogonal_arr.append(dst.cosine(chroma1[:, i], chroma2[:, i]))
    return sum(orthogonal_arr) / len(orthogonal_arr)
Code example #4
File: main.py Project: ironmann250/AutoDJ
def renderList(songList, outFile):
    """
	Takes a list of songs and outputs them to outFile
	Has to beatmatch and cross fade
	Assumes songList >= 2
	"""
    mixOutSeg = None
    currAudio = None
    prevSong = None
    for i in range(len(songList)):
        currSong = songList[i]
        currBeats = currSong.AudioFile.analysis.beats
        # This happens on the first iteration, nothing to mix in so just play until mix out
        if not mixOutSeg:
            currAudio = audio.getpieces(
                currSong.AudioFile,
                currBeats[currSong.mix_in:currSong.mix_out])
        else:
            mixInSeg = currBeats[currSong.mix_in:currSong.mix_in + MIX_LENGTH]
            transitionSeg = makeTransition(mixInSeg, mixOutSeg, currSong,
                                           prevSong)
            transitionSeg.encode("outfiles/transition.mp3")
            mainSeg = audio.getpieces(
                currSong.AudioFile,
                currBeats[currSong.mix_in + MIX_LENGTH:currSong.mix_out])
            currAudio = audio.assemble([currAudio, transitionSeg, mainSeg])
        mixOutSeg = currBeats[currSong.mix_out:currSong.mix_out + MIX_LENGTH]
        prevSong = currSong

    # currAudio = audio.assemble([currAudio, mixOutSeg])
    currAudio.encode(outFile)
Code example #5
  def write_sample(self, path, prefix):
    suffix = 0
    if self.samples is not None:
      for sample in self.samples:
        audio.getpieces(self.audio_file, [sample]).encode(path+prefix+str(suffix)+".wav")
        suffix += 1
    else:
      print "Analyze first"
Code example #6
File: main.py Project: ironmann250/AutoDJ
def makeTransition(inData, outData, inSong, outSong):
    """
	Takes two arrays of AudioQuantum objects and returns a single AudioData object
	with a linear crossfade and a beatmatch
	"""
    # The number of beats has to be the same
    assert (len(inData) == len(outData))

    # If the tempos are the same then it is easy
    if inSong.bpm == outSong.bpm:
        mixInSeg = audio.getpieces(inSong.AudioFile, inData)
        mixOutSeg = audio.getpieces(outSong.AudioFile, outData)
        # mixInSeg.encode("outfiles/Mixinseg.mp3")
        # mixOutSeg.encode("outfiles/MixOutseg.mp3")
        transition_length = 60.0 / 128.0 * MIX_LENGTH
        transitionSeg = action.Crossfade([mixOutSeg, mixInSeg], [0.0, 0.0],
                                         transition_length,
                                         mode="linear").render()
        return transitionSeg

    # Else we iterate over each one
    else:
        transitionTime = 0
        tempoDiff = inSong.bpm - outSong.bpm
        marginalInc = float(tempoDiff) / float(MIX_LENGTH)
        inCollect = []
        outCollect = []
        # Rather than a linear slowdown, it is a step function where each beat slows down by marginalInc
        for i in range(MIX_LENGTH):
            inAudio = inData[i].render()
            outAudio = outData[i].render()
            # We scale the in and out beats so that they are at the same tempo
            inScaled = dirac.timeScale(
                inAudio.data, inSong.bpm / (outSong.bpm + i * marginalInc))
            outScaled = dirac.timeScale(
                outAudio.data, outSong.bpm / (outSong.bpm + i * marginalInc))
            transitionTime += 60 / (outSong.bpm + i * marginalInc)
            ts = audio.AudioData(ndarray=inScaled,
                                 shape=inScaled.shape,
                                 sampleRate=inSong.AudioFile.sampleRate,
                                 numChannels=inScaled.shape[1])
            ts2 = audio.AudioData(ndarray=outScaled,
                                  shape=outScaled.shape,
                                  sampleRate=outSong.AudioFile.sampleRate,
                                  numChannels=outScaled.shape[1])
            inCollect.append(ts)
            outCollect.append(ts2)
        # Collect the audio and crossfade it
        mixInSeg = audio.assemble(inCollect, numChannels=2)
        mixOutSeg = audio.assemble(outCollect, numChannels=2)
        # mixInSeg.encode("outfiles/Mixinseg.mp3")
        # mixOutSeg.encode("outfiles/MixOutseg.mp3")
        transitionSeg = action.Crossfade([mixOutSeg, mixInSeg], [0.0, 0.0],
                                         transitionTime,
                                         mode="exponential").render()
        return transitionSeg
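
Two timing details above are easy to miss. In the equal-tempo branch the crossfade length is computed from a fixed 128 BPM (60.0 / 128.0 seconds per beat times MIX_LENGTH beats), while the ramped branch accumulates the duration of each re-timed beat as the blended tempo steps from the outgoing toward the incoming song. A hedged numeric check with illustrative values (MIX_LENGTH and the tempos here are assumptions, not the project's constants):

# Illustrative check of the two transition lengths computed above.
MIX_LENGTH = 16                               # assumed value of the project constant
equal_tempo_len = 60.0 / 128.0 * MIX_LENGTH   # 7.5 s, using the hard-coded 128 BPM
in_bpm, out_bpm = 126.0, 120.0                # assumed tempos for the ramped case
marginal_inc = (in_bpm - out_bpm) / float(MIX_LENGTH)
ramped_len = sum(60.0 / (out_bpm + i * marginal_inc) for i in range(MIX_LENGTH))
# ramped_len comes out near 7.8 s: each beat lasts a little longer than at 128 BPM
# because the blended tempo climbs from 120 toward 126 BPM.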
Code example #7
    def __init__(self, mp3):
        self.mp3 = mp3
        self.audio_file = audio.LocalAudioFile(self.mp3)
        self.analysis = self.audio_file.analysis
        self.beats = self.analysis.beats
        self.beats.reverse()

        #print self.audio_file.analysis.id
        print audio
        audio.getpieces(self.audio_file, self.beats).encode("remix.mp3")
Code example #8
 def write_sample(self, path, prefix):
     suffix = 0
     if self.samples is not None:
         for sample in self.samples:
             audio.getpieces(self.audio_file,
                             [sample]).encode(path + prefix + str(suffix) +
                                              ".wav")
             suffix += 1
     else:
         print "Analyze first"
Code example #9
File: main.py Project: vivjay30/AutoDJ
def makeTransition(inData, outData, inSong, outSong):
    """
	Takes two arrays of AudioQuantum objects and returns a single AudioData object
	with a linear crossfade and a beatmatch
	"""
    # The number of beats has to be the same
    assert len(inData) == len(outData)

    # If the tempos are the same then it is easy
    if inSong.bpm == outSong.bpm:
        mixInSeg = audio.getpieces(inSong.AudioFile, inData)
        mixOutSeg = audio.getpieces(outSong.AudioFile, outData)
        # mixInSeg.encode("outfiles/Mixinseg.mp3")
        # mixOutSeg.encode("outfiles/MixOutseg.mp3")
        transition_length = 60.0 / 128.0 * MIX_LENGTH
        transitionSeg = action.Crossfade([mixOutSeg, mixInSeg], [0.0, 0.0], transition_length, mode="linear").render()
        return transitionSeg

        # Else we iterate over each one
    else:
        transitionTime = 0
        tempoDiff = inSong.bpm - outSong.bpm
        marginalInc = float(tempoDiff) / float(MIX_LENGTH)
        inCollect = []
        outCollect = []
        # Rather than a linear slowdown, it is a step function where each beat slows down by marginalInc
        for i in range(MIX_LENGTH):
            inAudio = inData[i].render()
            outAudio = outData[i].render()
            # We scale the in and out beats so that they are at the same tempo
            inScaled = dirac.timeScale(inAudio.data, inSong.bpm / (outSong.bpm + i * marginalInc))
            outScaled = dirac.timeScale(outAudio.data, outSong.bpm / (outSong.bpm + i * marginalInc))
            transitionTime += 60 / (outSong.bpm + i * marginalInc)
            ts = audio.AudioData(
                ndarray=inScaled,
                shape=inScaled.shape,
                sampleRate=inSong.AudioFile.sampleRate,
                numChannels=inScaled.shape[1],
            )
            ts2 = audio.AudioData(
                ndarray=outScaled,
                shape=outScaled.shape,
                sampleRate=outSong.AudioFile.sampleRate,
                numChannels=outScaled.shape[1],
            )
            inCollect.append(ts)
            outCollect.append(ts2)
            # Collect the audio and crossfade it
        mixInSeg = audio.assemble(inCollect, numChannels=2)
        mixOutSeg = audio.assemble(outCollect, numChannels=2)
        # mixInSeg.encode("outfiles/Mixinseg.mp3")
        # mixOutSeg.encode("outfiles/MixOutseg.mp3")
        transitionSeg = action.Crossfade([mixOutSeg, mixInSeg], [0.0, 0.0], transitionTime, mode="exponential").render()
        return transitionSeg
Code example #10
File: sorting.py Project: hibikutek/remix
def main(units, key, input_filename, output_filename):
    audiofile = audio.LocalAudioFile(input_filename)
    chunks = audiofile.analysis.__getattribute__(units)

    # Define the sorting function
    if key == "duration":

        def sorting_function(chunk):
            return chunk.duration

    if key == "confidence":

        def sorting_function(chunk):
            if units != "segments":
                return chunk.confidence
            else:
                # Segments have no confidence, so we grab confidence from the tatum
                return chunk.tatum.confidence

    if key == "loudness":

        def sorting_function(chunk):
            return chunk.mean_loudness()

    sorted_chunks = sorted(chunks, key=sorting_function, reverse=reverse)

    out = audio.getpieces(audiofile, sorted_chunks)
    out.encode(output_filename)
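
main() above also reads a module-level reverse flag that the excerpt does not show. A hedged invocation sketch; the flag value and file names are assumptions:

# Hypothetical invocation: reorder every beat of the track from quietest to loudest.
reverse = False
main('beats', 'loudness', 'input.mp3', 'beats_by_loudness.mp3')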
Code example #11
File: tonic.py Project: BoldBigflank/remix-examples
def main(units, inputFile, outputFile):
    audiofile = audio.LocalAudioFile(inputFile)
    tonic = audiofile.analysis.key['value']
    
    chunks = audiofile.analysis.__getattribute__(units)
    
    # Get the segments    
    all_segments = audiofile.analysis.segments
    
    # Find tonic segments
    tonic_segments = audio.AudioQuantumList(kind="segment")
    for segment in all_segments:
        pitches = segment.pitches
        if pitches.index(max(pitches)) == tonic:
            tonic_segments.append(segment)

    # Find each chunk that matches each segment
    out_chunks = audio.AudioQuantumList(kind=units) 
    for chunk in chunks:
        for segment in tonic_segments:
            if chunk.start >= segment.start and segment.end >= chunk.start:
                out_chunks.append(chunk)
                break
    
    out = audio.getpieces(audiofile, out_chunks)
    out.encode(outputFile)
Code example #12
def main(units, inputFile, outputFile):
    audiofile = audio.LocalAudioFile(inputFile)
    tonic = audiofile.analysis.key['value']

    chunks = audiofile.analysis.__getattribute__(units)

    # Get the segments
    all_segments = audiofile.analysis.segments

    # Find tonic segments
    tonic_segments = audio.AudioQuantumList(kind="segment")
    for segment in all_segments:
        pitches = segment.pitches
        if pitches.index(max(pitches)) == tonic:
            tonic_segments.append(segment)

    # Find each chunk that matches each segment
    out_chunks = audio.AudioQuantumList(kind=units)
    for chunk in chunks:
        for segment in tonic_segments:
            if chunk.start >= segment.start and segment.end >= chunk.start:
                out_chunks.append(chunk)
                break

    out = audio.getpieces(audiofile, out_chunks)
    out.encode(outputFile)
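
Both tonic examples assume that segment.pitches is a 12-element chroma vector indexed by pitch class and that analysis.key['value'] uses the same indexing, with 0 corresponding to C. A small hedged helper (not in either original) for reporting which pitch class the filter keeps:

# Assumed pitch-class ordering shared by analysis.key and segment.pitches (0 = C).
PITCH_NAMES = ['C', 'C#', 'D', 'Eb', 'E', 'F', 'F#', 'G', 'Ab', 'A', 'Bb', 'B']

def describe_tonic(audiofile):
    # Note name of the key that the tonic filter above keeps.
    return PITCH_NAMES[audiofile.analysis.key['value']]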
Code example #13
File: kleptamatic.py Project: hannahmimi/kleptamatic
def mixSample(path1, path2):
	audio_file1 = audio.LocalAudioFile(path1)
	audio_file2 = audio.LocalAudioFile(path2)
	beats = audio_file1.analysis.beats  # beats of the first track, indexed below when rendering
	
	branches = twoSamples(audio_file1, audio_file2, threshold = 250)

	# get the beats of interest
	branch, collect = [], []
	for each in branches: 
		branch.append(each)
		#branch.append(branches[each][0])

	# beats re-ordered for rendering
	for each in branch: 
		collect.append(beats[each])

	out = audio.getpieces(audio_file1, collect)

	name1 = path1.split('/')
	name2 = path2.split('/')

	output_file = "./mixed/" + name1[len(name1)-1].split('.')[0] + "_" + name2[len(name2)-1].split('.')[0]+ "_out.mp3"

	out.encode(output_file)

	return 
Code example #14
File: sorting.py Project: wilttang/remix-examples
def main(units, key, input_filename, output_filename):
    audiofile = audio.LocalAudioFile(input_filename)
    chunks = audiofile.analysis.__getattribute__(units)

    # Define the sorting function
    if key == 'duration':

        def sorting_function(chunk):
            return chunk.duration

    if key == 'confidence':

        def sorting_function(chunk):
            if units != 'segments':
                return chunk.confidence
            else:
                # Segments have no confidence, so we grab confidence from the tatum
                return chunk.tatum.confidence

    if key == 'loudness':

        def sorting_function(chunk):
            return chunk.mean_loudness()

    sorted_chunks = sorted(chunks, key=sorting_function, reverse=reverse)

    out = audio.getpieces(audiofile, sorted_chunks)
    out.encode(output_filename)
Code example #15
File: index.py Project: Harrison-M/beat-sorter
def upload():
    if request.method == 'POST':
        #Get the file
        audiofile = request.files['audio']
        fname = 'tmp/' + str(long(time.time())) + secure_filename(audiofile.filename)
        audiofile.save(fname)

        remixfile = audio.LocalAudioFile(fname)
        beats = remixfile.analysis.beats

        #https://github.com/echonest/remix/blob/master/examples/sorting/sorting.py
        def sorting_function(chunk):
            return chunk.mean_loudness()

        sortedbeats = sorted(beats, key=sorting_function)

        out = audio.getpieces(remixfile, sortedbeats)

        audioname = str(long(time.time())) + 'sorted' + secure_filename(audiofile.filename) + '.mp3'
        outfname = 'tmp/' + audioname
        out.encode(outfname, mp3=True)

        #Upload to rackspace
        chksum = pyrax.utils.get_checksum(outfname)
        cf.upload_file("beatsorter", outfname, etag=chksum)

        #os.remove(fname)
        #os.remove(outfname)
        return redirect(url_for('getaudiofile', filename=audioname))
Code example #16
def main(input_filename, output_filename):
    audiofile = audio.LocalAudioFile(input_filename)
    bars = audiofile.analysis.bars
    collect = audio.AudioQuantumList()
    for bar in bars:
        collect.append(bar.children()[0])
    out = audio.getpieces(audiofile, collect)
    out.encode(output_filename)
Code example #17
    def reverse_audio(self, input_filename, output_filename):
        """
        description:
        A method that quasi-reverses an audio file by splitting it at each beat and playing the
        pieces in reverse.

        inputs:
        -- str input_filename -- The name of the input audio file.
        -- str output_filename -- The name of the audio file to be outputted.

        return info:
        -- return type -- void
        """
        audio_file = self.extractor.get_audio_file(input_filename)
        beats = audio_file.analysis.beats
        beats.reverse()
        audio.getpieces(audio_file, beats).encode(self.audio_files_dir + '/' + output_filename)
Code example #18
File: remix-synth.py Project: tkell/timbresurf-chuck
def main(input_filename):
    audiofile = audio.LocalAudioFile(input_filename)

    if granularity == "segment":
        all_audio = audiofile.analysis.segments
    elif granularity == "tatum":
        all_audio = audiofile.analysis.tatums
    elif granularity == "beat":
        all_audio = audiofile.analysis.beats
    elif granularity == "bar":
        all_audio = audiofile.analysis.bars

    all_segments = audiofile.analysis.segments

    output_text_filename = "%ss%s" % (granularity, ".timbre")
    f = open(output_text_filename, 'w')
    counter = 0
    for chunk in all_audio:
        output_filename = "%s_%s.wav" % (granularity, counter)
        counter = counter + 1
        
        collect = audio.AudioQuantumList()
        collect.append(chunk)
        out = audio.getpieces(audiofile, collect)
        out.encode(output_filename)     

        # Now I need to write things
        # I am going to take timbre values 1 through 7, as 0 is just amplitude.
        temp_timbre = []
        if granularity == "segment":
            temp_timbre = [chunk.timbre[1:7]] # This is needed to make things work with the numpy array stuff

        # Work out how to get averages here
        # There must be a better way to get segments from an audioQuanta...
        if granularity != "segment":
            for segment in all_segments:
                if segment.start >= chunk.start and segment.start < chunk.get_end():
                    temp_timbre.append(segment.timbre[1:7])
                elif segment.start > chunk.get_end():
                    break
            # This is if we have no segments that starts in the chunk
            if not temp_timbre:
                for segment in all_segments:
                    if segment.start < chunk.start and segment.end > chunk.get_end():
                        temp_timbre.append(segment.timbre[1:7])
                        break
        
        temp_timbre = numpy.array(temp_timbre)
        if temp_timbre.size == 0:
            temp_timbre = numpy.array([[0, 0, 0, 0, 0, 0]])
        timbre_list = list(temp_timbre.mean(axis=0))
        timbre_list = [str(math.floor(t)) for t in timbre_list]

        # Yes, I am writing one number per line.  Shhh.  ChucK's string reading abilities are awful
        for timbre in timbre_list:
            f.write("%s\n" % timbre) 
    
    f.close()
Code example #19
    def reverse_audio(self, input_filename, output_filename):
        """
        description:
        A method that quasi-reverses an audio file by splitting it at each beat and playing the
        pieces in reverse.

        inputs:
        -- str input_filename -- The name of the input audio file.
        -- str output_filename -- The name of the audio file to be outputted.

        return info:
        -- return type -- void
        """
        audio_file = self.extractor.get_audio_file(input_filename)
        beats = audio_file.analysis.beats
        beats.reverse()
        audio.getpieces(audio_file, beats).encode(self.audio_files_dir + '/' +
                                                  output_filename)
Code example #20
File: engine.py Project: ddesroches/arranger
    def _write_output_file(self, section_list, output_file):
        if section_list:
            collect = audio.AudioQuantumList()
            for s in section_list:
                collect.append(self.sections[s])
        else:
            collect = self.collect

        out = audio.getpieces(self.audiofile, collect)
        out.encode(output_file)
Code example #21
def reverse(sInputFileName):
    sOutputFileName = 'music/reversed.' + sInputFileName[:-3] + '.wav'
    audioFile = audio.LocalAudioFile(sInputFileName)
    sToReverse = 'segments'
    if sToReverse == 'beats' :
        chunks = audioFile.analysis.beats
    elif sToReverse == 'segments' :
        chunks = audioFile.analysis.segments
    chunks.reverse()
    reversedAudio = audio.getpieces(audioFile, chunks)
    reversedAudio.encode(sOutputFileName)
Code example #22
def main(toReverse, inputFilename, outputFilename):
    audioFile = audio.LocalAudioFile(inputFilename)
    if toReverse == 'beats' :
        chunks = audioFile.analysis.beats
    elif toReverse == 'segments' :
        chunks = audioFile.analysis.segments
    else :
        print usage
        return
    chunks.reverse()
    reversedAudio = audio.getpieces(audioFile, chunks)
    reversedAudio.encode(outputFilename)
Code example #23
File: reverse.py Project: wilttang/remix-examples
def main(toReverse, inputFilename, outputFilename):
    audioFile = audio.LocalAudioFile(inputFilename)
    if toReverse == 'beats':
        chunks = audioFile.analysis.beats
    elif toReverse == 'segments':
        chunks = audioFile.analysis.segments
    else:
        print usage
        return
    chunks.reverse()
    reversedAudio = audio.getpieces(audioFile, chunks)
    reversedAudio.encode(outputFilename)
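
The reverse examples print a module-level usage string that the excerpts omit. A hedged reconstruction of the kind of command-line wrapper these example scripts typically end with; the exact wording and file names are assumptions:

# Assumed usage string and entry point, in the style of the remix example scripts.
usage = """
Usage:
    python reverse.py <beats|segments> <input_filename> <output_filename>
Example:
    python reverse.py beats HereComesTheSun.mp3 HereComesTheSunReversed.mp3
"""

if __name__ == '__main__':
    import sys
    try:
        toReverse = sys.argv[1]
        inputFilename = sys.argv[2]
        outputFilename = sys.argv[3]
    except IndexError:
        print usage
        sys.exit(-1)
    main(toReverse, inputFilename, outputFilename)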
Code example #24
def main(input_filename, output_filename, index):
    audio_file = audio.LocalAudioFile(input_filename)
    beats = audio_file.analysis.beats
    collect = audio.AudioQuantumList()
    for beat in beats:
        tata = beat.children()
        if len(tata)>1:
            tat = tata[index]
        else:
            tat = tata[0]
        collect.append(tat)
    out = audio.getpieces(audio_file, collect)
    out.encode(output_filename)
Code example #25
def main(units, key, input_filename, output_filename):
    audiofile = audio.LocalAudioFile(input_filename)
    chunks = audiofile.analysis.__getattribute__(units)
    key = int(key)
    
    def sorting_function(chunk):
        pitches = chunk.mean_pitches()
        return pitches.index(max(pitches)) - key % 12

    sorted_chunks = sorted(chunks, key=sorting_function, reverse=reverse)

    out = audio.getpieces(audiofile, sorted_chunks)
    out.encode(output_filename)
Code example #26
def main(units, key, input_filename, output_filename):
    audiofile = audio.LocalAudioFile(input_filename)
    chunks = audiofile.analysis.__getattribute__(units)
    key = int(key)

    def sorting_function(chunk):
        pitches = chunk.mean_pitches()
        return pitches.index(max(pitches)) - key % 12

    sorted_chunks = sorted(chunks, key=sorting_function, reverse=reverse)

    out = audio.getpieces(audiofile, sorted_chunks)
    out.encode(output_filename)
Code example #27
def make_song_snippet(beat1_index, beat2_index, laf1, laf2):
    beats1 = laf1.analysis.beats
    beats2 = laf2.analysis.beats
    bars1 = laf1.analysis.bars
    bars2 = laf2.analysis.bars
    md51 = laf1.analysis.pyechonest_track.md5
    md52 = laf2.analysis.pyechonest_track.md5
    distance = bd.get_beat_distance(beats1[beat1_index], beats2[beat2_index])
    bar1 = beats1[beat1_index].parent().absolute_context()[0]
    bar2 = beats2[beat2_index].parent().absolute_context()[0]
    if bar1 - 2 >= 0:
        starting_beat = bars1[bar1 - 2].children()[0].absolute_context()[0]
    else:
        starting_beat = 0
    if bar2 + 3 < len(bars2):
        last_beat = bars2[bar2 + 3].children()[0].absolute_context()[0]
    else:
        last_beat = len(beats2)
    out = audio.getpieces(beats1.get_source(), beats1[starting_beat:beat1_index])
    out += audio.getpieces(beats2.get_source(), beats2[beat2_index:last_beat])
    out.encode(OUTPUT_DIR + str(md51) + "_beat_" + str(beat1_index) + "_" + str(md52) +
               "_beat_" + str(beat2_index) + "_" + str(distance) + ".wav")
Code example #28
File: filter.py Project: hibikutek/remix
def main(units, key, value, input_filename, output_filename):
    audiofile = audio.LocalAudioFile(input_filename)
    chunks = audiofile.analysis.__getattribute__(units)

    if key == "pitch":
        value = int(value)
    if key == "pitches":
        value = eval(value)
        if type(value) != list:
            print usage
            sys.exit(-1)
    if key == "duration":
        value = eval(value)
        duration_start = value[0]
        duration_end = value[1]
    if key == "louder" or key == "softer":
        value = float(value)

    filtered_chunks = []
    for chunk in chunks:
        if key == "pitch":
            pitches = chunk.mean_pitches()
            if pitches.index(max(pitches)) == value:
                filtered_chunks.append(chunk)

        if key == "pitches":
            max_indexes = []
            pitches = chunk.mean_pitches()
            max_pitches = sorted(pitches, reverse=True)
            for pitch in max_pitches:
                max_indexes.append(pitches.index(pitch))

            if set(value) == set(max_indexes[0 : len(value)]):
                filtered_chunks.append(chunk)

        if key == "duration":
            if chunk.start < duration_end and chunk.end > duration_start:
                filtered_chunks.append(chunk)
            elif chunk.start > duration_end:
                break

        if key == "louder":
            if chunk.mean_loudness() > value:
                filtered_chunks.append(chunk)

        if key == "softer":
            if chunk.mean_loudness() < value:
                filtered_chunks.append(chunk)

    out = audio.getpieces(audiofile, filtered_chunks)
    out.encode(output_filename)
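
For reference, a few hedged invocations of the filter above (file names and values are illustrative): pitch expects a single pitch-class index, pitches a Python list literal, duration a [start, end] window in seconds, and louder/softer a loudness threshold in dB.

# Hypothetical calls to main() above, one per filter key.
main('beats', 'pitch', '0', 'song.mp3', 'c_beats.mp3')               # beats whose strongest pitch class is C
main('beats', 'pitches', '[0, 7]', 'song.mp3', 'c_and_g_beats.mp3')  # beats dominated by C and G
main('segments', 'duration', '[30, 60]', 'song.mp3', 'window.mp3')   # segments overlapping 30-60 s
main('beats', 'louder', '-20', 'song.mp3', 'loud_beats.mp3')         # beats with mean loudness above -20 dB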
Code example #29
File: filter.py Project: wilttang/remix-examples
def main(units, key, value, input_filename, output_filename):
    audiofile = audio.LocalAudioFile(input_filename)
    chunks = audiofile.analysis.__getattribute__(units)
    
    if key == 'pitch':
        value = int(value);
    if key == 'pitches':
        value = eval(value)
        if type(value) != list:
            print usage
            sys.exit(-1)
    if key == 'duration':
        value = eval(value)
        duration_start = value[0]
        duration_end = value[1]
    if key == 'louder' or key == 'softer':
        value = float(value)
    
    filtered_chunks = []
    for chunk in chunks:
        if key == 'pitch':      
            pitches = chunk.mean_pitches()
            if pitches.index(max(pitches)) == value:        
                filtered_chunks.append(chunk)
   
        if key == 'pitches':
            max_indexes = []
            pitches = chunk.mean_pitches()
            max_pitches = sorted(pitches, reverse=True)
            for pitch in max_pitches:
                 max_indexes.append(pitches.index(pitch)) 
            
            if set(value) == set(max_indexes[0:len(value)]):
                filtered_chunks.append(chunk)

        if key == 'duration':
            if chunk.start < duration_end and chunk.end > duration_start:
                filtered_chunks.append(chunk)
            elif chunk.start > duration_end:
                break

        if key == 'louder':
            if chunk.mean_loudness() > value:
                filtered_chunks.append(chunk)

        if key == 'softer':
            if chunk.mean_loudness() < value:
                filtered_chunks.append(chunk)

    out = audio.getpieces(audiofile, filtered_chunks)
    out.encode(output_filename)
Code example #30
def main(input_filename, output_filename):
    # Returns results of the input track.
    audiofile = audio.LocalAudioFile(input_filename)
    # Gets a list of every section in the track.
    sections = audiofile.analysis.sections
    # New list for AudioQuantums.
    collect = audio.AudioQuantumList()
    # Loops through the first item in the children of each section into the new list.
    for sec in sections:
        collect.append(sec.children()[0])
    # Audio defined in collect from analyzed audio file.
    out = audio.getpieces(audiofile, collect)
    # Writes the new audio.
    out.encode(output_filename)
Code example #31
File: sorting_timbre.py Project: MattHulse/remix
def main(units, timbre_bin, input_filename, output_filename):
    audiofile = audio.LocalAudioFile(input_filename)
    chunks = audiofile.analysis.__getattribute__(units)
    timbre_bin = int(timbre_bin)
    
    # For any chunk, return the timbre value of the given bin
    def sorting_function(chunk):
        timbre = chunk.mean_timbre()
        return timbre[timbre_bin]

    sorted_chunks = sorted(chunks, key=sorting_function, reverse=reverse)

    out = audio.getpieces(audiofile, sorted_chunks)
    out.encode(output_filename)
Code example #32
File: one_segment.py Project: mugetsu7/echonest
def main(input_filename, output_filename):
    # load audio file
    audiofile = audio.LocalAudioFile(input_filename)
    # get the beats (Audio Quanta)
    beats = audiofile.analysis.beats
    # create a new empty list of Audio Quanta
    collect = audio.AudioQuantumList()
    # add the first segment in each beat in sequence
    for beat in beats:
        # beat.children are the segments in this beat
        # beat.children()[0] is the first segment
        collect.append(beat.children()[0])
    # Get the raw audio data for the audio quanta and store them in 'out'
    out = audio.getpieces(audiofile, collect)
    # encode the raw audio as the appropriate file type (using en-ffmpeg)
    out.encode(output_filename)
Code example #33
File: one_segment.py Project: styresdc/echonest
def main(input_filename, output_filename):
    # load audio file
    audiofile = audio.LocalAudioFile(input_filename)
    # get the beats (Audio Quanta)
    beats = audiofile.analysis.beats
    # create a new empty list of Audio Quanta
    collect = audio.AudioQuantumList()
    # add the first segment in each beat in sequence
    for beat in beats:
        # beat.children are the segments in this beat
        # beat.children()[0] is the first segment
        collect.append(beat.children()[0])
    # Get the raw audio data for the audio quanta and store them in 'out'
    out = audio.getpieces(audiofile, collect)
    # encode the raw audio as the appropriate file type (using en-ffmpeg)
    out.encode(output_filename)
Code example #34
def main(units, timbre_bin, input_filename, output_filename):
    audiofile = audio.LocalAudioFile(input_filename)
    chunks = audiofile.analysis.__getattribute__(units)
    timbre_bin = int(timbre_bin)
    
    # For any chunk, return the timbre value of the given bin
    def sorting_function(chunk):
        timbre = chunk.mean_timbre()
        return timbre[timbre_bin]

    sorted_chunks = sorted(chunks, key=sorting_function, reverse=reverse)

    import pdb
    #pdb.set_trace()

    out = audio.getpieces(audiofile, sorted_chunks)
    out.encode(output_filename)
Code example #35
File: song.py Project: tgfbikes/python
def generate_new_song2(chunks, segments, input_filename, output_filename, audiofile, length=300):
    out_chunks = audio.AudioQuantumList(kind="beats") 
    #start at beginning of song
    #play for a while
    #leave first 20 segments alone?
    for i in range(20):
        out_chunks.append(segments[i])

    last_segment_played = segments[i]
    #how many times to do this?
    # I randomly put 300 segments here
    while len(out_chunks) < length: 
        next_segment_to_play = last_segment_played.get_next_segment_to_play()
        last_segment_played = next_segment_to_play
        out_chunks.append(next_segment_to_play)

    out = audio.getpieces(audiofile, out_chunks)
    out.encode(output_filename)
Code example #36
File: main.py Project: ecanzonieri/MarkovsMix
def main():

    #### We can't do this for multiple songs.
    songs = glob.glob("songs/*.mp3")
    filename = generate(songs)
    beats = []
    audiofile = audio.LocalAudioFile(filename)
    beats = audiofile.analysis.beats
    print "Number of beats %s" % len(beats)

    samples = beats[::SAMPLING_STEP]
    print "Number of samples to build cluster model %s" % len(samples)
    cl = cluster.KMeansClustering(samples, distance_beats)
    clusters = cl.getclusters(K)
    print "Clustering completed"

    for c in clusters:
        c.centroid = None
    pickle.dump(clusters, open("clustering.c", "wb"))
    print "Pickled Cluster Model"

    for c in clusters:
        c.centroid = cluster.centroid(c)
    print "Reset the centroids"

    training_input = []
    for beat in beats:
        training_input.append(get_cluster_index(beat, clusters))
    print("Training markovModel")
    markov_model = MarkovModel()
    markov_model.learn_ngram_distribution(training_input, NGRAM)

    #### We should have his function as iterator.
    print "Generating bunch of music"
    output_list = markov_model.generate_a_bunch_of_text(len(training_input))
    generated_beats = audio.AudioQuantumList()
    print "Coming back to beats"
    for index in output_list:
        generated_beats.append(get_beats_back(index, clusters))

    #### We can't do this for multiple songs.
    print "Saving an Amazing Song"
    out = audio.getpieces(audiofile, generated_beats)
    out.encode("bunch_of_music.wav")
Code example #37
def getClosestBar(filename):
    song0 = audio.LocalAudioFile("dhorse1.wav")
    song = audio.LocalAudioFile(filename)
    sections = song.analysis.sections
    bars = song.analysis.bars
    sectionStart = [q.start for q in sections][1:]
    barStart = [b.start for b in bars]
    VIPBars = []
    for start in sectionStart:
        for i in xrange(len(barStart) - 1):
            if barStart[i] < start and barStart[i + 1] >= start:
                VIPBars += [i]
    #need to split the audio file based on the bar partition now
    for barVal in VIPBars:
        if barVal == VIPBars[0]: continue
        smallBar = audio.getpieces(song, bars[barVal - 3:barVal + 3])
        smallBar.encode("smallBar.wav")
        smallBar = audio.LocalAudioFile("smallBar.wav")
        print smallBar.analysis.segments
        return featureAnalysis.main(smallBar, song0)
Code example #38
File: pieces.py Project: MikeiLL/wp-python
def main(input_filename, output_filename):
    audiofile = audio.LocalAudioFile(input_filename)
    duration = audiofile.analysis.duration
    segments = audiofile.analysis.segments
    bars = audiofile.analysis.bars
    beats = audiofile.analysis.beats
    print duration, " ", len(segments)
    beats_in = beats_out = 0
    
    if beats:
        in_flow = {"music": abridge(bars, segments, 4), "pre_bar_beats": pre_post(beats, bars)[0],
                   "post_bar_beats": pre_post(beats, bars)[1]}
    else:
        in_flow = {"music": segments, "pre_bar_beats": 0,
                   "post_bar_beats": 0}
        
    print in_flow['pre_bar_beats'], "pre-bar beats", in_flow['post_bar_beats'], "post-bar beats"
    
    out = audio.getpieces(audiofile, in_flow["music"])
    out.encode(output_filename)
Code example #39
File: utilities.py Project: ErinCoughlan/mashathon
def SendMP3OfSong(nextSong, nextSongStartBeat):
    collect = audio.AudioQuantumList()
    audiofile = audio.LocalAudioFile(str(nextSong.filePath))

    oldBeats = audiofile.analysis.beats
    beats = []
    i = 0

    for beat in oldBeats:
        if i >= nextSongStartBeat:
            collect.append(beat.children()[0])
        i += 1

    for beat in beats:
        collect.append(beat.children()[0])

    out = audio.getpieces(audiofile, collect)

    # TODO: actual path here
    out.encode("/tmp/".join(nextSong.title))
Code example #40
def main(infile, outfile, choices=4):
    audiofile = audio.LocalAudioFile(infile)
    meter = audiofile.analysis.time_signature['value']
    sections = audiofile.analysis.sections
    output = audio.AudioQuantumList()

    for section in sections:
        beats = []
        bars = section.children()
        for bar in bars:
            beats.extend(bar.children())

        beat_array = []
        for m in range(meter):
            metered_beats = []
            for b in beats:
                if beats.index(b) % meter == m:
                    metered_beats.append(b)
            beat_array.append(metered_beats)

        # Always start with the first beat
        output.append(beat_array[0][0])
        for x in range(1, len(bars) * meter):
            meter_index = x % meter
            next_candidates = beat_array[meter_index]

            def sorting_function(chunk, target_chunk=output[-1]):
                timbre = chunk.mean_timbre()
                target_timbre = target_chunk.mean_timbre()
                timbre_distance = numpy.linalg.norm(
                    numpy.array(timbre) - numpy.array(target_timbre))
                return timbre_distance

            next_candidates = sorted(next_candidates, key=sorting_function)
            next_index = random.randint(0,
                                        min(choices,
                                            len(next_candidates) - 1))
            output.append(next_candidates[next_index])

    out = audio.getpieces(audiofile, output)
    out.encode(outfile)
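
The selection step above ranks the candidate beats for each metrical position by the Euclidean distance between mean timbre vectors, then picks at random from the closest few. A hedged standalone version of just that step (the function name and defaults are assumptions):

import random
import numpy

# Hypothetical standalone version of the candidate-selection step used above.
def pick_similar_beat(previous_beat, candidates, choices=4):
    target = numpy.array(previous_beat.mean_timbre())
    ranked = sorted(candidates,
                    key=lambda beat: numpy.linalg.norm(numpy.array(beat.mean_timbre()) - target))
    return ranked[random.randint(0, min(choices, len(ranked) - 1))]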
Code example #41
def main(input_filename, output_filename):
    audiofile = audio.LocalAudioFile(input_filename)
    '''
    This line got the bars of the song in the previous version:
    bars = audiofile.analysis.bars
    
    Now, this line gets the beats in the song:
    '''
    beats = audiofile.analysis.beats
    collect = audio.AudioQuantumList()
    '''
    This loop got the first beat in each bar and appended them to a list:
    for bar in bars:
        collect.append(bar.children()[0])
        
    Now, this loop gets the first segment in each beat and appends them to the list:
    '''
    for b in beats:
        collect.append(b.children()[0])
    out = audio.getpieces(audiofile, collect)
    out.encode(output_filename)
Code example #42
File: sorting_pk2.py Project: paulkarayan/nestmix
def main(units, input_filename):
    audiofile = audio.LocalAudioFile(input_filename)
    chunks = audiofile.analysis.__getattribute__(units)
    timbre_range = range(11)
    tonic = audiofile.analysis.key['value']

    
    # For any chunk, return the timbre value of the given bin
    def sorting_function(chunk):
        timbre = chunk.mean_timbre()
        return timbre[timbre_bin]

    #loop through all 11 bins of timbre and output them
    for timbre_bin in timbre_range:
        print(timbre_bin)
        sorted_chunks = sorted(chunks, key=sorting_function, reverse=reverse)

        #would be nice to output string keys rather than bins
        # just zip range 0-11 to    (c, c-sharp, d, e-flat, e, f, f-sharp, g, a-flat, a, b-flat, b)
        
        out = audio.getpieces(audiofile, sorted_chunks)
        out.encode('chopped_%s_%s_%s' % (units,timbre_bin,tonic))
Code example #43
def main(units, inputFile, outputFile):
    audiofile = audio.LocalAudioFile(inputFile)
    collect = audio.AudioQuantumList()
    if not audiofile.analysis.bars:
        print "No bars found in this analysis!"
        print "No output."
        sys.exit(-1)
    for b in audiofile.analysis.bars[0:-1]:                
        # all but the last beat
        collect.extend(b.children()[0:-1])
        if units.startswith("tatum"):
            # all but the last half (round down) of the last beat
            half = - (len(b.children()[-1].children()) // 2)
            collect.extend(b.children()[-1].children()[0:half])
    # endings were rough, so leave everything after the start
    # of the final bar intact:
    last = audio.AudioQuantum(audiofile.analysis.bars[-1].start,
                              audiofile.analysis.duration - 
                                audiofile.analysis.bars[-1].start)
    collect.append(last)
    out = audio.getpieces(audiofile, collect)
    out.encode(outputFile)
Code example #44
def main(infile, outfile, choices=4, bars=40):
    audiofile = audio.LocalAudioFile(infile)
    meter = audiofile.analysis.time_signature['value']
    fade_in = audiofile.analysis.end_of_fade_in
    fade_out = audiofile.analysis.start_of_fade_out

    beats = []
    for b in audiofile.analysis.beats:
        if b.start > fade_in or b.end < fade_out:
            beats.append(b)
    output = audio.AudioQuantumList()

    beat_array = []
    for m in range(meter):
        metered_beats = []
        for b in beats:
            if beats.index(b) % meter == m:
                metered_beats.append(b)
        beat_array.append(metered_beats)

    # Always start with the first beat
    output.append(beat_array[0][0])
    for x in range(1, bars * meter):
        meter_index = x % meter
        next_candidates = beat_array[meter_index]

        def sorting_function(chunk, target_chunk=output[-1]):
            timbre = chunk.mean_pitches()
            target_timbre = target_chunk.mean_pitches()
            timbre_distance = numpy.linalg.norm(
                numpy.array(timbre) - numpy.array(target_timbre))
            return timbre_distance

        next_candidates = sorted(next_candidates, key=sorting_function)
        next_index = random.randint(0, min(choices, len(next_candidates) - 1))
        output.append(next_candidates[next_index])

    out = audio.getpieces(audiofile, output)
    out.encode(outfile)
Code example #45
File: one.py Project: DrawMusic/remix
def main(input_filename, output_filename):
    # This takes your input track, sends it to the analyzer, and returns the results.  
    audiofile = audio.LocalAudioFile(input_filename)

    # This gets a list of every bar in the track.  
    # You can manipulate this just like any other Python list!
    bars = audiofile.analysis.bars

    # This makes a new list of "AudioQuantums".  
    # Those are just any discrete chunk of audio:  bars, beats, etc.
    collect = audio.AudioQuantumList()

    # This loop puts the first item in the children of each bar into the new list. 
    # A bar's children are beats!  Simple as that. 
    for bar in bars:
        collect.append(bar.children()[0])

    # This assembles the pieces of audio defined in collect from the analyzed audio file.
    out = audio.getpieces(audiofile, collect)
    
    # This writes the newly created audio to the given file.  
    out.encode(output_filename)
Code example #46
File: lopside.py Project: wilttang/remix-examples
def main(units, inputFile, outputFile):
    audiofile = audio.LocalAudioFile(inputFile)
    collect = audio.AudioQuantumList()
    if not audiofile.analysis.bars:
        print "No bars found in this analysis!"
        print "No output."
        sys.exit(-1)
    for b in audiofile.analysis.bars[0:-1]:
        # all but the last beat
        collect.extend(b.children()[0:-1])
        if units.startswith("tatum"):
            # all but the last half (round down) of the last beat
            half = -(len(b.children()[-1].children()) // 2)
            collect.extend(b.children()[-1].children()[0:half])
    # endings were rough, so leave everything after the start
    # of the final bar intact:
    last = audio.AudioQuantum(
        audiofile.analysis.bars[-1].start,
        audiofile.analysis.duration - audiofile.analysis.bars[-1].start)
    collect.append(last)
    out = audio.getpieces(audiofile, collect)
    out.encode(outputFile)
Code example #47
File: one.py Project: yojanpatel/remix
def main(input_filename, output_filename):
    # This takes your input track, sends it to the analyzer, and returns the results.
    audiofile = audio.LocalAudioFile(input_filename)

    # This gets a list of every bar in the track.
    # You can manipulate this just like any other Python list!
    bars = audiofile.analysis.bars

    # This makes a new list of "AudioQuantums".
    # Those are just any discrete chunk of audio:  bars, beats, etc.
    collect = audio.AudioQuantumList()

    # This loop puts the first item in the children of each bar into the new list.
    # A bar's children are beats!  Simple as that.
    for bar in bars:
        collect.append(bar.children()[0])

    # This assembles the pieces of audio defined in collect from the analyzed audio file.
    out = audio.getpieces(audiofile, collect)

    # This writes the newly created audio to the given file.
    out.encode(output_filename)
Code example #48
def main(infile, outfile, choices=4, bars=40):
    audiofile = audio.LocalAudioFile(infile)
    meter = audiofile.analysis.time_signature['value']
    fade_in = audiofile.analysis.end_of_fade_in
    fade_out = audiofile.analysis.start_of_fade_out

    beats = []
    for b in audiofile.analysis.beats:
        if b.start > fade_in or b.end < fade_out:
            beats.append(b)
    output = audio.AudioQuantumList()
    
    beat_array = []
    for m in range(meter):
        metered_beats = []
        for b in beats:
            if beats.index(b) % meter == m:
                metered_beats.append(b)
        beat_array.append(metered_beats)
    
    # Always start with the first beat
    output.append(beat_array[0][0]);
    for x in range(1, bars * meter):
        meter_index = x % meter
        next_candidates = beat_array[meter_index]

        def sorting_function(chunk, target_chunk=output[-1]):
            timbre = chunk.mean_pitches()
            target_timbre = target_chunk.mean_pitches()
            timbre_distance = numpy.linalg.norm(numpy.array(timbre) - numpy.array(target_timbre))
            return timbre_distance

        next_candidates = sorted(next_candidates, key=sorting_function)
        next_index = random.randint(0, min(choices, len(next_candidates) -1 ))
        output.append(next_candidates[next_index])
    
    out = audio.getpieces(audiofile, output)
    out.encode(outfile)
Code example #49
def do_something(sFileToConvert, nTempoToHit, nPitchOffset, nLoudness, sGenre):#dJoined):
  #audiofile.save() #apparently can cache analysis. 
  fFile = open('music/' + sGenre, 'r')
  aTimbres = cPickle.load(fFile)
  fFile.close()
  #print('using timbres:' + str(aTimbres))

  mod = my_mod_class()
  track = audio.LocalAudioFile(sFileToConvert, verbose='verbose')
  #need to set the track's shape to match the two channel sound?
  beats = track.analysis.beats
  out_shape = (len(track.data),2)
  anNewBeats = audio.AudioData(shape=out_shape, numChannels=2, sampleRate=44100)
  #print(track.analysis.key)
  for i, beat in enumerate(beats):
    #data = track[beat].data
    anNewBeats.append( mod.shiftPitchSemiTones( track[beat], int( nPitchOffset *1.5) ) )
    #convert from key to pitch

  track.data = anNewBeats
  segments = track.analysis.segments
  print(track.analysis.tempo['value'])
  fScaleTime = nTempoToHit / track.analysis.tempo['value']
  chunks = []
  for segment in segments:
    segment.duration = segment.duration /fScaleTime
    anTimbreDistances = []
    for anTimbre in aTimbres:
      timbre_diff = numpy.subtract(segment.timbre,anTimbre)
      anTimbreDistances.append(numpy.sum(numpy.square(timbre_diff)))
    segment.timbre = aTimbres[anTimbreDistances.index(min(anTimbreDistances))]
    chunks.append(segment)
  print(track.analysis.tempo['value'])
  
  modAudio = audio.getpieces(track.data, chunks)
  audio.fadeEdges(modAudio.data)
  #modAudio = audio.fadeEdges(modAudio.data)
  modAudio.encode('niceout.wav')
Code example #50
def main(infile, outfile, choices=4):
    audiofile = audio.LocalAudioFile(infile)
    meter = audiofile.analysis.time_signature['value']
    sections = audiofile.analysis.sections
    output = audio.AudioQuantumList()

    for section in sections:
        beats = []
        bars = section.children()
        for bar in bars:
            beats.extend(bar.children())
    
        beat_array = []
        for m in range(meter):
            metered_beats = []
            for b in beats:
                if beats.index(b) % meter == m:
                    metered_beats.append(b)
            beat_array.append(metered_beats)

        # Always start with the first beat
        output.append(beat_array[0][0]);
        for x in range(1, len(bars) * meter):
            meter_index = x % meter
            next_candidates = beat_array[meter_index]

            def sorting_function(chunk, target_chunk=output[-1]):
                timbre = chunk.mean_timbre()
                target_timbre = target_chunk.mean_timbre()
                timbre_distance = numpy.linalg.norm(numpy.array(timbre) - numpy.array(target_timbre))
                return timbre_distance

            next_candidates = sorted(next_candidates, key=sorting_function)
            next_index = random.randint(0, min(choices, len(next_candidates) - 1))
            output.append(next_candidates[next_index])

    out = audio.getpieces(audiofile, output)
    out.encode(outfile)
Code example #51
def main(toReverse, inputFilename, outputFilename):
    # This takes your input track, sends it to the analyzer, and returns the results.
    audioFile = audio.LocalAudioFile(inputFilename)

    # Checks what sort of reversing we're doing.
    if toReverse == 'beats':
        # This gets a list of every beat in the track.
        chunks = audioFile.analysis.beats
    elif toReverse == 'segments':
        # This gets a list of every segment in the track.
        # Segments are the smallest chunk of audio that Remix deals with
        chunks = audioFile.analysis.segments
    else:
        print usage
        return

    # Reverse the list!
    chunks.reverse()

    # This assembles the pieces of audio defined in chunks from the analyzed audio file.
    reversedAudio = audio.getpieces(audioFile, chunks)
    # This writes the newly created audio to the given file.
    reversedAudio.encode(outputFilename)
Code example #52
def main(units, inputFile, outputFile):
    # This takes your input track, sends it to the analyzer, and returns the results.
    audiofile = audio.LocalAudioFile(inputFile)

    # This makes a new list of "AudioQuantums".
    # Those are just any discrete chunk of audio:  bars, beats, etc
    collect = audio.AudioQuantumList()

    # If the analysis can't find any bars, stop!
    # (This might happen with really ambient music)
    if not audiofile.analysis.bars:
        print "No bars found in this analysis!"
        print "No output."
        sys.exit(-1)

    # This loop puts all but the last of each bar into the new list!
    for b in audiofile.analysis.bars[0:-1]:
        collect.extend(b.children()[0:-1])

        # If we're using tatums instead of beats, we want all but the last half (round down) of the last beat
        # A tatum is the smallest rhythmic subdivision of a beat -- http://en.wikipedia.org/wiki/Tatum_grid
        if units.startswith("tatum"):
            half = -(len(b.children()[-1].children()) // 2)
            collect.extend(b.children()[-1].children()[0:half])

    # Endings were rough, so leave everything after the start of the final bar intact:
    last = audio.AudioQuantum(
        audiofile.analysis.bars[-1].start,
        audiofile.analysis.duration - audiofile.analysis.bars[-1].start)
    collect.append(last)

    # This assembles the pieces of audio defined in collect from the analyzed audio file.
    out = audio.getpieces(audiofile, collect)

    # This writes the newly created audio to the given file.
    out.encode(outputFile)
Code example #53
File: kleptamatic.py Project: hannahmimi/kleptamatic
def renderSample(path_to_audio_file): 
	audio_file = audio.LocalAudioFile(path_to_audio_file)
	beats = audio_file.analysis.beats
	branches = similarSamples(audio_file, threshold = 250)

	# get the beats of interest
	branch, collect = [], []
	for each in branches: 
		branch.append(each)
		#branch.append(branches[each][0])

	# beats re-ordered for rendering
	for each in branch: 
		collect.append(beats[each])

	out = audio.getpieces(audio_file, collect)

	name = path_to_audio_file.split('/')

	output_file = "./mixed/" + name[len(name)-1].split('.')[0] + "_out.mp3"

	out.encode(output_file)

	return 
Code example #54
File: drums.py Project: yojanpatel/remix
def main(input_filename, output_filename, break_filename, break_parts, measures, mix):

    # This takes the input tracks, sends them to the analyzer, and returns the results.  
    audiofile = audio.LocalAudioFile(input_filename)
    sample_rate = audiofile.sampleRate
    breakfile = audio.LocalAudioFile(break_filename)

    # This converts the break to stereo, if it is mono
    if breakfile.numChannels == 1:
        breakfile = mono_to_stereo(breakfile)

    # This gets the number of channels in the main file
    num_channels = audiofile.numChannels

    # This splits the break into each beat
    drum_data = split_break(breakfile, break_parts)
    hits_per_beat = int(break_parts/(4 * measures))
    # This gets the bars from the input track
    bars = audiofile.analysis.bars
    
    # This creates the 'shape' of new array.
    # (Shape is a tuple (x, y) that indicates the length per channel of the audio file)
    out_shape = (len(audiofile)+100000,num_channels)
    # This creates a new AudioData array to write data to
    out = audio.AudioData(shape=out_shape, sampleRate=sample_rate,
                            numChannels=num_channels)
    if not bars:
        # If the analysis can't find any bars, stop!
        # (This might happen with really ambient music)
        print "Didn't find any bars in this analysis!"
        print "No output."
        sys.exit(-1)

    # This is where the magic happens:
    # For every beat in every bar except the last bar, 
    # map the tatums of the break to the tatums of the beat
    for bar in bars[:-1]:
        # This gets the beats in the bar, and loops over them
        beats = bar.children()
        for i in range(len(beats)):
            # This gets the index of matching beat in the break
            try:
                break_index = ((bar.local_context()[0] %\
                                measures) * 4) + (i % 4)
            except ValueError:
                break_index = i % 4
            # This gets the tatums from the beat of the break
            tats = range((break_index) * hits_per_beat,
                        (break_index + 1) * hits_per_beat)
            # This gets the number of samples in each tatum
            drum_samps = sum([len(drum_data[x]) for x in tats])

            # This gets the number of sample and the shape of the beat from the original track
            beat_samps = len(audiofile[beats[i]])
            beat_shape = (beat_samps,num_channels)
            
            # This get the shape of each tatum
            tat_shape = (float(beat_samps/hits_per_beat),num_channels)
        
            # This creates the new AudioData that will be filled with chunks of the drum break
            beat_data= audio.AudioData(shape=beat_shape,
                                        sampleRate=sample_rate,
                                        numChannels=num_channels)
            for j in tats:
                # This creates an audioData for each tatum
                tat_data= audio.AudioData(shape=tat_shape,
                                            sampleRate=sample_rate,
                                            numChannels=num_channels)
                # This corrects for length / timing:
                # If the original is shorter than the break, truncate drum hits to fit beat length
                if drum_samps > beat_samps/hits_per_beat:
                    tat_data.data = drum_data[j].data[:len(tat_data)]
                # If the original is longer, space out drum hits to fit beat length
                elif drum_samps < beat_samps/hits_per_beat:
                    tat_data.append(drum_data[j])

                # This adds each new tatum to the new beat.
                tat_data.endindex = len(tat_data)
                beat_data.append(tat_data)
                del(tat_data)

            # This corrects for rounding errors
            beat_data.endindex = len(beat_data)

            # This mixes the new beat data with the input data, and appends it to the final file
            mixed_beat = audio.mix(beat_data, audiofile[beats[i]], mix=mix)
            del(beat_data)
            out.append(mixed_beat)

    # This works out the last beat and appends it to the final file
    finale = bars[-1].start + bars[-1].duration
    last = audio.AudioQuantum(audiofile.analysis.bars[-1].start,
                            audiofile.analysis.duration - 
                              audiofile.analysis.bars[-1].start)
    last_data = audio.getpieces(audiofile,[last])
    out.append(last_data)
    
    # This writes the newly created audio to the given file.  
    out.encode(output_filename)
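The example above relies on two helpers, mono_to_stereo and split_break, that are defined elsewhere in the project and not shown here. A minimal sketch of what they might look like, assuming the standard echonest.remix AudioData interface (data, sampleRate, numChannels), is:

import numpy
import echonest.remix.audio as audio

def mono_to_stereo(audio_data):
    # Duplicate the single channel into a (samples, 2) array
    data = audio_data.data.flatten().tolist()
    audio_data.data = numpy.array((data, data)).swapaxes(0, 1)
    audio_data.numChannels = 2
    return audio_data

def split_break(breakfile, n):
    # Slice the drum break into n equal-length AudioData chunks
    drum_data = []
    for i in range(n):
        start = int((len(breakfile) * i) / n)
        end = int((len(breakfile) * (i + 1)) / n)
        chunk = audio.AudioData(ndarray=breakfile.data[start:end],
                                sampleRate=breakfile.sampleRate,
                                numChannels=breakfile.numChannels)
        drum_data.append(chunk)
    return drum_data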
Code example #55
0
File: drums.py Project: wilttang/remix-examples
def main(input_filename, output_filename, break_filename, break_parts,
         measures, mix):
    audiofile = audio.LocalAudioFile(input_filename)
    sample_rate = audiofile.sampleRate
    breakfile = audio.LocalAudioFile(break_filename)
    if breakfile.numChannels == 1:
        breakfile = mono_to_stereo(breakfile)
    num_channels = audiofile.numChannels
    drum_data = split_break(breakfile, break_parts)
    hits_per_beat = int(break_parts / (4 * measures))
    bars = audiofile.analysis.bars
    out_shape = (len(audiofile) + 100000, num_channels)
    out = audio.AudioData(shape=out_shape,
                          sampleRate=sample_rate,
                          numChannels=num_channels)
    if not bars:
        print "Didn't find any bars in this analysis!"
        print "No output."
        sys.exit(-1)
    for bar in bars[:-1]:
        beats = bar.children()
        for i in range(len(beats)):
            try:
                break_index = ((bar.local_context()[0] %\
                                measures) * 4) + (i % 4)
            except ValueError:
                break_index = i % 4
            tats = range((break_index) * hits_per_beat,
                         (break_index + 1) * hits_per_beat)
            drum_samps = sum([len(drum_data[x]) for x in tats])
            beat_samps = len(audiofile[beats[i]])
            beat_shape = (beat_samps, num_channels)
            tat_shape = (float(beat_samps / hits_per_beat), num_channels)
            beat_data = audio.AudioData(shape=beat_shape,
                                        sampleRate=sample_rate,
                                        numChannels=num_channels)
            for j in tats:
                tat_data = audio.AudioData(shape=tat_shape,
                                           sampleRate=sample_rate,
                                           numChannels=num_channels)
                if drum_samps > beat_samps / hits_per_beat:
                    # truncate drum hits to fit beat length
                    tat_data.data = drum_data[j].data[:len(tat_data)]
                elif drum_samps < beat_samps / hits_per_beat:
                    # space out drum hits to fit beat length
                    #temp_data = add_fade_out(drum_data[j])
                    tat_data.append(drum_data[j])
                tat_data.endindex = len(tat_data)
                beat_data.append(tat_data)
                del (tat_data)
            # account for rounding errors
            beat_data.endindex = len(beat_data)
            mixed_beat = audio.mix(beat_data, audiofile[beats[i]], mix=mix)
            del (beat_data)
            out.append(mixed_beat)
    finale = bars[-1].start + bars[-1].duration
    last = audio.AudioQuantum(
        audiofile.analysis.bars[-1].start,
        audiofile.analysis.duration - audiofile.analysis.bars[-1].start)
    last_data = audio.getpieces(audiofile, [last])
    out.append(last_data)
    out.encode(output_filename)
Code example #56
0
"""Reverse a song by playing its beats forward starting from the end of the song"""
import echonest.remix.audio as audio

# Easy wrapper around mp3 decoding and Echo Nest analysis
audio_file = audio.LocalAudioFile("file_example_WAV_1MG.wav")

# You can manipulate the beats in a song as a native python list
beats = audio_file.analysis.beats
beats.reverse()

# And render the list as a new audio file!
audio.getpieces(audio_file,
                beats).encode("file_example_WAV_1MGBackwardsByBeat.wav")
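Because the beats behave like a native Python list, other re-arrangements work the same way. For example (a small sketch reusing the same input file as above), rendering only every other beat:

import echonest.remix.audio as audio

audio_file = audio.LocalAudioFile("file_example_WAV_1MG.wav")
beats = audio_file.analysis.beats

# Keep every other beat and render the result
every_other = beats[::2]
audio.getpieces(audio_file, every_other).encode("file_example_WAV_1MGEveryOtherBeat.wav")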
Code example #57
0
File: drums_trap.py Project: mo-hit/trapremix
def main(input_filename, output_filename, break_filename, break_parts,
         measures, mix, samples_dir):
    print break_filename
    audiofile = audio.LocalAudioFile(input_filename)
    sample_rate = audiofile.sampleRate
    breakfile = audio.LocalAudioFile(break_filename)
    if breakfile.numChannels == 1:
        breakfile = mono_to_stereo(breakfile)
    num_channels = audiofile.numChannels
    drum_data = split_break(breakfile, break_parts)
    hits_per_beat = int(break_parts / (4 * measures))
    bars = audiofile.analysis.bars
    out_shape = (len(audiofile) + 100000, num_channels)
    out = audio.AudioData(shape=out_shape,
                          sampleRate=sample_rate,
                          numChannels=num_channels)
    if not bars:
        print "Didn't find any bars in this analysis!"
        print "No output."
        sys.exit(-1)
    for bar in bars[:-1]:
        beats = bar.children()
        for i in range(len(beats)):
            try:
                break_index = ((bar.local_context()[0] %\
                                measures) * 4) + (i % 4)
            except ValueError:
                break_index = i % 4
            tats = range((break_index) * hits_per_beat,
                         (break_index + 1) * hits_per_beat)
            drum_samps = sum([len(drum_data[x]) for x in tats])
            beat_samps = len(audiofile[beats[i]])
            beat_shape = (beat_samps, num_channels)
            tat_shape = (float(beat_samps / hits_per_beat), num_channels)
            beat_data = audio.AudioData(shape=beat_shape,
                                        sampleRate=sample_rate,
                                        numChannels=num_channels)
            for j in tats:
                tat_data = audio.AudioData(shape=tat_shape,
                                           sampleRate=sample_rate,
                                           numChannels=num_channels)
                if drum_samps > beat_samps / hits_per_beat:
                    # truncate drum hits to fit beat length
                    tat_data.data = drum_data[j].data[:len(tat_data)]
                elif drum_samps < beat_samps / hits_per_beat:
                    # space out drum hits to fit beat length
                    #temp_data = add_fade_out(drum_data[j])
                    tat_data.append(drum_data[j])
                tat_data.endindex = len(tat_data)
                beat_data.append(tat_data)
                del (tat_data)
            # account for rounding errors
            beat_data.endindex = len(beat_data)
            mixed_beat = audio.mix(beat_data, audiofile[beats[i]], mix=mix)
            del (beat_data)
            out.append(mixed_beat)

    finale = bars[-1].start + bars[-1].duration
    last = audio.AudioQuantum(
        audiofile.analysis.bars[-1].start,
        audiofile.analysis.duration - audiofile.analysis.bars[-1].start)
    last_data = audio.getpieces(audiofile, [last])
    out.append(last_data)
    samples_avail = os.listdir(samples_dir)

    #throw down some classic trap samples
    #the output is the same length as the input track, so iterate over the input's sections to place one sample at the start of each
    for section in audiofile.analysis.sections[1:]:

        overlay_sound_file = pickSample(samples_avail)
        soundpath = os.path.join(str(samples_dir), overlay_sound_file)
        print soundpath
        sample = audio.LocalAudioFile(soundpath)

        # Mix the audio
        volume = 0.9
        pan = 0
        startsample = int(section.start * out.sampleRate)
        seg = sample[0:]
        seg.data *= (volume - (pan * volume), volume + (pan * volume)
                     )  # pan + volume
        if out.data.shape[0] - startsample > seg.data.shape[0]:
            out.data[startsample:startsample + len(seg.data)] += seg.data[0:]

    out.encode(output_filename)
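This variant also calls a pickSample helper that is not shown. A minimal sketch, assuming it simply picks a random file name from the directory listing, could be:

import random

def pickSample(samples_avail):
    # Choose one overlay sample file name at random
    return random.choice(samples_avail)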
Code example #58
0
File: bcluster.py Project: blacker/elvin
def render_clusters(afile, clusters, filename):
    for i, clust in enumerate(clusters):
        out = audio.getpieces(afile, clust)
        # Use a new name so the passed-in filename is not clobbered on later iterations,
        # and drop the extension before appending the cluster suffix
        outname = '%s-cluster-%i.wav' % (filename.split('.')[0], i)
        out.encode(outname)
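A usage sketch for render_clusters, assuming clusters is a list of lists of AudioQuantum objects (here a toy split of the beats into two halves; the project's real clustering step is not shown):

import echonest.remix.audio as audio

afile = audio.LocalAudioFile("input.mp3")
beats = afile.analysis.beats

# Toy "clustering": first half of the beats vs. second half
clusters = [beats[:len(beats) // 2], beats[len(beats) // 2:]]

# Writes input-cluster-0.wav and input-cluster-1.wav
render_clusters(afile, clusters, "input.mp3")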