Code example #1
File: main.py Project: ironmann250/AutoDJ
def makeTransition(inData, outData, inSong, outSong):
    """
	Takes two arrays of AudioQuantum objects and returns a single AudioData object
	with a linear crossfade and a beatmatch
	"""
    # The number of beats has to be the same
    assert (len(inData) == len(outData))

    # If the tempos are the same then it is easy
    if inSong.bpm == outSong.bpm:
        mixInSeg = audio.getpieces(inSong.AudioFile, inData)
        mixOutSeg = audio.getpieces(outSong.AudioFile, outData)
        # mixInSeg.encode("outfiles/Mixinseg.mp3")
        # mixOutSeg.encode("outfiles/MixOutseg.mp3")
        transition_length = 60.0 / 128.0 * MIX_LENGTH  # duration of MIX_LENGTH beats at an assumed 128 BPM
        transitionSeg = action.Crossfade([mixOutSeg, mixInSeg], [0.0, 0.0],
                                         transition_length,
                                         mode="linear").render()
        return transitionSeg

    # Else we iterate over each one
    else:
        transitionTime = 0
        tempoDiff = inSong.bpm - outSong.bpm
        marginalInc = float(tempoDiff) / float(MIX_LENGTH)
        inCollect = []
        outCollect = []
        # Rather than a linear slowdown, it is a step function where each beat slows down by marginalInc
        for i in range(MIX_LENGTH):
            inAudio = inData[i].render()
            outAudio = outData[i].render()
            # We scale the in and out beats so that they are at the same tempo
            inScaled = dirac.timeScale(
                inAudio.data, inSong.bpm / (outSong.bpm + i * marginalInc))
            outScaled = dirac.timeScale(
                outAudio.data, outSong.bpm / (outSong.bpm + i * marginalInc))
            transitionTime += 60 / (outSong.bpm + i * marginalInc)
            ts = audio.AudioData(ndarray=inScaled,
                                 shape=inScaled.shape,
                                 sampleRate=inSong.AudioFile.sampleRate,
                                 numChannels=inScaled.shape[1])
            ts2 = audio.AudioData(ndarray=outScaled,
                                  shape=outScaled.shape,
                                  sampleRate=outSong.AudioFile.sampleRate,
                                  numChannels=outScaled.shape[1])
            inCollect.append(ts)
            outCollect.append(ts2)
        # Collect the audio and crossfade it
        mixInSeg = audio.assemble(inCollect, numChannels=2)
        mixOutSeg = audio.assemble(outCollect, numChannels=2)
        # mixInSeg.encode("outfiles/Mixinseg.mp3")
        # mixOutSeg.encode("outfiles/MixOutseg.mp3")
        transitionSeg = action.Crossfade([mixOutSeg, mixInSeg], [0.0, 0.0],
                                         transitionTime,
                                         mode="exponential").render()
        return transitionSeg
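Note: the short sketch below is not part of either AutoDJ project; it only prints the per-beat stretch ratios makeTransition applies when the two tempos differ. MIX_LENGTH and the BPM values here are made up for illustration.

MIX_LENGTH = 8
in_bpm, out_bpm = 126.0, 120.0          # hypothetical tempos of the incoming/outgoing songs
marginalInc = (in_bpm - out_bpm) / MIX_LENGTH
for i in range(MIX_LENGTH):
    target = out_bpm + i * marginalInc  # tempo of beat i of the transition
    # ratios handed to dirac.timeScale for the incoming and outgoing beat
    print i, round(in_bpm / target, 3), round(out_bpm / target, 3)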
Code example #2
File: main.py Project: vivjay30/AutoDJ
def makeTransition(inData, outData, inSong, outSong):
    """
	Takes two arrays of AudioQuantum objects and returns a single AudioData object
	with a linear crossfade and a beatmatch
	"""
    # The number of beats has to be the same
    assert len(inData) == len(outData)

    # If the tempos are the same then it is easy
    if inSong.bpm == outSong.bpm:
        mixInSeg = audio.getpieces(inSong.AudioFile, inData)
        mixOutSeg = audio.getpieces(outSong.AudioFile, outData)
        # mixInSeg.encode("outfiles/Mixinseg.mp3")
        # mixOutSeg.encode("outfiles/MixOutseg.mp3")
        transition_length = 60.0 / 128.0 * MIX_LENGTH  # duration of MIX_LENGTH beats at an assumed 128 BPM
        transitionSeg = action.Crossfade([mixOutSeg, mixInSeg], [0.0, 0.0], transition_length, mode="linear").render()
        return transitionSeg

    # Else we iterate over each one
    else:
        transitionTime = 0
        tempoDiff = inSong.bpm - outSong.bpm
        marginalInc = float(tempoDiff) / float(MIX_LENGTH)
        inCollect = []
        outCollect = []
        # Rather than a linear slowdown, it is a step function where each beat slows down by marginalInc
        for i in range(MIX_LENGTH):
            inAudio = inData[i].render()
            outAudio = outData[i].render()
            # We scale the in and out beats so that they are at the same tempo
            inScaled = dirac.timeScale(inAudio.data, inSong.bpm / (outSong.bpm + i * marginalInc))
            outScaled = dirac.timeScale(outAudio.data, outSong.bpm / (outSong.bpm + i * marginalInc))
            transitionTime += 60 / (outSong.bpm + i * marginalInc)
            ts = audio.AudioData(
                ndarray=inScaled,
                shape=inScaled.shape,
                sampleRate=inSong.AudioFile.sampleRate,
                numChannels=inScaled.shape[1],
            )
            ts2 = audio.AudioData(
                ndarray=outScaled,
                shape=outScaled.shape,
                sampleRate=outSong.AudioFile.sampleRate,
                numChannels=outScaled.shape[1],
            )
            inCollect.append(ts)
            outCollect.append(ts2)
        # Collect the audio and crossfade it
        mixInSeg = audio.assemble(inCollect, numChannels=2)
        mixOutSeg = audio.assemble(outCollect, numChannels=2)
        # mixInSeg.encode("outfiles/Mixinseg.mp3")
        # mixOutSeg.encode("outfiles/MixOutseg.mp3")
        transitionSeg = action.Crossfade([mixOutSeg, mixInSeg], [0.0, 0.0], transitionTime, mode="exponential").render()
        return transitionSeg
Code example #3
File: main.py Project: vivjay30/AutoDJ
def renderList(songList, outFile):
    """
	Takes a list of songs and outputs them to outFile
	Has to beatmatch and cross fade
	Assumes len(songList) >= 2
	"""
    mixOutSeg = None
    currAudio = None
    prevSong = None
    for i in range(len(songList)):
        currSong = songList[i]
        currBeats = currSong.AudioFile.analysis.beats
        # This happens on the first iteration, nothing to mix in so just play until mix out
        if not mixOutSeg:
            currAudio = audio.getpieces(currSong.AudioFile, currBeats[currSong.mix_in : currSong.mix_out])
        else:
            mixInSeg = currBeats[currSong.mix_in : currSong.mix_in + MIX_LENGTH]
            transitionSeg = makeTransition(mixInSeg, mixOutSeg, currSong, prevSong)
            transitionSeg.encode("outfiles/transition.mp3")
            mainSeg = audio.getpieces(currSong.AudioFile, currBeats[currSong.mix_in + MIX_LENGTH : currSong.mix_out])
            currAudio = audio.assemble([currAudio, transitionSeg, mainSeg])
        mixOutSeg = currBeats[currSong.mix_out : currSong.mix_out + MIX_LENGTH]
        prevSong = currSong

        # currAudio = audio.assemble([currAudio, mixOutSeg])
    currAudio.encode(outFile)
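Note: renderList and makeTransition assume Song objects exposing AudioFile, bpm, mix_in and mix_out. The stand-in below is only a sketch of that assumed interface, inferred from the calls above (the real projects may build these objects differently); the echonest.remix import path is also an assumption (older remix releases use echonest.audio).

import echonest.remix.audio as audio

class Song(object):
    def __init__(self, path, mix_in, mix_out):
        self.AudioFile = audio.LocalAudioFile(path)        # analyzed track
        self.bpm = self.AudioFile.analysis.tempo['value']  # tempo estimate
        self.mix_in = mix_in    # beat index where playback of this song starts
        self.mix_out = mix_out  # beat index where the mix out of this song begins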
Code example #4
def main(directory, input_filename, output_filename):
    semitones = []
    noteList = [[0 for x in range(12)] for x in range(6)]
    collect = audio.AudioQuantumList()
    final = audio.AudioQuantumList()
    #createAllNotes()
    initNoteList(noteList)
    print len(noteList)
    print noteList[0][0].analysis.segments.timbre
    audiofile = audio.LocalAudioFile(input_filename)
    songSegments = audiofile.analysis.segments
    bmp = 10000.0  # best (smallest) pitch distance found so far
    bmpi = 0       # pitch-class index (0-11) of that best match
    bmt = 10000.0  # best (smallest) timbre distance found so far
    bmti = 0       # noteList row index (0-5) of that best timbre match
    #print len(songSegments)
    for i in range(len(songSegments)):
        for j in range(12):
            noteSegments = noteList[0][j].analysis.segments
            pDist = distFinder.cosine(songSegments[i].pitches, noteSegments[len(noteSegments) / 2].pitches)
            if pDist < bmp:
                bmp = pDist
                bmpi = j
        for k in range(6):
            noteSegments = noteList[k][bmpi].analysis.segments
            tDist = distFinder.cosine(songSegments[i].timbre[1], noteSegments[len(noteSegments) / 2].timbre[1])
            if tDist < bmt:
                bmt = tDist
                bmti = k 
        print str(i * 100 / len(songSegments)) + '%'
        matchDuration(noteList[bmti][bmpi].analysis.segments, songSegments[i], collect)
        bmp = 10000.0
        bmt = 10000.0
    out = audio.assemble(collect)
    out.encode(output_filename)
Code example #5
File: action.py Project: tkell/remix
def render(actions, filename, verbose=True):
    """Calls render on each action in actions, concatenates the results, 
    renders an audio file, and returns a path to the file"""
    pieces = [a.render() for a in actions]
    # TODO: allow numChannels and sampleRate to vary.
    out = assemble(pieces, numChannels=2, sampleRate=44100, verbose=verbose)
    return out, out.encode(filename)
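Note: a minimal usage sketch for the render() helper above, assuming it sits in remix's action module next to a Playback(track, start, duration) action; the filenames are placeholders.

import echonest.remix.audio as audio
from echonest.remix.action import Playback, render

track = audio.LocalAudioFile("song.mp3")                        # placeholder input
out, _ = render([Playback(track, 0.0, 30.0)], "first_30s.mp3")  # play the first 30 seconds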
Code example #6
def main(input_filename, output_filename):
    # This takes your input track, sends it to the analyzer, and returns the results.
    audiofile = audio.LocalAudioFile(input_filename)

    # Just a local alias to the soundtouch library, which handles the tempo shifting.
    soundtouch = modify.Modify()

    # This gets a list of every beat in the track.
    # You can manipulate this just like any other Python list!
    beats = audiofile.analysis.beats

    # The output array
    collect = []

    # This loop stretches each beat by a varying ratio, and then re-assembles them.
    for beat in beats:
        # Find out where in the bar the beat is, and calculate a ratio based on that.
        context = beat.local_context()
        ratio = (math.cos(math.pi * 2 * context[0] / float(context[1])) /
                 2) + 1
        # Stretch the beat!  SoundTouch returns an AudioData object
        new = soundtouch.shiftTempo(audiofile[beat], ratio)
        # Append the stretched beat to the list of beats
        collect.append(new)

    # Assemble and write the output data
    out = audio.assemble(collect)
    out.encode(output_filename)
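Note: the sketch below is not part of the example; it just prints the cosine tempo ratio the loop above applies at each position in a bar, assuming 4 beats per bar, i.e. context == (beat_index, 4).

import math
for beat_index in range(4):
    print beat_index, (math.cos(math.pi * 2 * beat_index / 4.0) / 2) + 1   # 1.5, 1.0, 0.5, 1.0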
Code example #7
def changeToCorrectTempo(audioFile, targetTempo):
    #takes in an audioFile, targetTempo, returns audioFile @ correct tempo
    currentTempo = audioFile.analysis.tempo['value']
    bars = audioFile.analysis.bars
    collect = []

    # This loop stretches each beat by a constant ratio to reach the target tempo, and then re-assembles them.
    for bar in bars:
        # Get the beats in the bar
        beats = bar.children()
        for beat in beats:
            # Formula for changing currentTempo to targetTempo
            # (note that dirac can't compress by less than 0.5!)
            ratio = currentTempo / targetTempo

            # Get the raw audio data from the beat and scale it by the ratio
            # dirac only works on raw data, and only takes floating-point ratios
            beat_audio = beat.render()
            scaled_beat = dirac.timeScale(beat_audio.data, ratio)
            # Create a new AudioData object from the scaled data
            ts = audio.AudioData(ndarray=scaled_beat,
                                 shape=scaled_beat.shape,
                                 sampleRate=audioFile.sampleRate,
                                 numChannels=scaled_beat.shape[1])
            # Append the new data to the output list!
            collect.append(ts)

    # Assemble and write the output data
    output = audio.assemble(collect, numChannels=2)
    return output
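Note: a hedged usage sketch for changeToCorrectTempo; the input filename and the 128 BPM target are placeholders, and the audio alias is assumed to be the same echonest remix module used above.

import echonest.remix.audio as audio

audiofile = audio.LocalAudioFile("song.mp3")       # placeholder input
shifted = changeToCorrectTempo(audiofile, 128.0)   # AudioData at the target tempo
shifted.encode("song_128bpm.mp3")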
Code example #8
File: cycle_soundtouch.py Project: DrawMusic/remix
def main(input_filename, output_filename):
    # This takes your input track, sends it to the analyzer, and returns the results.
    audiofile = audio.LocalAudioFile(input_filename)

    # Just a local alias to the soundtouch library, which handles the tempo shifting.
    soundtouch = modify.Modify()

    # This gets a list of every beat in the track.
    # You can manipulate this just like any other Python list!
    beats = audiofile.analysis.beats

    # The output array
    collect = []

    # This loop stretches each beat by a varying ratio, and then re-assembles them.
    for beat in beats:
        # Find out where in the bar the beat is, and calculate a ratio based on that.
        context = beat.local_context()
        ratio = (math.cos(math.pi * 2 * context[0]/float(context[1])) / 2) + 1
        # Stretch the beat!  SoundTouch returns an AudioData object
        new = soundtouch.shiftTempo(audiofile[beat], ratio)
        # Append the stretched beat to the list of beats
        collect.append(new)
    
    # Assemble and write the output data
    out = audio.assemble(collect)
    out.encode(output_filename)
Code example #9
def main(input_one, input_two):
	track_one = audio.LocalAudioFile(input_one)
	track_two = audio.LocalAudioFile(input_two)
	section_one = track_one.analysis.sections[0]
	section_two = track_two.analysis.sections[-1]
	tempo_one = section_one.tempo
	tempo_two = section_two.tempo
	tempo_diff = tempo_two - tempo_one
	bars_one = section_one.children()
	collect = []
	for bar in bars_one:
		if bar == bars_one[-1]:
			numbeats = len(bar.children())
			step = tempo_diff/numbeats
			for i, beat in enumerate(bar.children()):
				beat_audio = beat.render()
				ratio = (tempo_one + step*(i+1))/(tempo_one + step*i)
				scaled_beat = dirac.timeScale(beat_audio.data, ratio)
				new_beat = audio.AudioData(ndarray=scaled_beat, shape=scaled_beat.shape, sampleRate=track_one.sampleRate, numChannels=scaled_beat.shape[1])
				collect.append(new_beat)
			break
		for beat in bar.children():
			collect.append(beat.render())
	out_data_one = audio.assemble(collect, numChannels=2)
	out_name_one = input_one.split('.')[0]+'-stretch.mp3'
	out_data_one.encode(out_name_one)
	play_one = audio.LocalAudioFile(out_name_one)
	aqp_one = Player(play_one)
	aqp_two = Player(track_two)
	beats_one = play_one.analysis.beats
	for beat in beats_one:
		aqp_one.play(beat)
	aqp_one.closeStream()
	aqp_two.play(section_two)
	aqp_two.closeStream()
Code example #10
File: action.py Project: Mdelvalle/remix
def render(actions, filename, verbose=True):
    """Calls render on each action in actions, concatenates the results, 
    renders an audio file, and returns a path to the file"""
    pieces = [a.render() for a in actions]
    # TODO: allow numChannels and sampleRate to vary.
    out = assemble(pieces, numChannels=2, sampleRate=44100, verbose=verbose)
    return out, out.encode(filename)
Code example #11
File: main.py Project: ironmann250/AutoDJ
def renderList(songList, outFile):
    """
	Takes a list of songs and outputs them to outFile
	Has to beatmatch and cross fade
	Assumes len(songList) >= 2
	"""
    mixOutSeg = None
    currAudio = None
    prevSong = None
    for i in range(len(songList)):
        currSong = songList[i]
        currBeats = currSong.AudioFile.analysis.beats
        # This happens on the first iteration, nothing to mix in so just play until mix out
        if not mixOutSeg:
            currAudio = audio.getpieces(
                currSong.AudioFile,
                currBeats[currSong.mix_in:currSong.mix_out])
        else:
            mixInSeg = currBeats[currSong.mix_in:currSong.mix_in + MIX_LENGTH]
            transitionSeg = makeTransition(mixInSeg, mixOutSeg, currSong,
                                           prevSong)
            transitionSeg.encode("outfiles/transition.mp3")
            mainSeg = audio.getpieces(
                currSong.AudioFile,
                currBeats[currSong.mix_in + MIX_LENGTH:currSong.mix_out])
            currAudio = audio.assemble([currAudio, transitionSeg, mainSeg])
        mixOutSeg = currBeats[currSong.mix_out:currSong.mix_out + MIX_LENGTH]
        prevSong = currSong

    # currAudio = audio.assemble([currAudio, mixOutSeg])
    currAudio.encode(outFile)
Code example #12
File: automash.py Project: thisfred/automashup
def addBarsToAudio(clInfo, sectionSongData, sectionParentQnt, indexBars):
    # The strategy for bars logic is:
    #  + if it comes in empty, initialise it.
    #  + pick some number of bars (eg 2, or random small) to use as a pool
    #  + cycle through them, including the first bar of the next section.
    #    So you reset on the second bar of each section.
    #  + to reset, change clusters with some prob, and randomly pick bars from
    #    the cluster.

    # secAData is section audio data
    # for each bar in this section:
    unmixedBars = []
    print '\taddBarsToAudio: section has %d children:' % len(
        sectionParentQnt.children())
    for i, barDestQnt in enumerate(sectionParentQnt.children()):
        # first, potentially update our pool bars
        # add the bar after the selected one
        if indexBars == None or i == 1:
            # move along.  list of cluster idx, bar idx's
            barSongs = [None, None, None, None]
            if indexBars == None:
                indexBars = [None, None, None, None, None]
            while None in barSongs:
                # advance the cluster
                newIndexBars = [
                    clInfo['bars'].nextCluster(indexBars[0]), None, None, None,
                    None
                ]
                for j in range(1, len(indexBars)):
                    # for each pool...
                    if j == 2 or j == 4:
                        newIndexBars[j] = min(newIndexBars[j - 1] + 1,
                                              clInfo['bars'].nbRegions() -
                                              1)  # continuity!
                    else:
                        newIndexBars[j] = clInfo['bars'].nextRegion(newIndexBars[0],\
                                                                        indexBars[j] )
                    # try loading the data
                    barSongs[j-1] = getSongFromCache( clInfo['bars'].getFilenameOfRegion(\
                            newIndexBars[j] ) )
            # update the var
            indexBars = newIndexBars
        # assertion: these bars cannot give no data
        # use this info to get the bars we want, alternate.
        npool = len(indexBars) - 1
        poolIdx = i % npool
        fnSrc = clInfo['bars'].getFilenameOfRegion(indexBars[1 + poolIdx])
        barSong = getSongFromCache(fnSrc)
        assert barSong != None
        barSong = barSong.m_adata
        barSrcIdxIntoSong = clInfo['bars'].getSongRegionIdx(indexBars[1 +
                                                                      poolIdx])
        barSrcQnt = barSong.analysis.bars[barSrcIdxIntoSong]
        barSrcAData = barSong[barSrcQnt]
        #   mix the bar into the section
        unmixedBars.append(barSrcAData)
        #print '\t\ti=',i,', indexBars=', indexBars
    # return the result
    return (audio.assemble(unmixedBars), indexBars)
Code example #13
File: automash.py Project: peterklipfel/automashup
def addBarsToAudio( clInfo, sectionSongData, sectionParentQnt, indexBars ):
    # The strategy for bars logic is:
    #  + if it comes in empty, initialise it.
    #  + pick some number of bars (eg 2, or random small) to use as a pool
    #  + cycle through them, including the first bar of the next section.
    #    So you reset on the second bar of each section.
    #  + to reset, change clusters with some prob, and randomly pick bars from
    #    the cluster.

    # secAData is section audio data
    # for each bar in this section:
    unmixedBars = []
    print '\taddBarsToAudio: section has %d children:' % len(sectionParentQnt.children())
    for i, barDestQnt in enumerate(sectionParentQnt.children()):
        # first, potentially update our pool bars
        # add the bar after the selected one
        if indexBars == None or i==1:
            # move along.  list of cluster idx, bar idx's
            barSongs = [None,None,None,None]
            if indexBars == None:
                indexBars = [None,None,None,None,None]
            while None in barSongs:
                # advance the cluster
                newIndexBars = [clInfo['bars'].nextCluster( indexBars[0] ), None, None, None, None ]
                for j in range(1,len(indexBars)):
                    # for each pool...
                    if j==2 or j==4:
                        newIndexBars[j] = min(newIndexBars[j-1]+1,clInfo['bars'].nbRegions()-1) # continuity!
                    else:
                        newIndexBars[j] = clInfo['bars'].nextRegion(newIndexBars[0],\
                                                                        indexBars[j] )
                    # try loading the data
                    barSongs[j-1] = getSongFromCache( clInfo['bars'].getFilenameOfRegion(\
                            newIndexBars[j] ) )
            # update the var
            indexBars = newIndexBars
        # assertion: these bars cannot give no data
        # use this info to get the bars we want, alternate.
        npool = len(indexBars)-1
        poolIdx = i%npool
        fnSrc = clInfo['bars'].getFilenameOfRegion( indexBars[1+poolIdx] )
        barSong = getSongFromCache( fnSrc )
        assert barSong != None
        barSong = barSong.m_adata
        barSrcIdxIntoSong = clInfo['bars'].getSongRegionIdx( indexBars[1+poolIdx] )
        barSrcQnt = barSong.analysis.bars[ barSrcIdxIntoSong ]
        barSrcAData = barSong[ barSrcQnt ]
        #   mix the bar into the section
        unmixedBars.append( barSrcAData )
        #print '\t\ti=',i,', indexBars=', indexBars
    # return the result
    return ( audio.assemble( unmixedBars ), indexBars )
Code example #14
def createAllNotes():
    allNotes = audio.AudioQuantumList()
    semitones = audio.AudioQuantumList()
    createSemitones(directory, semitones)
    for i in range(4):
        addOctave(semitones, i, allNotes)
    for i in range(1, 3):
        addOctave(semitones, i * -1, allNotes)
    for i in range(len(allNotes)):
        note = audio.AudioQuantumList()
        note.append(allNotes[i])
        out = audio.assemble(note)
        out.encode(str(i) + ".mp3")
Code example #15
def createAllNotes():
    allNotes =  audio.AudioQuantumList()
    semitones = audio.AudioQuantumList()
    createSemitones(directory, semitones) 
    for i in range(4):
        addOctave(semitones, i, allNotes)
    for i in range(1,3):
        addOctave(semitones, i*-1, allNotes)
    for i in range(len(allNotes)):
        note = audio.AudioQuantumList()
        note.append(allNotes[i])
        out = audio.assemble(note)
        out.encode(str(i) + ".mp3") 
Code example #16
def main(mp3_list, transition_ratio, segment_temp_change_limit, output_file, delay, compare_tempo, algorithm):
    track_analysis = []
    for i in range(0,len(mp3_list)):
        track_analysis.append(track.track_from_filename(mp3_list[i]))

    for t in track_analysis:
        t.get_analysis()

    print "continuing..."
    #Reorders mp3_list and generates the transitions
    transitions, mp3_list = generate_transitions(mp3_list, transition_ratio, delay, compare_tempo, algorithm, track_analysis)

    print mp3_list
    print transitions

    #generate the array of audio quantums
    first_index, _ = transitions[0]
    collects = []
    collects.append(beatshift.tempo_shift(mp3_list[0],(0,first_index),segment_temp_change_limit,mp3_list[1],delay))

    for i in range(1,len(transitions)):
        end_segment, _ = transitions[i]
        _, start_segment = transitions[i-1]

        if (start_segment >= end_segment): #if loopback needed
            loop_trans = generate_loopback(transitions[i-1],transitions[i],mp3_list,i,delay,compare_tempo)
            start_trans, end_trans = loop_trans

            collects.append(song_loopback(start_segment, end_trans, mp3_list[i],delay))

            start_segment = start_trans
        print mp3_list[i]
        print mp3_list[i+1]
        print (start_segment, end_segment)
        collects.append(beatshift.tempo_shift(mp3_list[i],(start_segment,end_segment),segment_temp_change_limit,mp3_list[i+1],delay))

    _, last_index = transitions[len(transitions)-1]
    last_song = audio.LocalAudioFile(mp3_list[len(mp3_list)-1])

    col_append = []
    for i in range(last_index, len(last_song.analysis.segments)):
        col_append.append(last_song.analysis.segments[i].render())

    collects.append(col_append)

    #write to file
    #the sum(collects, []) takes the list of lists of quantum and converts it
    #to a single list of quantums
    out = audio.assemble(sum(collects, []), numChannels=2)
    out.encode(output_file)
Code example #17
def main(input_filename, output_filename, ratio):
    audiofile = audio.LocalAudioFile(input_filename)
    beats = audiofile.analysis.beats
    collect = []

    for beat in beats:
        beat_audio = beat.render()
        scaled_beat = dirac.timeScale(beat_audio.data, ratio)
        ts = audio.AudioData(ndarray=scaled_beat, shape=scaled_beat.shape, 
                        sampleRate=audiofile.sampleRate, numChannels=scaled_beat.shape[1])
        collect.append(ts)

    out = audio.assemble(collect, numChannels=2)
    out.encode(output_filename)
Code example #18
File: cycle_soundtouch.py Project: DrawMusic/remix
def main(input_filename, output_filename):

    audiofile = audio.LocalAudioFile(input_filename)
    soundtouch = modify.Modify()
    beats = audiofile.analysis.beats
    collect = []

    for beat in beats:
        context = beat.local_context()
        ratio = (math.cos(math.pi * 2 * context[0]/float(context[1])) / 2) + 1
        new = soundtouch.shiftTempo(audiofile[beat], ratio)
        collect.append(new)
    
    out = audio.assemble(collect)
    out.encode(output_filename)
Code example #19
def main(input, semitones):
    track = audio.LocalAudioFile(input)
    collect = []
    for section in track.analysis.sections:
        section_data = section.render().data
        new_data = PyPitch.shiftPitchSemiTones(section_data, semitones)
        ts = audio.AudioData(ndarray=new_data,
                             shape=new_data.shape,
                             sampleRate=track.sampleRate,
                             numChannels=new_data.shape[1])
        collect.append(ts)
    out = audio.assemble(collect, numChannels=2)
    out.encode(
        input.split('.')[0] + '_' + ('d' if semitones < 0 else 'u') +
        str(abs(semitones)) + '.mp3')
Code example #20
def main(input_filename, output_filename):

    audiofile = audio.LocalAudioFile(input_filename)
    soundtouch = modify.Modify()
    beats = audiofile.analysis.beats
    collect = []

    for beat in beats:
        context = beat.local_context()
        ratio = (math.cos(math.pi * 2 * context[0]/float(context[1])) / 2) + 1
        new = soundtouch.shiftTempo(audiofile[beat], ratio)
        collect.append(new)
    
    out = audio.assemble(collect)
    out.encode(output_filename)
Code example #21
def main(input_filename, output_filename, ratio):
    audiofile = audio.LocalAudioFile(input_filename)
    beats = audiofile.analysis.beats
    collect = []

    for beat in beats:
        beat_audio = beat.render()
        scaled_beat = dirac.timeScale(beat_audio.data, ratio)
        ts = audio.AudioData(ndarray=scaled_beat,
                             shape=scaled_beat.shape,
                             sampleRate=audiofile.sampleRate,
                             numChannels=scaled_beat.shape[1])
        collect.append(ts)

    out = audio.assemble(collect, numChannels=2)
    out.encode(output_filename)
Code example #22
File: cycle.py Project: MattHulse/remix
def main():
    try:
        in_filename = sys.argv[1]
        out_filename = sys.argv[2]
    except Exception:
        print USAGE
        sys.exit(-1)
    afile = audio.LocalAudioFile(in_filename)
    st = modify.Modify()
    beats = afile.analysis.beats
    collect = []
    for beat in beats:
        context = beat.local_context()
        ratio = (math.cos(math.pi * 2 * context[0]/float(context[1])) / 2) + 1
        new = st.shiftTempo(afile[beat], ratio)
        collect.append(new)
    out = audio.assemble(collect)
    out.encode(out_filename)
Code example #23
File: swing.py Project: MattHulse/remix
def main(inputFilename, outputFilename, swing):
    
    infile = audio.LocalAudioFile(inputFilename)
    tats = infile.analysis.tatums
    st = modify.Modify()
    collect = []
    
    for x in tats:
        y, z = x.local_context()
        if y < z/2.:
            ratio = swing / (((z + 1) // 2) / float(z))
        else:
            ratio = (1. - swing) / (1 - ((z + 1) // 2) / float(z))
        new = st.shiftTempo(infile[x], 1./ratio)
        print "Expected:\t%1.3f\tActual:  \t%1.3f" % (x.duration * ratio, float(len(new))/new.sampleRate)
        collect.append(new)
    out = audio.assemble(collect)
    out.encode(outputFilename)
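Note: a small worked example (not from the project) of the swing ratios above, assuming two tatums per beat (z == 2) and swing = 0.66.

z, swing = 2, 0.66
first = swing / (((z + 1) // 2) / float(z))              # 1.32: first tatum lengthened
second = (1. - swing) / (1 - ((z + 1) // 2) / float(z))  # 0.68: second tatum shortened
print first, second   # the two ratios average to 1.0, so the beat length is preserved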
Code example #24
def main(input_filename, output_filename):
    audiofile = audio.LocalAudioFile(input_filename)
    bars = audiofile.analysis.bars
    collect = []

    for bar in bars:
        bar_ratio = (bars.index(bar) % 4) / 2.0
        beats = bar.children()
        for beat in beats:
            beat_index = beat.local_context()[0]
            ratio = beat_index / 2.0 + 0.5
            ratio = ratio + bar_ratio # dirac can't compress by less than 0.5!
            beat_audio = beat.render()
            scaled_beat = dirac.timeScale(beat_audio.data, ratio)
            ts = audio.AudioData(ndarray=scaled_beat, shape=scaled_beat.shape, 
                            sampleRate=audiofile.sampleRate, numChannels=scaled_beat.shape[1])
            collect.append(ts)

    out = audio.assemble(collect, numChannels=2)
    out.encode(output_filename)
Code example #25
def main(directory, input_filename, output_filename):
    semitones = []
    noteList = [[0 for x in range(12)] for x in range(6)]
    collect = audio.AudioQuantumList()
    final = audio.AudioQuantumList()
    #createAllNotes()
    initNoteList(noteList)
    print len(noteList)
    print noteList[0][0].analysis.segments.timbre
    audiofile = audio.LocalAudioFile(input_filename)
    songSegments = audiofile.analysis.segments
    bmp = 10000.0  # best (smallest) pitch distance found so far
    bmpi = 0       # pitch-class index (0-11) of that best match
    bmt = 10000.0  # best (smallest) timbre distance found so far
    bmti = 0       # noteList row index (0-5) of that best timbre match
    #print len(songSegments)
    for i in range(len(songSegments)):
        for j in range(12):
            noteSegments = noteList[0][j].analysis.segments
            pDist = distFinder.cosine(
                songSegments[i].pitches,
                noteSegments[len(noteSegments) / 2].pitches)
            if pDist < bmp:
                bmp = pDist
                bmpi = j
        for k in range(6):
            noteSegments = noteList[k][bmpi].analysis.segments
            tDist = distFinder.cosine(
                songSegments[i].timbre[1],
                noteSegments[len(noteSegments) / 2].timbre[1])
            if tDist < bmt:
                bmt = tDist
                bmti = k
        print str(i * 100 / len(songSegments)) + '%'
        matchDuration(noteList[bmti][bmpi].analysis.segments, songSegments[i],
                      collect)
        bmp = 10000.0
        bmt = 10000.0
    out = audio.assemble(collect)
    out.encode(output_filename)
Code example #26
def main(input_filename, output_filename):
    audiofile = audio.LocalAudioFile(input_filename)
    bars = audiofile.analysis.bars
    collect = audio.AudioQuantumList()
    count = 0
    for bar in bars:
        try:
            beat = bar.children()[1]
            beat_audio = beat.render()
            ratio = 1.2 if count == 1 else 1.0
            scaled_beat = dirac.timeScale(beat_audio.data, ratio)
            ts = audio.AudioData(ndarray=scaled_beat,
                                 shape=scaled_beat.shape,
                                 sampleRate=audiofile.sampleRate,
                                 numChannels=scaled_beat.shape[1])
            collect.append(ts)
            count = (count + 1) % 3
        except IndexError:
            pass
    out = audio.assemble(collect, numChannels=2)
    out.encode(output_filename)
Code example #27
File: cycle_dirac.py Project: yojanpatel/remix
def main(input_filename, output_filename):
    # This takes your input track, sends it to the analyzer, and returns the results.
    audiofile = audio.LocalAudioFile(input_filename)

    # This gets a list of every bar in the track.
    bars = audiofile.analysis.bars

    # The output array
    collect = []

    # This loop stretches each beat by a varying ratio, and then re-assembles them.
    for bar in bars:
        # Calculate a stretch ratio that repeats every four bars.
        bar_ratio = (bars.index(bar) % 4) / 2.0
        # Get the beats in the bar
        beats = bar.children()
        for beat in beats:
            # Find out where in the bar the beat is.
            beat_index = beat.local_context()[0]
            # Calculate a stretch ratio based on where in the bar the beat is
            ratio = beat_index / 2.0 + 0.5
            # Note that dirac can't compress by less than 0.5!
            ratio = ratio + bar_ratio
            # Get the raw audio data from the beat and scale it by the ratio
            # dirac only works on raw data, and only takes floating-point ratios
            beat_audio = beat.render()
            scaled_beat = dirac.timeScale(beat_audio.data, ratio)
            # Create a new AudioData object from the scaled data
            ts = audio.AudioData(ndarray=scaled_beat,
                                 shape=scaled_beat.shape,
                                 sampleRate=audiofile.sampleRate,
                                 numChannels=scaled_beat.shape[1])
            # Append the new data to the output list!
            collect.append(ts)

    # Assemble and write the output data
    out = audio.assemble(collect, numChannels=2)
    out.encode(output_filename)
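Note: the sketch below (not from the project) prints the stretch ratio the loop above produces for each beat, assuming 4/4 bars (beat_index 0-3) over the four-bar cycle.

for bar_index in range(4):
    bar_ratio = (bar_index % 4) / 2.0
    for beat_index in range(4):
        print bar_index, beat_index, beat_index / 2.0 + 0.5 + bar_ratio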
Code example #28
File: cycle_dirac.py Project: DrawMusic/remix
def main(input_filename, output_filename):
    # This takes your input track, sends it to the analyzer, and returns the results.  
    audiofile = audio.LocalAudioFile(input_filename)

    # This gets a list of every bar in the track.  
    bars = audiofile.analysis.bars

    # The output array
    collect = []

    # This loop stretches each beat by a varying ratio, and then re-assembles them.
    for bar in bars:
        # Calculate a stretch ratio that repeats every four bars.
        bar_ratio = (bars.index(bar) % 4) / 2.0
        # Get the beats in the bar
        beats = bar.children()
        for beat in beats:
            # Find out where in the bar the beat is.
            beat_index = beat.local_context()[0]
            # Calculate a stretch ratio based on where in the bar the beat is
            ratio = beat_index / 2.0 + 0.5
            # Note that dirac can't compress by less than 0.5!
            ratio = ratio + bar_ratio 
            # Get the raw audio data from the beat and scale it by the ratio
            # dirac only works on raw data, and only takes floating-point ratios
            beat_audio = beat.render()
            scaled_beat = dirac.timeScale(beat_audio.data, ratio)
            # Create a new AudioData object from the scaled data
            ts = audio.AudioData(ndarray=scaled_beat, shape=scaled_beat.shape, 
                            sampleRate=audiofile.sampleRate, numChannels=scaled_beat.shape[1])
            # Append the new data to the output list!
            collect.append(ts)

    # Assemble and write the output data
    out = audio.assemble(collect, numChannels=2)
    out.encode(output_filename)
Code example #29
    # if audio_file2.analysis.bars[i].confidence > 0.15:
    #   minlen = min(audio_file1.data.shape[0], audio_file2.data.shape[0])
    #   # audiofile = audio_file2
    #   # audiofile.data = audiofile.data[:minlen,:] + audio_file1.data[:minlen,:]

    if random.randrange(100) < 70:
        alist.append(
            audio.mix(
                audio_file2[beats2[i]], interesting_segments[random.randrange(
                    len(interesting_segments))]))
    else:
        alist.append(audio_file2[beats2[i]])

    # else:
    # alist.append( audio_file1[ beats1[i] ] )
    i += 1

# construct output waveform from these audiodata objects.
afout = audio.assemble(alist)

# Write output file
filename = "play" + str(int(time.time())) + ".mp3"

afout.encode(filename)

music = pyglet.media.load(filename)

music.play()

print "pingPong execution time: ", time.time() - start_time, " seconds"
Code example #30
File: pingPong.py Project: peterklipfel/automashup
    # add next beat from song 1
    # alist.append( audio_file2[ beats2[i] ] )
    # add next beat from song 2
    # if audio_file2.analysis.bars[i].confidence > 0.15:
    #   minlen = min(audio_file1.data.shape[0], audio_file2.data.shape[0])
    #   # audiofile = audio_file2
    #   # audiofile.data = audiofile.data[:minlen,:] + audio_file1.data[:minlen,:]

    if random.randrange(100) < 70:
      alist.append( audio.mix(audio_file2[ beats2[i] ], interesting_segments[random.randrange(len(interesting_segments))] ) )
    else:
      alist.append(audio_file2[beats2[i]])

    # else:
      # alist.append( audio_file1[ beats1[i] ] )
    i += 1

# construct output waveform from these audiodata objects.
afout = audio.assemble( alist )

# Write output file
filename = "play"+str(int(time.time()))+".mp3"

afout.encode( filename )

music = pyglet.media.load(filename)

music.play()

print "pingPong execution time: ", time.time() - start_time, " seconds"
Code example #31
File: test.py Project: ironmann250/AutoDJ
"""
#reload(audio)
audio_file = audio.LocalAudioFile("mp3/Calibria.mp3")

beats = audio_file.analysis.beats[128:159]


collect = []
for beat in beats:
	beat_audio = beat.render()
	scaled_beat = dirac.timeScale(beat_audio.data, 1.2)
	ts = audio.AudioData(ndarray=scaled_beat, shape=scaled_beat.shape, 
                sampleRate=audio_file.sampleRate, numChannels=scaled_beat.shape[1])
	collect.append(ts)
print collect
out = audio.assemble(collect, numChannels=2)


# audio_file2 = audio.LocalAudioFile("mp3/Bastille.mp3")
# beats2 = audio_file2.analysis.beats[128:159]


# data1 = audio.getpieces(audio_file, beats)
# # print type(data1)
# # print isinstance(data1, audio.AudioData)
# #out = modify.Modify().shiftTempo(data1, 1)
# data2 = audio.getpieces(audio_file2, beats2)
# out = action.Crossfade([data1, data2], [0.0, 0.0], 30).render()


# data1.encode("Testing1.mp3")