Example #1
	def g(self, d, gain, rate):
		s = 44100
		if gain is not None:
			return limit(multiply(dirac.timeScale(d, rate, s, self.quality),
						 float32(gain)))
		else:
			return dirac.timeScale(d, rate, s, self.quality)
Example #2
 def g(self, d, gain, rate):
     s = 44100
     if gain is not None:
         return limit(multiply(dirac.timeScale(d, rate, s, self.quality),
                      float32(gain)))
     else:
         return dirac.timeScale(d, rate, s, self.quality)
Example #3
def main():
    
    try:
        in_filename = sys.argv[1]
    except Exception:
        print USAGE
        sys.exit(-1)

    afile = audio.LocalAudioFile(in_filename)
    vecin = afile.data
    
    # ------------------------------------------------------------------------------------
    # Single time-stretch
    # ------------------------------------------------------------------------------------
    rate = 1.25 # 25% slower
    quality = 0 # fast processing
    
    print "Time Stretching with single rate", rate
    # arguments are:
    # 1) a numpy array of size N x C, where N is a number of samples, and C the number of channels (1 or 2)
    # 2) a float representing the time-stretching rate, e.g. 0.5 for half the original duration, and 2.0 for double the original duration
    # 3) an integer representing the sample rate of the input signal, e.g. 44100
    # 4) an optional integer representing the processing quality and speed between the default 0 (fastest, lowest quality) and 2 (slowest, highest quality)
    vecout = dirac.timeScale(vecin, rate, afile.sampleRate, quality)
    
    # output audio
    out = audio.AudioData(ndarray=vecout, shape=vecout.shape, sampleRate=afile.sampleRate, numChannels=vecout.shape[1])
    out_filename = 'out_single_rate.wav'
    print "Writing file", out_filename
    out.encode(out_filename)
    
    # ------------------------------------------------------------------------------------
    # Varying time-stretch
    # ------------------------------------------------------------------------------------
    # This example will linearly change the speed from 'rate' to 1.0 in 'numChunks' chunks
    numChunks = 16 # divide the signal into 16 chunks
    sizeChunk = int(vecin.shape[0] / numChunks)
    incRate = (1.0-rate) / (numChunks-1)
    
    # first tuple must start at index 0
    index = 0
    rates = [(index, rate)]
    for i in xrange(numChunks-1):
        index += sizeChunk
        rate += incRate
        rates.append((index, rate))
    
    print "Time Stretching with list of rates", rates
    # arguments are:
    # 1) a numpy array of size N x C, where N is a number of samples, and C the number of channels (1 or 2)
    # 2) a list of tuples each representing (a sample index, a rate). First index must be 0.
    # 3) an integer representing the sample rate of the input signal, e.g. 44100
    # 4) an optional integer representing the processing quality and speed between the default 0 (fastest, lowest quality) and 2 (slowest, highest quality)
    vecout = dirac.timeScale(vecin, rates, afile.sampleRate, quality)
    out = audio.AudioData(ndarray=vecout, shape=vecout.shape, sampleRate=afile.sampleRate, numChannels=vecout.shape[1])

    out_filename = 'out_list_rates.wav'
    print "Writing file", out_filename
    out.encode(out_filename)
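A note on the two call forms documented in the comments above: dirac.timeScale accepts either a single float rate or a list of (sample index, rate) tuples whose first index is 0. A minimal sketch, assuming dirac and numpy are importable; the silent stand-in buffer and variable names are illustrative, not from the example:

import numpy
import dirac

sr = 44100
buf = numpy.zeros((sr * 2, 2), dtype=numpy.int16)   # two seconds of stereo silence as a stand-in

# single rate: the whole buffer comes out 25% longer
out_single = dirac.timeScale(buf, 1.25, sr, 0)

# list of (sample index, rate): first half stretched by 1.25, second half left at 1.0
rates = [(0, 1.25), (buf.shape[0] // 2, 1.0)]
out_varying = dirac.timeScale(buf, rates, sr, 0)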
Example #4
def makeTransition(inData, outData, inSong, outSong):
    """
	Takes two arrays of AudioQuantum objects and returns a single AudioData object
	with a linear crossfade and a beatmatch
	"""
    # The number of beats has to be the same
    assert (len(inData) == len(outData))

    # If the tempos are the same then it is easy
    if inSong.bpm == outSong.bpm:
        mixInSeg = audio.getpieces(inSong.AudioFile, inData)
        mixOutSeg = audio.getpieces(outSong.AudioFile, outData)
        # mixInSeg.encode("outfiles/Mixinseg.mp3")
        # mixOutSeg.encode("outfiles/MixOutseg.mp3")
        transition_length = 60.0 / 128.0 * MIX_LENGTH
        transitionSeg = action.Crossfade([mixOutSeg, mixInSeg], [0.0, 0.0],
                                         transition_length,
                                         mode="linear").render()
        return transitionSeg

    # Else we iterate over each one
    else:
        transitionTime = 0
        tempoDiff = inSong.bpm - outSong.bpm
        marginalInc = float(tempoDiff) / float(MIX_LENGTH)
        inCollect = []
        outCollect = []
        # Rather than a linear slowdown, it is a step function where each beat slows down by marginalInc
        for i in range(MIX_LENGTH):
            inAudio = inData[i].render()
            outAudio = outData[i].render()
            # We scale the in and out beats so that they are at the same tempo
            inScaled = dirac.timeScale(
                inAudio.data, inSong.bpm / (outSong.bpm + i * marginalInc))
            outScaled = dirac.timeScale(
                outAudio.data, outSong.bpm / (outSong.bpm + i * marginalInc))
            transitionTime += 60 / (outSong.bpm + i * marginalInc)
            ts = audio.AudioData(ndarray=inScaled,
                                 shape=inScaled.shape,
                                 sampleRate=inSong.AudioFile.sampleRate,
                                 numChannels=inScaled.shape[1])
            ts2 = audio.AudioData(ndarray=outScaled,
                                  shape=outScaled.shape,
                                  sampleRate=outSong.AudioFile.sampleRate,
                                  numChannels=outScaled.shape[1])
            inCollect.append(ts)
            outCollect.append(ts2)
        # Collect the audio and crossfade it
        mixInSeg = audio.assemble(inCollect, numChannels=2)
        mixOutSeg = audio.assemble(outCollect, numChannels=2)
        # mixInSeg.encode("outfiles/Mixinseg.mp3")
        # mixOutSeg.encode("outfiles/MixOutseg.mp3")
        transitionSeg = action.Crossfade([mixOutSeg, mixInSeg], [0.0, 0.0],
                                         transitionTime,
                                         mode="exponential").render()
        return transitionSeg
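One way to read the per-beat arithmetic in the else branch above, as a standalone sketch with made-up tempos and a hypothetical MIX_LENGTH of 8 beats: each beat of the transition is scaled so both songs land on an intermediate tempo that steps from outSong.bpm toward inSong.bpm.

MIX_LENGTH = 8                  # hypothetical transition length in beats
in_bpm, out_bpm = 128.0, 120.0  # illustrative tempos

marginalInc = (in_bpm - out_bpm) / MIX_LENGTH    # +1 bpm per beat here
for i in range(MIX_LENGTH):
    target = out_bpm + i * marginalInc           # 120.0, 121.0, ... 127.0
    in_ratio = in_bpm / target                   # >= 1: incoming beats are stretched to the slower target
    out_ratio = out_bpm / target                 # <= 1: outgoing beats are compressed to the faster target
    # dirac.timeScale multiplies duration by the ratio, so both beats now last 60.0 / target seconds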
Example #5
File: main.py Project: vivjay30/AutoDJ
def makeTransition(inData, outData, inSong, outSong):
    """
	Takes two arrays of AudioQuantum objects and returns a single AudioData object
	with a linear crossfade and a beatmatch
	"""
    # The number of beats has to be the same
    assert len(inData) == len(outData)

    # If the tempos are the same then it is easy
    if inSong.bpm == outSong.bpm:
        mixInSeg = audio.getpieces(inSong.AudioFile, inData)
        mixOutSeg = audio.getpieces(outSong.AudioFile, outData)
        # mixInSeg.encode("outfiles/Mixinseg.mp3")
        # mixOutSeg.encode("outfiles/MixOutseg.mp3")
        transition_length = 60.0 / 128.0 * MIX_LENGTH
        transitionSeg = action.Crossfade([mixOutSeg, mixInSeg], [0.0, 0.0], transition_length, mode="linear").render()
        return transitionSeg

    # Else we iterate over each one
    else:
        transitionTime = 0
        tempoDiff = inSong.bpm - outSong.bpm
        marginalInc = float(tempoDiff) / float(MIX_LENGTH)
        inCollect = []
        outCollect = []
        # Rather than a linear slowdown, it is a step function where each beat slows down by marginalInc
        for i in range(MIX_LENGTH):
            inAudio = inData[i].render()
            outAudio = outData[i].render()
            # We scale the in and out beats so that they are at the same tempo
            inScaled = dirac.timeScale(inAudio.data, inSong.bpm / (outSong.bpm + i * marginalInc))
            outScaled = dirac.timeScale(outAudio.data, outSong.bpm / (outSong.bpm + i * marginalInc))
            transitionTime += 60 / (outSong.bpm + i * marginalInc)
            ts = audio.AudioData(
                ndarray=inScaled,
                shape=inScaled.shape,
                sampleRate=inSong.AudioFile.sampleRate,
                numChannels=inScaled.shape[1],
            )
            ts2 = audio.AudioData(
                ndarray=outScaled,
                shape=outScaled.shape,
                sampleRate=outSong.AudioFile.sampleRate,
                numChannels=outScaled.shape[1],
            )
            inCollect.append(ts)
            outCollect.append(ts2)
        # Collect the audio and crossfade it
        mixInSeg = audio.assemble(inCollect, numChannels=2)
        mixOutSeg = audio.assemble(outCollect, numChannels=2)
        # mixInSeg.encode("outfiles/Mixinseg.mp3")
        # mixOutSeg.encode("outfiles/MixOutseg.mp3")
        transitionSeg = action.Crossfade([mixOutSeg, mixInSeg], [0.0, 0.0], transitionTime, mode="exponential").render()
        return transitionSeg
Example #6
def newTempo(song, oldTempo, newTempo):
    # ratio = newTempo / oldTempo
    ratio = oldTempo / newTempo
    scaled = dirac.timeScale(song.data, ratio)
    out = audio.AudioData(ndarray = scaled, shape = scaled.shape,
                sampleRate = song.sampleRate, numChannels = scaled.shape[1])
    return out
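A hedged usage sketch for newTempo, assuming the echonest.remix audio module used elsewhere on this page and a placeholder filename; note that the third argument shadows the function name, and the ratio works out to old tempo divided by new tempo:

from echonest.remix import audio   # assumed import path, as in the other examples

song = audio.LocalAudioFile("song.mp3")           # placeholder filename
old_bpm = song.analysis.tempo['value']
faster = newTempo(song, old_bpm, old_bpm * 1.1)   # ratio = old/new < 1, so the audio gets shorter and faster
faster.encode("song_faster.mp3")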
Example #7
def changeToCorrectTempo(audioFile, targetTempo):
    #takes in an audioFile, targetTempo, returns audioFile @ correct tempo
    currentTempo = audioFile.analysis.tempo['value']
    bars = audioFile.analysis.bars
    collect = []

    # This loop stretches each beat by the tempo ratio, and then re-assembles them.
    for bar in bars:
        # Get the beats in the bar
        beats = bar.children()
        for beat in beats:
            # Note that dirac can't compress by less than 0.5!

            ratio = currentTempo / targetTempo
            #formula for changing currentTempo to targetTempo

            # Get the raw audio data from the beat and scale it by the ratio
            # dirac only works on raw data, and only takes floating-point ratios
            beat_audio = beat.render()
            scaled_beat = dirac.timeScale(beat_audio.data, ratio)
            # Create a new AudioData object from the scaled data
            ts = audio.AudioData(ndarray=scaled_beat,
                                 shape=scaled_beat.shape,
                                 sampleRate=audioFile.sampleRate,
                                 numChannels=scaled_beat.shape[1])
            # Append the new data to the output list!
            collect.append(ts)

    # Assemble and write the output data
    output = audio.assemble(collect, numChannels=2)
    return output
Example #8
def do_work(track, options):
    
    verbose = bool(options.verbose)
    
    # This gets the swing factor
    swing = float(options.swing)
    if swing < -0.9: swing = -0.9
    if swing > +0.9: swing = +0.9
    
    # If there's no swing, return the original tune
    if swing == 0:
        return Playback(track, 0, track.analysis.duration)
    
    # This gets the beats and the sample offset where the first beat starts
    beats = track.analysis.beats
    offset = int(beats[0].start * track.sampleRate)

    # compute rates
    rates = []
    # This is where the magic happens:
    # For each beat, compute how much to stretch / compress each half of each beat
    for beat in beats[:-1]:
        # This adds swing:
        if 0 < swing:
            rate1 = 1+swing
            dur = beat.duration/2.0
            stretch = dur * rate1
            rate2 = (beat.duration-stretch)/dur
        # This removes swing
        else:
            rate1 = 1 / (1+abs(swing))
            dur = (beat.duration/2.0) / rate1
            stretch = dur * rate1
            rate2 = (beat.duration-stretch)/(beat.duration-dur)
        # This builds the list of swing rates for each beat
        start1 = int(beat.start * track.sampleRate)
        start2 = int((beat.start+dur) * track.sampleRate)
        rates.append((start1-offset, rate1))
        rates.append((start2-offset, rate2))
        if verbose:
            args = (beats.index(beat), dur, beat.duration-dur, stretch, beat.duration-stretch)
            print "Beat %d — split [%.3f|%.3f] — stretch [%.3f|%.3f] seconds" % args
    
    # This gets all the audio, from the first beat to the last beat
    vecin = track.data[offset:int(beats[-1].start * track.sampleRate),:]
    # This block does the time stretching
    if verbose: 
        print "\nTime stretching..."
    # Dirac is a timestretching tool that comes with remix.
    vecout = dirac.timeScale(vecin, rates, track.sampleRate, 0)
    # This builds the timestretched AudioData object
    ts = AudioData(ndarray=vecout, shape=vecout.shape, 
                    sampleRate=track.sampleRate, numChannels=vecout.shape[1], 
                    verbose=verbose)
    # Create playback objects (just a collection of audio) for the first and last beat
    pb1 = Playback(track, 0, beats[0].start)
    pb2 = Playback(track, beats[-1].start, track.analysis.duration-beats[-1].start)
    
    # Return the first beat, the time-stretched beats, and the last beat
    return [pb1, ts, pb2]
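For reference, a worked instance of the swing arithmetic above, using illustrative numbers rather than values from a real track: with a positive swing, the first half of each beat is stretched and the second half compressed so the beat keeps its total length.

swing = 0.5
beat_duration = 0.5                        # one beat at 120 bpm, in seconds

rate1 = 1 + swing                          # 1.5: stretch applied to the first half
dur = beat_duration / 2.0                  # 0.25 s
stretch = dur * rate1                      # 0.375 s after stretching
rate2 = (beat_duration - stretch) / dur    # 0.5: the second half shrinks to 0.125 s
# 0.375 + 0.125 = 0.5 s total, but the midpoint now falls at 3/4 of the beat, which is the swung feel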
Example #9
def main(input_one, input_two):
	track_one = audio.LocalAudioFile(input_one)
	track_two = audio.LocalAudioFile(input_two)
	section_one = track_one.analysis.sections[0]
	section_two = track_two.analysis.sections[-1]
	tempo_one = section_one.tempo
	tempo_two = section_two.tempo
	tempo_diff = tempo_two - tempo_one
	bars_one = section_one.children()
	collect = []
	for bar in bars_one:
		if bar == bars_one[-1]:
			numbeats = len(bar.children())
			step = tempo_diff/numbeats
			for i, beat in enumerate(bar.children()):
				beat_audio = beat.render()
				ratio = (tempo_one + step*(i+1))/(tempo_one + step*i)
				scaled_beat = dirac.timeScale(beat_audio.data, ratio)
				new_beat = audio.AudioData(ndarray=scaled_beat, shape=scaled_beat.shape, sampleRate=track_one.sampleRate, numChannels=scaled_beat.shape[1])
				collect.append(new_beat)
			break
		for beat in bar.children():
			collect.append(beat.render())
	out_data_one = audio.assemble(collect, numChannels=2)
	out_name_one = input_one.split('.')[0]+'-stretch.mp3'
	out_data_one.encode(out_name_one)
	play_one = audio.LocalAudioFile(out_name_one)
	aqp_one = Player(play_one)
	aqp_two = Player(track_two)
	beats_one = play_one.analysis.beats
	for beat in beats_one:
		aqp_one.play(beat)
	aqp_one.closeStream()
	aqp_two.play(section_two)
	aqp_two.closeStream()
Example #10
def shiftPitchOctaves(audio_data, octaves):
    factor = 2.0**octaves
    stretched_data = dirac.timeScale(audio_data, factor)
    index = numpy.floor(numpy.arange(0, stretched_data.shape[0],
                                     factor)).astype('int32')
    new_data = stretched_data[index]
    return new_data
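What makes this work: dirac.timeScale lengthens the audio by 2**octaves without changing pitch, and the floor/arange indexing then resamples it back to roughly the original length, which shifts the pitch by that factor. A hedged usage sketch with a silent stand-in buffer (real callers pass something like beat.render().data, as in the other examples):

import numpy
import dirac

sr = 44100
buf = numpy.zeros((sr, 2), dtype=numpy.int16)   # one second of stereo silence as a stand-in

up_an_octave = shiftPitchOctaves(buf, 1.0)
# factor = 2.0, so the stretched data is about 2*sr samples long;
# arange(0, 2*sr, 2.0) keeps every other sample, leaving about sr samples at double the pitch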
Example #11
def do_work(track, options):

    verbose = bool(options.verbose)

    # swing factor
    swing = float(options.swing)
    if swing < -0.9: swing = -0.9
    if swing > +0.9: swing = +0.9

    if swing == 0:
        return Playback(track, 0, track.analysis.duration)

    beats = track.analysis.beats
    offset = int(beats[0].start * track.sampleRate)

    # compute rates
    rates = []
    for beat in beats[:-1]:
        # put swing
        if 0 < swing:
            rate1 = 1 + swing
            dur = beat.duration / 2.0
            stretch = dur * rate1
            rate2 = (beat.duration - stretch) / dur
        # remove swing
        else:
            rate1 = 1 / (1 + abs(swing))
            dur = (beat.duration / 2.0) / rate1
            stretch = dur * rate1
            rate2 = (beat.duration - stretch) / (beat.duration - dur)
        # build list of rates
        start1 = int(beat.start * track.sampleRate)
        start2 = int((beat.start + dur) * track.sampleRate)
        rates.append((start1 - offset, rate1))
        rates.append((start2 - offset, rate2))
        if verbose:
            args = (beats.index(beat), dur, beat.duration - dur, stretch,
                    beat.duration - stretch)
            print "Beat %d — split [%.3f|%.3f] — stretch [%.3f|%.3f] seconds" % args

    # get audio
    vecin = track.data[offset:int(beats[-1].start * track.sampleRate), :]
    # time stretch
    if verbose:
        print "\nTime stretching..."
    vecout = dirac.timeScale(vecin, rates, track.sampleRate, 0)
    # build timestretch AudioData object
    ts = AudioData(ndarray=vecout,
                   shape=vecout.shape,
                   sampleRate=track.sampleRate,
                   numChannels=vecout.shape[1],
                   verbose=verbose)
    # initial and final playback
    pb1 = Playback(track, 0, beats[0].start)
    pb2 = Playback(track, beats[-1].start,
                   track.analysis.duration - beats[-1].start)

    return [pb1, ts, pb2]
Example #12
def matchDuration(note, songseg, end):
    ratio = 3 * songseg.duration / note.duration
    print ratio
    for seg in note:
        seg_audio = seg.render()
        scaled_seg = dirac.timeScale(seg_audio.data, ratio)
        ts = audio.AudioData(ndarray=scaled_seg, shape=scaled_seg.shape,
                    sampleRate=44100, numChannels=scaled_seg.shape[1])
        end.append(ts)
Example #13
def do_work(track, options):

    beat_pattern = string.replace(options.pattern, "-", "")
    beat_pattern_len = len(beat_pattern)
    
    verb = bool(options.verbose)
    
    beats = track.analysis.beats
    offset = int(beats[0].start * track.sampleRate)
    
    # build rates
    rates = []
    bar_starts = []
    n_beat = 0
    last_start = 0
    start = 0
    if verb == True: print "Running through beats..."
    for beat in beats[:-1]:
        rate = float(options.slowdown)/int(beat_pattern[n_beat % beat_pattern_len])
        last_start = start
        start = int(beat.start * track.sampleRate)
        if options.debug and (n_beat % beat_pattern_len)==(beat_pattern_len-1) and (n_beat > 0):
            rates.append(((start*9 + last_start)/10 - offset, 11*rate))
        rates.append((start-offset, rate))
        n_beat = n_beat + 1
        if verb: print n_beat, start-offset, start, rate
        if (n_beat % beat_pattern_len)==int(options.downbeat):
            bar_starts.append(n_beat)
        #print rate, start
        #if verb == True: print "Beat %d — split [%.3f|%.3f] — stretch [%.3f|%.3f] seconds" % (beats.index(beat), dur, beat.duration-dur, stretch, beat.duration-stretch)
    if verb == True: print "Done with beats."

    track1 = AudioData32(ndarray = track.data, shape=track.data.shape, sampleRate=44100, numChannels=track.data.shape[1])
    if (options.downbeat >= 0):
        inc2 = AudioData(options.downbeat_file, sampleRate=44100, numChannels=2)
        if verb == True: print "Starting downbeats."
        for bar_start in bar_starts:
            if verb == True: print beats[bar_start].start , bar_start
            track1.add_at(float(beats[bar_start].start) + float(options.downbeat_offset), inc2)
        if verb == True: print "Done with downbeats."

    track1.normalize()

    # get audio
    vecin = track1[offset:int(beats[-1].start * track.sampleRate),:]
    # time stretch
    if verb == True: print "\nTime stretching..."
    vecout = dirac.timeScale(vecin, rates, track.sampleRate, 0)
    # build timestretch AudioData object
    ts = AudioData(ndarray=vecout, shape=vecout.shape, sampleRate=track.sampleRate, numChannels=vecout.shape[1])


    # initial and final playback
    pb1 = Playback(track, 0, beats[0].start)
    pb2 = Playback(track, beats[-1].start, track.analysis.duration-beats[-1].start)

    return [pb1, ts, pb2]
Example #14
def do_work(track, options):

	verbose = bool(options.verbose)

	if track.analysis.time_signature['value'] in [3,6]:
		if verbose: print "Song is already a waltz"
		return Playback(track, 0, track.analysis.duration)
	
	# Figure out what to do with the bastard beat
	bastard = int(options.waltz)
	if bastard < 1: bastard = 2
	if bastard > 3: bastard = 2
	
	beats = track.analysis.beats
	offset = int(beats[0].start * track.sampleRate)

	rates = []

	# Set offset according to bastard beat
	count = (2,1,4)[bastard - 1] 

	if verbose:
		print "Putting extra beat on beat %s" % bastard
	
	# Convert a song in 4/4 to 3/4
	for beat in beats:
		if count in [1, 4]: 
			rate = 4.0/3.0
		if count in [2, 3]: 
			rate = 2.0/3.0
		
		if count == 4:
			count = 1
		else:
			count += 1
		
		start = int(beat.start * track.sampleRate)
		rates.append((start-offset, rate))
		
	# get audio
	vecin = track.data[offset:int(beats[-1].start * track.sampleRate),:]
	# time stretch
	if verbose: 
		print "\nTime stretching..."
	vecout = dirac.timeScale(vecin, rates, track.sampleRate, 0)
	# build timestretch AudioData object
	ts = AudioData(ndarray=vecout, shape=vecout.shape, 
					sampleRate=track.sampleRate, numChannels=vecout.shape[1], 
					verbose=verbose)
	
	# initial and final playback
	pb1 = Playback(track, 0, beats[0].start)
	pb2 = Playback(track, beats[-1].start, track.analysis.duration-beats[-1].start)

	return [pb1, ts, pb2]
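The 4/3 and 2/3 rates are what give the 3/4 feel. A worked instance of the arithmetic, assuming four equal beats of d seconds (illustrative value) and the default count cycle, which yields this repeating pattern over each group of four beats:

d = 0.5                                            # seconds per beat, e.g. 120 bpm
rates_for_one_bar = [4.0/3.0, 2.0/3.0, 2.0/3.0, 4.0/3.0]
durations = [d * r for r in rates_for_one_bar]     # roughly [0.667, 0.333, 0.333, 0.667]
# beats 2 and 3 together last 0.667 s, so the bar becomes three equal pulses of 0.667 s each:
# beat 1, then beats 2+3, then beat 4, i.e. a waltz at three quarters of the original tempo
assert abs(sum(durations) - 4 * d) < 1e-9          # the bar keeps its original length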
Example #15
def matchDuration(note, songseg, end):
    ratio = 3 * songseg.duration / note.duration
    print ratio
    for seg in note:
        seg_audio = seg.render()
        scaled_seg = dirac.timeScale(seg_audio.data, ratio)
        ts = audio.AudioData(ndarray=scaled_seg,
                             shape=scaled_seg.shape,
                             sampleRate=44100,
                             numChannels=scaled_seg.shape[1])
        end.append(ts)
Example #16
def do_work(track, options):
    
    verbose = bool(options.verbose)
    
    # swing factor
    swing = float(options.swing)
    if swing < -0.9: swing = -0.9
    if swing > +0.9: swing = +0.9
    
    if swing == 0:
        return Playback(track, 0, track.analysis.duration)
    
    beats = track.analysis.beats
    offset = int(beats[0].start * track.sampleRate)

    # compute rates
    rates = []
    for beat in beats[:-1]:
        # put swing
        if 0 < swing:
            rate1 = 1+swing
            dur = beat.duration/2.0
            stretch = dur * rate1
            rate2 = (beat.duration-stretch)/dur
        # remove swing
        else:
            rate1 = 1 / (1+abs(swing))
            dur = (beat.duration/2.0) / rate1
            stretch = dur * rate1
            rate2 = (beat.duration-stretch)/(beat.duration-dur)
        # build list of rates
        start1 = int(beat.start * track.sampleRate)
        start2 = int((beat.start+dur) * track.sampleRate)
        rates.append((start1-offset, rate1))
        rates.append((start2-offset, rate2))
        if verbose:
            args = (beats.index(beat), dur, beat.duration-dur, stretch, beat.duration-stretch)
            print "Beat %d — split [%.3f|%.3f] — stretch [%.3f|%.3f] seconds" % args
    
    # get audio
    vecin = track.data[offset:int(beats[-1].start * track.sampleRate),:]
    # time stretch
    if verbose: 
        print "\nTime stretching..."
    vecout = dirac.timeScale(vecin, rates, track.sampleRate, 0)
    # build timestretch AudioData object
    ts = AudioData(ndarray=vecout, shape=vecout.shape, 
                    sampleRate=track.sampleRate, numChannels=vecout.shape[1], 
                    verbose=verbose)
    # initial and final playback
    pb1 = Playback(track, 0, beats[0].start)
    pb2 = Playback(track, beats[-1].start, track.analysis.duration-beats[-1].start)

    return [pb1, ts, pb2]
Example #17
    def shift_tempo_and_play(self, audio_quantum, ratio):
        """
        Takes an echonest.remix.audio.AudioQuantum and a ratio.
        It first shifts the tempo of the AudioQuantum using dirac
        and then writes the modified data to the stream.
        """
        import dirac
        import numpy as np

        ad = audio_quantum.render()
        scaled_beat = dirac.timeScale(ad.data, ratio)
        self.stream.write(scaled_beat.astype(np.int16).tostring())
Example #18
    def shift_tempo_and_play(self, audio_quantum, ratio):
        """
        Takes an echonest.remix.audio.AudioQuantum and a ratio.
        It first shifts the tempo of the AudioQuantum using dirac
        and then writes the modified data to the stream.
        """
        import dirac
        import numpy as np

        ad = audio_quantum.render()
        scaled_beat = dirac.timeScale(ad.data, ratio)
        self.stream.write(scaled_beat.astype(np.int16).tostring())
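Both versions assume self.stream is already an open audio output that accepts 16-bit PCM bytes; the examples never show how it is created. A hedged sketch of one plausible setup with PyAudio, using the 44100 Hz stereo format the rest of this page works with (these parameters are assumptions, not taken from the source project):

import pyaudio

p = pyaudio.PyAudio()
stream = p.open(format=pyaudio.paInt16,   # matches scaled_beat.astype(np.int16)
                channels=2,               # stereo, like the AudioData buffers above
                rate=44100,
                output=True)
# shift_tempo_and_play() would then write its int16 bytes into this stream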
Example #19
def main():
    audiofile = audio.LocalAudioFile(file1)
    player = Player(audiofile)
    beats = audiofile.analysis.beats
    for beat in beats:
        ratio = 1.25 - ((float(beat.absolute_context()[0]) / float(len(beats))) * .5)
        print ratio
        beat_audio = beat.render()
        scaled_beat = dirac.timeScale(beat_audio.data, ratio)
        ts = audio.AudioData(ndarray=scaled_beat, shape=scaled_beat.shape,
                                     sampleRate=audiofile.sampleRate, numChannels=scaled_beat.shape[1])
        player.play_from_AudioData(ts)
    player.close_stream()
Example #20
 def shiftPitchSemiTones(audio_data, semitones):
     while semitones > 6:
         semitones = semitones - 12
     while semitones < -6:
         semitones = 12 + semitones
     if semitones == 0:
         return audio_data
     factor = 2.0**(semitones / 12.0)
     stretched_data = dirac.timeScale(audio_data, factor)
     index = numpy.floor(numpy.arange(0, stretched_data.shape[0],
                                      factor)).astype('int32')
     new_data = stretched_data[index]
     return new_data
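The two while loops fold the requested shift into the range -6..+6 semitones so the stretch factor stays close to 1 (the pitch class is the same either way). A worked instance with an illustrative request of +9 semitones:

semitones = 9
while semitones > 6:
    semitones -= 12                  # folded to -3: same pitch class, one octave lower
factor = 2.0 ** (semitones / 12.0)   # 2 ** -0.25, about 0.841
# timeScale by 0.841 and then resampling by the same step lowers the pitch three semitones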
Example #21
def main(input_filename, output_filename):
    audiofile = audio.LocalAudioFile(input_filename)
    bars = audiofile.analysis.bars
    collect = audio.AudioQuantumList()
    count = 0
    for bar in bars:
        try:
            beat = bar.children()[1]
            beat_audio = beat.render()
            ratio = 1.2 if count == 1 else 1.0
            scaled_beat = dirac.timeScale(beat_audio.data, ratio)
            ts = audio.AudioData(ndarray=scaled_beat,
                                 shape=scaled_beat.shape,
                                 sampleRate=audiofile.sampleRate,
                                 numChannels=scaled_beat.shape[1])
            collect.append(ts)
            count = (count + 1) % 3
        except IndexError:
            pass
    out = audio.assemble(collect, numChannels=2)
    out.encode(output_filename)
Example #22
def main(input_filename, output_filename, ratio):
    audiofile = audio.LocalAudioFile(input_filename)
    beats = audiofile.analysis.beats
    collect = []

    for beat in beats:
        beat_audio = beat.render()
        scaled_beat = dirac.timeScale(beat_audio.data, ratio)
        ts = audio.AudioData(ndarray=scaled_beat, shape=scaled_beat.shape, 
                        sampleRate=audiofile.sampleRate, numChannels=scaled_beat.shape[1])
        collect.append(ts)

    out = audio.assemble(collect, numChannels=2)
    out.encode(output_filename)
Example #23
 def stretch(self, t, l):
     """t is a track, l is a list"""
     signal_start = int(l[0][0] * t.sampleRate)
     signal_duration = int((sum(l[-1]) - l[0][0]) * t.sampleRate)
     vecin = t.data[signal_start:signal_start + signal_duration,:]
     
     rates = []
     for i in xrange(len(l)):
         rates.append((int(l[i][0] * t.sampleRate) - signal_start, self.durations[i] / l[i][1]))
     
     vecout = dirac.timeScale(vecin, rates, t.sampleRate, 0)
     if hasattr(t, 'gain'):
         vecout = limit(multiply(vecout, float32(t.gain)))
     
     return AudioData(ndarray=vecout, shape=vecout.shape, sampleRate=t.sampleRate, numChannels=vecout.shape[1])
Example #24
def main(input_filename, output_filename, ratio):
    audiofile = audio.LocalAudioFile(input_filename)
    beats = audiofile.analysis.beats
    collect = []

    for beat in beats:
        beat_audio = beat.render()
        scaled_beat = dirac.timeScale(beat_audio.data, ratio)
        ts = audio.AudioData(ndarray=scaled_beat,
                             shape=scaled_beat.shape,
                             sampleRate=audiofile.sampleRate,
                             numChannels=scaled_beat.shape[1])
        collect.append(ts)

    out = audio.assemble(collect, numChannels=2)
    out.encode(output_filename)
Example #25
def do_work(track, options):
    beat_pattern = string.replace(options.pattern, "-", "")
    beat_pattern_len = len(beat_pattern)
    
    verb = bool(options.verbose)
    
    beats = track.analysis.beats
    offset = int(beats[0].start * track.sampleRate)
    
    # build rates
    rates = []
    n_beat = 0
    last_start = 0
    start = 0
    print "Running through beats..."
    for beat in beats[:-1]:
        last_start = start
        start = int(beat.start * track.sampleRate)
        if (int(beat_pattern[n_beat % beat_pattern_len]) != 0):
            rate = float(options.slowdown)/int(beat_pattern[n_beat % beat_pattern_len])
            if options.debug and (n_beat % beat_pattern_len)==(beat_pattern_len-1) and (n_beat > 0):
                rates.append(((start*9 + last_start)/10 - offset, 11*rate))	
            rates.append((start-offset, rate))
            if verb: print n_beat, start-offset, start, rate
            #print rate, start
            #if verb == True: print "Beat %d — split [%.3f|%.3f] — stretch [%.3f|%.3f] seconds" % (beats.index(beat), dur, beat.duration-dur, stretch, beat.duration-stretch)
        else:
            rates.append((start-offset, 0))
            if verb: print "Dropped beat."
        n_beat = n_beat + 1
    print "Done with beats."
    
    # get audio
    vecin = track.data[offset:int(beats[-1].start * track.sampleRate),:]
    # time stretch
    if verb == True: print "\nTime stretching..."
    vecout = dirac.timeScale(vecin, rates, track.sampleRate, 0)
    # build timestretch AudioData object
    ts = AudioData(ndarray=vecout, shape=vecout.shape, sampleRate=track.sampleRate, numChannels=vecout.shape[1])
    # initial and final playback
    pb1 = Playback(track, 0, beats[0].start)
    pb2 = Playback(track, beats[-1].start, track.analysis.duration-beats[-1].start)

    return [pb1, ts, pb2]
Example #26
 def stretch(self, t, l):
     """t is a track, l is a list"""
     signal_start = int(l[0][0] * t.sampleRate)
     signal_duration = int((sum(l[-1]) - l[0][0]) * t.sampleRate)
     vecin = t.data[signal_start:signal_start + signal_duration,:]
     
     rates = []
     for i in xrange(len(l)):
         rate = (int(l[i][0] * t.sampleRate) - signal_start, 
                 self.durations[i] / l[i][1])
         rates.append(rate)
     
     vecout = dirac.timeScale(list(vecin), rates, t.sampleRate, 0)
     if hasattr(t, 'gain'):
         vecout = limit(multiply(vecout, float32(t.gain)))
     
     audio_out = AudioData(ndarray=vecout, shape=vecout.shape, 
                             sampleRate=t.sampleRate, 
                             numChannels=vecout.shape[1])
     return audio_out
Example #27
def main(input_filename, output_filename):
    audiofile = audio.LocalAudioFile(input_filename)
    bars = audiofile.analysis.bars
    collect = []

    for bar in bars:
        bar_ratio = (bars.index(bar) % 4) / 2.0
        beats = bar.children()
        for beat in beats:
            beat_index = beat.local_context()[0]
            ratio = beat_index / 2.0 + 0.5
            ratio = ratio + bar_ratio # dirac can't compress by less than 0.5!
            beat_audio = beat.render()
            scaled_beat = dirac.timeScale(beat_audio.data, ratio)
            ts = audio.AudioData(ndarray=scaled_beat, shape=scaled_beat.shape, 
                            sampleRate=audiofile.sampleRate, numChannels=scaled_beat.shape[1])
            collect.append(ts)

    out = audio.assemble(collect, numChannels=2)
    out.encode(output_filename)
Example #28
def main(input_filename, output_filename):
    # This takes your input track, sends it to the analyzer, and returns the results.
    audiofile = audio.LocalAudioFile(input_filename)

    # This gets a list of every bar in the track.
    bars = audiofile.analysis.bars

    # The output array
    collect = []

    # This loop stretches each beat by a varying ratio, and then re-assembles them.
    for bar in bars:
        # Calculate a stretch ratio that repeats every four bars.
        bar_ratio = (bars.index(bar) % 4) / 2.0
        # Get the beats in the bar
        beats = bar.children()
        for beat in beats:
            # Find out where in the bar the beat is.
            beat_index = beat.local_context()[0]
            # Calculate a stretch ratio based on where in the bar the beat is
            ratio = beat_index / 2.0 + 0.5
            # Note that dirac can't compress by less than 0.5!
            ratio = ratio + bar_ratio
            # Get the raw audio data from the beat and scale it by the ratio
            # dirac only works on raw data, and only takes floating-point ratios
            beat_audio = beat.render()
            scaled_beat = dirac.timeScale(beat_audio.data, ratio)
            # Create a new AudioData object from the scaled data
            ts = audio.AudioData(ndarray=scaled_beat,
                                 shape=scaled_beat.shape,
                                 sampleRate=audiofile.sampleRate,
                                 numChannels=scaled_beat.shape[1])
            # Append the new data to the output list!
            collect.append(ts)

    # Assemble and write the output data
    out = audio.assemble(collect, numChannels=2)
    out.encode(output_filename)
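For reference, the ratio pattern this produces (no track needed): with four beats per bar, the per-beat stretch cycles like this over a four-bar phrase, and the +0.5 floor keeps every ratio at or above the 0.5 minimum mentioned in the comment.

for bar_index in range(4):
    bar_ratio = (bar_index % 4) / 2.0      # 0.0, 0.5, 1.0, 1.5
    ratios = [beat_index / 2.0 + 0.5 + bar_ratio for beat_index in range(4)]
    print(ratios)
# bar 0: [0.5, 1.0, 1.5, 2.0]
# bar 1: [1.0, 1.5, 2.0, 2.5]
# bar 2: [1.5, 2.0, 2.5, 3.0]
# bar 3: [2.0, 2.5, 3.0, 3.5]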
Example #29
def tempo_shift(input_filename, seg_range, shift_length, second_song, delay):
    t1 = track.track_from_filename(input_filename)
    t2 = track.track_from_filename(second_song)

    start_range, end_range = seg_range

    shift_length = min(shift_length, end_range - start_range)
    shift_magnitude = t2.tempo - t1.tempo

    beat_increment = (1.0*shift_magnitude)/shift_length
    beat_ratio = 1.0
    beat_count = 0

    audiofile = audio.LocalAudioFile(input_filename)
    beats = audiofile.analysis.segments
    collect = []

    for i in range(start_range, end_range):
        if (i > (end_range - shift_length)):
            desired_bpm = beat_increment * (i - (end_range - shift_length)) + t1.tempo
            beat_ratio = t1.tempo/desired_bpm

        beat_audio = beats[i].render()

        if (beat_ratio == 1.0):
            collect.append(beat_audio)
        else:
            scaled_beat = dirac.timeScale(beat_audio.data, beat_ratio)
            ts = audio.AudioData(ndarray=scaled_beat, shape=scaled_beat.shape,
                    sampleRate=audiofile.sampleRate, numChannels=scaled_beat.shape[1])
            collect.append(ts)
    if delay:
        print "Waiting 9 seconds"
        time.sleep(9)

    return collect
Example #30
def main(input_filename, output_filename):
    # This takes your input track, sends it to the analyzer, and returns the results.  
    audiofile = audio.LocalAudioFile(input_filename)

    # This gets a list of every bar in the track.  
    bars = audiofile.analysis.bars

    # The output array
    collect = []

    # This loop stretches each beat by a varying ratio, and then re-assembles them.
    for bar in bars:
        # Calculate a stretch ratio that repeats every four bars.
        bar_ratio = (bars.index(bar) % 4) / 2.0
        # Get the beats in the bar
        beats = bar.children()
        for beat in beats:
            # Find out where in the bar the beat is.
            beat_index = beat.local_context()[0]
            # Calculate a stretch ratio based on where in the bar the beat is
            ratio = beat_index / 2.0 + 0.5
            # Note that dirac can't compress by less than 0.5!
            ratio = ratio + bar_ratio 
            # Get the raw audio data from the beat and scale it by the ratio
            # dirac only works on raw data, and only takes floating-point ratios
            beat_audio = beat.render()
            scaled_beat = dirac.timeScale(beat_audio.data, ratio)
            # Create a new AudioData object from the scaled data
            ts = audio.AudioData(ndarray=scaled_beat, shape=scaled_beat.shape, 
                            sampleRate=audiofile.sampleRate, numChannels=scaled_beat.shape[1])
            # Append the new data to the output list!
            collect.append(ts)

    # Assemble and write the output data
    out = audio.assemble(collect, numChannels=2)
    out.encode(output_filename)
Example #31
def do_work(track, options):
    
    # This manages the various input options
    verbose = bool(options.verbose)    
    low_tempo = float(options.low)    
    high_tempo = float(options.high)    
    rate_tempo = float(options.rate)    
    rubato = float(options.rubato)    
    tempo = float(options.tempo)    

    # This sets the tempo and applies acceleration or not
    if rate_tempo == 0:
        if tempo == 0:
            low_tempo = track.analysis.tempo['value']
            high_tempo = low_tempo
        else:
            low_tempo = tempo
            high_tempo = tempo
    
    rates = []
    count = min(max(0,int(options.offset)),1)
    beats = track.analysis.beats
    offset = int(beats[0].start * track.sampleRate)

    # For every beat, we get a tempo, and apply a time stretch
    for beat in beats[:-1]:

        # Get a tempo for the beat
        target_tempo = select_tempo(beats.index(beat), len(beats), low_tempo, high_tempo, rate_tempo)

        # Calculate rates for time stretching each beat.
        # 
        if count == 0:
            dur = beat.duration/2.0
            rate1 = 60.0 / (target_tempo * dur)
            stretch = dur * rate1
            rate2 = rate1 + rubato
        elif count == 1:
            rate1 = 60.0 / (target_tempo * beat.duration)

        # Add a change of rate at a given time
        start1 = int(beat.start * track.sampleRate)
        rates.append((start1-offset, rate1))
        if count == 0:
            start2 = int((beat.start+dur) * track.sampleRate)
            rates.append((start2-offset, rate2))

        # This prints what's happening, if verbose mode is on.
        if verbose:
            if count == 0:
                args = (beats.index(beat), count, beat.duration, dur*rate1, dur*rate2, 60.0/(dur*rate1), 60.0/(dur*rate2))
                print "Beat %d (%d) | stretch %.3f sec into [%.3f|%.3f] sec | tempo = [%d|%d] bpm" % args
            elif count == 1:
                args = (beats.index(beat), count, beat.duration, beat.duration*rate1, 60.0/(beat.duration*rate1))
                print "Beat %d (%d) | stretch %.3f sec into %.3f sec | tempo = %d bpm" % args
        
        count = (count + 1) % 2
   
    # This gets the audio
    vecin = track.data[offset:int(beats[-1].start * track.sampleRate),:]

    # This does the time stretch
    if verbose: 
        print "\nTime stretching..."
    # Dirac is a timestretching tool that comes with remix.
    vecout = dirac.timeScale(vecin, rates, track.sampleRate, 0)
    
    # This builds the timestretched AudioData object
    ts = AudioData(ndarray=vecout, shape=vecout.shape, 
                    sampleRate=track.sampleRate, numChannels=vecout.shape[1], 
                    verbose=verbose)
    
    # Create playback objects (just a collection of audio) for the first and last beat
    pb1 = Playback(track, 0, beats[0].start)
    pb2 = Playback(track, beats[-1].start, track.analysis.duration-beats[-1].start)
    
    # Return the first beat, the time-stretched beats, and the last beat
    return [pb1, ts, pb2]
Example #32
File: waltzify.py Project: phlegma/remix
def do_work(track, options):

    # manage options
    verbose = bool(options.verbose)
    low_tempo = float(options.low)
    high_tempo = float(options.high)
    rate_tempo = float(options.rate)
    rubato = float(options.rubato)
    tempo = float(options.tempo)

    # acceleration or not
    if rate_tempo == 0:
        if tempo == 0:
            low_tempo = track.analysis.tempo["value"]
            high_tempo = low_tempo
        else:
            low_tempo = tempo
            high_tempo = tempo

    rates = []
    count = min(max(0, int(options.offset)), 1)
    beats = track.analysis.beats
    offset = int(beats[0].start * track.sampleRate)

    # for every beat
    for beat in beats[:-1]:

        # get a tempo, particularly for accelerando
        target_tempo = select_tempo(beats.index(beat), len(beats), low_tempo, high_tempo, rate_tempo)

        # calculate rates
        if count == 0:
            dur = beat.duration / 2.0
            rate1 = 60.0 / (target_tempo * dur)
            stretch = dur * rate1
            rate2 = rate1 + rubato
        elif count == 1:
            rate1 = 60.0 / (target_tempo * beat.duration)

        # add a change of rate at a given time
        start1 = int(beat.start * track.sampleRate)
        rates.append((start1 - offset, rate1))
        if count == 0:
            start2 = int((beat.start + dur) * track.sampleRate)
            rates.append((start2 - offset, rate2))

        # show on screen
        if verbose:
            if count == 0:
                args = (
                    beats.index(beat),
                    count,
                    beat.duration,
                    dur * rate1,
                    dur * rate2,
                    60.0 / (dur * rate1),
                    60.0 / (dur * rate2),
                )
                print "Beat %d (%d) | stretch %.3f sec into [%.3f|%.3f] sec | tempo = [%d|%d] bpm" % args
            elif count == 1:
                args = (beats.index(beat), count, beat.duration, beat.duration * rate1, 60.0 / (beat.duration * rate1))
                print "Beat %d (%d) | stretch %.3f sec into %.3f sec | tempo = %d bpm" % args

        count = (count + 1) % 2

    # get audio
    vecin = track.data[offset : int(beats[-1].start * track.sampleRate), :]

    # time stretch
    if verbose:
        print "\nTime stretching..."
    vecout = dirac.timeScale(vecin, rates, track.sampleRate, 0)

    # build timestretch AudioData object
    ts = AudioData(
        ndarray=vecout, shape=vecout.shape, sampleRate=track.sampleRate, numChannels=vecout.shape[1], verbose=verbose
    )

    # initial and final playback
    pb1 = Playback(track, 0, beats[0].start)
    pb2 = Playback(track, beats[-1].start, track.analysis.duration - beats[-1].start)

    return [pb1, ts, pb2]
Example #33
def shiftPitchOctaves(audio_data, octaves): 
        factor = 2.0 ** octaves
        stretched_data = dirac.timeScale(audio_data, factor)
        index = numpy.floor(numpy.arange(0, stretched_data.shape[0], factor)).astype('int32')
        new_data = stretched_data[index]
        return new_data
Example #34
def do_work(track, options):

    # manage options
    verbose = bool(options.verbose)
    low_tempo = float(options.low)
    high_tempo = float(options.high)
    rate_tempo = float(options.rate)
    rubato = float(options.rubato)
    tempo = float(options.tempo)

    # acceleration or not
    if rate_tempo == 0:
        if tempo == 0:
            low_tempo = track.analysis.tempo['value']
            high_tempo = low_tempo
        else:
            low_tempo = tempo
            high_tempo = tempo

    rates = []
    count = min(max(0, int(options.offset)), 1)
    beats = track.analysis.beats
    offset = int(beats[0].start * track.sampleRate)

    # for every beat
    for beat in beats[:-1]:

        # get a tempo, particularly for accelerando
        target_tempo = select_tempo(beats.index(beat), len(beats), low_tempo,
                                    high_tempo, rate_tempo)

        # calculate rates
        if count == 0:
            dur = beat.duration / 2.0
            rate1 = 60.0 / (target_tempo * dur)
            stretch = dur * rate1
            rate2 = rate1 + rubato
        elif count == 1:
            rate1 = 60.0 / (target_tempo * beat.duration)

        # add a change of rate at a given time
        start1 = int(beat.start * track.sampleRate)
        rates.append((start1 - offset, rate1))
        if count == 0:
            start2 = int((beat.start + dur) * track.sampleRate)
            rates.append((start2 - offset, rate2))

        # show on screen
        if verbose:
            if count == 0:
                args = (beats.index(beat), count, beat.duration, dur * rate1,
                        dur * rate2, 60.0 / (dur * rate1),
                        60.0 / (dur * rate2))
                print "Beat %d (%d) | stretch %.3f sec into [%.3f|%.3f] sec | tempo = [%d|%d] bpm" % args
            elif count == 1:
                args = (beats.index(beat), count, beat.duration,
                        beat.duration * rate1, 60.0 / (beat.duration * rate1))
                print "Beat %d (%d) | stretch %.3f sec into %.3f sec | tempo = %d bpm" % args

        count = (count + 1) % 2

    # get audio
    vecin = track.data[offset:int(beats[-1].start * track.sampleRate), :]

    # time stretch
    if verbose:
        print "\nTime stretching..."
    vecout = dirac.timeScale(vecin, rates, track.sampleRate, 0)

    # build timestretch AudioData object
    ts = AudioData(ndarray=vecout,
                   shape=vecout.shape,
                   sampleRate=track.sampleRate,
                   numChannels=vecout.shape[1],
                   verbose=verbose)

    # initial and final playback
    pb1 = Playback(track, 0, beats[0].start)
    pb2 = Playback(track, beats[-1].start,
                   track.analysis.duration - beats[-1].start)

    return [pb1, ts, pb2]
Example #35
def do_work(track, options):

    # This manages the various input options
    verbose = bool(options.verbose)
    low_tempo = float(options.low)
    high_tempo = float(options.high)
    rate_tempo = float(options.rate)
    rubato = float(options.rubato)
    tempo = float(options.tempo)

    # This sets the tempo and applies acceleration or not
    if rate_tempo == 0:
        if tempo == 0:
            low_tempo = track.analysis.tempo['value']
            high_tempo = low_tempo
        else:
            low_tempo = tempo
            high_tempo = tempo

    rates = []
    count = min(max(0, int(options.offset)), 1)
    beats = track.analysis.beats
    offset = int(beats[0].start * track.sampleRate)

    # For every beat, we get a tempo, and apply a time stretch
    for beat in beats[:-1]:

        # Get a tempo for the beat
        target_tempo = select_tempo(beats.index(beat), len(beats), low_tempo,
                                    high_tempo, rate_tempo)

        # Calculate rates for time stretching each beat.
        #
        if count == 0:
            dur = beat.duration / 2.0
            rate1 = 60.0 / (target_tempo * dur)
            stretch = dur * rate1
            rate2 = rate1 + rubato
        elif count == 1:
            rate1 = 60.0 / (target_tempo * beat.duration)

        # Add a change of rate at a given time
        start1 = int(beat.start * track.sampleRate)
        rates.append((start1 - offset, rate1))
        if count == 0:
            start2 = int((beat.start + dur) * track.sampleRate)
            rates.append((start2 - offset, rate2))

        # This prints what's happening, if verbose mode is on.
        if verbose:
            if count == 0:
                args = (beats.index(beat), count, beat.duration, dur * rate1,
                        dur * rate2, 60.0 / (dur * rate1),
                        60.0 / (dur * rate2))
                print "Beat %d (%d) | stretch %.3f sec into [%.3f|%.3f] sec | tempo = [%d|%d] bpm" % args
            elif count == 1:
                args = (beats.index(beat), count, beat.duration,
                        beat.duration * rate1, 60.0 / (beat.duration * rate1))
                print "Beat %d (%d) | stretch %.3f sec into %.3f sec | tempo = %d bpm" % args

        count = (count + 1) % 2

    # This gets the audio
    vecin = track.data[offset:int(beats[-1].start * track.sampleRate), :]

    # This does the time stretch
    if verbose:
        print "\nTime stretching..."
    # Dirac is a timestretching tool that comes with remix.
    vecout = dirac.timeScale(vecin, rates, track.sampleRate, 0)

    # This builds the timestretched AudioData object
    ts = AudioData(ndarray=vecout,
                   shape=vecout.shape,
                   sampleRate=track.sampleRate,
                   numChannels=vecout.shape[1],
                   verbose=verbose)

    # Create playback objects (just a collection of audio) for the first and last beat
    pb1 = Playback(track, 0, beats[0].start)
    pb2 = Playback(track, beats[-1].start,
                   track.analysis.duration - beats[-1].start)

    # Return the first beat, the time-stretched beats, and the last beat
    return [pb1, ts, pb2]
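One way to read the rate arithmetic above, with illustrative numbers (a 0.5 s beat, a target_tempo of 120 bpm, rubato of 0): beats alternate between being split into two stretched pulses and being left as a single pulse, which is what produces the waltz feel.

target_tempo, beat_duration, rubato = 120.0, 0.5, 0.0

# count == 0: split the beat in half and stretch each half into a full pulse
dur = beat_duration / 2.0                            # 0.25 s
rate1 = 60.0 / (target_tempo * dur)                  # 2.0
rate2 = rate1 + rubato                               # 2.0 (a nonzero rubato lingers on the second half)
# count == 1: the whole next beat becomes one pulse
rate_next = 60.0 / (target_tempo * beat_duration)    # 1.0
# two source beats become three pulses of 0.5 s each, i.e. one bar of 3/4 at the target tempo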
Example #36
	def shift_tempo_and_play(self, audio_quantum, ratio):
		ad = audio_quantum.render()
		scaled_beat = dirac.timeScale(ad.data, ratio)
		self.stream.write(scaled_beat.astype(np.int16).tostring())
Example #37
	def shift_and_play(self, audio_quantum, ratio, semitones):
		ad = audio_quantum.render()
		new_data = PyPitch.shiftPitchSemiTones(dirac.timeScale(ad.data, ratio), semitones)
		self.stream.write(new_data.astype(np.int16).tostring())
Example #38
def mix_tracks(num_beats, file_names, outfile, bpm=None, dbpm=0):
    # file_names is an array of file names
    aud = []
    for file_path in file_names:
        aud.append(audio.LocalAudioFile(file_path))
    num_files = len(aud)
    x = audio.AudioQuantumList()
    
    print >> sys.stderr, "Assembling beats.",

    volume_norm = []
    aud = sorted(aud, key=lambda a: a.analysis.loudness, reverse=True) # Sort from loudest to softest (loudness is in dB, so higher values are louder).
    first_track = aud[0].analysis
    default_tempo = first_track.tempo.get('value', 100)

    main_key = first_track.key.get('value')

    for track in aud:
        volume_norm.append(first_track.loudness / track.analysis.loudness) # This isn't right but I don't want to deal with decibels

    # print "Volume norm:", volume_norm

    aligned_beats = []
    for track in aud:
        section_lengths = []
        song = track.analysis
        sections = song.sections
        for section in sections:
            bars = section.children()
            section_lengths.append(len(bars))


        # Find some regularly repeating section length, call this section the chorus. We'll start the track here
        mode = Counter(section_lengths).most_common(1)
        # print "MODE", mode[0][0]
        beats_from_chorus = []
        start = False
        for section in sections:
            if len(section.children()) == mode[0][0]:
                start = True
            if start:
                bars = section.children()
                for bar in bars:
                    for beat in bar.children():
                        beats_from_chorus.append(beat)
        aligned_beats.append(beats_from_chorus)


    for w in range(num_beats):
        print >> sys.stderr, '.',
        curbeats = []
    
        desired_dur = first_track.beats[w%len(first_track.beats)].duration
        for track, beats, norm in zip(aud, aligned_beats, volume_norm):
            song = track.analysis
            b = beats[w%len(beats)]
            b_audio = b.render()


            modifier = modify.Modify(sampleRate=aud[0].sampleRate, numChannels=b_audio.data.shape[1], blockSize=1000000)
            b_audio = modifier.shiftPitchSemiTones(b_audio, semitones=keyshift(main_key, song.key.get('value'))) # Fix pitch

            scaled_beat = dirac.timeScale(b_audio.data, desired_dur * 1.0 / b_audio.duration) # Fix duration for smoothing
            if bpm:
                # print bpm, dbpm, w, num_beats, default_tempo
                scaled_beat = dirac.timeScale(scaled_beat, default_tempo / (bpm + dbpm * w * 1.0 / num_beats)) # Speed changes
            scaled_beat *= norm # Normalize volume

            ts = audio.AudioData(ndarray=scaled_beat, shape=scaled_beat.shape, sampleRate=aud[0].sampleRate, numChannels=scaled_beat.shape[1])
            curbeats.append(ts)
        
        x.append(audio.Simultaneous(curbeats))
    
    print >> sys.stderr, "\nStarting rendering pass..."
    
    then = time.time()
    # call render_sequentially() with no arguments, and then it calls itself with
    #  contextual arguments for each source, for each AudioQuantum. It's a lot of
    #  tree-walking, but each source file gets loaded once (and takes itself from
    #  memory when its rendering pass finishes).
    x.render().encode(outfile)
    
    print >> sys.stderr, "%f sec for rendering" % (time.time() - then,)
Example #39
from pyechonest import config
config.ECHO_NEST_API_KEY = "O63DZS2LNNEWUO9GC"

"""
This file is useless, just used to try out API calls
"""
#reload(audio)
audio_file = audio.LocalAudioFile("mp3/Calibria.mp3")

beats = audio_file.analysis.beats[128:159]


collect = []
for beat in beats:
	beat_audio = beat.render()
	scaled_beat = dirac.timeScale(beat_audio.data, 1.2)
	ts = audio.AudioData(ndarray=scaled_beat, shape=scaled_beat.shape, 
                sampleRate=audio_file.sampleRate, numChannels=scaled_beat.shape[1])
	collect.append(ts)
print collect
out = audio.assemble(collect, numChannels=2)


# audio_file2 = audio.LocalAudioFile("mp3/Bastille.mp3")
# beats2 = audio_file2.analysis.beats[128:159]


# data1 = audio.getpieces(audio_file, beats)
# # print type(data1)
# # print isinstance(data1, audio.AudioData)
# #out = modify.Modify().shiftTempo(data1, 1)