def main(units, inputFile, outputFile):
    # This takes your input track, sends it to the analyzer, and returns the results.
    audiofile = audio.LocalAudioFile(inputFile)

    # This gets the overall key of the track.
    tonic = audiofile.analysis.key['value']

    # This gets a list of all of the selected units in the track.
    chunks = audiofile.analysis.__getattribute__(units)

    # This is a serious line!
    # It means: "segments that have the tonic as the max pitch and that overlap the start of the <units>".
    # Note the syntax: ".that(do_something)". These work just the way you think they should
    # (that is, they act like list comprehensions for the given statement!).
    # Also note that have_pitch_max, overlap_starts_of, and overlap_ends_of are imported from selection.py.
    segs = audiofile.analysis.segments.that(have_pitch_max(tonic)).that(overlap_starts_of(chunks))

    # Using the same syntax as the line above,
    # this gets all rhythmic units that begin with one of the segments we just found.
    outchunks = chunks.that(overlap_ends_of(segs))

    # This assembles the pieces of audio defined in outchunks from the analyzed audio file.
    out = audio.getpieces(audiofile, outchunks)

    # This writes the newly created audio to the given file.
    out.encode(outputFile)
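For context, here is a minimal sketch of how this function might be wired up as a standalone script. The import paths are assumptions (newer remix releases use echonest.remix.audio, older ones echonest.audio), and the command-line handling is illustrative, not part of the original.

# Sketch only: imports plus a CLI wrapper for main(). Module paths are assumptions --
# adjust them to match your remix installation.
import sys
import echonest.remix.audio as audio          # older releases: import echonest.audio as audio
from echonest.remix.selection import have_pitch_max, overlap_starts_of, overlap_ends_of

if __name__ == '__main__':
    # Hypothetical usage: python tonic.py <beats|bars|tatums> input.mp3 output.mp3
    units, input_filename, output_filename = sys.argv[1], sys.argv[2], sys.argv[3]
    main(units, input_filename, output_filename)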
def main(units, inputFile, outputFile):
    audiofile = audio.LocalAudioFile(inputFile)
    tonic = audiofile.analysis.key['value']
    chunks = audiofile.analysis.__getattribute__(units)

    # "segments that have the tonic as the max pitch and that overlap the start of the <units>"
    # (have_pitch_max() is imported from selection.py)
    segs = audiofile.analysis.segments.that(have_pitch_max(tonic)).that(overlap_starts_of(chunks))

    # "<units> that begin with the above-found segments"
    outchunks = chunks.that(overlap_ends_of(segs))

    out = audio.getpieces(audiofile, outchunks)
    out.encode(outputFile)
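To make the ".that(predicate)" idiom concrete, here is a rough pure-Python illustration: a selection helper like have_pitch_max(tonic) returns a predicate, and .that() keeps only the quanta that predicate accepts, much like a list comprehension. Everything below is a toy stand-in, not the remix library's actual code.

# Illustration only: a toy version of the ".that(predicate)" pattern.
def have_pitch_max(pitch):
    # Returns a predicate: true if `pitch` is the strongest pitch class in the segment.
    def fn(segment):
        return segment['pitches'].index(max(segment['pitches'])) == pitch
    return fn

def that(quanta, predicate):
    # Roughly what the .that() filter does: keep the items the predicate accepts.
    return [q for q in quanta if predicate(q)]

# Toy segments: 12-element chroma vectors, one value per pitch class.
segments = [
    {'pitches': [0.9] + [0.1] * 11},   # peaks on pitch class 0 (C)
    {'pitches': [0.1] * 11 + [0.8]},   # peaks on pitch class 11 (B)
]
print that(segments, have_pitch_max(0))   # keeps only the first segment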
def main(fileOne, fileTwo, outBeat, outMaster):
    audioOne = audio.LocalAudioFile(fileOne)

    # Cut a 20-second "master" clip from the start of the first track and write it out.
    master = audio.AudioQuantum(start=0.0, duration=20.0, source=audioOne)
    master.encode(outMaster)

    audioTwo = audio.LocalAudioFile(fileTwo)
    sample_rate = audioOne.sampleRate
    num_channels = audioOne.numChannels
    out_shape = (len(audioOne) + 100000, num_channels)  # not used below

    # Find the key of the first track, then pull beats from the second track
    # whose starting segments peak on that tonic.
    tonic = audioOne.analysis.key['value']
    chunks = audioTwo.analysis.__getattribute__("beats")
    segs = audioTwo.analysis.segments.that(have_pitch_max(tonic)).that(overlap_starts_of(chunks))
    outchunks = chunks.that(overlap_ends_of(segs))

    # Render the selected beats and write them out.
    out = outchunks.render()
    out.encode(outBeat)
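If you save this version with a __main__ guard like the one sketched earlier, a call might look like this. The filenames are placeholders, not from the original.

# Hypothetical invocation: cut a master clip from song_a, and render the
# tonic-matching beats of song_b, writing two separate mp3s.
main("song_a.mp3", "song_b.mp3", "song_b.beat.mp3", "song_a.master.mp3")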
for i in range(numfiles-1):
    try:
        audioOne = auds[i]
        #audioOne = audio.LocalAudioFile("mp3/"+files[i])

        # Cut a 45-second "master" clip out of each track and write it out.
        master = audio.AudioQuantum(start=15.0, duration=45.0, source=audioOne)
        print "Building master mp3 "+str(i)
        master.encode("f/"+str(i)+".mp3")

        audioTwo = auds[i+1]
        #audioTwo = audio.LocalAudioFile("mp3/"+files[i+1])
        sample_rate = audioOne.sampleRate
        num_channels = audioOne.numChannels
        out_shape = (len(audioOne)+100000, num_channels)  # not used below

        # Find the key of this track, then pull beats from the next track
        # whose starting segments peak on that tonic.
        tonic = audioOne.analysis.key['value']
        chunks = audioTwo.analysis.__getattribute__("beats")
        segs = audioTwo.analysis.segments.that(have_pitch_max(tonic)).that(overlap_starts_of(chunks))
        outchunks = chunks.that(overlap_ends_of(segs))

        # Render the selected beats and write them out.
        out = outchunks.render()
        print "Building beat mp3 "+str(i)
        out.encode("f/"+str(i)+".beat.mp3")

        # Mix the master clip with the beat track via the drums.py script.
        os.system("python drums.py f/"+str(i)+".mp3 f/"+str(i)+".beat.mp3 f/"+str(i)+".mix.mp3 64 4 0.2")
    except:
        print "Failed to build "+str(i)+": "+str(sys.exc_info()[0])
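The loop above assumes that files, auds, and numfiles already exist. Here is a sketch of one way they might be built, assuming an mp3/ input directory and an f/ output directory; the directory names and the .mp3 filter are guesses taken from the commented-out lines and output paths in the loop.

# Sketch only: build the inputs the loop expects.
import os
import echonest.remix.audio as audio   # path may differ by remix version

files = sorted(f for f in os.listdir("mp3") if f.endswith(".mp3"))
numfiles = len(files)
# Analyzing every track up front is slow, but lets the loop reuse each analysis twice.
auds = [audio.LocalAudioFile("mp3/" + f) for f in files]
# Make sure the output directory exists before the loop writes into it.
if not os.path.isdir("f"):
    os.mkdir("f")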