Example #1
def zipSongs(songA, songB, outfile_name, battle_length = 8, random_order = False):
	"""
	Alternates between a downbeat from songA and a downbeat from songB for battle_length bars from each, then plays the remainder of songA.
	
	If random_order is True, songA still always wins, but the play order is reversed approximately half the time.

	"""
	if random_order == True and random.sample([True, False], 1)[0]:
		firstSong = songB
		secondSong = songA
		flipped = True
	else:
		firstSong = songA
		secondSong = songB
		flipped = False
	onesA = firstSong.analysis.beats.that(fall_on_the(1))
	onesB = secondSong.analysis.beats.that(fall_on_the(1))
		
	lacedUp = audio.AudioData(shape=
		(int(1.2*len(firstSong.data)),2), numChannels=firstSong.numChannels, sampleRate=firstSong.sampleRate)
	overlap_time = 0
	for bar_num in xrange(battle_length):
		lacedUp.append(audio.getpieces(firstSong, [onesA[bar_num]]))
		lacedUp.append(audio.getpieces(secondSong, [onesB[bar_num]]))
		overlap_time += onesA[bar_num].duration + onesB[bar_num].duration
	if flipped:
		lacedUp.append(audio.getpieces(firstSong, [onesA[battle_length]]))
		overlap_time += onesA[battle_length].duration
	lacedUp.append(audio.getpieces(songA,songA.analysis.bars[battle_length:]))
	lacedUp.encode(outfile_name)
	return overlap_time
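A minimal usage sketch for zipSongs, assuming echonest.audio is importable as in the later examples, the selection helpers used inside zipSongs are in scope, and the two MP3 file names are placeholders:

import random
import echonest.audio as audio

songA = audio.LocalAudioFile("winner.mp3")      # placeholder file name
songB = audio.LocalAudioFile("challenger.mp3")  # placeholder file name
# songA always "wins"; roughly half the time the play order is flipped.
overlap = zipSongs(songA, songB, "battle.mp3", battle_length=8, random_order=True)
print "overlapping battle section lasts %.2f seconds" % overlap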
Example #2
def get_loops(fileobj, output_name="out.mp3", bars_count=8, bars_start=1):
    print "analyzing"
    audio_file = audio.LocalAudioFile(fileobj.name)
    print "done"
    
    print "%d bars" % len(audio_file.analysis.bars)

    collect = audio.AudioQuantumList()
    
    bars = audio_file.analysis.bars
    repeats = 1
    if len(bars)-bars_start < bars_count:
        bars_count = 4
    if len(bars)-bars_start < bars_count:
        bars_count = 1

    print "actual bar count was %d" % (bars_count)
    for y in xrange(repeats):
        for x in xrange(bars_count):
            collect.append(audio_file.analysis.bars[bars_start+x])
    
    out = audio.getpieces(audio_file, collect)
    output_temp = tempfile.NamedTemporaryFile(mode="w+b", suffix=".mp3")
    out.encode(output_temp.name)
    
    # Do it again
    new_one = audio.LocalAudioFile(output_temp.name)
    analysis = json.loads(urllib.urlopen(new_one.analysis.pyechonest_track.analysis_url).read())
    
    
    return (output_temp, analysis)
Example #3
def main(infile, outfile, choices=4, bars=40):
    audiofile = audio.LocalAudioFile(infile)
    meter = audiofile.analysis.time_signature['value']
    fade_in = audiofile.analysis.end_of_fade_in
    fade_out = audiofile.analysis.start_of_fade_out
    segments = audiofile.analysis.segments.that(are_contained_by_range(fade_in, fade_out))
    beats = audiofile.analysis.beats.that(are_contained_by_range(segments[0].start, segments[-1].end))
    
    outchunks = audio.AudioQuantumList()
    
    b = []
    segstarts = []
    for m in range(meter):
        b.append(beats.that(are_beat_number(m)))
        segstarts.append(segments.that(overlap_starts_of(b[m])))
        
    now = b[0][0]
    
    for x in range(0, bars * meter):
        beat = x % meter
        next_beat = (x + 1) % meter
        now_end_segment = segments.that(contain_point(now.end))[0]
        next_candidates = segstarts[next_beat].ordered_by(timbre_distance_from(now_end_segment))
        next_choice = next_candidates[random.randrange(min(choices, len(next_candidates)))]
        next = b[next_beat].that(start_during(next_choice))[0]
        outchunks.append(now)
        print now.context_string()
        now = next
    
    out = audio.getpieces(audiofile, outchunks)
    out.encode(outfile)
Example #4
def main(units, inputFile, outputFile):
    # This takes your input track, sends it to the analyzer, and returns the results.  
    audiofile = audio.LocalAudioFile(inputFile)

    # This gets the overall key of the track
    tonic = audiofile.analysis.key['value']
    
    # This gets a list of all of the selected units in the track.
    chunks = audiofile.analysis.__getattribute__(units)
    
    # This is a serious line!  
    # It means:  "segments that have the tonic as the max pitch and that overlap the start of the <units>"
    # Note the syntax:  ".that(do_something)". These work just the way you think they should
    # (That is, they act like list comprehensions for the given statement!)
    # Also, note that have_pitch_max and overlap are imported from selection.py
    segs = audiofile.analysis.segments.that(have_pitch_max(tonic)).that(overlap_starts_of(chunks))
    
    # Using the same syntax as the above line:
    # this line gets all rhythmic units that begin with the segment we found above
    outchunks = chunks.that(overlap_ends_of(segs))
    
    # This assembles the pieces of audio defined in collect from the analyzed audio file.
    out = audio.getpieces(audiofile, outchunks)
    
    # This writes the newly created audio to the given file.
    out.encode(outputFile)
Example #5
    def saveChunk(self, chunk, chunkId):
        audioChunk = audio.getpieces(self.audioFile, [chunk])
        dir = self.filename + "_chunks"
        if not os.path.exists(dir):
            os.makedirs(dir)
        filename = dir + "/chunk_" + chunkId + ".wav"
        audioChunk.encode(filename)
Example #7
def main(units, inputFile, outputFile):
    audiofile = audio.LocalAudioFile(inputFile)
    tonic = audiofile.analysis.key['value']
    
    chunks = audiofile.analysis.__getattribute__(units)
    
    # Get the segments    
    all_segments = audiofile.analysis.segments
    
    # Find tonic segments
    tonic_segments = audio.AudioQuantumList(kind="segment")
    for segment in all_segments:
        pitches = segment.pitches
        if pitches.index(max(pitches)) == tonic:
            tonic_segments.append(segment)

    # Find each chunk that matches each segment
    out_chunks = audio.AudioQuantumList(kind=units) 
    for chunk in chunks:
        for segment in tonic_segments:
            if chunk.start >= segment.start and segment.end >= chunk.start:
                out_chunks.append(chunk)
                break
    
    out = audio.getpieces(audiofile, out_chunks)
    out.encode(outputFile)
Example #8
def main() :
    config.ECHO_NEST_API_KEY= "GG0IO4JU1FZQJ0IH1"
    clips = file(sys.argv[1], "r")
    dataList = []
    rate = 12000
    channels = 2
    for line in clips:
        detailArray = line.split('|')
        songFile = detailArray[0]
        startTime = float(detailArray[1])
        duration = float(detailArray[2])
        endTime = startTime + duration
        n = numpy.array([])
        e = echoAudio.AudioData(songFile, n, None, rate, channels)
        q = echoAudio.AudioQuantumList(None, "segment", None, e)
        a = echoAudio.AudioAnalysis(songFile)
        segments = a.segments
        finalSeg = segments[len(segments) - 1]
        songLength = finalSeg.start + finalSeg.duration
        complete = False
        while not complete:
            for s in segments:
                if s.start >= startTime and s.start <= endTime:
                    q.append(s)
            if songLength >= endTime:
                complete = True
            else:
                complete = False
                endTime = endTime - songLength
        p = echoAudio.getpieces(e, q)
        dataList.append(p)
    clips.close()
    echoAudio.assemble(dataList, channels, rate).encode(sys.argv[2])
Example #9
def inorder():
  audio_file = audio.LocalAudioFile("../zambi.mp3")
  parts = audio_file.analysis.segments

  t = Tree()
  for p in parts:
    # start, duration, timbre, loudness_begin, loudness_max, time_loudness_max or loudness_end
    t.add(p.loudness_max, p)

  ordered = []
  def visit(k, v):
    ordered.append(v)

  t.traverse('postorder', visit)

  # And render the list as a new audio file!
  audio.getpieces(audio_file, ordered).encode("../inorder.mp3")
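A related sketch that orders segments by ascending loudness_max with Python's built-in sorted() instead of the custom Tree (the Tree-based version above depends on that class and its traversal order); the file names are placeholders in the same style:

import echonest.audio as audio

audio_file = audio.LocalAudioFile("../zambi.mp3")
# Quietest segments first, loudest last.
quietest_first = sorted(audio_file.analysis.segments, key=lambda seg: seg.loudness_max)
audio.getpieces(audio_file, quietest_first).encode("../inorder_sorted.mp3")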
Example #10
def main(input_filename, output_filename):
    audiofile = audio.LocalAudioFile(input_filename)
    bars = audiofile.analysis.bars
    collect = audio.AudioQuantumList()
    for bar in bars:
        collect.append(bar.children()[0])
    out = audio.getpieces(audiofile, collect)
    out.encode(output_filename)
Example #11
def main(input_filename, output_filename):
    audiofile = audio.LocalAudioFile(input_filename)
    bars = audiofile.analysis.bars
    collect = audio.AudioQuantumList()
    for bar in bars:
        collect.append(bar)
        collect.append(bar)
    out = audio.getpieces(audiofile, collect)
    out.encode(output_filename)
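A hedged generalization of the same idea: repeat every bar N times instead of twice. The function name and default value below are illustrative, not from the original:

import echonest.audio as audio

def repeat_bars(input_filename, output_filename, times=3):
    audiofile = audio.LocalAudioFile(input_filename)
    collect = audio.AudioQuantumList()
    for bar in audiofile.analysis.bars:
        # Append each bar "times" times in a row.
        for _ in xrange(times):
            collect.append(bar)
    out = audio.getpieces(audiofile, collect)
    out.encode(output_filename)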
Example #12
def split_file_into_bars(track_name, bar_list):
    i = 0
    for bars in bar_list:
        four_bar_chunk = audio.AudioQuantumList()
        for bar in bars:
            four_bar_chunk.append(bar)
        
        audiofile = audio.LocalAudioFile("music/tracks/"+track_name+".mp3")
        out = audio.getpieces(audiofile, four_bar_chunk)
        i = i + 1
        out.encode("music/output/"+track_name+"-chunk-"+str(i))
Example #13
def main(input_filename, output_filename, break_filename, break_parts, measures, mix):
    audiofile = audio.LocalAudioFile(input_filename)
    sample_rate = audiofile.sampleRate
    breakfile = audio.LocalAudioFile(break_filename)
    if breakfile.numChannels == 1:
        breakfile = mono_to_stereo(breakfile)
    num_channels = audiofile.numChannels
    drum_data = split_break(breakfile, break_parts)
    hits_per_beat = int(break_parts / (4 * measures))
    bars = audiofile.analysis.bars
    out_shape = (len(audiofile) + 100000, num_channels)
    out = audio.AudioData(shape=out_shape, sampleRate=sample_rate, numChannels=num_channels)
    if not bars:
        print "Didn't find any bars in this analysis!"
        print "No output."
        sys.exit(-1)
    for bar in bars[:-1]:
        beats = bar.children()
        for i in range(len(beats)):
            try:
                break_index = ((bar.local_context()[0] % measures) * 4) + (i % 4)
            except ValueError:
                break_index = i % 4
            tats = range((break_index) * hits_per_beat, (break_index + 1) * hits_per_beat)
            drum_samps = sum([len(drum_data[x]) for x in tats])
            beat_samps = len(audiofile[beats[i]])
            beat_shape = (beat_samps, num_channels)
            tat_shape = (float(beat_samps / hits_per_beat), num_channels)
            beat_data = audio.AudioData(shape=beat_shape, sampleRate=sample_rate, numChannels=num_channels)
            for j in tats:
                tat_data = audio.AudioData(shape=tat_shape, sampleRate=sample_rate, numChannels=num_channels)
                if drum_samps > beat_samps / hits_per_beat:
                    # truncate drum hits to fit beat length
                    tat_data.data = drum_data[j].data[: len(tat_data)]
                elif drum_samps < beat_samps / hits_per_beat:
                    # space out drum hits to fit beat length
                    # temp_data = add_fade_out(drum_data[j])
                    tat_data.append(drum_data[j])
                tat_data.endindex = len(tat_data)
                beat_data.append(tat_data)
                del (tat_data)
            # account for rounding errors
            beat_data.endindex = len(beat_data)
            mixed_beat = audio.mix(beat_data, audiofile[beats[i]], mix=mix)
            del (beat_data)
            out.append(mixed_beat)
    finale = bars[-1].start + bars[-1].duration
    last = audio.AudioQuantum(
        audiofile.analysis.bars[-1].start, audiofile.analysis.duration - audiofile.analysis.bars[-1].start
    )
    last_data = audio.getpieces(audiofile, [last])
    out.append(last_data)
    out.encode(output_filename)
Example #14
def main(toReverse, inputFilename, outputFilename):
    audioFile = audio.LocalAudioFile(inputFilename)
    if toReverse == 'beats' :
        chunks = audioFile.analysis.beats
    elif toReverse == 'segments' :
        chunks = audioFile.analysis.segments
    else :
        print usage
        return
    chunks.reverse()
    reversedAudio = audio.getpieces(audioFile, chunks)
    reversedAudio.encode(outputFilename)
Example #15
def main(input_filename, output_filename, index):
    audio_file = audio.LocalAudioFile(input_filename)
    beats = audio_file.analysis.beats
    collect = audio.AudioQuantumList()
    for beat in beats:
        tata = beat.children()
        if len(tata)>1:
            tat = tata[index]
        else:
            tat = tata[0]
        collect.append(tat)
    out = audio.getpieces(audio_file, collect)
    out.encode(output_filename)
Example #16
def main(username):
  auds=[]
  p = re.compile("mix")
  files = os.listdir("f")
  for file in files:
    m = p.findall(file)
    if m:
      print "processing f/"+file
      try:
        auds.append(audio.LocalAudioFile("f/"+file))
      except:
        print "failed to include "+file
  
  auds.sort(key=keysig)
  
  mixed = []
  end = None
  previous = None
  for aud in auds:
    bars = aud.analysis.bars
    try:
      if end is not None and previous is not None:
        mix = audio.mix(audio.getpieces(previous, [end]), audio.getpieces(aud, [bars[0]]), 0.5)
        mixed.append(mix)
      else:
        mixed.append(audio.getpieces(aud, [bars[0]]))
    except:
      print "failed to create mix bar"

    try:
      mixed.append(audio.getpieces(aud, bars[1:-5]))
      end = bars[-5]
      previous = aud
    except:
      print "unable to append bars"

  out = audio.assemble(mixed, numChannels=2)
  out.encode(username+".mp3")
Example #17
def main(input_filename1, input_filename2, output_filename):
    audiofile1 = audio.LocalAudioFile(input_filename1)
    audiofile2 = audio.LocalAudioFile(input_filename2)

    beats1 = audiofile1.analysis.beats
    beats2 = audiofile2.analysis.beats

    l = min([len(beats1), len(beats2)])

    collect = audio.AudioQuantumList()
    out = None
    for i in xrange(l):
        if i % 2 == 1:
            beat = beats1[i]
            next = audio.getpieces(audiofile1, [beat])
        else:
            beat = beats2[i]
            next = audio.getpieces(audiofile2, [beat])
        if out is None:
            out = next
        else:
            out.append(next)

    out.encode(output_filename)
Example #18
def main(units, inputFile, outputFile):
    audiofile = audio.LocalAudioFile(inputFile)
    tonic = audiofile.analysis.key['value']
    
    chunks = audiofile.analysis.__getattribute__(units)
    
    # "segments that have the tonic as the max pitch and that overlap the start of the <units>"
    # (have_pitch_max() is imported from selection.py)
    segs = audiofile.analysis.segments.that(have_pitch_max(tonic)).that(overlap_starts_of(chunks))
    
    # "<units> that begin with the above-found segments"
    outchunks = chunks.that(overlap_ends_of(segs))
    
    out = audio.getpieces(audiofile, outchunks)
    out.encode(outputFile)
Example #19
def main(infile, outfile, choices=4):
    audiofile = audio.LocalAudioFile(infile)
    meter = audiofile.analysis.time_signature['value']
    fade_in = audiofile.analysis.end_of_fade_in
    fade_out = audiofile.analysis.start_of_fade_out
    sections = audiofile.analysis.sections.that(overlap_range(fade_in, fade_out))
    outchunks = audio.AudioQuantumList()

    for section in sections:
        print str(section) + ":"
        beats = audiofile.analysis.beats.that(are_contained_by(section))
        segments = audiofile.analysis.segments.that(overlap(section))
        num_bars = len(section.children())
        
        print "\t", len(beats), "beats,", len(segments), "segments"
        if len(beats) < meter:
            continue
        
        b = []
        segstarts = []
        for m in range(meter):
            b.append(beats.that(are_beat_number(m)))
            segstarts.append(segments.that(overlap_starts_of(b[m])))
        
        if not b:
            continue
        elif not b[0]:
            continue
        
        now = b[0][0]
        
        for x in range(0, num_bars * meter):
            beat = x % meter
            next_beat = (x + 1) % meter
            now_end_segment = segments.that(contain_point(now.end))[0]
            next_candidates = segstarts[next_beat].ordered_by(timbre_distance_from(now_end_segment))
            if not next_candidates:
                continue
            next_choice = next_candidates[random.randrange(min(choices, len(next_candidates)))]
            next = b[next_beat].that(start_during(next_choice))[0]
            outchunks.append(now)
            print "\t" + now.context_string()
            now = next
    
    out = audio.getpieces(audiofile, outchunks)
    out.encode(outfile)
Example #20
def main(input_filename, output_filename):
    # This takes your input track, sends it to the analyzer, and returns the results.
    audiofile = audio.LocalAudioFile(input_filename)

    # This gets a list of every bar in the track.
    # You can manipulate this just like any other Python list!
    bars = audiofile.analysis.bars

    # This makes a new list of "AudioQuantums".
    # Those are just any discrete chunk of audio:  bars, beats, etc.
    collect = audio.AudioQuantumList()

    # This loop puts the first item in the children of each bar into the new list.
    # A bar's children are beats!  Simple as that.
    for bar in bars:
        collect.append(bar.children()[0])

    # This assembles the pieces of audio defined in collect from the analyzed audio file.
    out = audio.getpieces(audiofile, collect)

    # This writes the newly created audio to the given file.
    out.encode(output_filename)
Example #21
File: one.py Project: tthew/remix
def main(input_filename, output_filename):
    # This takes your input track, sends it to the analyzer, and returns the results.  
    audiofile = audio.LocalAudioFile(input_filename)

    # This gets a list of every bar in the track.  
    # You can manipulate this just like any other Python list!
    bars = audiofile.analysis.bars

    # This makes a new list of "AudioQuantums".  
    # Those are just any discrete chunk of audio:  bars, beats, etc
    collect = audio.AudioQuantumList()

    # This loop puts the first item in the children of each bar into the new list. 
    # A bar's children are beats!  Simple as that. 
    for bar in bars:
        collect.append(bar.children()[0])

    # This assembles the pieces of audio defined in collect out of the analyzed audio file.
    out = audio.getpieces(audiofile, collect)
    
    # This writes the newly created audio to the given file.
    out.encode(output_filename)
Example #22
def main(units, inputFile, outputFile):
    audiofile = audio.LocalAudioFile(inputFile)
    collect = audio.AudioQuantumList()
    if not audiofile.analysis.bars:
        print "No bars found in this analysis!"
        print "No output."
        sys.exit(-1)
    for b in audiofile.analysis.bars[0:-1]:
        # all but the last beat
        collect.extend(b.children()[0:-1])
        if units.startswith("tatum"):
            # all but the last half (round down) of the last beat
            half = -(len(b.children()[-1].children()) // 2)
            collect.extend(b.children()[-1].children()[0:half])
    # endings were rough, so leave everything after the start
    # of the final bar intact:
    last = audio.AudioQuantum(
        audiofile.analysis.bars[-1].start, audiofile.analysis.duration - audiofile.analysis.bars[-1].start
    )
    collect.append(last)
    out = audio.getpieces(audiofile, collect)
    out.encode(outputFile)
Example #23
def main(toReverse, inputFilename, outputFilename):
    # This takes your input track, sends it to the analyzer, and returns the results.  
    audioFile = audio.LocalAudioFile(inputFilename)

    # Checks what sort of reversing we're doing.
    if toReverse == 'beats' :
        # This gets a list of every beat in the track.  
        chunks = audioFile.analysis.beats
    elif toReverse == 'segments' :
        # This gets a list of every segment in the track.  
        # Segments are the smallest chunk of audio that Remix deals with
        chunks = audioFile.analysis.segments
    else :
        print usage
        return

    # Reverse the list!
    chunks.reverse()

    # This assembles the pieces of audio defined in chunks from the analyzed audio file.
    reversedAudio = audio.getpieces(audioFile, chunks)
    # This writes the newly created audio to the given file.  
    reversedAudio.encode(outputFilename)
Example #24
def main(toReverse, inputFilename, outputFilename):
    # This takes your input track, sends it to the analyzer, and returns the results.
    audioFile = audio.LocalAudioFile(inputFilename)

    # Checks what sort of reversing we're doing.
    if toReverse == 'beats':
        # This gets a list of every beat in the track.
        chunks = audioFile.analysis.beats
    elif toReverse == 'segments':
        # This gets a list of every segment in the track.
        # Segments are the smallest chunk of audio that Remix deals with
        chunks = audioFile.analysis.segments
    else:
        print usage
        return

    # Reverse the list!
    chunks.reverse()

    # This assembles the pieces of audio defined in chunks from the analyzed audio file.
    reversedAudio = audio.getpieces(audioFile, chunks)
    # This writes the newly created audio to the given file.
    reversedAudio.encode(outputFilename)
Example #25
def main(input_filename, output_filename):
    choices = 0
    song = audio.LocalAudioFile(input_filename)
    meter = song.analysis.time_signature['value']
    meter_conf = song.analysis.time_signature['confidence']
    tempo_conf = song.analysis.tempo['confidence']
    sections = song.analysis.sections
    last_segment = song.analysis.segments[len(song.analysis.segments) - 1]
    sl = len(sections)
    print "meter confidence"
    print meter_conf
    print "meter"
    print song.analysis.time_signature
    print "number of sections"
    print sl
    outchunks = audio.AudioQuantumList()
    if (meter_conf > 0.2):
        outchunks = strong_meter(choices, song, meter, sections, sl, outchunks)
    else:
        outchunks = weak_meter(choices, song, sections, sl, outchunks)
    outchunks.append(last_segment)
    out = audio.getpieces(song, outchunks)
    out.encode(output_filename)
Example #27
def main(units, inputFile, outputFile):
    # This takes your input track, sends it to the analyzer, and returns the results. 
    audiofile = audio.LocalAudioFile(inputFile)

    # This makes a new list of "AudioQuantums".  
    # Those are just any discrete chunk of audio:  bars, beats, etc
    collect = audio.AudioQuantumList()

    # If the analysis can't find any bars, stop!
    # (This might happen with really ambient music)
    if not audiofile.analysis.bars:
        print "No bars found in this analysis!"
        print "No output."
        sys.exit(-1)

    # This loop puts all but the last of each bar into the new list! 
    for b in audiofile.analysis.bars[0:-1]:                
        collect.extend(b.children()[0:-1])

        # If we're using tatums instead of beats, we want all but the last half (round down) of the last beat
        # A tatum is the smallest rhythmic subdivision of a beat -- http://en.wikipedia.org/wiki/Tatum_grid     
        if units.startswith("tatum"):
            half = - (len(b.children()[-1].children()) // 2)
            collect.extend(b.children()[-1].children()[0:half])

    # Endings were rough, so leave everything after the start of the final bar intact:
    last = audio.AudioQuantum(audiofile.analysis.bars[-1].start,
                              audiofile.analysis.duration - 
                                audiofile.analysis.bars[-1].start)
    collect.append(last)

    # This assembles the pieces of audio defined in collect from the analyzed audio file.
    out = audio.getpieces(audiofile, collect)

    # This writes the newly created audio to the given file.  
    out.encode(outputFile)
Example #28
def main(units, inputFile, outputFile):
    # This takes your input track, sends it to the analyzer, and returns the results.
    audiofile = audio.LocalAudioFile(inputFile)

    # This makes a new list of "AudioQuantums".
    # Those are just any discrete chunk of audio:  bars, beats, etc
    collect = audio.AudioQuantumList()

    # If the analysis can't find any bars, stop!
    # (This might happen with really ambient music)
    if not audiofile.analysis.bars:
        print "No bars found in this analysis!"
        print "No output."
        sys.exit(-1)

    # This loop puts all but the last of each bar into the new list!
    for b in audiofile.analysis.bars[0:-1]:
        collect.extend(b.children()[0:-1])

        # If we're using tatums instead of beats, we want all but the last half (round down) of the last beat
        # A tatum is the smallest rhythmic subdivision of a beat -- http://en.wikipedia.org/wiki/Tatum_grid
        if units.startswith("tatum"):
            half = -(len(b.children()[-1].children()) // 2)
            collect.extend(b.children()[-1].children()[0:half])

    # Endings were rough, so leave everything after the start of the final bar intact:
    last = audio.AudioQuantum(
        audiofile.analysis.bars[-1].start,
        audiofile.analysis.duration - audiofile.analysis.bars[-1].start)
    collect.append(last)

    # This assembles the pieces of audio defined in collect from the analyzed audio file.
    out = audio.getpieces(audiofile, collect)

    # This writes the newly created audio to the given file.
    out.encode(outputFile)
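As the comments above note, a tatum is the smallest rhythmic subdivision of a beat. A small hedged sketch that prints how many tatums each beat of the first bar contains (assumes echonest.audio; the file name is a placeholder):

import echonest.audio as audio

audiofile = audio.LocalAudioFile("song.mp3")  # placeholder file name
first_bar = audiofile.analysis.bars[0]
for beat in first_bar.children():
    # A bar's children are beats; a beat's children are tatums.
    print "beat at %.2fs has %d tatums" % (beat.start, len(beat.children()))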
Example #29
def mashComponents(localAudioFiles, loudnessMarkers):
    instSegments = localAudioFiles[0].analysis.segments# This is the base track
    vocalSegments = localAudioFiles[1].analysis.segments# This is the overlay track
    instBeats = localAudioFiles[0].analysis.beats[loudnessMarkers[0][0]:
                                                  loudnessMarkers[0][1]]
    vocalBeats = localAudioFiles[1].analysis.beats[loudnessMarkers[1][0]:
                                                   loudnessMarkers[1][1]]
    pitches = meanPitches(instSegments,instBeats)
    timbre = meanTimbre(instSegments,instBeats)
    sections = localAudioFiles[1].analysis.sections #This is the new lead vocal layer
    sections = sections.that(selection.are_contained_by_range(
            vocalBeats[0].start, vocalBeats[-1].start+vocalBeats[-1].duration))
    if len(sections) == 0: sections = localAudioFiles[1].analysis.sections[2:-2]
    pyplot.figure(0,(16,9))
    image = numpy.array(pitches)
    image = numpy.concatenate((image,numpy.array(timbre)),axis = 1)
    image = numpy.concatenate((image,numpy.array(meanLoudness(instSegments,instBeats))),
                              axis = 1)
    """ Now image contains chromatic, timbral, and loudness information"""
    sectBeats = getSectBeats(sections[0]) # get beats that comprise a specific section
    template = numpy.array(meanPitches(vocalSegments,sectBeats))
    template = numpy.concatenate((template,numpy.array(
                                meanTimbre(vocalSegments,sectBeats))),axis=1)
    template = numpy.concatenate((template,numpy.array(
                                meanLoudness(vocalSegments,sectBeats))),axis = 1)
    im = feature.match_template(image,template,pad_input=True)
    maxValues = [] #tuples of x coord, y coord, correlation, and section len(in secs)
    ij = numpy.unravel_index(numpy.argmax(im), im.shape)
    x, y = ij[::-1]
    maxValues.append((numpy.argmax(im),x,y,sections[0].duration))
    for i in range(len(sections)-1):
        sectBeats = getSectBeats(sections[i+1])
        template = numpy.array(meanPitches(vocalSegments,sectBeats))
        template = numpy.concatenate((template,numpy.array(
                                meanTimbre(vocalSegments,sectBeats))), axis=1)
        template = numpy.concatenate((template,numpy.array(
                                meanLoudness(vocalSegments,sectBeats))),axis = 1)
        match = feature.match_template(image,template,pad_input=True)
        ij = numpy.unravel_index(numpy.argmax(match), match.shape)
        x, y = ij[::-1]
        maxValues.append((numpy.argmax(match),
                          TEMPLATE_WIDTH*i+x,y,sections[i+1].duration))
        im = numpy.concatenate((im,match),axis = 1)
    maxValues.sort()
    maxValues.reverse()
    try:
        count = 0
        while(maxValues[count][3] < 10.0): # choose a section longer than 10 secs
            count += 1
        x = maxValues[count][1]
        y = maxValues[count][2]
    except:        
        print "exception in mashComponents..."
        ij = numpy.unravel_index(numpy.argmax(im), im.shape)
        x, y = ij[::-1]
    pyplot.imshow(im, cmap = pyplot.get_cmap('gray'), aspect = 'auto')
    pyplot.plot(x,y,'o',markeredgecolor='r',markerfacecolor='none',markersize=15)
    pyplot.show()
    sectionBeats = getSectBeats(sections[x/TEMPLATE_WIDTH])
    print "len(sectionBeats): ", len(sectionBeats)
    print "len(instBeats): ", len(instBeats)
    print "y: ", y
    y = instBeats[y].absolute_context()[0]
    instBeats = localAudioFiles[0].analysis.beats 
    matchingBeats = instBeats[(y-len(sectionBeats)/2):(y+len(sectionBeats)/2)]
    print "len(matchingBeats): ", len(matchingBeats)
    matchingBeats = matchingBeats[-len(sectionBeats):]
    print "len(matchingBeats): ", len(matchingBeats)
    """ Check to make sure lengths of beat lists are equal... """
    if len(matchingBeats) != len(sectionBeats):
        print "len(matchingBeats) != len(sectionBeats). For now, I will just truncate..."
        print "len(matchingBeats): ", len(matchingBeats)
        print "len(sectionBeats): ", len(sectionBeats)
        if len(matchingBeats) > len(sectionBeats):matchingBeats = matchingBeats[
                                                            :len(sectionBeats)]
        else: sectionBeats = sectionBeats[:len(matchingBeats)]
    """ I have to make sure sectionBeats and matchingBeats are similarly aligned
        within their group, aka bar of four beats. I will add a beat to the beginning
        of matchingBeats until that condition is met. I re-initialize instBeats and
        vocalBeats, because now I want to include the areas outside of those marked
        off by AutomaticDJ for fade ins and fade outs."""
    vocalBeats = localAudioFiles[1].analysis.beats
    while(matchingBeats[0].local_context()[0] != sectionBeats[0].local_context()[0]):
        matchingBeats.insert(0,instBeats[matchingBeats[0].absolute_context()[0]-1])
        sectionBeats.append(vocalBeats[sectionBeats[-1].absolute_context()[0]+1])
    """ Check to make sure lengths of beat lists are equal... """
    if len(matchingBeats) != len(sectionBeats):
        print "len(matchingBeats) != len(sectionBeats) at the second checkpoint."
        print "This should not be the case. The while loop must not be adding beats"
        print "to both lists equally."
        print "len(matchingBeats): ", len(matchingBeats)
        print "len(sectionBeats): ", len(sectionBeats)
        sys.exit()
    """ Next, I will use the beats around the designated beats above to transition into
    and out of the mashup. """
    XLEN = 4 # number of beats in crossmatch
    if(matchingBeats[0].absolute_context()[0] < XLEN or
       len(instBeats) - matchingBeats[-1].absolute_context()[0] - 1 < XLEN or
       sectionBeats[0].absolute_context()[0] < XLEN or
       len(vocalBeats) - sectionBeats[-1].absolute_context()[0] - 1 < XLEN):
        XLEN -= 1
    BUFFERLEN = 12 # number of beats before and after crossmatches
    while(matchingBeats[0].absolute_context()[0] < BUFFERLEN+XLEN or
       len(instBeats) - matchingBeats[-1].absolute_context()[0] - 1 < BUFFERLEN+XLEN or
       sectionBeats[0].absolute_context()[0] < BUFFERLEN+XLEN or
       len(vocalBeats) - sectionBeats[-1].absolute_context()[0] - 1 < BUFFERLEN+XLEN):
        BUFFERLEN -= 1
    try:
        """ These are the 4 beats before matchingBeats. These are the four beats of the
        instrumental track that preclude the mashed section. """
        b4beatsI = instBeats[matchingBeats[0].absolute_context()[0]-XLEN:
                            matchingBeats[0].absolute_context()[0]]
        """ These are the 4 beats after matchingBeats. These are the four beats of the
        instrumental track that follow the mashed section. """
        afterbeatsI = instBeats[matchingBeats[-1].absolute_context()[0]+1:
                            matchingBeats[-1].absolute_context()[0]+1+XLEN]
        if(len(b4beatsI) != len(afterbeatsI)):
            print "The lengths of b4beatsI and afterbeatsI are not equal."
        """ These are the 16 beats before the 4-beat crossmatch into matchingBeats. """
        preBufferBeats = instBeats[matchingBeats[0].absolute_context()[0]-BUFFERLEN-XLEN:
                                            matchingBeats[0].absolute_context()[0]-XLEN]
        """ These are the 16 beats before the 4-beat crossmatch into matchingBeats. """
        postBufferBeats = instBeats[matchingBeats[-1].absolute_context()[0]+1+XLEN:
                                matchingBeats[-1].absolute_context()[0]+1+XLEN+BUFFERLEN]
        if(len(preBufferBeats) != len(postBufferBeats)):
            print "The lengths of preBufferBeats and postBufferBeats are not equal."
            print "len(preBufferBeats): ", len(preBufferBeats)
            print "len(postBufferBeats): ", len(postBufferBeats)
            print matchingBeats[-1].absolute_context()[0]
            print len(instBeats)
            sys.exit()
        """ These are the 4 beats before matchingBeats. These are the four beats of the
        new vocal track that preclude the mashed section. """
        b4beatsV = vocalBeats[sectionBeats[0].absolute_context()[0]-XLEN:
                            sectionBeats[0].absolute_context()[0]]
        """ These are the 4 beats after matchingBeats. These are the four beats of the 
        new vocal track that follow the mashed section. """
        afterbeatsV = vocalBeats[sectionBeats[-1].absolute_context()[0]+1:
                            sectionBeats[-1].absolute_context()[0]+1+XLEN]
        if(len(b4beatsV) != len(afterbeatsV)):
            print "The lengths of b4beatsI and afterbeatsI are not equal."
            sys.exit()
    except: 
        print "exception in 4 beat try block."
        sys.exit()
    """ vocData: An AudioData object for the new vocal data that will be overlaid. 
        instData: An AudioData object for the base instrumental track. 
        originalVocData: An AudioData object of the original vocal to accompany 
            the new one. 
        vocalMix: An AudioData of both vocal tracks mixed together, in order to 
            keep the overall vocal loudness approximately constant. 
        mix: An AudioData of the instrumental track and combined vocals
            mixed together. """
    vocData = audio.getpieces(localAudioFiles[3],b4beatsV+sectionBeats+afterbeatsV)
    instData = audio.getpieces(localAudioFiles[2],b4beatsI+matchingBeats+afterbeatsI)
    if instData.data.shape[0] >= vocData.data.shape[0]: 
        mix = audio.megamix([instData, vocData])
    else: 
        mix = audio.megamix([vocData, instData]) # the longer data set has to go first.
    mix.encode('mix.mp3')
    vocData.encode('vocData.mp3')
    """ Now, make a similar mix for before the mashed sections..."""
    instData = audio.getpieces(localAudioFiles[2], preBufferBeats + b4beatsI)
    vocData = audio.getpieces(localAudioFiles[4], preBufferBeats + b4beatsI)
    premix = audio.megamix([instData, vocData])
    """ ...and another mix for after the mashed sections."""
    instData = audio.getpieces(localAudioFiles[2], afterbeatsI + postBufferBeats)
    vocData = audio.getpieces(localAudioFiles[4], afterbeatsI + postBufferBeats)
    postmix = audio.megamix([instData, vocData])
    """ Now, I have three AudioData objects, mix, premix, and postmix, that overlap by
    four beats. I will build Crossmatch objects from the overlapping regions, and three 
    Playback objects for the areas that are not in transition. """
    action.make_stereo(premix)
    action.make_stereo(mix)
    action.make_stereo(postmix)
    preBuffdur = sum([p.duration for p in preBufferBeats]) # duration of preBufferBeats
    playback1 = action.Playback(premix,0.0,preBuffdur)
    b4dur = sum([p.duration for p in b4beatsI]) # duration of b4beatsI
    crossfade1 = action.Crossfade((premix,mix),(preBuffdur,0.0),b4dur) 
    abdur = sum([p.duration for p in afterbeatsI])
    playback2 = action.Playback(mix,b4dur,mix.duration - b4dur - abdur)
    crossfade2 = action.Crossfade((mix,postmix),(mix.duration - abdur,0.0),abdur) 
    playback3 = action.Playback(postmix,abdur,sum([p.duration for p in postBufferBeats]))
    action.render([playback1,crossfade1,playback2,crossfade2,playback3], 'mashup.mp3')
Example #30
"""Reverse a song by playing its beats forward starting from the end of the song"""
import echonest.audio as audio

# Easy wrapper around mp3 decoding and Echo Nest analysis
audio_file = audio.LocalAudioFile("Haiti.mp3")

# You can manipulate the beats in a song as a native python list
beats = audio_file.analysis.beats
beats.reverse()

# And render the list as a new audio file!
audio.getpieces(audio_file, beats).encode("Haiti_modded.mp3")
Example #31
    def compileIntro(self):
        """
            Compiles the dubstep introduction. Returns an AudioData of the first 8 bars.
            (8 bars at 140 bpm = ~13.71 seconds of audio)
            If song is not 4/4, tries to even things out by speeding it up by the appropriate amount.

            Pattern:
                first 4 bars of song
                first beat of 1st bar x 4   (quarter notes)
                first beat of 2nd bar x 4   (quarter notes)
                first beat of 3rd bar x 8   (eighth notes)
                first beat of 4th bar x 8   (sixteenth notes)
                third beat of 4th bar x 8   (sixteenth notes)
        """
        out = audio.AudioQuantumList()
        intro = audio.AudioData(self.sample_path + self.template['intro'], sampleRate=44100, numChannels=2, verbose=False)
        
        #   First 4 bars of song
        custom_bars = []

        if not self.beats or len(self.beats) < 16:
            #   Song is not long or identifiable enough
            #   Take our best shot at making something
            self.tempo = 60.0 * 16.0 / self.original.duration
            for i in xrange(0, 4):
                bar = []
                for j in xrange(0, 4):
                    length = self.original.duration / 16.0
                    start = ((i * 4) + j) * length
                    bar.append(audio.AudioQuantum(start, length, None, 0, self.original.source))
                custom_bars.append(bar)
        else:
            for i in xrange(0, 4):
                custom_bars.append(self.beats[i*4:(i*4)+4])
        out.extend([x for bar in custom_bars for x in bar])

        #   First beat of first bar x 4
        for i in xrange(0, 4):
            out.append(custom_bars[0][0])
        
        #   First beat of second bar x 4
        for i in xrange(0, 4):
            out.append(custom_bars[1][0])

        beatone = custom_bars[2][0]
        beattwo = custom_bars[3][0]
        beatthree = custom_bars[3][2]
        
        #   First beat of third bar x 8
        for x in xrange(0, 8):
            out.append(audio.AudioQuantum(beatone.start, beatone.duration/2, None, beatone.confidence, beatone.source))

        #   First beat of fourth bar x 8
        for x in xrange(0, 8):
            out.append(audio.AudioQuantum(beattwo.start, beattwo.duration/4, None, beattwo.confidence, beattwo.source))

        #   Third beat of fourth bar x 8
        for x in xrange(0, 8):
            out.append(audio.AudioQuantum(beatthree.start, beatthree.duration/4, None, beatthree.confidence, beatthree.source))
        
        if self.original.analysis.time_signature == 4:
            shifted = self.st.shiftTempo(audio.getpieces(self.original, out), self.template['tempo']/self.tempo)
        else:
            shifted1 = audio.getpieces(self.original, out)
            shifted = self.st.shiftTempo(shifted1, len(shifted1) / ((44100 * 16 * 2 * 60.0)/self.template['tempo']))
            shifted1.unload()
        if shifted.numChannels == 1:    
            shifted = self.mono_to_stereo(shifted)
        return self.truncatemix(intro, shifted, self.mixfactor(out))
Example #32
    def compileSection(self, j, section, hats):
        """
            Compiles one "section" of dubstep - that is, one section (verse/chorus) of the original song,
            but appropriately remixed as dubstep.

            Chooses appropriate samples from the section of the original song in three keys (P1, m3, m7)
            then plays them back in order in the generic "dubstep" pattern (all 8th notes):

            |                         |                         :|
            |: 1  1  1  1  1  1  1  1 | m3 m3 m3 m3 m7 m7 m7 m7 :| x2
            |                         |                         :|

            On the first iteration, the dubstep bar is mixed with a "splash" sound - high-passed percussion or whatnot.
            On the second iteration, hats are mixed in on the offbeats and the wubs break on the last beat to let the
            original song's samples shine through for a second, before dropping back down in the next section.

            If samples are missing of one pitch, the searchSamples algorithm tries to find samples
            a fifth from that pitch that will sound good. (If none exist, it keeps trying, in fifths up the scale.)
            
            If the song is not 4/4, the resulting remix is sped up or slowed down by the appropriate amount.
            (That can get really wonky, but sounds cool sometimes, and fixes a handful of edge cases.)
        """
        onebar = audio.AudioQuantumList()

        s1 = self.searchSamples(j, self.tonic)
        s2 = self.searchSamples(j, (self.tonic + 3) % 12)
        s3 = self.searchSamples(j, (self.tonic + 9) % 12)

        biggest = max([s1, s2, s3]) #for music that's barely tonal
        if not biggest:
            for i in xrange(0, 12):
                biggest = self.searchSamples(j, self.tonic + i)
                if biggest:
                    break

        if not biggest:
            raise Exception('Missing samples in section %s of the song!' % (j + 1))

        if not s1: s1 = biggest
        if not s2: s2 = biggest
        if not s3: s3 = biggest

        if self.template['target'] == "tatums":
            f = 4
            r = 2
        elif self.template['target'] == "beats":
            f = 2
            r = 2
        elif self.template['target'] == "bars":
            f = 1
            r = 1
        for k in xrange(0, r):
            for i in xrange(0, 4*f):
                onebar.append(s1[i % len(s1)])
            for i in xrange(4*f, 6*f):
                onebar.append( s2[i % len(s2)] )
            for i in xrange(6*f, 8*f):
                onebar.append( s3[i % len(s3)] )
        if self.original.analysis.time_signature == 4:
            orig_bar = self.st.shiftTempo(audio.getpieces(self.original, onebar), self.template['tempo']/self.tempo)
        else:
            orig_bar = audio.getpieces(self.original, onebar)
            orig_bar = self.st.shiftTempo(orig_bar, len(orig_bar) / ((44100 * 16 * 2 * 60.0)/self.template['tempo']))
        if orig_bar.numChannels == 1:
            orig_bar = self.mono_to_stereo(orig_bar)
        mixfactor = self.mixfactor(onebar)
        a = self.truncatemix(
                audio.mix(
                    audio.AudioData(
                        self.sample_path + self.template['wubs'][self.tonic], 
                        sampleRate=44100,
                        numChannels=2,
                        verbose=False
                    ),
                    audio.AudioData(
                        self.sample_path + self.template['splashes'][(j+1) % len(self.template['splashes'])],
                        sampleRate=44100,
                        numChannels=2,
                        verbose=False
                    )
                ),
            orig_bar,
            mixfactor
        )
        b = self.truncatemix(
                audio.mix(
                    audio.AudioData(
                        self.sample_path + self.template['wub_breaks'][self.tonic],
                        sampleRate=44100,
                        numChannels=2,
                        verbose=False
                    ),
                    hats
                ),
            orig_bar,
            mixfactor
        )
        return (a, b)
Example #33
    def compileIntro(self):
        """
            Compiles the dubstep introduction. Returns an AudioData of the first 8 bars.
            (8 bars at 140 bpm = ~13.71 seconds of audio)
            If song is not 4/4, tries to even things out by speeding it up by the appropriate amount.

            Pattern:
                first 4 bars of song
                first beat of 1st bar x 4   (quarter notes)
                first beat of 2nd bar x 4   (quarter notes)
                first beat of 3rd bar x 8   (eighth notes)
                first beat of 4th bar x 8   (sixteenth notes)
                third beat of 4th bar x 8   (sixteenth notes)
        """
        out = audio.AudioQuantumList()
        intro = audio.AudioData(self.sample_path + self.template['intro'],
                                sampleRate=44100,
                                numChannels=2,
                                verbose=False)

        #   First 4 bars of song
        custom_bars = []

        if not self.beats or len(self.beats) < 16:
            #   Song is not long or identifiable enough
            #   Take our best shot at making something
            self.tempo = 60.0 * 16.0 / self.original.duration
            for i in xrange(0, 4):
                bar = []
                for j in xrange(0, 4):
                    length = self.original.duration / 16.0
                    start = ((i * 4) + j) * length
                    bar.append(
                        audio.AudioQuantum(start, length, None, 0,
                                           self.original.source))
                custom_bars.append(bar)
        else:
            for i in xrange(0, 4):
                custom_bars.append(self.beats[i * 4:(i * 4) + 4])
        out.extend([x for bar in custom_bars for x in bar])

        #   First beat of first bar x 4
        for i in xrange(0, 4):
            out.append(custom_bars[0][0])

        #   First beat of second bar x 4
        for i in xrange(0, 4):
            out.append(custom_bars[1][0])

        beatone = custom_bars[2][0]
        beattwo = custom_bars[3][0]
        beatthree = custom_bars[3][2]

        #   First beat of third bar x 8
        for x in xrange(0, 8):
            out.append(
                audio.AudioQuantum(beatone.start, beatone.duration / 2, None,
                                   beatone.confidence, beatone.source))

        #   First beat of fourth bar x 8
        for x in xrange(0, 8):
            out.append(
                audio.AudioQuantum(beattwo.start, beattwo.duration / 4, None,
                                   beattwo.confidence, beattwo.source))

        #   Third beat of fourth bar x 8
        for x in xrange(0, 8):
            out.append(
                audio.AudioQuantum(beatthree.start, beatthree.duration / 4,
                                   None, beatthree.confidence,
                                   beatthree.source))

        if self.original.analysis.time_signature == 4:
            shifted = self.st.shiftTempo(audio.getpieces(self.original, out),
                                         self.template['tempo'] / self.tempo)
        else:
            shifted1 = audio.getpieces(self.original, out)
            shifted = self.st.shiftTempo(
                shifted1,
                len(shifted1) /
                ((44100 * 16 * 2 * 60.0) / self.template['tempo']))
            shifted1.unload()
        if shifted.numChannels == 1:
            shifted = self.mono_to_stereo(shifted)
        return self.truncatemix(intro, shifted, self.mixfactor(out))
Example #34
    def output(self, fn, prefix):
        out = audio.getpieces(self.audioFile, self.chunks.ordered_by(fn))
        out.encode(self.filename + "_" + prefix + ".wav")
Example #35
    def compileSection(self, j, section, hats):
        """
            Compiles one "section" of dubstep - that is, one section (verse/chorus) of the original song,
            but appropriately remixed as dubstep.

            Chooses appropriate samples from the section of the original song in three keys (P1, m3, m7)
            then plays them back in order in the generic "dubstep" pattern (all 8th notes):

            |                         |                         :|
            |: 1  1  1  1  1  1  1  1 | m3 m3 m3 m3 m7 m7 m7 m7 :| x2
            |                         |                         :|

            On the first iteration, the dubstep bar is mixed with a "splash" sound - high-passed percussion or whatnot.
            On the second iteration, hats are mixed in on the offbeats and the wubs break on the last beat to let the
            original song's samples shine through for a second, before dropping back down in the next section.

            If samples are missing of one pitch, the searchSamples algorithm tries to find samples
            a fifth from that pitch that will sound good. (If none exist, it keeps trying, in fifths up the scale.)
            
            If the song is not 4/4, the resulting remix is sped up or slowed down by the appropriate amount.
            (That can get really wonky, but sounds cool sometimes, and fixes a handful of edge cases.)
        """
        onebar = audio.AudioQuantumList()

        s1 = self.searchSamples(j, self.tonic)
        s2 = self.searchSamples(j, (self.tonic + 3) % 12)
        s3 = self.searchSamples(j, (self.tonic + 9) % 12)

        biggest = max([s1, s2, s3])  #for music that's barely tonal
        if not biggest:
            for i in xrange(0, 12):
                biggest = self.searchSamples(j, self.tonic + i)
                if biggest:
                    break

        if not biggest:
            raise Exception('Missing samples in section %s of the song!' % (j + 1))

        if not s1: s1 = biggest
        if not s2: s2 = biggest
        if not s3: s3 = biggest

        if self.template['target'] == "tatums":
            f = 4
            r = 2
        elif self.template['target'] == "beats":
            f = 2
            r = 2
        elif self.template['target'] == "bars":
            f = 1
            r = 1
        for k in xrange(0, r):
            for i in xrange(0, 4 * f):
                onebar.append(s1[i % len(s1)])
            for i in xrange(4 * f, 6 * f):
                onebar.append(s2[i % len(s2)])
            for i in xrange(6 * f, 8 * f):
                onebar.append(s3[i % len(s3)])
        if self.original.analysis.time_signature == 4:
            orig_bar = self.st.shiftTempo(
                audio.getpieces(self.original, onebar),
                self.template['tempo'] / self.tempo)
        else:
            orig_bar = audio.getpieces(self.original, onebar)
            orig_bar = self.st.shiftTempo(
                orig_bar,
                len(orig_bar) /
                ((44100 * 16 * 2 * 60.0) / self.template['tempo']))
        if orig_bar.numChannels == 1:
            orig_bar = self.mono_to_stereo(orig_bar)
        mixfactor = self.mixfactor(onebar)
        a = self.truncatemix(
            audio.mix(
                audio.AudioData(self.sample_path +
                                self.template['wubs'][self.tonic],
                                sampleRate=44100,
                                numChannels=2,
                                verbose=False),
                audio.AudioData(
                    self.sample_path +
                    self.template['splashes'][(j + 1) %
                                              len(self.template['splashes'])],
                    sampleRate=44100,
                    numChannels=2,
                    verbose=False)), orig_bar, mixfactor)
        b = self.truncatemix(
            audio.mix(
                audio.AudioData(self.sample_path +
                                self.template['wub_breaks'][self.tonic],
                                sampleRate=44100,
                                numChannels=2,
                                verbose=False), hats), orig_bar, mixfactor)
        return (a, b)
Example #36
def main(input_filename, output_filename, break_filename, break_parts,
         measures, mix):

    # This takes the input tracks, sends them to the analyzer, and returns the results.
    audiofile = audio.LocalAudioFile(input_filename)
    sample_rate = audiofile.sampleRate
    breakfile = audio.LocalAudioFile(break_filename)

    # This converts the break to stereo, if it is mono
    if breakfile.numChannels == 1:
        breakfile = mono_to_stereo(breakfile)

    # This gets the number of channels in the main file
    num_channels = audiofile.numChannels

    # This splits the break into each beat
    drum_data = split_break(breakfile, break_parts)
    hits_per_beat = int(break_parts / (4 * measures))
    # This gets the bars from the input track
    bars = audiofile.analysis.bars

    # This creates the 'shape' of new array.
    # (Shape is a tuple (x, y) that indicates the length per channel of the audio file)
    out_shape = (len(audiofile) + 100000, num_channels)
    # This creates a new AudioData array to write data to
    out = audio.AudioData(shape=out_shape,
                          sampleRate=sample_rate,
                          numChannels=num_channels)
    if not bars:
        # If the analysis can't find any bars, stop!
        # (This might happen with really ambient music)
        print "Didn't find any bars in this analysis!"
        print "No output."
        sys.exit(-1)

    # This is where the magic happens:
    # For every beat in every bar except the last bar,
    # map the tatums of the break to the tatums of the beat
    for bar in bars[:-1]:
        # This gets the beats in the bar, and loops over them
        beats = bar.children()
        for i in range(len(beats)):
            # This gets the index of matching beat in the break
            try:
                break_index = ((bar.local_context()[0] %\
                                measures) * 4) + (i % 4)
            except ValueError:
                break_index = i % 4
            # This gets the tatums from the beat of the break
            tats = range((break_index) * hits_per_beat,
                         (break_index + 1) * hits_per_beat)
            # This gets the number of samples in each tatum
            drum_samps = sum([len(drum_data[x]) for x in tats])

            # This gets the number of sample and the shape of the beat from the original track
            beat_samps = len(audiofile[beats[i]])
            beat_shape = (beat_samps, num_channels)

            # This get the shape of each tatum
            tat_shape = (float(beat_samps / hits_per_beat), num_channels)

            # This creates the new AudioData that will be filled with chunks of the drum break
            beat_data = audio.AudioData(shape=beat_shape,
                                        sampleRate=sample_rate,
                                        numChannels=num_channels)
            for j in tats:
                # This creates an audioData for each tatum
                tat_data = audio.AudioData(shape=tat_shape,
                                           sampleRate=sample_rate,
                                           numChannels=num_channels)
                # This corrects for length / timing:
                # If the original is shorter than the break, truncate drum hits to fit beat length
                if drum_samps > beat_samps / hits_per_beat:
                    tat_data.data = drum_data[j].data[:len(tat_data)]
                # If the original is longer, space out drum hits to fit beat length
                elif drum_samps < beat_samps / hits_per_beat:
                    tat_data.append(drum_data[j])

                # This adds each new tatum to the new beat.
                tat_data.endindex = len(tat_data)
                beat_data.append(tat_data)
                del (tat_data)

            # This corrects for rounding errors
            beat_data.endindex = len(beat_data)

            # This mixes the new beat data with the input data, and appends it to the final file
            mixed_beat = audio.mix(beat_data, audiofile[beats[i]], mix=mix)
            del (beat_data)
            out.append(mixed_beat)

    # This works out the last beat and appends it to the final file
    finale = bars[-1].start + bars[-1].duration
    last = audio.AudioQuantum(
        audiofile.analysis.bars[-1].start,
        audiofile.analysis.duration - audiofile.analysis.bars[-1].start)
    last_data = audio.getpieces(audiofile, [last])
    out.append(last_data)

    # This writes the newly created audio to the given file.
    out.encode(output_filename)
Example #37
def getpieces(video, segs):
    a = audio.getpieces(video.audio, segs)
    newv = EditableFrames(settings=video.video.settings)
    for s in segs:
        newv += video.video[s]
    return SynchronizedAV(audio=a, video=newv)
Example #38
def monoSignal(audioFile,chunk):
    audioChunk = audio.getpieces(audioFile,[chunk])
    arr = audioChunk.data
    if arr.ndim==2:
        return arr.mean(1)
    return arr
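A short hedged usage sketch for monoSignal: compute the RMS level of the first beat's mono signal. It assumes echonest.audio and numpy are available; the file name is a placeholder:

import numpy
import echonest.audio as audio

audio_file = audio.LocalAudioFile("song.mp3")  # placeholder file name
first_beat = audio_file.analysis.beats[0]
samples = monoSignal(audio_file, first_beat)
# RMS of the (possibly int-typed) sample array.
rms = numpy.sqrt(numpy.mean(samples.astype(float) ** 2))
print "RMS of first beat: %f" % rms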
Example #39
def main(input_filename):
    
    output_filename = input_filename.split('.')[0]
    
    output_filename = ''.join(c for c in output_filename if c.isalnum())
    
    if output_filename == "":
        output_filename = "Untitled"
    
    
    sounds = []
    segments= []
    bricks = []
    
    audiofile = audio.LocalAudioFile(input_filename)   
    bars = audiofile.analysis.bars
    segments = audiofile.analysis.segments

    counter = 0
        
    background = audio.AudioQuantumList()

    for bar in bars:
        
        # Check to see if we are loud enough to generate bricks
      
        start = bar.start
        duration = bar.duration
        
        relevant_segment = None
        
        for segment in segments:
            if segment.start > start:
                if segment.start > start + duration:
                    break # Last segment
                else:
                    relevant_segment = segment
                    break
            relevant_segment = segment

        if relevant_segment.loudness_max < -25:
            continue
        
        counter += 1
        if counter < 5:
            background.append(bar)
        
        
        collect = audio.AudioQuantumList()
        
        collect.append(bar)
        
        out = audio.getpieces(audiofile, collect)        
        this_filename = "%s%d.mp3" % (output_filename, counter)
        
        sounds.append((bar, relevant_segment, "%s" % this_filename))
        
        out.encode(this_filename)
        
        if counter > 200:
            break    
    
    bc = 0
    
    extended_background = audio.AudioQuantumList()

    while bc < 20:
        bc +=1
        for bar in background:
            extended_background.append(bar)
            
    
    background_out = audio.getpieces(audiofile, extended_background)
    background_out.encode("%s_background.mp3" % output_filename) 
    
    luacode_output(output_filename, sounds, audiofile, background_out)
Example #40
import sys, os


import echonest.audio as audio
import random


# Lib
def chunks(l, n):
    """ Yield successive n-sized chunks from l.
    """
    for i in xrange(0, len(l), n):
        yield l[i:i+n]

# /Lib

subject = audio.LocalAudioFile(sys.argv[1])

beats = subject.analysis.beats

slices = list(chunks(beats, 10))
new_slices = []

while slices:
    i = random.randrange(len(slices))
    for j in slices.pop(i):
        new_slices.append(j)


audio.getpieces(subject, new_slices).encode("%s-improved.mp3" % os.path.basename(sys.argv[1][:-4]))
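An equivalent, more compact way to express the same ten-beat-group shuffle, using random.shuffle; a sketch that assumes the subject, chunks(), and imports defined above:

slices = list(chunks(subject.analysis.beats, 10))
random.shuffle(slices)
shuffled = [beat for group in slices for beat in group]
audio.getpieces(subject, shuffled).encode("%s-shuffled.mp3" % os.path.basename(sys.argv[1][:-4]))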