Example #1
def main(input_one, input_two):
	track_one = audio.LocalAudioFile(input_one)
	track_two = audio.LocalAudioFile(input_two)
	section_one = track_one.analysis.sections[0]
	section_two = track_two.analysis.sections[-1]
	tempo_one = section_one.tempo
	tempo_two = section_two.tempo
	tempo_diff = tempo_two - tempo_one
	bars_one = section_one.children()
	collect = []
	for bar in bars_one:
		if bar == bars_one[-1]:
			numbeats = len(bar.children())
			step = tempo_diff/numbeats
			for i, beat in enumerate(bar.children()):
				beat_audio = beat.render()
				ratio = (tempo_one + step*(i+1))/(tempo_one + step*i)
				scaled_beat = dirac.timeScale(beat_audio.data, ratio)
				new_beat = audio.AudioData(ndarray=scaled_beat, shape=scaled_beat.shape, sampleRate=track_one.sampleRate, numChannels=scaled_beat.shape[1])
				collect.append(new_beat)
			break
		for beat in bar.children():
			collect.append(beat.render())
	out_data_one = audio.assemble(collect, numChannels=2)
	out_name_one = input_one.split('.')[0]+'-stretch.mp3'
	out_data_one.encode(out_name_one)
	play_one = audio.LocalAudioFile(out_name_one)
	aqp_one = Player(play_one)
	aqp_two = Player(track_two)
	beats_one = play_one.analysis.beats
	for beat in beats_one:
		aqp_one.play(beat)
	aqp_one.closeStream()
	aqp_two.play(section_two)
	aqp_two.closeStream()
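These excerpts assume the usual Echo Nest Remix boilerplate, which is stripped here. A minimal sketch of what Example #1 relies on (only echonest.remix.audio and dirac are known modules; the aqplayer Player import and the file names are assumptions):

# Assumed imports and entry point for Example #1 (a sketch, not part of the excerpt)
import echonest.remix.audio as audio   # LocalAudioFile, AudioData, assemble
import dirac                           # time-stretching via dirac.timeScale
from aqplayer import Player            # streaming playback helper; module name is an assumption

if __name__ == '__main__':
    # e.g. python crossfade_stretch.py one.mp3 two.mp3   (file names are placeholders)
    import sys
    main(sys.argv[1], sys.argv[2])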
Example #2
def mixSample(path1, path2):
	audio_file1 = audio.LocalAudioFile(path1)
	audio_file2 = audio.LocalAudioFile(path2)
	
	branches = twoSamples(audio_file1, audio_file2, threshold = 250)

	# get the beats of interest
	beats = audio_file1.analysis.beats  # assumed: the branch indices refer to beats of the first track
	branch, collect = [], []
	for each in branches: 
		branch.append(each)
		#branch.append(branches[each][0])

	# beats re-ordered for rendering
	for each in branch: 
		collect.append(beats[each])

	out = audio.getpieces(audio_file1, collect)

	name1 = path1.split('/')
	name2 = path2.split('/')

	output_file = "./mixed/" + name1[len(name1)-1].split('.')[0] + "_" + name2[len(name2)-1].split('.')[0]+ "_out.mp3"

	out.encode(output_file)

	return 
Example #3
 def run(self):
     for song_i_ in self.all_songs:
         laf_i_ = audio.LocalAudioFile(song_i_)
         for song_j_ in self.all_songs:
             laf_j_ = audio.LocalAudioFile(song_j_)
             edges_ij_ = get_edges(laf_i_, laf_j_)
             update_all_edges(self.edges, edges_ij_)
Example #4
def getSequence(f1, f2):
    song1 = audio.LocalAudioFile(f1)
    song2 = audio.LocalAudioFile(f2)
    sections1 = song1.analysis.sections
    sections2 = song2.analysis.sections
    # Per-section mean loudness (the sort below uses the same metric)
    A1 = [s.mean_loudness() for s in sections1]
    A2 = [s.mean_loudness() for s in sections2]
    # A1 = [featureAnalysis.loudnessMetric(s) for s in sections1]
    # A2 = [featureAnalysis.loudnessMetric(s) for s in sections2]
    # song1 sections loudest-first, song2 sections quietest-first
    sorted1 = sorted(sections1, key=lambda s: s.mean_loudness(), reverse=True)
    sorted2 = sorted(sections2, key=lambda s: s.mean_loudness())
    newList = []
    # Interleave the two lists: even indices from sorted1, odd indices from sorted2
    for i in xrange(len(sorted1) + len(sorted2) - 1):
        newList.append(sorted1[i / 2] if i % 2 == 0 else sorted2[i / 2])
    result = []
    # print len(sections1)
    counter = 0
    for s in newList:
        counter += 1
        if counter == 9:
            counter = 0
            time.sleep(0.5)
        s.encode("subsection.wav")
        t = audio.LocalAudioFile("subsection.wav")
        beats = t.analysis.beats
        result += [(t, beats)]
    return result
Example #5
def render_track(file1, file2, itrim=0.0, fadeout=5, remove=0):
    filename = file1
    track2 = audio.LocalAudioFile('static/instrumentals/'+file2)
    print('###########')
    print('acapella/'+file1)
    print('###########')
    track1 = audio.LocalAudioFile('acapella/'+file1)
    otrim = max(track1.analysis.duration, track2.analysis.duration) - min(track1.analysis.duration, track2.analysis.duration)
    together = combine_tracks(track1, track2, remove=remove)
    formatted = format_track(together, itrim=itrim, otrim=otrim, fadeout=fadeout)
    render(formatted, 'audition_audio/'+filename)
Example #6
def pitchCmp(file1, file2, threshold):
    song1 = audio.LocalAudioFile(file1)
    song2 = audio.LocalAudioFile(file2)
    chunks1 = song1.analysis.segments
    chunks2 = song2.analysis.segments
    closeVals = []
    for i in xrange(len(chunks1)):
        for j in xrange(len(chunks2)):
            # Compare the 12-dimensional pitch (chroma) vectors of the two segments
            vec1 = np.array(chunks1[i].pitches)
            vec1 /= (np.dot(vec1, vec1)**0.5)
            vec2 = np.array(chunks2[j].pitches)
            vec2 /= (np.dot(vec2, vec2)**0.5)
            result = np.dot(vec1, vec2)
            if result >= threshold: closeVals += [(i, j, result)]
    return closeVals
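The normalize-then-dot step above is just cosine similarity between the two segments' pitch vectors, so result falls in [0, 1] for the non-negative chroma values the analysis returns and threshold is a cutoff such as 0.9. A quick illustration with made-up chroma vectors:

import numpy as np

# Two hypothetical 12-bin chroma vectors (values invented for illustration)
a = np.array([0.9, 0.1, 0.0, 0.2, 0.8, 0.1, 0.0, 0.0, 0.3, 0.1, 0.0, 0.2])
b = np.array([0.8, 0.2, 0.1, 0.1, 0.9, 0.0, 0.1, 0.0, 0.2, 0.2, 0.1, 0.1])

a = a / (np.dot(a, a) ** 0.5)   # unit-normalize
b = b / (np.dot(b, b) ** 0.5)
print np.dot(a, b)              # close to 1.0 means the segments share a pitch profile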
Example #7
def main():
    audio_file1 = audio.LocalAudioFile("15 Sir Duke.m4a")
    branches = fb.getBranches(audio_file1)
    for i in range(10):
        beat1 = random.choice(branches.keys())
        beat2 = random.choice(branches[beat1])[0]
        make_song_snippet(beat1, beat2, audio_file1, audio_file1)
Example #8
def main(filename):
    song = audio.LocalAudioFile(filename)
    tempo = song.analysis.tempo
    bpm = "%.0f" % round(tempo['value'])
    confidence = "%0.1f" % (tempo['confidence'] * 100)
    print("With an accuracy of " + confidence + " % this song has " + bpm +
          " bpm.")
Example #9
def main(num_beats, directory, outfile):

    aud = []
    ff = os.listdir(directory)
    for f in ff:
        # collect the files
        if f.rsplit('.',
                    1)[1].lower() in ['mp3', 'aif', 'aiff', 'aifc', 'wav']:
            aud.append(audio.LocalAudioFile(os.path.join(directory, f)))
            # mind the rate limit

    num_files = len(aud)
    x = audio.AudioQuantumList()

    print >> sys.stderr, "Assembling beats.",
    for w in range(num_beats):
        print >> sys.stderr, '.',
        ssong = aud[w % num_files].analysis
        s = ssong.beats[w % len(ssong.beats)]
        tsong = aud[(w - 1) % num_files].analysis
        t = tsong.beats[w % len(tsong.beats)]

        x.append(audio.Simultaneous([s, t]))

    print >> sys.stderr, "\nStarting rendering pass..."

    then = time.time()
    # x.render() calls render_sequentially() with no arguments, which then calls
    #  itself with contextual arguments for each source, for each AudioQuantum.
    #  It's a lot of tree-walking, but each source file gets loaded only once
    #  (and takes itself out of memory when its rendering pass finishes).
    x.render().encode(outfile)

    print >> sys.stderr, "%f sec for rendering" % (time.time() - then, )
Example #10
def main(input_filename, output_filename):
    # Just a local alias to the soundtouch library, which handles the pitch shifting.
    soundtouch = modify.Modify()

    # This takes your input track, sends it to the analyzer, and returns the results.
    audiofile = audio.LocalAudioFile(input_filename)

    # This gets a list of every beat in the track.
    # You can manipulate this just like any other Python list!
    beats = audiofile.analysis.beats

    # This creates a new chunk of audio that is the same size and shape as the input file
    # We can do this because we know that our output will be the same size as our input
    out_shape = (len(audiofile.data), )
    out_data = audio.AudioData(shape=out_shape,
                               numChannels=1,
                               sampleRate=44100)

    # This loop pitch-shifts each beat and adds it to the new file!
    for i, beat in enumerate(beats):
        # Pitch shifting only works on the data from each beat, not the beat object itself
        data = audiofile[beat].data
        # The amount to pitch shift each beat.
        # local_context() returns a tuple with the position of a beat within its parent bar:
        # (0, 4) for the first beat of a four-beat bar, for example.
        number = beat.local_context()[0] % 12
        # Do the shift!
        new_beat = soundtouch.shiftPitchSemiTones(audiofile[beat], number * -1)
        out_data.append(new_beat)

    # Write the new file
    out_data.encode(output_filename)
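Since local_context()[0] is the beat's position within its bar, a four-beat bar is shifted down by 0, 1, 2 and 3 semitones in turn, and the % 12 keeps the shift within an octave for unusually long bars. A quick check of the mapping:

# Beat position within its bar -> downward pitch shift in semitones
for position in range(4):
    number = position % 12
    print position, "->", -number
# prints: 0 -> 0, 1 -> -1, 2 -> -2, 3 -> -3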
Example #11
def main(units, inputFile, outputFile):
    audiofile = audio.LocalAudioFile(inputFile)
    tonic = audiofile.analysis.key['value']

    chunks = audiofile.analysis.__getattribute__(units)

    # Get the segments
    all_segments = audiofile.analysis.segments

    # Find tonic segments
    tonic_segments = audio.AudioQuantumList(kind="segment")
    for segment in all_segments:
        pitches = segment.pitches
        if pitches.index(max(pitches)) == tonic:
            tonic_segments.append(segment)

    # Find each chunk that matches each segment
    out_chunks = audio.AudioQuantumList(kind=units)
    for chunk in chunks:
        for segment in tonic_segments:
            if chunk.start >= segment.start and segment.end >= chunk.start:
                out_chunks.append(chunk)
                break

    out = audio.getpieces(audiofile, out_chunks)
    out.encode(outputFile)
Example #12
def main(input_filename, output_filename, units, equal_silence):
    audio_file = audio.LocalAudioFile(input_filename)
    chunks = audio_file.analysis.__getattribute__(units)
    num_channels = audio_file.numChannels
    sample_rate = audio_file.sampleRate
    if equal_silence:
        new_shape = ((audio_file.data.shape[0] * 2) + 100000,
                     audio_file.data.shape[1])
    else:
        new_shape = (audio_file.data.shape[0] + (len(chunks) * 44100) + 10000,
                     audio_file.data.shape[1])
    out = audio.AudioData(shape=new_shape,
                          sampleRate=sample_rate,
                          numChannels=num_channels)

    for chunk in chunks:
        chunk_data = audio_file[chunk]
        if equal_silence:
            silence_shape = chunk_data.data.shape
        else:
            silence_shape = (44100, audio_file.data.shape[1])
        silence = audio.AudioData(shape=silence_shape,
                                  sampleRate=sample_rate,
                                  numChannels=num_channels)
        silence.endindex = silence.data.shape[0]

        out.append(chunk_data)
        out.append(silence)
    out.encode(output_filename)
Example #13
def main(units, key, input_filename, output_filename, reverse=False):
    audiofile = audio.LocalAudioFile(input_filename)
    chunks = audiofile.analysis.__getattribute__(units)

    # Define the sorting function
    if key == 'duration':
        def sorting_function(chunk):
            return chunk.duration

    if key == 'confidence':
        def sorting_function(chunk):
            if units != 'segments':
                return chunk.confidence
            else:
                # Segments have no confidence, so we grab confidence from the tatum
                return chunk.tatum.confidence

    if key == 'loudness':
        def sorting_function(chunk):
            return chunk.mean_loudness()

    sorted_chunks = sorted(chunks, key=sorting_function, reverse=reverse)

    out = audio.getpieces(audiofile, sorted_chunks)
    out.encode(output_filename)
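A minimal sketch of how this sorting function might be driven from the command line (the argument parsing below is an assumption, not part of the excerpt):

import sys
import echonest.remix.audio as audio

if __name__ == '__main__':
    # e.g. python sorting.py beats duration in.mp3 out.mp3 [reverse]
    units, key, input_filename, output_filename = sys.argv[1:5]
    main(units, key, input_filename, output_filename,
         reverse=('reverse' in sys.argv[5:]))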
Example #14
def insert_sfx(song_path):
    paths = ['bin' + os.sep + f for f in os.listdir('bin')]
    cowbell_paths = filter(lambda a: 'cowbell' in a, paths)
    cowbells = [
        audio.AudioData(path, sampleRate=44100, numChannels=2)
        for path in cowbell_paths
    ]
    # other_paths = filter(lambda a: 'cowbell' not in a, paths)
    # others = [audio.AudioData(path, sampleRate = 44100, numChannels = 2) for path in other_paths]
    song = audio.LocalAudioFile(song_path)
    beats = song.analysis.beats
    tatums = song.analysis.tatums
    bars = song.analysis.bars
    beats_raw = [b.start for b in beats]
    tatums_raw = [t.start for t in tatums]
    bars_raw = [b.start for b in bars]
    for tatum in tatums:
        if random.random() > 0.75:
            continue
        if is_in(tatum.start, bars_raw):
            song = mix(tatum.start, random.choice(cowbells), song)
        elif is_in(tatum.start, beats_raw) and random.random() < 0.75:
            song = mix(tatum.start, random.choice(cowbells), song)
        elif random.random() < 0.3:
            song = mix(tatum.start, random.choice(cowbells), song)
    return song
Example #15
def main(input_filename, output_filename):
    # This takes your input track, sends it to the analyzer, and returns the results.
    audiofile = audio.LocalAudioFile(input_filename)

    # Just a local alias to the soundtouch library, which handles the pitch shifting.
    soundtouch = modify.Modify()

    # This gets a list of every beat in the track.
    # You can manipulate this just like any other Python list!
    beats = audiofile.analysis.beats

    # The output array
    collect = []

    # This loop stretches each beat by a varying ratio, and then re-assembles them.
    for beat in beats:
        # Find out where in the bar the beat is, and calculate a ratio based on that.
        context = beat.local_context()
        ratio = (math.cos(math.pi * 2 * context[0] / float(context[1])) /
                 2) + 1
        # Stretch the beat!  SoundTouch returns an AudioData object
        new = soundtouch.shiftTempo(audiofile[beat], ratio)
        # Append the stretched beat to the list of beats
        collect.append(new)

    # Assemble and write the output data
    out = audio.assemble(collect)
    out.encode(output_filename)
Example #16
def main(input_filename, output_filename):
    track = audio.LocalAudioFile(input_filename)
    sections, segments = track.analysis.sections, track.analysis.segments
    fsegs, i = [], 0
    for section in sections:
        while segments[i].start <= section.start:
            i = i + 1
        fsegs.append(segments[i - 1])
    ssmp, ssmt, p1, p2, t1, t2 = [], [], [], [], [], []
    for s1 in fsegs:
        p1, t1 = s1.pitches, s1.timbre
        for s2 in fsegs:
            p2, t2 = s2.pitches, s2.timbre
            distp, distt = 0, 0
            for j in range(len(p1)):
                distp = distp + (p2[j] - p1[j])**2
            for k in range(len(t1)):
                distt = distt + (t2[k] - t1[k])**2
            ssmp.append(distp**0.5)
            ssmt.append(distt**0.5)
    ssmp = numpy.array(ssmp).reshape(len(fsegs), len(fsegs))
    ssmt = numpy.array(ssmt).reshape(len(fsegs), len(fsegs))
    plt.imshow(ssmp, 'gray')
    plt.title('Section SSM (Pitches)')
    plt.colorbar()
    plt.savefig(output_filename + '_pitch')
    plt.show()
    plt.imshow(ssmt, 'gray')
    plt.title('Section SSM (Timbre)')
    plt.colorbar()
    plt.savefig(output_filename + '_timbre')
    plt.show()
Example #17
def main(input_filename, output_filename, tatums, beats, bars):

    audiofile = audio.LocalAudioFile(input_filename)
    num_channels = audiofile.numChannels
    sample_rate = audiofile.sampleRate

    # mono files have a shape of (len,)
    out_shape = list(audiofile.data.shape)
    out_shape[0] = len(audiofile)
    out = audio.AudioData(shape=out_shape,
                          sampleRate=sample_rate,
                          numChannels=num_channels)

    # same shape hack: null_audio is a short, silent placeholder for slots with no blip
    null_shape = list(audiofile.data.shape)
    null_shape[0] = 2
    null_audio = audio.AudioData(shape=null_shape)
    null_audio.endindex = len(null_audio)

    low_blip = audio.AudioData(blip_filenames[0])
    med_blip = audio.AudioData(blip_filenames[1])
    high_blip = audio.AudioData(blip_filenames[2])

    all_tatums = audiofile.analysis.tatums
    all_beats = audiofile.analysis.beats
    all_bars = audiofile.analysis.bars

    if not all_tatums:
        print "Didn't find any tatums in this analysis!"
        print "No output."
        sys.exit(-1)

    print "going to add blips..."

    for tatum in all_tatums:
        mix_list = [audiofile[tatum], null_audio, null_audio, null_audio]
        if tatums:
            print "match! tatum start time:" + str(tatum.start)
            mix_list[1] = low_blip

        if beats:
            for beat in all_beats:
                if beat.start == tatum.start:
                    print "match! beat start time: " + str(beat.start)
                    mix_list[2] = med_blip
                    break

        if bars:
            for bar in all_bars:
                if bar.start == tatum.start:
                    print "match! bar start time: " + str(bar.start)
                    mix_list[3] = high_blip
                    break
        out_data = audio.megamix(mix_list)
        out.append(out_data)
        del (out_data)
    print "blips added, going to encode", output_filename, "..."
    out.encode(output_filename)
    print "Finito, Benito!"
Example #18
def main(input_filename, output_filename):
    audiofile = audio.LocalAudioFile(input_filename)
    bars = audiofile.analysis.bars
    collect = audio.AudioQuantumList()
    for bar in bars:
        collect.append(bar.children()[0])
    out = audio.getpieces(audiofile, collect)
    out.encode(output_filename)
Example #19
def robust_local_audio_file(audio_file_):
    try:
        laf_ = audio.LocalAudioFile(audio_file_)
        return laf_
    except EchoNestAPIError:
        print "Failed to retrieve analysis... wait to try again"
        sleep(10)
        return robust_local_audio_file(audio_file_)
Example #20
 def __init__(self, file_name):
     self.filename = file_name
     self.samples = None
     try:
         self.audio_file = audio.LocalAudioFile(self.filename)
     except EOFError:
         self.audio_file = None
         print "File does not exist"
         return
Example #21
 def __init__(self, input_file):
     self.audiofile = audio.LocalAudioFile(input_file)
     self.audiofile.data *= linear(self.audiofile.analysis.loudness, 
                                     -2, -12, 0.5, 1.5) * 0.75
     # Check that there are beats in the song
     if len(self.audiofile.analysis.beats) < 5: 
         print 'not enough beats in this song...'
         sys.exit(-2)
     self.duration = len(self.audiofile.data) / self.audiofile.sampleRate
Example #22
 def run(self):
     print "thread is running"
     while True:
         edge_files = [
             f for f in os.listdir(PLAYLIST_DIR)
             if re.search(r"" + self.curr_md5 +
                          ".*\.edges.pkl", f) is not None
         ]
         edge_files = edge_files[:50]
         for edge_file in edge_files:
             print "load edge_file:", edge_file
             new_md5 = None
             m = re.match(r"" + self.curr_md5 + "_([a-z0-9]{32})",
                          edge_file)
             if m is not None:
                 new_md5 = m.group(1)
             m = re.match(r"([a-z0-9]{32})_" + self.curr_md5, edge_file)
             if m is not None:
                 new_md5 = m.group(1)
             if new_md5 == self.curr_md5:
                 continue
             audio_file = PLAYLIST_DIR + os.sep + new_md5 + '.mp3'
             self.local_audio[new_md5] = audio.LocalAudioFile(audio_file)
             new_edges = get_edges(self.local_audio[self.curr_md5],
                                   self.local_audio[new_md5])
             update_all_edges(self.edges, new_edges)
             new_edges = get_edges(self.local_audio[new_md5],
                                   self.local_audio[new_md5])
             update_all_edges(self.edges, new_edges)
             new_edges = get_edges(self.local_audio[new_md5],
                                   self.local_audio[self.curr_md5])
             update_all_edges(self.edges, new_edges)
             self.start_secs[new_md5] = self.start_secs['total']
             self.start_secs['total'] += len(
                 self.local_audio[new_md5].analysis.sections)
             s = get_adjacency_matrix(self.edges, self.start_secs,
                                      THRESHOLD)
             fs = find(s)
             self.sim.set_data(fs[0], fs[1])
             self.sim.figure.gca().set_xlim([0, self.start_secs['total']])
             self.sim.figure.gca().set_ylim([self.start_secs['total'], 0])
             x = sorted(self.start_secs.values() * 2)[1:]
             y = sorted(self.start_secs.values() * 2)[:-1]
             self.boundaries[0].set_xdata(x)
             self.boundaries[0].set_ydata(y)
             self.boundaries[1].set_xdata(y)
             self.boundaries[1].set_ydata(x)
             print "************** REDRAW SELF-SIMILARITY ********************"
             self.sim.figure.canvas.draw()
             if self.ejecting() or self.stopping():
                 break
         self._ejecting.wait()
         if self.stopping():
             break
         self.update()
         self._ejecting.clear()
Example #23
def loudnessMetric(file1):
    song1 = audio.LocalAudioFile(file1)
    chunks1 = song1.analysis.segments
    gradient = []
    for i in xrange(len(chunks1) - 1):
        dif = chunks1[i + 1].mean_loudness() - chunks1[i].mean_loudness()
        time = chunks1[i].duration
        gradient += [dif / time]
    assert (len(gradient) == len(chunks1) - 1)
    return gradient
Example #24
def robust_local_audio_file(audio_file_):
    from time import sleep
    from pyechonest.util import EchoNestAPIError
    try:
        laf_ = audio.LocalAudioFile(audio_file_)
        return laf_
    except EchoNestAPIError:
        print "Failed to retrieve analysis... wait to try again"
        sleep(10)
        return robust_local_audio_file(audio_file_)
Example #25
    def __init__(self, mp3):
        self.mp3 = mp3
        self.audio_file = audio.LocalAudioFile(self.mp3)
        self.analysis = self.audio_file.analysis
        self.beats = self.analysis.beats
        self.beats.reverse()

        #print self.audio_file.analysis.id
        print audio
        audio.getpieces(self.audio_file, self.beats).encode("remix.mp3")
Example #26
def main():
    file1 = "Mp3Songs/15 Sir Duke.m4a"
    audiofile = audio.LocalAudioFile(file1)
    player = Player()
    beats = audiofile.analysis.beats
    for beat in beats:
        ratio = 1.25 - (
            (float(beat.absolute_context()[0]) / float(len(beats))) * .5)
        player.shift_tempo_and_play(beat, ratio)
    player.close_stream()
Example #27
def getClosestBar(filename):
    song0 = audio.LocalAudioFile("dhorse1.wav")
    song = audio.LocalAudioFile(filename)
    sections = song.analysis.sections
    bars = song.analysis.bars
    sectionStart = [q.start for q in sections][1:]
    barStart = [b.start for b in bars]
    VIPBars = []
    for start in sectionStart:
        for i in xrange(len(barStart) - 1):
            if barStart[i] < start and barStart[i + 1] >= start:
                VIPBars += [i]
    #need to split the audio file based on the bar partitiion now
    for barVal in VIPBars:
        if barVal == VIPBars[0]: continue
        smallBar = audio.getpieces(song, bars[barVal - 3:barVal + 3])
        smallBar.encode("smallBar.wav")
        smallBar = audio.LocalAudioFile("smallBar.wav")
        print smallBar.analysis.segments
        return featureAnalysis.main(smallBar, song0)
Example #28
def loadav(videofile, verbose=True):
    foo, audio_file = tempfile.mkstemp(".mp3")        
    cmd = "en-ffmpeg -y -i \"" + videofile + "\" " + audio_file
    if verbose:
        log.info(cmd)
    out = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
    res = out.communicate()
    ffmpeg_error_check(res[1])
    a = audio.LocalAudioFile(audio_file)
    v = sequencefrommov(videofile)
    return SynchronizedAV(audio=a, video=v)
Example #29
def main(filename):
    song = audio.LocalAudioFile(filename)

    bpm = extractBPM(song)
    pattern = extractRhythmicPattern(song)
    meter = extractMeter(song)
    rhythm = extractRhythm(song)

    print("This song has " + bpm + " bpm.")
    print("The rhythmic pattern is " + str(pattern))
    print("The time signature of this song is " + str(meter))
    print("The rhythm of this song is " + str(rhythm))
Example #30
def main(toReverse, inputFilename, outputFilename):
    audioFile = audio.LocalAudioFile(inputFilename)
    if toReverse == 'beats':
        chunks = audioFile.analysis.beats
    elif toReverse == 'segments':
        chunks = audioFile.analysis.segments
    else:
        print usage
        return
    chunks.reverse()
    reversedAudio = audio.getpieces(audioFile, chunks)
    reversedAudio.encode(outputFilename)
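Example #30 prints a usage string that is defined outside the excerpt; a sketch of the surrounding boilerplate it assumes (the usage text and argument handling are assumptions):

usage = """
Usage: python reverse.py <beats|segments> <inputFilename> <outputFilename>
"""

if __name__ == '__main__':
    import sys
    try:
        toReverse, inputFilename, outputFilename = sys.argv[1:4]
    except ValueError:
        print usage
        sys.exit(-1)
    main(toReverse, inputFilename, outputFilename)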