def find_chorus(path_to_song): track = audio.LocalAudioFile(path_to_song) track_segments = getattr(track.analysis, 'segments') tmp_segments = deepcopy(track_segments) chains = [] ends_of_chains = [] start = 0 other_start = 1 while tmp_segments: chain, start, tmp_segments = comb_segment(tmp_segments, start, other_start) other_start = start + 1 chains.append(chain) ends_of_chains.append(start) #print chains max_chain = max(chains) index = chains.index(max_chain) end_index = ends_of_chains[index] start_index = end_index - max_chain print max_chain, start_index, end_index render(track_segments[start_index:end_index+1], 'findchorus.mp3', True)
def find_chorus_delta(path_to_song): """Attempts to find the chorus (or a suitable position in the song to cut into/play from) for a given song.""" #loads data track = audio.LocalAudioFile(path_to_song) track_segments = getattr(track.analysis, 'segments') #a is where the track will split a = find_split(track_segments) #calculate loudness either side of the split to decide which is chorus split_index = a[1] left_loud, right_loud = 0, 0 for i in range(1,31): left_loud += track_segments[split_index-i].loudness_max right_loud += track_segments[split_index+i].loudness_max print left_loud, right_loud #if chorus lies on the left, find another split within that segment so #output does not start from beginning of song if left_loud > right_loud: print 'left' print split_index left_segments = track_segments[:split_index] b = find_split(left_segments) print b render(track_segments[b[1]:split_index], 'chorusof'+path_to_song, True) #if chorus lies on the right of split, simply play from split_index else: print 'right' render(track_segments[split_index:], 'chorusof'+path_to_song, True)
def main(): usage = "usage: %s [options] <one_single_mp3>" % sys.argv[0] parser = OptionParser(usage=usage) parser.add_option("-o", "--offset", default=0, help="offset where to start counting") parser.add_option("-l", "--low", default=100, help="low tempo") parser.add_option("-H", "--high", default=192, help="high tempo") parser.add_option("-r", "--rate", default=0, help="acceleration rate (try 30)") parser.add_option("-R", "--rubato", default=0, help="rubato on second beat (try 0.2)") parser.add_option("-t", "--tempo", default=0, help="target tempo (try 160)") parser.add_option("-v", "--verbose", action="store_true", help="show results on screen") (options, args) = parser.parse_args() if len(args) < 1: parser.print_help() return -1 verbose = options.verbose # get Echo Nest analysis for this file track = LocalAudioFile(args[0], verbose=verbose) if verbose: print "Waltzifying..." # this is where the work takes place actions = do_work(track, options) if verbose: display_actions(actions) # new name name = os.path.splitext(os.path.basename(args[0])) name = str(name[0] + '_waltz_%d' % int(options.offset) + '.mp3') if verbose: print "Rendering... %s" % name # send to renderer render(actions, name, verbose=verbose) if verbose: print "Success!" return 1
def main(): options, args = get_options(warn=True); actions = do_work(args, options) verbose = bool(options.verbose) if verbose: display_actions(actions) print "Output Duration = %.3f sec" % sum(act.duration for act in actions) print "Rendering..." # Send to renderer render(actions, 'capsule.mp3', verbose) return 1
def main(song_directory,title, artist,num_songs, output_file, effects=False): START = time.time() a = Playlist(song_directory, title, artist, int(num_songs)) a.sort_playlist() a.splice_songs() switch_durations = a.mix_songs() render(a.mix, output_file + '.mp3', verbose=False) if effects: add_effects(switch_durations, output_file) a.show() print '\nTook %f seconds to compile and render playlist\n' %round(time.time()-START, 1)
def main(): options, args = get_options(warn=True); actions = do_work(args, options) verbose = bool(options.verbose) if verbose: display_actions(actions) print "Output Duration = %.3f sec" % sum(act.duration for act in actions) print "Rendering..." # Send to renderer theuser = str(options.the_user) final_file = theuser + ".mp3" print final_file render(actions, final_file, verbose) return 1
def main(): usage = "usage: %s [options] <one_single_mp3>" % sys.argv[0] parser = OptionParser(usage=usage) parser.add_option("-d", "--duration", default=DEF_DUR, help="target duration (argument in seconds) default=600") parser.add_option("-m", "--minimum", default=MIN_JUMP, help="minimal loop size (in beats) default=8") parser.add_option("-i", "--infinite", action="store_true", help="generate an infinite loop (outputs a wav file)") parser.add_option("-l", "--length", action="store_true", help="length must be accurate") parser.add_option("-k", "--pickle", action="store_true", help="output graph as a pickle object") parser.add_option("-g", "--graph", action="store_true", help="output graph as a gml text file") parser.add_option("-p", "--plot", action="store_true", help="output graph as png image") parser.add_option("-f", "--force", action="store_true", help="force (re)computing the graph") parser.add_option("-S", "--shortest", action="store_true", help="output the shortest loop") parser.add_option("-L", "--longest", action="store_true", help="output the longest loop") parser.add_option("-v", "--verbose", action="store_true", help="show results on screen") (options, args) = parser.parse_args() if len(args) < 1: parser.print_help() return -1 verbose = options.verbose track = LocalAudioFile(args[0], verbose=verbose) # this is where the work takes place actions = do_work(track, options) if verbose: display_actions(actions) print "Output Duration = %.3f sec" % sum(act.duration for act in actions) # Send to renderer name = os.path.splitext(os.path.basename(args[0])) # Output wav for loops in order to remain sample accurate if bool(options.infinite) == True: name = name[0]+'_'+str(int(options.duration))+'_loop.wav' elif bool(options.shortest) == True: name = name[0]+'_'+str(int(sum(act.duration for act in actions)))+'_shortest.wav' elif bool(options.longest) == True: name = name[0]+'_'+str(int(sum(act.duration for act in actions)))+'_longest.wav' else: name = 
name[0]+'_'+str(int(options.duration))+'.mp3' if options.verbose: print "Rendering..." render(actions, name, verbose=verbose) return 1
def reconstruct(self, out, algorithm, verbose=False):
    """Rebuild the mashup from its labeled segments and render it.

    Loads the Echo Nest track data for the mashup and every source that
    is not already cached, computes the render actions from the labeling,
    and writes '<out>-<algorithm>-reconstructed.mp3'.
    """
    # Check that we have loaded the mashup's track from Echo Nest.
    # (Fixed: identity comparison with None instead of `== None`.)
    if self.mashup.track is None:
        self.mashup.load_track(verbose)
    # Map each source mp3 name to its loaded track for action lookup.
    source_dict = {}
    for s in self.sources:
        if s.track is None:
            s.load_track(verbose)
        source_dict[s.mp3_name] = s.track
    if verbose:
        # Fixed typo in the progress message ("Calculatiing").
        print("Calculating actions in reconstructed mashup...")
    actions = get_actions(self.labeled, source_dict, verbose)
    if verbose:
        print("Found actions: %s" % actions)
    filename = out + "-" + algorithm + "-reconstructed.mp3"
    if verbose:
        print("Rendering reconstructed mashup...")
    render(actions, filename)
def main(): usage = "usage: %s [options] <one_single_mp3>" % sys.argv[0] parser = OptionParser(usage=usage) parser.add_option("-o", "--offset", default=0, help="offset where to start counting") parser.add_option("-l", "--low", default=100, help="low tempo") parser.add_option("-H", "--high", default=192, help="high tempo") parser.add_option("-r", "--rate", default=0, help="acceleration rate (try 30)") parser.add_option("-R", "--rubato", default=0, help="rubato on second beat (try 0.2)") parser.add_option("-t", "--tempo", default=0, help="target tempo (try 160)") parser.add_option("-v", "--verbose", action="store_true", help="show results on screen") (options, args) = parser.parse_args() if len(args) < 1: parser.print_help() return -1 verbose = options.verbose # get Echo Nest analysis for this file track = LocalAudioFile(args[0], verbose=verbose) if verbose: print "Waltzifying..." # this is where the work takes place actions = do_work(track, options) if verbose: display_actions(actions) # new name name = os.path.splitext(os.path.basename(args[0])) name = str(name[0] + '_waltz_%d' % int(options.offset) +'.mp3') if verbose: print "Rendering... %s" % name # send to renderer render(actions, name, verbose=verbose) if verbose: print "Success!" return 1
def main(): usage = "usage: %s [options] <one_single_mp3>" % sys.argv[0] parser = OptionParser(usage=usage) parser.add_option("-s", "--swing", default=0.33, help="swing factor default=0.33") parser.add_option("-v", "--verbose", action="store_true", help="show results on screen") (options, args) = parser.parse_args() if len(args) < 1: parser.print_help() return -1 verbose = options.verbose track = None track = LocalAudioFile(args[0], verbose=verbose) if verbose: print "Computing swing . . ." # this is where the work takes place actions = do_work(track, options) if verbose: display_actions(actions) # Send to renderer name = os.path.splitext(os.path.basename(args[0])) sign = ('-', '+')[float(options.swing) >= 0] name = name[0] + '_swing' + sign + str(int( abs(float(options.swing)) * 100)) + '.mp3' name = name.replace(' ', '') name = os.path.join(os.getcwd(), name) # TODO: use sys.path[0] instead of getcwd()? if verbose: print "Rendering... %s" % name render(actions, name, verbose=verbose) if verbose: print "Success!" return 1
def crossmatch_sources(self, recompute=False, verbose=False):
    """For every pair of source songs, obtain a beat-crossmatched mp3.

    Reuses a precomputed '<a>-<b>-cross.mp3' file when one exists (in
    either name order) unless `recompute` is set, otherwise renders a new
    crossmatch. Each resulting file is appended to self.sources as a Song.

    NOTE(review): control flow relies on nested bare `except:` blocks —
    a failed `open()` OR a deliberate `raise Exception()` (when
    `recompute` is set) both fall through to the next strategy. Any other
    unexpected error in a `try` body is silently swallowed too.
    """
    # [(start, duration), ...] by timing rather than node index.
    def to_tuples(graph):
        return [(d['source'],d['duration']) for s,t,d in graph.edges_iter(data=True)]
    # For all combinations of source songs.
    for pair in combinations(self.sources, 2):
        # Candidate crossmatch filenames (both orderings) and beat lists.
        # NOTE(review): s1_s2 joins pair[0].mp3_path with pair[1].mp3_name
        # (path vs name) — looks asymmetric; confirm this is intended.
        s1_s2 = "-".join([pair[0].mp3_path,pair[1].mp3_name,"cross.mp3"])
        s2_s1 = "-".join([pair[1].mp3_path,pair[0].mp3_name,"cross.mp3"])
        s1_beats, s2_beats = to_tuples(pair[0].graph), to_tuples(pair[1].graph)
        # Truncate to the length of the shorter source.
        if len(s1_beats) > len(s2_beats):
            s1_beats = s1_beats[:len(s2_beats)]
        elif len(s2_beats) > len(s1_beats):
            s2_beats = s2_beats[:len(s1_beats)]
        # Check whether a crossmatch mp3 already exists (first ordering).
        try:
            f = open(s1_s2)
            f.close()
            if verbose: print("Found precomputed crossmatch %s" % s1_s2)
            # Force a fall-through to recomputation when requested.
            if recompute: raise Exception()
            self.sources.append(Song(s1_s2))
        except:
            # Try the reversed-name file next.
            try:
                f = open(s2_s1)
                f.close()
                if verbose: print("Found precomputed crossmatch %s" % s2_s1)
                if recompute: raise Exception()
                self.sources.append(Song(s2_s1))
            # RENDER a new crossmatch mp3.
            except:
                if verbose and not recompute: print("Found no precomputed crossmatches.")
                if verbose and recompute: print("Recomputing crossmatches...")
                # Lazily load the Echo Nest tracks for both songs.
                if pair[0].track == None: pair[0].load_track()
                if pair[1].track == None: pair[1].load_track()
                # Equalize track volumes before matching.
                # TODO beat match to mashup tempo
                pair[0].track, pair[1].track = equalize_tracks([pair[0].track,pair[1].track])
                if verbose: print("Rendering crossmatch %s..." % s1_s2)
                render([Crossmatch( (pair[0].track,pair[1].track), (s1_beats,s2_beats) )], s1_s2)
                self.sources.append(Song(s1_s2))
def main(): options, args = get_options(warn=True); actions = do_work(args, options) verbose = bool(options.verbose) stop = bool(options.stop) if verbose: display_actions(actions) print "Output Duration = %.3f sec" % sum(act.duration for act in actions) print "Rendering..." # Send to renderer dts = str(time.time()) if stop: render(actions, 'hmix_%s.mp3' % dts, verbose) else: render(actions, 'captemp.mp3', verbose) return 1
def main(): options, args = get_options(warn=True) actions = do_work(args, options) verbose = bool(options.verbose) stop = bool(options.stop) if verbose: display_actions(actions) print "Output Duration = %.3f sec" % sum(act.duration for act in actions) print "Rendering..." # Send to renderer dts = str(time.time()) if stop: render(actions, 'hmix_%s.mp3' % dts, verbose) else: render(actions, 'captemp.mp3', verbose) return 1
def main(): usage = "usage: %s [options] <one_single_mp3>" % sys.argv[0] parser = OptionParser(usage=usage) parser.add_option("-s", "--swing", default=0.33, help="swing factor default=0.33") parser.add_option("-v", "--verbose", action="store_true", help="show results on screen") (options, args) = parser.parse_args() if len(args) < 1: parser.print_help() return -1 verbose = options.verbose track = None track = LocalAudioFile(args[0], verbose=verbose) if verbose: print "Computing swing . . ." # this is where the work takes place actions = do_work(track, options) if verbose: display_actions(actions) # Send to renderer name = os.path.splitext(os.path.basename(args[0])) sign = ('-','+')[float(options.swing) >= 0] name = name[0] + '_swing' + sign + str(int(abs(float(options.swing))*100)) +'.mp3' name = name.replace(' ','') name = os.path.join(os.getcwd(), name) # TODO: use sys.path[0] instead of getcwd()? if verbose: print "Rendering... %s" % name render(actions, name, verbose=verbose) if verbose: print "Success!" return 1
def main():
    """Build a 10-song playlist seeded by a hard-coded base song, mix the
    songs together with crossfaded transitions at chosen jump points, and
    render the result to 'fullMix.mp3'.
    """
    # Initiate playlist with a base song and length.
    START = time.time()
    a = Playlist('song_test', "Love Story - Taylor Swift.mp3", 10)
    a.sort_playlist()
    # First song is the 'start', last is the 'end', everything else 'middle'.
    ordering = ['start'] + ['middle']*(len(a.playlist)-2) + ['end']
    # Create real_playlist: one entry per song of the form
    # [Tune, position, start_bar_index, end_bar_index] (indices filled below).
    rp = [0] * len(a.playlist)
    # Comes out to be like [[Tune, 'start'], [Tune, 'middle'], ..., [Tune, 'end']]
    for i in range(len(a.playlist)):
        rp[i] = [a.playlist[i], ordering[i], 0, 0]
    # Print playlist for inspection.
    print ''
    for i in rp:
        print i[0].songName, i[0].bpm, i[1]
    print ''
    # Start the processing.
    output_song = []
    # Add starting and ending indices of bars of each song in real_playlist.
    for i in rp:
        print "Mixing %s" %i[0].songName
        i[2], i[3] = i[0].choose_jump_point(position=i[1])
    # print rp: [[<tune.Tune instance at 0x7f848b1149e0>, 'start', 0, 66],
    # [<tune.Tune instance at 0x7f8489d7cfc8>, 'middle', 14, 64],
    # [<tune.Tune instance at 0x7f8489f35ea8>, 'middle', 37, 51],
    # [<tune.Tune instance at 0x7f8489b340e0>, 'end', 36, 146]]
    def make_transition(l1, l2):
        # l1, l2 are lists of order [self.tune, self.position, start_bar_index, end_bar_index]
        # Start cutting at the final_bar of the first song.
        final_bar = l1[0].bars[l1[3]-1: l1[3]+1]
        # Cut into the first two bars of the second song.
        first_bar = l2[0].bars[l2[2]: l2[2]+2]
        # Duration is length of the first two bars of the second song because
        # it has to line up with the [i+2: end] bars.
        duration = sum([i.duration for i in first_bar])
        # Return iterable cross faded object.
        return make_crossfade(l1[0].tune, l2[0].tune, final_bar[0].start, first_bar[0].start, duration)
    # Stitch each song's bar span onto the output, followed by a crossfade
    # into the next song.
    for i in range(len(rp)):
        if rp[i][1] == 'start':
            output_song += rp[i][0].bars[: rp[i][3]-1]
        if rp[i][1] == 'middle':
            output_song += rp[i][0].bars[rp[i][2]+2 : rp[i][3]-1]
        if rp[i][1] == 'end':
            output_song += rp[i][0].bars[rp[i][2]+2 :]
        try:
            output_song += make_transition(rp[i], rp[i+1])
        except IndexError:
            # rp[i+1] does not exist for the last song — no transition.
            # NOTE(review): this also swallows any IndexError raised inside
            # make_transition itself (e.g. bad bar indices) — confirm intended.
            pass
    render(output_song, 'fullMix.mp3', True)
    print '\nTook %f seconds to compile and render playlist' %round(time.time()-START, 1)
def save(self, path):
    """Write the shifted audio data to `path` via the renderer."""
    shifted = self.shifted_audio_datas
    render(shifted, path)