def get_loops(fileobj, output_temp, inter=8.0, trans=2.0):
    """Loop a single audio file into itself three times, render the result,
    and re-analyze the rendered file.

    Args:
        fileobj: file-like object exposing ``.name``, the source audio path.
        output_temp: temp-file object whose ``.name`` receives the rendered mp3.
        inter: seconds of intermediate playback between transitions.
        trans: seconds per crossmatch transition.

    Returns:
        A ``(output_temp.name, analysis)`` tuple, or ``[]`` when the track is
        too short for the requested transition lengths.
    """
    track = LocalAudioFile(fileobj.name)
    tracks = [track, track, track]  # 3 of em!
    valid = []
    # compute resampled and normalized matrices
    for track in tracks:
        track.resampled = resample_features(track, rate='beats')
        track.resampled['matrix'] = timbre_whiten(track.resampled['matrix'])
        # remove tracks that are too small
        if is_valid(track, inter, trans):
            valid.append(track)
        # for compatibility, we make mono tracks stereo
        track = make_stereo(track)
    tracks = valid
    if len(tracks) < 1:
        return []
    # Initial transition. Should contain 2 instructions: fadein, and playback.
    start = initialize(tracks[0], inter, trans)
    # Middle transitions. Should each contain 2 instructions: crossmatch, playback.
    # (plain loop: extend is a side effect, not a value we want to collect)
    middle = []
    for (t1, t2) in tuples(tracks):
        middle.extend(make_transition(t1, t2, inter, trans))
    # Last chunk. Should contain 1 instruction: fadeout.
    end = terminate(tracks[-1], FADE_OUT)
    actions = start + middle + end
    render(actions, output_temp.name, False)
    # Do it again: fetch a fresh analysis of the rendered output.
    new_one = audio.LocalAudioFile(output_temp.name)
    # close the HTTP handle explicitly instead of leaking it
    handle = urllib.urlopen(new_one.analysis.pyechonest_track.analysis_url)
    try:
        analysis = json.loads(handle.read())
    finally:
        handle.close()
    return (output_temp.name, analysis)
def do_work(audio_files, options): inter = float(options.inter) trans = float(options.transition) order = bool(options.order) equal = bool(options.equalize) verbose = bool(options.verbose) # Get pyechonest/remix objects analyze = lambda x: LocalAudioFile( x, verbose=verbose, sampleRate=44100, numChannels=2) tracks = map(analyze, audio_files) # decide on an initial order for those tracks if order == True: if verbose: print "Ordering tracks..." tracks = order_tracks(tracks) if equal == True: equalize_tracks(tracks) if verbose: print for track in tracks: print "Vol = %.0f%%\t%s" % ( track.gain * 100.0, track.analysis.pyechonest_track.title) print valid = [] # compute resampled and normalized matrices for track in tracks: if verbose: print "Resampling features for", track.analysis.pyechonest_track.title track.resampled = resample_features(track, rate='beats') track.resampled['matrix'] = timbre_whiten(track.resampled['matrix']) # remove tracks that are too small if is_valid(track, inter, trans): valid.append(track) # for compatibility, we make mono tracks stereo track = make_stereo(track) tracks = valid if len(tracks) < 1: return [] # Initial transition. Should contain 2 instructions: fadein, and playback. if verbose: print "Computing transitions..." start = initialize(tracks[0], inter, trans) # Middle transitions. Should each contain 2 instructions: crossmatch, playback. middle = [] [ middle.extend(make_transition(t1, t2, inter, trans)) for (t1, t2) in tuples(tracks) ] # Last chunk. Should contain 1 instruction: fadeout. end = terminate(tracks[-1], FADE_OUT) return start + middle + end
def do_work(audio_files, options): inter = float(options.inter) trans = float(options.transition) order = bool(options.order) equal = bool(options.equalize) verbose = bool(options.verbose) # Get pyechonest/remix objects analyze = lambda x : LocalAudioFile(x, verbose=verbose, sampleRate = 44100, numChannels = 2) tracks = map(analyze, audio_files) # decide on an initial order for those tracks if order == True: if verbose: print "Ordering tracks..." tracks = order_tracks(tracks) if equal == True: equalize_tracks(tracks) if verbose: print for track in tracks: print "Vol = %.0f%%\t%s" % (track.gain*100.0, track.analysis.pyechonest_track.title) print valid = [] # compute resampled and normalized matrices for track in tracks: if verbose: print "Resampling features for", track.analysis.pyechonest_track.title track.resampled = resample_features(track, rate='beats') track.resampled['matrix'] = timbre_whiten(track.resampled['matrix']) # remove tracks that are too small if is_valid(track, inter, trans): valid.append(track) # for compatibility, we make mono tracks stereo track = make_stereo(track) tracks = valid if len(tracks) < 1: return [] # Initial transition. Should contain 2 instructions: fadein, and playback. if verbose: print "Computing transitions..." start = initialize(tracks[0], inter, trans) # Middle transitions. Should each contain 2 instructions: crossmatch, playback. middle = [] [middle.extend(make_transition(t1, t2, inter, trans)) for (t1, t2) in tuples(tracks)] # Last chunk. Should contain 1 instruction: fadeout. end = terminate(tracks[-1], FADE_OUT) return start + middle + end
def process(self, track):
    """Prepare one track for mixing.

    Ensures the analysis carries a title, resamples and whitens its timbre
    features, rejects tracks shorter than the configured transition time,
    and computes the playback gain from the track's loudness.

    Returns the same ``track`` object, mutated in place.

    Raises:
        ValueError: if the track is too short for the transition time.
    """
    pet = track.analysis.pyechonest_track
    # Some analyses come back without a title; fall back to local metadata.
    if not hasattr(pet, "title"):
        pet.title = track._metadata.title
    log.info("Resampling features...", uid=track._metadata.id)
    resampled = resample_features(track, rate='beats')
    resampled['matrix'] = timbre_whiten(resampled['matrix'])
    track.resampled = resampled
    if not is_valid(track, self.transition_time):
        raise ValueError("Track too short!")
    track.gain = self.__db_2_volume(track.analysis.loudness)
    log.info("Done processing.", uid=track._metadata.id)
    return track
def process(self, track):
    """Prepare one track for mixing, tolerating beat-less analyses.

    Ensures the analysis carries a title, resamples and whitens the timbre
    features when beats are available (otherwise stores an empty matrix),
    and computes the playback gain from the track's loudness.

    Returns the same ``track`` object, mutated in place.
    """
    pet = track.analysis.pyechonest_track
    # Some analyses come back without a title; fall back to local metadata.
    if not hasattr(pet, "title"):
        pet.title = track._metadata.track_details['title']
    log.info("Resampling features [%r]...", track._metadata.id)
    if len(track.analysis.beats):
        resampled = resample_features(track, rate='beats')
        resampled['matrix'] = timbre_whiten(resampled['matrix'])
        track.resampled = resampled
    else:
        # No beat grid: keep an empty placeholder so downstream code
        # still finds the expected keys.
        log.info("no beats returned for this track.")
        track.resampled = {"rate": 'beats', "matrix": []}
    track.gain = self.__db_2_volume(track.analysis.loudness)
    log.info("Done processing [%r].", track._metadata.id)
    return track
def get_loops(fileobj, output_temp, inter=8.0, trans=2.0):
    """Crossfade one audio file into itself three times and render it.

    Analyzes ``fileobj``, builds fadein/crossmatch/fadeout actions over
    three repetitions of the same track, renders them to ``output_temp``,
    then re-analyzes the rendered file.

    Returns ``(output_temp.name, analysis)``, or ``[]`` when the track is
    too short for the requested transition lengths.
    """
    source = LocalAudioFile(fileobj.name)
    tracks = [source, source, source]  # 3 of em!
    usable = []
    # Resample and whiten features; drop tracks that are too short and
    # widen mono tracks to stereo for compatibility.
    for track in tracks:
        track.resampled = resample_features(track, rate='beats')
        track.resampled['matrix'] = timbre_whiten(track.resampled['matrix'])
        if is_valid(track, inter, trans):
            usable.append(track)
        track = make_stereo(track)
    tracks = usable
    if len(tracks) < 1:
        return []
    # Opening chunk: fadein + playback.
    start = initialize(tracks[0], inter, trans)
    # Middle chunks: one crossmatch + playback per adjacent pair.
    middle = []
    for left, right in tuples(tracks):
        middle.extend(make_transition(left, right, inter, trans))
    # Closing chunk: fadeout.
    end = terminate(tracks[-1], FADE_OUT)
    render(start + middle + end, output_temp.name, False)
    # Do it again: fetch a fresh analysis of the rendered output.
    new_one = audio.LocalAudioFile(output_temp.name)
    analysis = json.loads(urllib.urlopen(new_one.analysis.pyechonest_track.analysis_url).read())
    return (output_temp.name, analysis)