def track_attributes(self, item):
    spotify_uri = item['track']['uri']
    spotify_track_name = item['track']['name']
    spotify_album_name = item['track']['album']['name']
    spotify_album_cover_url = item['track']['album']['images'][0]['url']
    try:
        t = track.track_from_id(spotify_uri)
        res = {
            'name': spotify_track_name,
            'spotify_uri': spotify_uri,
            'spotify_album_cover_url': spotify_album_cover_url,
            'spotify_album_name': spotify_album_name,
            'echonest_id': t.id,
            'speechiness': t.speechiness,
            'key': t.key,
            'energy': t.energy,
            'liveness': t.liveness,
            'tempo': t.tempo,
            'acousticness': t.acousticness,
            'instrumentalness': t.instrumentalness,
            'mode': t.mode,
            'time_signature': t.time_signature,
            'duration': t.duration,
            'loudness': t.loudness,
            'valence': t.valence,
            'danceability': t.danceability
        }
        return res
    except:
        print("Error occurred finding track '" + spotify_uri + "' using Echonest.")
def dump_loudness(id):
    t = track.track_from_id(id)
    print "# ID ", id
    print "#", t.title, 'by', t.artist
    print "#"
    weighted = []
    half_track = t.duration / 2
    first_half = 0
    second_half = 0
    for seg in t.segments:
        loudness = seg['loudness_max']
        weighted.append(loudness)
        if len(weighted) > WEIGHT:
            weighted.pop(0)
        avg = sum(weighted) / len(weighted)
        seg_loudness = seg['loudness_max'] * seg['duration']
        if seg['start'] < half_track:
            first_half += seg_loudness
        else:
            second_half += seg_loudness
        ramp_factor = second_half / half_track - first_half / half_track
        #print seg['start'], loudness, avg, first_half, second_half, ramp_factor
        print "%8.6f %9.4f %9.4f %12.6f %12.6f %12.6f" % (seg['start'], loudness, avg, first_half, second_half, ramp_factor)
    print "#"
    print "#", 'ramp factor', ramp_factor
def plot(self):
    """Plot song data"""
    songs = echosong.search(title=self.title, artist=self.artist, results=1)
    if songs:
        # Search for the track through all catalogues
        for catalog in CATALOGS:
            # Fails when the API Key can't access a catalogue
            try:
                tracks = songs[0].get_tracks(catalog)
            except EchoNestAPIError:
                continue
            # Get track or go to next catalogue
            if not tracks:
                continue
            track = echotrack.track_from_id(tracks[0]['id'])
            # Adjust start and end
            self.start = self.start < track.duration and self.start or 0
            self.end = self.end < track.duration and self.end or track.duration
            # Get full acoustic analysis
            track.get_analysis()
            # Loudness (from segments)
            x = np.array([segment.get('start') for segment in track.segments
                          if self._inrange(segment)])
            y = np.array([segment.get('loudness_max') for segment in track.segments
                          if self._inrange(segment)])
            plt.plot(x, y)
            plt.xlabel('Duration')
            plt.ylabel('Loudness')
            # Use sections as a grid
            [plt.axvline(section.get('start') + section.get('duration'),
                         color='k', linestyle='--') for section in track.sections]
            # Add caption and show plot
            plt.suptitle('%s - %s' % (self.artist, self.title))
            plt.show()
            return
def getAnalysis(artist_name, track_name):
    """ pulls audio features that are stored, dumps in json format """
    track_id, terms = getTrackID(artist_name, track_name)
    # if the track is not found
    if track_id == 0:
        with open("./output/failed.txt", "a") as g:
            g.write(str(artist_name) + ", " + str(track_name) + " does not exist\n")
        return
    # pull audio analysis
    tmp = track.track_from_id(track_id)
    if not tmp:
        return
    tmp.get_analysis()
    data = tmp.__dict__  # assumption: dump the track's stored attributes as its feature set
    filename = "./output/json/" + track_name + ".json"
    filename = filename.replace(" ", "")
    with open(filename, 'w') as f:
        json.dump(
            {
                'artist': artist_name,
                'track': track_name,
                'terms': terms,
                'features': data
            }, f)
def analyzeTracks(songToTrack):
    """
    Creates a list of songs for training. Limit allows us to get a smaller
    dataset for testing purposes. Current dataset is based on
    [Artist, Tempo, Danceability, Energy, Speech, Acoustic]
    """
    data = []
    for song, trackInfo in songToTrack.items():
        trackId, artist, songName = trackInfo
        while True:
            try:
                t = echotrack.track_from_id(trackId)
                t.get_analysis()
                tempo = t.tempo
                dance = t.danceability
                energy = t.energy
                speech = t.speechiness
                acoustic = t.acousticness
                data.append([song, artist, tempo, dance, energy, speech, acoustic])
            except echoutil.EchoNestAPIError:
                # We exceeded our access limit
                print "too many accesses per minute - retry in a minute"
                time.sleep(60)
                continue
            except IndexError:
                # The song wasn't found on echo nest
                print "index error - skip"
                break
            except echoutil.EchoNestIOError:
                # Unknown error from echo nest
                print "unknown error - retry"
                continue
            break  # retry request
    return data
def get_best_audio_track(song_id, catalog, pysong=None):
    """
    Get the best (audio) track for a given song in the given catalog.
    Returns a dict with {'track': ..., 'duration': ..., etc.}
    """
    if not pysong:
        pysong = song.Song(song_id)
    tracks = pysong.get_tracks(catalog)
    best_track = None
    for t in tracks:
        pytrack = track.track_from_id(t['id'])
        if pytrack.__dict__.get('tempo'):
            # track has audio features
            best_track = pytrack
            foreign_id = t['foreign_id']
            break
    if best_track:
        result = {'track': best_track.id,
                  'title': best_track.title,
                  'duration': best_track.duration,
                  'tempo': pysong.audio_summary['tempo'],
                  'energy': pysong.audio_summary['energy'],
                  'danceability': pysong.audio_summary['danceability'],
                  'loudness': best_track.loudness,
                  'foreign_id': foreign_id.split(':')[2],
                  'hotttnesss': pysong.song_hotttnesss,
                  'familiarity': pysong.artist_familiarity,
                  'song': pysong.id}
        return result
    else:
        return None
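A short usage sketch for the get_best_audio_track() helper above; the song ID is a made-up placeholder and the '7digital' catalog name is borrowed from the other examples, and the printed keys are the ones the function returns.

# Hypothetical usage of get_best_audio_track(); 'SOXXXXX1234567890A' is a placeholder song ID.
info = get_best_audio_track('SOXXXXX1234567890A', '7digital')
if info:
    print "%s (%.0fs) tempo=%.1f energy=%.2f loudness=%.1f" % (
        info['title'], info['duration'], info['tempo'], info['energy'], info['loudness'])
else:
    print "no track with audio features found"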
def create_track_file_from_trackid(maindir, trackid, song, artist, mbconnect=None):
    """
    Get a track from a track id and calls for its creation.
    We assume we already have song and artist.
    We can have a connection to musicbrainz as an option.
    This function should create only one file!
    GOAL: mostly, it checks if we have the track already created before
          calling EchoNest API. It saves some calls/time.
          Also, handles some errors.
    INPUT
        maindir    - MillionSongDataset root directory
        trackid    - Echo Nest track ID (string: TRABC.....)
        song       - pyechonest song object for that track
        artist     - pyechonest artist object for that song/track
        mbconnect  - open musicbrainz pg connection
    RETURN
        true if a song file is created, false otherwise
    """
    # CLOSED CREATION?
    if CREATION_CLOSED:
        return False
    # do we already have this track in the dataset?
    track_path = os.path.join(maindir, path_from_trackid(trackid))
    if os.path.exists(track_path):
        return False
    # get that track!
    try_cnt = 0
    while True:
        try:
            try_cnt += 1
            track = trackEN.track_from_id(trackid)
            break
        except KeyboardInterrupt:
            close_creation()
            raise
        except urllib.error.HTTPError as e:
            print(type(e), ':', e)
            print('we dont retry for that error, trackid=', trackid,
                  '(pid=' + str(os.getpid()) + ')')
            return False
        except Exception as e:
            print(type(e), ':', e)
            print('at time', time.ctime(), 'in create_track_file_from_trackid, tid=', trackid,
                  '(we wait', SLEEPTIME, 'seconds) (pid=' + str(os.getpid()) + ')')
            if try_cnt < 50:
                time.sleep(SLEEPTIME)
                continue
            else:
                print('we give up after', try_cnt, 'tries.')
                return False
    # we have everything, launch create track file
    res = create_track_file(maindir, trackid, track, song, artist, mbconnect=mbconnect)
    return res
def analyze(self, item):
    log.info("analyze")
    # md5 = '96fa0180d225f14e9f8cbfffbf5eb81d'
    t = None
    if item.echonest_id:
        try:
            log.debug("query by echonest id: %s" % item.echonest_id)
            t = track.track_from_id(item.echonest_id)
        except EchoNestAPIError, e:
            print e
def track_attributes(self, spotify_uri):
    try:
        t = track.track_from_id(spotify_uri)
        res = {
            'danceability': t.danceability,
            'loudness': t.loudness,
            'energy': t.energy,
            'speechiness': t.speechiness,
            'liveness': t.liveness,
            'acousticness': t.acousticness,
            'instrumentalness': t.instrumentalness
        }
        return res
    except:
        return {'error': "Error occurred finding track '" + spotify_uri + "'."}
def __init__(self, name, track_ids=None):
    self.name = name
    # Add it to the dictionary containing all the keystreams
    total[name] = self
    # Get some random tracks from the specified catalog UNLESS track_ids is specified
    if track_ids:
        # FIXME make sure list contains no empty strings in the first place
        self.tracks = [track.track_from_id(track_id) for track_id in track_ids
                       if track_id != ""]
    else:
        self.tracks = echonest.get_catalog_tracks()
    tracks_data = [echonest.get_track_data(each_track) for each_track in self.tracks]
    # This should be fixed - don't save the raw track data, just save the track names etc
    self.bitstream = "0".join(["0".join(x) for x in utils.do_something_weird(tracks_data)])
    self.encrypt_pointer = 0
    self.decrypt_pointer = 0
    repickle()
def getSegments():
    # import track
    t = track.track_from_id('TRJQYJG14AD4E8352A')
    # get the track analysis
    t.get_analysis()
    # prints the metadata
    t.meta
    # access to the tatums list
    #print t.tatums
    segs = t.segments
    for seg in segs:
        print seg['timbre']
def get_catalog_tracks(variety=0.8):
    print "getting catalog tracks"
    p = playlist.static(type='catalog-radio', seed_catalog=settings.seed_catalog,
                        variety=variety, results=100)  # lol
    print "done getting catalog tracks"
    tracks = []
    while len(tracks) < settings.songs_per_keystream:
        try:
            #s = p.get_next_song()
            s = p.pop()
            t = track.track_from_id(s.get_tracks('7digital')[0]['id'])
            tracks.append(t)
            print t
        except IndexError:
            pass
        if len(p) == 0:
            p = playlist.static(type='catalog-radio', seed_catalog=settings.seed_catalog,
                                variety=variety, results=100)
    return tracks
def analyzeTracksNoArtist(songToTrack):
    """
    Creates a list of songs for training. Limit allows us to get a smaller
    dataset for testing purposes. Current dataset is based on
    [Tempo, Danceability, Energy]
    """
    data = []
    for song, trackIds in songToTrack.items():
        if len(trackIds) == 0:
            continue
        trackId = trackIds[0]
        while True:
            try:
                t = echotrack.track_from_id(trackId)
                t.get_analysis()
                tempo = t.tempo
                dance = t.danceability
                energy = t.energy
                speech = t.speechiness
                acoustic = t.acousticness
                data.append([song, tempo, dance, energy, speech, acoustic])
            except echoutil.EchoNestAPIError:
                # We exceeded our access limit
                print "too many accesses per minute - retry in a minute"
                time.sleep(60)
                continue
            except IndexError:
                # The song wasn't found on echo nest
                print "index error - skip"
                break
            except echoutil.EchoNestIOError:
                # Unknown error from echo nest
                print "unknown error - retry"
                continue
            break  # retry request
        # Write to file every 1000 analyzes
        if len(data) % 1000 == 0:
            with open("data/analyzed_data.csv", "wb") as f:
                writer = csv.writer(f)
                writer.writerows(data)
    return data
def get_playlist_track_analysis(playlist_tracks):
    """
    Description: Analyzes the audio features of a playlist's tracks through
                 Echonest, given a list of Spotify track IDs.
    Return: A list of track features for songs in the specified playlist
    """
    set_api_key()
    analysis = []
    for i in range(len(playlist_tracks)):
        if i % 10 == 0:
            print 'On %dth track' % i
        print playlist_tracks[i]
        try:
            next_track = track.track_from_id(playlist_tracks[i])
            print next_track
            print '---'
        except util.EchoNestAPIError:
            print 'Error extracting track profile: %s' % playlist_tracks[i]
            continue  # skip this track rather than reuse a stale/undefined next_track
        try:
            if not contains_all_attributes(next_track):
                # Try to get analysis, if missing attributes
                next_track = next_track.get_analysis()
            analysis.append(next_track)
        except Exception:
            print 'Error processing track: %s' % next_track
            pass
        time.sleep(3)  # limited to 20 access/s
        if i % 20 == 0 and i > 0:
            print "Completed %d tracks" % i
    return analysis
def get_track_features(start, stop):
    for num, track_info in enumerate(track_list[start:stop]):
        if num % 20 == 0:
            time.sleep(15)
        trk_id = track_info[0]
        year, song_id, artist, track_name = track_info[1]
        info_dict = dict()
        info_dict['year'] = year
        info_dict['artist'] = artist
        info_dict['track_name'] = track_name
        info_dict['track_id'] = trk_id
        info_dict['song_id'] = song_id
        tester = 0
        try:
            trk = track.track_from_id(trk_id)
            info_dict['track_acousticness'] = trk.acousticness
            info_dict['track_danceability'] = trk.danceability
            info_dict['track_duration'] = trk.duration
            info_dict['track_energy'] = trk.energy
            info_dict['track_instrumentalness'] = trk.instrumentalness
            info_dict['track_key'] = trk.key
            info_dict['track_liveness'] = trk.liveness
            info_dict['track_loudness'] = trk.loudness
            info_dict['track_mode'] = trk.mode
            info_dict['track_speechiness'] = trk.speechiness
            info_dict['track_tempo'] = trk.tempo
            info_dict['track_time_signature'] = trk.time_signature
            info_dict['track_valence'] = trk.valence
        except:
            print num, ' - track fail'
            tester += 1
        try:
            sawng = song.Song(song_id)
            location = sawng.get_artist_location()
            summary = sawng.get_audio_summary()
            # location
            info_dict['latitude'] = location['latitude']
            info_dict['longitude'] = location['longitude']
            info_dict['city'] = location['location']
            # audio summary
            info_dict['song_danceability'] = summary['danceability']
            info_dict['song_duration'] = summary['duration']
            info_dict['song_acousticness'] = summary['acousticness']
            info_dict['analysis_url'] = summary['analysis_url']
            info_dict['song_energy'] = summary['energy']
            info_dict['song_instrumentalness'] = summary['instrumentalness']
            info_dict['song_key'] = summary['key']
            info_dict['song_liveness'] = summary['liveness']
            info_dict['song_loudness'] = summary['loudness']
            info_dict['song_mode'] = summary['mode']
            info_dict['speechiness'] = summary['speechiness']
            info_dict['song_tempo'] = summary['tempo']
            info_dict['song_time_signature'] = summary['time_signature']
            info_dict['song_valence'] = summary['valence']
            # hotness, familiarity, and type
            info_dict['song_type'] = sawng.get_song_type()
            info_dict['artist_familiarity'] = sawng.get_artist_familiarity()
            info_dict['artist_hotttnesss'] = sawng.get_artist_hotttnesss()
            info_dict['song_currency'] = sawng.get_song_currency()
            info_dict['song_hotttnesss'] = sawng.get_song_hotttnesss()
            info_dict['artist_id'] = str(sawng.artist_id)
        except:
            print num, ' - song fail'
            tester += 1
        if tester < 2:
            trackDict[trk_id] = info_dict
        else:
            pass
def dump_loudness(id, file=sys.stdout):
    t = track.track_from_id(id)
    title = t.title + ' by ' + t.artist
    spotify_id = get_spotify_id(t.artist, t.title)
    print >>file, "# ID ", id
    print >>file, "#", title
    print >>file, "# ARTIST ", t.artist
    print >>file, "# TITLE ", t.title
    print >>file, "# SONG_ID ", t.song_id
    print >>file, "# SPOT_ID ", spotify_id
    print >>file, "#"
    weighted = []
    half_track = t.duration / 2
    first_half = 0
    second_half = 0
    xdata = []
    ydata = []
    for seg in t.segments:
        sstart = seg['start']
        sloudness = min(seg['loudness_max'], 0)
        sloudness = max(sloudness, -60)
        sduration = seg['duration']
        send = sstart + sduration
        weighted.append(sloudness)
        if len(weighted) > WEIGHT:
            weighted.pop(0)
        avg = sum(weighted) / len(weighted)
        if send <= half_track:
            seg_loudness = sloudness * sduration
            first_half += seg_loudness
        elif sstart < half_track and send > half_track:
            # this is the nasty segment that spans the song midpoint.
            # apportion the loudness appropriately
            first_seg_loudness = sloudness * (half_track - sstart)
            first_half += first_seg_loudness
            second_seg_loudness = sloudness * (send - half_track)
            second_half += second_seg_loudness
        else:
            seg_loudness = sloudness * sduration
            second_half += seg_loudness
        xdata.append(sstart)
        ydata.append(sloudness)
        ramp_factor = second_half / half_track - first_half / half_track
        #print >>file, seg['start'], sloudness, avg, first_half, second_half, ramp_factor
        #print >>file, "%8.6f %9.4f %9.4f %12.6f %12.6f %12.6f" % (sstart, sloudness, avg, first_half, second_half, ramp_factor)
        print >>file, "%8.6f %9.4f %9.4f" % (sstart, sloudness, avg)
    correlation = pearsonr(xdata, ydata)
    print >>file, "#"
    print >>file, "#", 'ramp factor', ramp_factor
    print >>file, "#", 'correlation', correlation
    print >>file, "#", 'first', first_half / half_track
    print >>file, "#", 'second', second_half / half_track
    print >>file, "#"
    return title, ramp_factor, first_half / half_track, second_half / half_track
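A minimal driver sketch for the dump_loudness() variant above, assuming hypothetical Echo Nest track IDs; it collects the returned (title, ramp factor, first-half, second-half) tuples and ranks tracks by how much louder their second half is than their first.

# Hypothetical track IDs; real ones would come from a catalog or a song search.
TRACK_IDS = ['TRXXXXX1234567890A', 'TRXXXXX1234567890B']

results = []
for tid in TRACK_IDS:
    with open(tid + '.dat', 'w') as out:
        title, ramp, first, second = dump_loudness(tid, file=out)
    results.append((ramp, title))

# Tracks whose second half is loudest relative to the first half come out on top.
for ramp, title in sorted(results, reverse=True):
    print "%9.4f  %s" % (ramp, title)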
def echo_nest_update():
    """ Updates the json with all EchoNest data available for this song """
    from pyechonest import config
    config.ECHO_NEST_API_KEY = ECHO_NEST_API_KEY
    config.CALL_TIMEOUT = 60
    while True:
        json_data = yield
        if json_data == STOP:
            break
        if json_data.get('echo_nest', ''):
            continue
        json_data['echo_nest'] = {}
        track_title = ''
        artist_name = ''
        if json_data.get('lastfm', ''):
            track_title = json_data['lastfm'].get('track', '')
            artist_name = json_data['lastfm'].get('artist', '')
        if not track_title:
            track_title = json_data['id3'].get('title', '')
        if not artist_name:
            artist_name = json_data['id3'].get('artist', '')
        a = None
        try:
            if artist_name:
                a = artist.Artist(artist_name,
                                  buckets=['biographies', 'blogs', 'doc_counts',
                                           'familiarity', 'hotttnesss', 'genre',
                                           'artist_location', 'news', 'reviews',
                                           'urls', 'years_active'])
                json_data['echo_nest']['artist_id'] = a.id
                time.sleep(1)
                json_data['echo_nest']['artist'] = a.name
                time.sleep(1)
                json_data['echo_nest']['bios'] = a.biographies
                time.sleep(1)
                json_data['echo_nest']['blogs'] = a.blogs
                time.sleep(1)
                json_data['echo_nest']['doc_counts'] = a.doc_counts
                time.sleep(1)
                json_data['echo_nest']['a_familiarity'] = a.familiarity
                time.sleep(1)
                json_data['echo_nest']['a_hotttnesss'] = a.hotttnesss
                time.sleep(1)
                json_data['echo_nest']['news'] = a.news
                time.sleep(1)
                json_data['echo_nest']['reviews'] = a.reviews
                time.sleep(1)
                json_data['echo_nest']['urls'] = a.urls
                time.sleep(1)
                json_data['echo_nest']['years_active'] = a.years_active
                time.sleep(1)
                json_data['echo_nest']['similar'] = [str(sim.name) for sim in a.get_similar()]
                time.sleep(1)
        except EchoNestException as e:
            print(e)
        except EchoNestIOError as e:
            print(e)
        except socket.timeout:
            pass
        time.sleep(1)
        if a and track_title:
            try:
                results = song.search(artist=a.name, title=track_title,
                                      buckets=['audio_summary', 'song_hotttnesss',
                                               'song_discovery'])
                time.sleep(1)
            except EchoNestException as e:
                print(e)
            except EchoNestIOError as e:
                print(e)
            except socket.timeout:
                pass
            time.sleep(1)
            if results:
                json_data['echo_nest']['id'] = results[0].id
                time.sleep(1)
                json_data['echo_nest']['summary'] = results[0].audio_summary
                time.sleep(1)
                json_data['echo_nest']['s_hotttnesss'] = results[0].song_hotttnesss
                time.sleep(1)
                json_data['echo_nest']['s_discovery'] = results[0].song_discovery
                time.sleep(1)
        time.sleep(1)
        tr = None
        if json_data['echo_nest'].get('id', ''):
            try:
                tr = track.track_from_id(json_data['echo_nest']['id'])
                time.sleep(1)
            except EchoNestException as e:
                print(e)
            except EchoNestIOError as e:
                print(e)
            except socket.timeout:
                pass
            time.sleep(1)
        if not tr:
            continue
        try:
            tr.get_analysis()
            time.sleep(1)
            json_data['echo_nest']['analysis'] = {}
            json_data['echo_nest']['analysis']['acousticness'] = tr.acousticness
            json_data['echo_nest']['analysis']['analysis_url'] = tr.analysis_url
            json_data['echo_nest']['analysis']['danceability'] = tr.danceability
            json_data['echo_nest']['analysis']['duration'] = tr.duration
            json_data['echo_nest']['analysis']['energy'] = tr.energy
            json_data['echo_nest']['analysis']['key'] = tr.key
            json_data['echo_nest']['analysis']['liveness'] = tr.liveness
            json_data['echo_nest']['analysis']['loudness'] = tr.loudness
            json_data['echo_nest']['analysis']['mode'] = tr.mode
            json_data['echo_nest']['analysis']['speechiness'] = tr.speechiness
            json_data['echo_nest']['analysis']['tempo'] = tr.tempo
            json_data['echo_nest']['analysis']['time_signature'] = tr.time_signature
            json_data['echo_nest']['analysis']['valence'] = tr.valence
            json_data['echo_nest']['analysis']['analysis_channels'] = tr.analysis_channels
            json_data['echo_nest']['analysis']['bars'] = tr.bars
            json_data['echo_nest']['analysis']['beats'] = tr.beats
            json_data['echo_nest']['analysis']['start_of_fade_out'] = tr.start_of_fade_out
            json_data['echo_nest']['analysis']['end_of_fade_in'] = tr.end_of_fade_in
            json_data['echo_nest']['analysis']['key_confidence'] = tr.key_confidence
            json_data['echo_nest']['analysis']['meta'] = tr.meta
            json_data['echo_nest']['analysis']['mode_confidence'] = tr.mode_confidence
            json_data['echo_nest']['analysis']['num_samples'] = tr.num_samples
            json_data['echo_nest']['analysis']['sections'] = tr.sections
            json_data['echo_nest']['analysis']['segments'] = tr.segments
            json_data['echo_nest']['analysis']['synchstring'] = tr.synchstring
            json_data['echo_nest']['analysis']['tatums'] = tr.tatums
            json_data['echo_nest']['analysis']['tempo_confidence'] = tr.tempo_confidence
            json_data['echo_nest']['analysis']['sign_confidence'] = tr.time_signature_confidence
        except EchoNestException as e:
            print(e)
        except EchoNestIOError as e:
            print(e)
        except socket.timeout:
            pass
        except Exception as e:
            print(e)
        time.sleep(1)
        if a:
            try:
                json_data['echo_nest']['basic_song_list'] = \
                    ['{} - {}'.format(s.artist_name, s.title)
                     for s in playlist.basic(type='song-radio', artist_id=a.id, song_id=tr.id)]
                time.sleep(1)
            except EchoNestException as e:
                print(e)
            except EchoNestIOError as e:
                print(e)
            except socket.timeout:
                pass
            time.sleep(1)
            try:
                json_data['echo_nest']['basic_artist_list'] = \
                    ['{} - {}'.format(s.artist_name, s.title)
                     for s in playlist.basic(artist_id=a.id, song_id=tr.id)]
                time.sleep(1)
            except EchoNestException as e:
                print(e)
            except EchoNestIOError as e:
                print(e)
            except socket.timeout:
                pass
            time.sleep(1)
def play_segment(segment):
    audiolab.play(segment["raw"].mean(axis=1))


def track_with_file(filename, mp3=None, track_id=None):
    if track_id:
        nest_track = track.track_from_id(track_id)
    else:
        nest_track = track.track_from_filename(mp3 or filename)
    audio_track = audiolab.Sndfile(filename)
    nest_track.samplerate = rate = audio_track.samplerate
    nest_track.nframes = audio_track.nframes
    cur_frame = 0
    for seg in nest_track.segments:
        num_frames = rate * seg["duration"]
        if cur_frame != seg["start"] * rate:
            num_frames += seg["start"] * rate - cur_frame
        if cur_frame + num_frames > audio_track.nframes:
            num_frames = audio_track.nframes - cur_frame
        seg["raw"] = audio_track.read_frames(num_frames)
        cur_frame += num_frames
    return nest_track


if __name__ == "__main__":
    chad = track.track_from_id(u'TRP8KVE11BC6E02570')  # Chad VanGaalen - Willow Tree
    #print "%s - %s" % (chad.artist, chad.title)
    for seg in isegments(chad, min_confidence=0.85):
        #print "[%7.2f -> %7.2f] %s" % (seg["start"], seg["end"], seg["keys"])
        print """<span data-start="%0.2f" data-stop="%0.2f">%s</span>""" % (seg["start"], seg["end"], ", ".join(seg["keys"].keys()))
def main(argv):
    """main function for standalone usage"""
    usage = "usage: %prog [options] dir dbfile"
    parser = OptionParser(usage=usage)
    parser.add_option('-r', '--replace', help='Replace existing BPM/key ID3 tags',
                      default=False, action='store_true')
    (options, args) = parser.parse_args(argv)
    if len(args) != 2:
        parser.print_help()
        return 2
    # do stuff
    mp3s = rwalk(args[0], '*.mp3')
    # Initialize shelve db. Check for existing 'maxkey'.
    db = shelve.open(args[1])
    try:
        idx = db['maxkey']
        print('%sAppending to existing database at index %s%s' % (GREEN, idx, ENDC))
    except KeyError:
        print('%sStarting new database%s' % (GREEN, ENDC))
        idx = u'0'
    try:
        for mp3 in mp3s:
            tags = ID3(mp3)
            # Skip already analyzed files
            if not options.replace and 'TXXX:mashupid' in tags:
                continue
            # Skip big files
            if os.path.getsize(mp3) > MAXSIZE:
                continue
            try:
                echosong = retry(audio.LocalAnalysis, [mp3], (socket.error,), sleep=30)
            except Exception:
                # This is to handle the LocalAnalysis call in the event it fails for
                # reasons out of my control. Print an error message and move on. When
                # you have time you should change the library's source to use something
                # smarter, say a specialized exception: AnalysisPendingException, etc.
                sys.stderr.write('%sSong analysis failed, skipping %s%s\n' % (RED, mp3, ENDC))
                continue
            moreinfo = track_from_id(echosong.analysis.identifier)
            # Add main tags
            tags.add(TBPM(encoding=1, text=unicode(round(echosong.analysis.tempo['value']))))
            try:
                tags.add(TKEY(encoding=1, text=key(echosong)))
            except KeyError:
                sys.stderr.write('%sIncorrect key info; key: %d, mode: %d%s\n' %
                                 (RED, echosong.analysis.key['value'],
                                  echosong.analysis.mode['value'], ENDC))
            tags.add(TXXX(encoding=3, desc=u'danceability', text=unicode(moreinfo.danceability)))
            tags.add(TXXX(encoding=3, desc=u'energy', text=unicode(moreinfo.energy)))
            tags.add(TXXX(encoding=3, desc=u'loudness', text=unicode(moreinfo.loudness)))
            # Update entry in DB if it exists, or...
            if 'TXXX:mashupid' in tags:
                db[tags.get('TXXX:mashupid').text[0]] = echosong
            else:
                # create a new entry
                tags.add(TXXX(encoding=3, desc=u'mashupid', text=idx))
                db[idx] = echosong
                idx = unicode(int(idx) + 1)
            tags.save()
            # So we don't hammer their servers
            print('%sFinished analyzing %s, sleeping...%s' % (GREEN, mp3, ENDC))
            sleep(randint(0, 3))
    except KeyboardInterrupt:
        sys.stderr.write('User termination, exiting...\n')
    finally:
        # Update 'maxkey'.
        db['maxkey'] = idx
        db.close()
    print('Done!')
def analyzeTracksNoArtist(songToTrack):
    """
    Creates a list of songs for training. Limit allows us to get a smaller
    dataset for testing purposes. Current dataset is based on
    [SongId, Tempo, Danceability, Energy, Speech, Acousticness]
    """
    data = []
    skipped = 0
    for song, trackIds in songToTrack.items():
        if len(trackIds) == 0:
            continue
        trackId = trackIds[0]
        retryCount = 0
        while True:
            if retryCount == 10:
                print "Retried 10 times - skipping"
                skipped += 1
                break
            try:
                t = echotrack.track_from_id(trackId)
                t.get_analysis()
                tempo = t.tempo
                dance = t.danceability
                energy = t.energy
                speech = t.speechiness
                acoustic = t.acousticness
                data.append([song, tempo, dance, energy, speech, acoustic])
            except echoutil.EchoNestAPIError:
                # We exceeded our access limit
                print "too many accesses per minute - retry in a minute"
                time.sleep(60)
                retryCount += 1
                continue
            except IndexError:
                # The song wasn't found on echo nest
                print "index error - skip"
                skipped += 1
                break
            except echoutil.EchoNestIOError:
                # Unknown error from echo nest
                print "IO error - retry"
                retryCount += 1
                continue
            except Exception:
                print "unknown exception - skip"
                skipped += 1
                break
            break  # retry request
        # Write to file every 500 analyzed tracks
        if len(data) % 500 == 0:
            count = len(data)
            print "total analyzed: ", count
            with open(write_file, "w") as f:
                for d in data:
                    # Convert list to string
                    dString = ""
                    for item in d:
                        dString += str(item) + ','
                    f.write(dString[:-1])
                    f.write('\n')
    print "total skipped: ", skipped
    return data
def getArtists():
    songs = request.json['songs']

    ### API Key Configuration
    from pyechonest import config
    config.ECHO_NEST_API_KEY = "YZZS9XI0IMOLQRKQ6"

    ###################### Getting Artists + Song Info ########################
    ## Basic info on the artists and song from the Echonest API based on a Spotify ID sent to the
    ## app. This part also collects any available information on tempo, danceability and energy
    ## from the Echonest API for use in subsetting the recommended playlist
    from pyechonest import track
    artists = []
    EchoNestSongId = []
    EchoNestArtistId = []
    energy = []
    dance = []
    tempo = []
    for i in range(len(songs)):
        songs[i] = songs[i].replace("spotify:track:", "spotify-WW:track:")
        try:
            t = track.track_from_id(songs[i])
            EchoNestSongId.append(t.song_id)
            EchoNestArtistId.append(t.id)
            artists.append(t.artist)
            if "energy" in vars(t):
                energy.append(t.energy)
            if "danceability" in vars(t):
                dance.append(t.danceability)
            if "tempo" in vars(t):
                tempo.append(t.tempo)
        except:
            continue

    ###################### Terms Per Artist // All Terms ########################
    ### Building Terms from Artists
    artist_dict = {}
    terms = []
    from pyechonest import artist
    ## Creates a dictionary with artists and the terms associated with each artist,
    ## and also a vector of all terms
    for i in range(len(artists)):
        artistTerm = artist.Artist(artists[i]).terms
        find_term = []
        limitTerms = 4
        for artistT in artistTerm:
            find_term.append(artistT['name'])
            limitTerms -= 1
            if limitTerms == 0:
                break
        artist_dict[artists[i]] = find_term
        terms.extend(find_term)

    ###################### Weights for Terms ########################
    ## Creates weights for each term based on the number of times it appeared in the artist list
    terms_dict = {}
    for i in range(len(terms)):
        if terms[i] not in terms_dict:
            find_count = terms.count(terms[i])
            terms_dict[terms[i]] = find_count

    ###################### Weights for Artists ########################
    ## Adds all of the weights per term per artist to get the overall weight for an artist
    ## 5 subtracted from total weight (number of terms used per artist, each guaranteed a count of 5)
    weights = {}
    for key in artist_dict:
        getWeight = 0
        for value in artist_dict[key]:
            if value in terms:
                getWeight = getWeight + terms_dict[value]
        weights[key] = getWeight

    # checking weights
    #for key in weights:
    #    print key + ":" + str(weights[key])

    # NOTE: If we ever want to add more than 5 songs, we have to order the dictionary
    # and select the top 5 artists of terms.

    ###################### Finding Similar Artists w/Weights ########################
    createURL = "http://developer.echonest.com/api/v4/artist/similar?api_key=YZZS9XI0IMOLQRKQ6"
    for key in weights:
        createURL += "&id=" + artist.Artist(key).id + "^" + str(weights[key])
    simMake = urllib2.urlopen(createURL)
    echonestSim = simMake.read()
    findSimilarArtist = []
    for artist in echonestSim.split(":"):
        if "name" in artist:
            artistShrink = artist.split(",")[0]
            artistShrink = artistShrink.translate(None, string.punctuation)
            artistShrink = artistShrink.strip()
            findSimilarArtist.append(artistShrink)
    findSimilarArtists = findSimilarArtist[1:30]

    ###################### Setting Limits on Audio for Song Rec ########################
    ### Used to find songs in range of the input songs on danceability, energy, and tempo,
    ### and return full-range values if there is not enough data to calculate an adjusted range
    ### NOTE: Currently - tempo and energy not used
    if len(dance) > 1:
        min_danceability = dance[dance.index(min(dance))]
        max_danceability = dance[dance.index(max(dance))]
    else:
        min_danceability = 0
        max_danceability = 1
    if len(tempo) > 1:
        min_tempo = tempo[tempo.index(min(tempo))]
        max_tempo = tempo[tempo.index(max(tempo))]
    else:
        min_tempo = 0
        max_tempo = 500
    if len(energy) > 1:
        min_energy = energy[energy.index(min(energy))]
        max_energy = energy[energy.index(max(energy))]
    else:
        min_energy = 0
        max_energy = 1

    ###################### Selecting Top Songs for Recommended Artists + Creates Playlist ########################
    playlist = []
    for i in findSimilarArtists:
        createURL = "http://developer.echonest.com/api/v4/song/search?api_key=YZZS9XI0IMOLQRKQ6&artist_id=" + i + "&bucket=id:spotify-WW&bucket=tracks&sort=song_hotttnesss-desc&min_danceability=" + str(min_danceability) + "&max_danceability=" + str(max_danceability) + "&results=5"
        getURL = urllib2.urlopen(createURL)
        clean_page = getURL.read()
        if "spotify-WW:track" in clean_page:
            get_track = clean_page[clean_page.index("spotify-WW:track") - 1:clean_page.index("spotify-WW:track") + 60].replace(":", ",").split(",")[2].translate(None, string.punctuation)
            playlist.append("spotify:track:" + get_track)

    playlist = jsonify({'playlist': playlist})
    return playlist
import requests
from pyechonest import config, track

config.ECHO_NEST_API_KEY = "ERAACIJP7XRWDPHSZ"

# f = open("Kanye.mp3", 'rb')
# t = track.track_from_file(f, "mp3")
t = track.track_from_id("TREZZIS139E66C60C4")
print t.id
print t.danceability
# print
print t.tempo
def run(self):
    log.info('maintenance walker')
    log.info('action: %s' % self.action)

    if self.action == 'check_media':
        from alibrary.models import Media
        if self.id:
            items = Media.objects.filter(id=self.id)
        else:
            items = Media.objects.filter()[0:self.limit]
        for item in items:
            delete = False
            if not item.master:
                log.info('no master for: %s' % item)
                if self.delete_missing:
                    log.info('delete item: %s' % item)
                    delete = True
            else:
                log.debug('got master for: %s' % item)
                log.debug('path: %s' % item.master.path)
                #print item.master.path
                if os.path.isfile(item.master.path):
                    size = b = os.path.getsize(item.master.path)
                    log.debug('filesize: %s' % size)
                    if size < 10:
                        log.debug('size too small or zero > delete: %s' % size)
                else:
                    log.debug('file does not exist')
                    delete = True
            if delete and self.delete_missing:
                log.info('delete item: %s' % item)
                item.delete()
            if not delete:
                item.status = 1
                item.save()

    if self.action == 'echonest_media':
        from alibrary.models import Media
        if self.id:
            items = Media.objects.filter(id=self.id)
        else:
            items = Media.objects.filter(echonest_id=None)[0:self.limit]
        for item in items:
            log.info('analyze: %s' % item)
            item.echonest_analyze()

    if self.action == 'clean_playlists':
        from alibrary.models import PlaylistItem
        from alibrary.models import Media
        items = PlaylistItem.objects.all()
        for item in items:
            log.info('clean: %s' % item.pk)
            if not item.content_object:
                log.info('no content object > delete: %s' % item.pk)
                item.delete()
            # m = Media.objects.get(pk=item.)

    if self.action == 'self_check_playlists':
        from alibrary.models.playlistmodels import self_check_playlists
        # reset
        # ps = Playlist.objects.all()
        # ps.update(status=1)
        self_check_playlists()

    if self.action == 'degrade_playlists':
        from alibrary.models.playlistmodels import Playlist
        ps = Playlist.objects.filter(type='broadcast').exclude(status=1)
        ps.update(type='playlist', status=1)

    if self.action == 'map_tags':
        from alibrary.models import Media
        if self.id:
            items = Media.objects.filter(id=self.id)
        else:
            items = Media.objects.filter()[0:self.limit]
        for item in items:
            if item.tags.count() < 1:
                print item
                print 'map release tags'
                if item.release and item.release.tags.count() > 0:
                    item.tags = item.release.tags
                    item.save()

    if self.action == 'echonest_media__':
        from alibrary.models import Media
        from pyechonest.util import EchoNestAPIError
        from pyechonest import track
        from pyechonest import config as echonest_config
        echonest_config.ECHO_NEST_API_KEY = ECHONEST_API_KEY
        if self.id:
            items = Media.objects.filter(id=self.id)
        else:
            items = Media.objects.filter(status=0)[0:self.limit]
        for item in items:
            log.info('analyze: %s' % item)
            #md5 = '96fa0180d225f14e9f8cbfffbf5eb81d'
            t = None
            if item.echonest_id:
                try:
                    log.debug('query by echonest id: %s' % item.echonest_id)
                    t = track.track_from_id(item.echonest_id)
                except EchoNestAPIError, e:
                    print e
            if not t:
                try:
                    f = open(item.master.path)
                    md5 = md5_for_file(f)
                    log.debug('query by md5: %s' % md5)
                    t = track.track_from_md5(md5)
                except EchoNestAPIError, e:
                    print e
            if not t:
                try:
                    log.debug('query by file: %s' % item.master.path)
                    f = open(item.master.path)
                    t = track.track_from_file(f, 'mp3')
                except EchoNestAPIError, e:
                    print e
# Uncomment to set the API key explicitly. Otherwise Pyechonest will
# look in the ECHO_NEST_API_KEY environment variable for the key.
#from pyechonest import config
#config.ECHO_NEST_API_KEY='YOUR API KEY'

from pyechonest import song, track

rof_results = song.search(title='Ring of Fire', artist='Johnny Cash',
                          buckets=['id:7digital'], limit=True)
if rof_results:
    rof = rof_results[0]
    rof_tracks = rof.get_tracks('7digital')
    if rof_tracks:
        roft = track.track_from_id(rof_tracks[0]['id'])
        for attr in ['analysis_channels', 'analysis_sample_rate', 'analysis_url',
                     'artist', 'audio_md5', 'catalog', 'danceability', 'duration',
                     'end_of_fade_in', 'energy', 'foreign_id', 'foreign_release_id',
                     'id', 'key', 'key_confidence', 'loudness', 'md5', 'meta',
                     'mode', 'mode_confidence', 'num_samples', 'preview_url',
                     'release_image', 'sample_md5', 'song_id', 'start_of_fade_out',
                     'status', 'tempo', 'tempo_confidence', 'time_signature',
                     'time_signature_confidence', 'title']:
            print '%-30s %s' % (attr, getattr(roft, attr))
        print ''
        for dicts_attr in ['bars', 'beats', 'sections', 'segments', 'tatums']:
            print '"%s" example dict:' % (dicts_attr)
            for key, val in getattr(roft, dicts_attr)[0].iteritems():
                print ' %-26s %s' % (key, val)
            print ''
    else:
        print 'No tracks found.'
else:
    print 'No songs found.'