def remove_duplicates_in_playlist(playlist_name):
    """Remove duplicate occurrences of tracks from the named playlist.

    Resolves the playlist id (create_playlist is presumably idempotent and
    returns the id of an existing playlist with this name — TODO confirm),
    scans the tracks for duplicates, and hands the specific (uri, position)
    pairs to remove_specific_occurrences.

    Args:
        playlist_name: Display name of the playlist to clean up.
    """
    playlist_id = create_playlist(sp, playlist_name, '')
    tracks = get_playlist_tracks(playlist_id)
    duplicates = find_duplicates(tracks)
    to_remove = []
    # enumerate() replaces the hand-rolled position counter; the position
    # must be the track's index within the playlist for the removal API.
    for position, track in enumerate(tracks):
        if track in duplicates:
            to_remove.append({'uri': track['id'], 'positions': [position]})
            # Drop one matching entry from the duplicate pool so each
            # duplicate is only scheduled for removal the expected number
            # of times.
            duplicates.remove(track)
    remove_specific_occurrences(to_remove, playlist_name)
def generate_aritst_cvs(sp, playlist_name, cvs_name):
    """Export the artists appearing in a playlist to a CSV file.

    Pages through all tracks of the playlist, accumulates artists into a
    dict (de-duplicating by key), and writes one id/name/uri row per artist.

    NOTE(review): function name keeps its original (misspelled) spelling
    so existing callers keep working.

    Args:
        sp: Authenticated spotipy client.
        playlist_name: Name of the playlist to scan.
        cvs_name: Path of the CSV file to write.
    """
    playlist_id = create_playlist(sp, playlist_name, '')
    results = sp.playlist_tracks(playlist_id)
    artists = {}
    add_to_artist_list(results, artists)
    # Follow the pagination cursor until the last page is reached.
    while results['next']:
        results = sp.next(results)
        add_to_artist_list(results, artists)
    with open(cvs_name, 'w', newline='') as csvfile:
        writer = csv.DictWriter(csvfile, fieldnames=['id', 'name', 'uri'])
        writer.writeheader()
        # Iterate the view directly; materializing a list() is unnecessary.
        for artist in artists.values():
            writer.writerow({'id': artist['id'],
                             'name': artist['name'],
                             'uri': artist['uri']})
def main():
    """Build the weekly new-music playlist for CURRENT_USER.

    Fetches recent albums and singles for every favorite artist, removes
    duplicates (and, for ALEJANDRO, previously-seen tracks), splits the
    results into a main bucket and a remix bucket, and adds both to a
    freshly created playlist.
    """
    ids = load_data(favorite_artists[CURRENT_USER])
    artist_names = load_data_at(favorite_artists[CURRENT_USER], 1)
    new_songs = []
    num_ids = len(ids)
    # zip + enumerate replaces the manual index bookkeeping of the original.
    for index, (artist_id, artist_name) in enumerate(zip(ids, artist_names), start=1):
        print(str(index) + '/' + str(num_ids) + ': ' + artist_name)
        # Albums and singles are fetched identically — loop over both
        # release types instead of duplicating the block.
        for album_type in ('album', 'single'):
            time.sleep(PAUSE_TIME)  # throttle to stay under the API rate limit
            results = sp.artist_albums(artist_id, album_type=album_type)
            new_songs.extend(get_recent_tracks(results, artist_id))
    print('New songs: ' + str(len(new_songs)))
    print('Removing duplicates...')
    new_songs = remove_duplicates(new_songs)
    print('New songs: ' + str(len(new_songs)))
    if CURRENT_USER == ALEJANDRO:
        print('Removing tracks in history...')
        new_songs = remove_history_tracks(new_songs)
        print('New songs: ' + str(len(new_songs)))
    main_bucket = []
    remix_bucket = []
    for track in new_songs:
        # BUG FIX: the original appended the track once per matching
        # indicator, so a name matching two indicators landed in
        # remix_bucket twice. any() guarantees at most one append, and
        # lower() is now computed once per track.
        lowered_name = track['name'].lower()
        if any(indicator in lowered_name for indicator in REMIX_INDICATORS):
            remix_bucket.append(track)
        else:
            main_bucket.append(track)
    print('main_bucket: ' + str(len(main_bucket)))
    print('remix_bucket: ' + str(len(remix_bucket)))
    playlist_id = create_playlist(sp, playlist_title[CURRENT_USER],
                                  'All new music released after last friday')
    add_to_playlist(playlist_id, tracks_to_ids(main_bucket))
    add_to_playlist(playlist_id, tracks_to_ids(remix_bucket))
def copy_playlist(playlist_id, new_playlist_name):
    """Copy tracks from a playlist into new_playlist_name, skipping tracks
    already present in the destination.

    The original built an intermediate duplicate_tracks list (O(n*m)) and
    then did a second O(n*d) membership pass that compared tracks with
    `==`, while duplicate detection itself used tracks_equal — an
    inconsistency. A single pass with any() keeps one comparison rule.

    Args:
        playlist_id: Source playlist id.
        new_playlist_name: Name of the destination playlist (resolved via
            create_playlist, presumably idempotent — TODO confirm).
    """
    new_playlist_id = create_playlist(sp, new_playlist_name, '')
    existing_tracks = get_playlist_tracks(new_playlist_id)
    source_tracks = get_playlist_tracks(playlist_id)
    ids = []
    for track in source_tracks:
        # tracks_equal is a project-defined comparison, so a set/dict
        # lookup is not applicable here.
        already_present = any(
            tracks_equal(track, existing) for existing in existing_tracks)
        if not already_present:
            ids.extend(tracks_to_ids([track]))
    print('Adding ' + str(len(ids)) + ' tracks to ' + new_playlist_name)
    add_to_playlist(new_playlist_id, ids)
def remove_specific_occurrences(elements, playlist_name):
    """Remove the given (uri, positions) entries from the named playlist.

    The Spotify API accepts at most 100 entries per call, so larger inputs
    are removed in a batch of 100 followed by a fresh duplicate scan.

    Args:
        elements: List of {'uri': ..., 'positions': [...]} dicts.
        playlist_name: Display name of the playlist being cleaned.
    """
    playlist_id = create_playlist(sp, playlist_name, '')
    print('Removing ' + str(len(elements)) + ' duplicate tracks from ' + playlist_name)
    if not elements:
        # Nothing scheduled for removal — done.
        return
    if len(elements) < 100:
        time.sleep(PAUSE_TIME)
        sp.user_playlist_remove_specific_occurrences_of_tracks(
            username, playlist_id, elements)
        return
    # Remove the first batch of 100, then restart from a duplicate scan.
    # This must go through remove_duplicates_in_playlist (not a slice of
    # the remaining elements) because track positions shift once
    # user_playlist_remove_specific_occurrences_of_tracks has run.
    time.sleep(PAUSE_TIME)
    sp.user_playlist_remove_specific_occurrences_of_tracks(
        username, playlist_id, elements[:100])
    remove_duplicates_in_playlist(playlist_name)