def test_total_length(self):
    """Total length of a default Song plus 'Pretty Fly' formats as 'h:m:s'."""
    # NOTE(review): renamed from 'test_toatl_length' (typo). unittest
    # discovers tests by the 'test_' prefix, so discovery is unaffected.
    songs = [Song(), Song("Pretty Fly", "The Offspring")]
    self.pl1.add_songs(songs)
    self.assertEqual(self.pl1.total_length(), "0:7:28")
def static(type='artist', artist_pick='song_hotttnesss-desc', variety=.5,
           artist_id=None, artist=None, song_id=None, track_id=None,
           description=None, style=None, mood=None, results=15,
           max_tempo=None, min_tempo=None, max_duration=None,
           min_duration=None, max_loudness=None, min_loudness=None,
           max_danceability=None, min_danceability=None, max_energy=None,
           min_energy=None, artist_max_familiarity=None,
           artist_min_familiarity=None, artist_max_hotttnesss=None,
           artist_min_hotttnesss=None, song_max_hotttnesss=None,
           song_min_hotttnesss=None, min_longitude=None, max_longitude=None,
           min_latitude=None, max_latitude=None, adventurousness=0.2,
           mode=None, key=None, buckets=None, sort=None, limit=False,
           seed_catalog=None, source_catalog=None, rank_type=None,
           test_new_things=None, artist_start_year_after=None,
           artist_start_year_before=None, artist_end_year_after=None,
           artist_end_year_before=None, dmca=False, distribution=None,
           song_type=None, genres=None):
    """Get a static playlist.

    Kwargs:
        type (str): a string representing the playlist type ('artist', 'artist-radio', ...)
        artist_pick (str): How songs should be chosen for each artist
        variety (float): A number between 0 and 1 specifying the variety of the playlist
        artist_id (str): the artist_id
        artist (str): the name of an artist
        song_id (str): the song_id
        track_id (str): the track id
        description (str): A string describing the artist and song
        style (str): A string describing the style/genre of the artist and song
        mood (str): A string describing the mood of the artist and song
        results (int): An integer number of results to return
        max_tempo (float): The max tempo of song results
        min_tempo (float): The min tempo of song results
        max_duration (float): The max duration of song results
        min_duration (float): The min duration of song results
        max_loudness (float): The max loudness of song results
        min_loudness (float): The min loudness of song results
        artist_max_familiarity (float): A float specifying the max familiarity of artists to search for
        artist_min_familiarity (float): A float specifying the min familiarity of artists to search for
        artist_max_hotttnesss (float): A float specifying the max hotttnesss of artists to search for
        artist_min_hotttnesss (float): A float specifying the min hotttnesss of artists to search for
        song_max_hotttnesss (float): A float specifying the max hotttnesss of songs to search for
        song_min_hotttnesss (float): A float specifying the min hotttnesss of songs to search for
        max_energy (float): The max energy of song results
        min_energy (float): The min energy of song results
        max_danceability (float): The max danceability of song results
        min_danceability (float): The min danceability of song results
        mode (int): 0 or 1 (minor or major)
        key (int): 0-11 (c, c-sharp, d, e-flat, e, f, f-sharp, g, a-flat, a, b-flat, b)
        max_latitude (float): A float specifying the max latitude of artists to search for
        min_latitude (float): A float specifying the min latitude of artists to search for
        max_longitude (float): A float specifying the max longitude of artists to search for
        min_longitude (float): A float specifying the min longitude of artists to search for
        adventurousness (float): A float ranging from 0 for old favorites to 1.0 for unheard music according to a seed_catalog
        sort (str): A string indicating an attribute and order for sorting the results
        buckets (list): A list of strings specifying which buckets to retrieve
        limit (bool): A boolean indicating whether or not to limit the results to one of the id spaces specified in buckets
        seed_catalog (str or Catalog): An Artist Catalog object or Artist Catalog id to use as a seed
        source_catalog (str or Catalog): A Catalog object or catalog id
        rank_type (str): A string denoting the desired ranking for description searches, either 'relevance' or 'familiarity'
        artist_start_year_before (int): Returned song's artists will have started recording music before this year.
        artist_start_year_after (int): Returned song's artists will have started recording music after this year.
        artist_end_year_before (int): Returned song's artists will have stopped recording music before this year.
        artist_end_year_after (int): Returned song's artists will have stopped recording music after this year.
        distribution (str): Affects the range of artists returned and how many songs each artist will have in the playlist relative to how similar they are to the seed. (wandering, focused)
        song_type (str): A string or list of strings of the type of songs allowed. The only valid song type at the moment is 'christmas'. Valid formats are 'song_type', 'song_type:true', 'song_type:false', or 'song_type:any'.

    Returns:
        A list of Song objects

    Example:
        >>> p = playlist.static(type='artist-radio', artist=['ida maria', 'florence + the machine'])
        >>> p
        [<song - Pickpocket>, <song - Self-Taught Learner>, <song - Maps>, <song - Window Blues>, <song - That's Not My Name>, <song - My Lover Will Go>, <song - Home Sweet Home>, <song - Stella & God>, <song - Don't You Want To Share The Guilt?>, <song - Forget About It>, <song - Dull Life>, <song - This Trumpet In My Head>, <song - Keep Your Head>, <song - One More Time>, <song - Knights in Mountain Fox Jackets>]
        >>>
    """
    # The web API expects lowercase string booleans ('true'/'false').
    limit = str(limit).lower()
    # Accept either Catalog objects or raw catalog-id strings.
    if seed_catalog and isinstance(seed_catalog, catalog.Catalog):
        seed_catalog = seed_catalog.id
    if source_catalog and isinstance(source_catalog, catalog.Catalog):
        source_catalog = source_catalog.id
    dmca = str(dmca).lower()
    # Snapshot every parameter (after the normalisations above) as the
    # request kwargs.  NOTE: this relies on no other locals existing at
    # this point — do not introduce temporaries above this line.
    kwargs = locals()
    # The API uses the singular parameter names 'bucket' and 'genre'.
    kwargs['bucket'] = kwargs['buckets'] or []
    del kwargs['buckets']
    kwargs['genre'] = kwargs['genres']
    del kwargs['genres']
    result = util.callm("%s/%s" % ('playlist', 'static'), kwargs)
    return [Song(**util.fix(s_dict)) for s_dict in result['response']['songs']]
async def q_nextSong(self):
    """Pop the next queued track and start playing it, if any remain."""
    if not self.queue:
        return
    # The method itself is passed along so playback can chain to the
    # next queued song when this one finishes.
    self.np_song = Song(self, 'local', self.queue.pop(0), self.q_nextSong)
    await self.np_song.play()
def setUp(self):
    """Create the Song fixture shared by the tests in this case."""
    fixture = ("Tri momi", "Vievska grupa", "none", 0, 150, 320)
    self.test_song = Song(*fixture)
def setUp(self):
    """Create the Song fixture shared by the tests in this case."""
    fixture = ('Hells Bells', 'AC/DC', 'rough and though', 5, 520, 256)
    self.test_song = Song(*fixture)
def setUp(self):
    """Create a playlist preloaded with two songs for the tests."""
    self.playlist = Playlist('My playlist')
    self.song1 = Song('Name1', 'Artist', 'Album', 3, 240, 10)
    self.song2 = Song('Name2', 'Artist', 'Album', 4, 210, 9)
    for track in (self.song1, self.song2):
        self.playlist.add_song(track)
# Export the "Got Theme" tune as tabs for the Swan 1040 harmonica at
# three different transpositions.
from harmonica import swan1040, diatonic_C
from song import Song

gotTheme = Song("Got Theme")
# Parse the tab string against a diatonic C harp.  The string is cleaned
# up first: tab characters become spaces and '+' markers are stripped.
gotTheme.getFromTabs(
    diatonic_C,
    """
    -6 -4 -5 +6 -6 -4 -5 +6 +5 +6 +4 +5 -5 +6 +4 -5 +5 -4
    -6 -4 -5 +6 -6 -4 -5 +6 +5 +6 +4 +5 -5 +5 +4 -4
    -8 +7 -4 -6 -3* -5 +6 -6 -8 +7 +6 -6 -3* -5 +5 -4
    """.replace("\t", " ").replace("+", ""))
gotTheme.exportTabs(swan1040)  # original register
gotTheme.shift(-12)            # down an octave (presumably semitones — confirm)
gotTheme.exportTabs(swan1040)
gotTheme.shift(+14)            # back up: net +2 from the original
gotTheme.exportTabs(swan1040)
def test_playlist_save(self):
    """Saving a two-song playlist to a file should complete without error."""
    self.test_song_second = Song(
        "Back in Black", "ACDC", "Back in Black", 5, 250, 320)
    for track in (self.test_song, self.test_song_second):
        self.test_playlist.add_song(track)
    self.test_playlist.save("gg")
def setBackgroundMusic(self):
    # Start the welcome-screen track at normal volume from position 0;
    # -1 presumably loops it indefinitely (pygame convention — confirm
    # against Song.play).
    Song(WelcomeScreen.SONG_PATH).play(Constants.NORMAL_VOLUME, -1, 0)
def test_playlist_show_artists(self):
    """show_artists() returns the distinct artist names as a set."""
    self.test_song_second = Song(
        "Back in Black", "ACDC", "Back in Black", 5, 312, 320)
    # Both fixtures share the artist 'ACDC', so the set has one element.
    for track in (self.test_song, self.test_song_second):
        self.test_playlist.add_song(track)
    self.assertEqual(self.test_playlist.show_artists(), {'ACDC'})
def setUp(self):
    """Create an empty playlist and one song fixture for the tests."""
    self.test_playlist = Playlist("Test")
    fixture = ("Hells Bells", "ACDC", "Back in Black", 5, 312, 320)
    self.test_song = Song(*fixture)
# Smoke-test the SongList class end to end.
song_list = SongList()
print(song_list)
assert not song_list.song  # a fresh list holds no songs

# Loading from the backing CSV populates it.
song_list.load_song()
print(song_list)
assert song_list.song  # assumes the CSV file is not empty

# Sorting by each supported key should report success.
for sort_key in ('year', 'title'):
    print("Sorting by " + sort_key)
    assert song_list.sort(sort_key)

# Adding and retrieving a song.
new_song = Song('Hero', 'Enrique Iglesias', 2008, 'y')
assert song_list.add_song(new_song)
assert song_list.get_song('Hero')

# Counting learned vs. still-required songs.
assert song_list.count_learned()
assert song_list.count_require()

# Persist (inspect the CSV manually to verify the result).
song_list.save_song()
def main():
    """Build a small demo .mod tune from scratch and write it to disk."""
    song = Song(name='demo')

    # One simple sine-wave sample; sample data conventionally has a
    # power-of-two length (32 frames here).
    sample = Sample(name='sine')
    sample.wave = [100 * sin(2 * pi * n / 32) for n in range(32)]
    song.samples.append(sample)

    def make_note(pitch):
        # Return a Note playing sample #1 (numbering starts at 1; 0 = empty).
        new_note = Note()
        new_note.sample = 1
        new_note.pitch = pitch
        return new_note

    # Note objects (note_c/note_d/note_e are currently unused but kept
    # around for experimenting with alternative melodies).
    note_c = make_note('C-2')
    note_d = make_note('D-2')
    note_e = make_note('E-2')
    note_f4 = make_note('F-3')
    note_d5 = make_note('D-4')
    note_d4 = make_note('D-3')

    # One pattern; channels come pre-made on the pattern and cannot be
    # moved or replaced, so just alias channel 0 for brevity.
    pattern0 = Pattern()
    channel = pattern0[0]

    # Lay the melody onto the channel rows.
    melody = ((0, note_f4), (8, note_d5), (16, note_d4), (24, note_d5),
              (28, note_f4), (32, note_f4), (40, note_f4), (48, note_d4),
              (56, note_d5))
    for row, placed_note in melody:
        channel[row] = placed_note

    song.patterns.append(pattern0)  # store the pattern...
    song.positions = [0]            # ...and schedule it at position 0

    # NOTE(review): '~' is not expanded by ordinary file I/O — confirm
    # Song.write_file calls os.path.expanduser, or make the path literal.
    song.write_file('~/Desktop/forths.mod')
def setUp(self):
    # A fresh default-constructed Song for each test.
    self.song = Song()
"""Walk through the SongList API, demonstrating one method at a time."""
from songlist import SongList
from song import Song

sample_song = Song("Amazing Grace", "John Newton", 1779, True)

# A brand-new SongList starts out empty.
song_list = SongList()
print(song_list)
assert not song_list.songs

# Loading from CSV populates it.
song_list.load_songs('songs.csv')
print(f"Loaded SongList: \n{song_list}")
assert song_list.songs  # assuming CSV file is not empty

# Sorting: learned-or-not first, then title.
print("----Testing sort----")
print(f"Unsorted SongList is:\n{song_list}")
song_list.sort_songs("required")
print(f"Sorted SongList by learned or not then title is:\n{song_list}")

# Adding a new Song.
print("----Testing add_song----")
print(f"Adding {sample_song} to SongList")
song_list.add_song(sample_song)
print(f"Updated SongList is: \n\n{song_list}")

# test get_song_by_title()
from song import Song
from album import Album
from band import Band

# Demo: build a song, collect it into an album, then hand the album to a band.
first_song = Song("Running in the 90s", 3.45, False)
print(first_song.get_info())

initial_d = Album("Initial D", first_song)
print(initial_d.add_song(Song("Around the World", 2.34, False)))
print(initial_d.details())
print(initial_d.publish())

band = Band("Manuel")
print(band.add_album(initial_d))
print(band.remove_album("Initial D"))
print(band.details())
''' Quick script to visualize the difference between the HFC and Melflux ODFs ''' from song import Song import sys import numpy as np from essentia import * from essentia.standard import * song = Song(sys.argv[1]) song.open() song.openAudio() spec = Spectrum(size=1024) w = Windowing(type='hann') fft = np.fft.fft c2p = CartesianToPolar() pool = Pool() odf_hfc = OnsetDetection(method='hfc') odf_mel = OnsetDetection(method='melflux') for frame in FrameGenerator(song.audio, frameSize=1024, hopSize=512): pool.add('audio.windowed_frames', w(frame)) fft_result = fft(pool['audio.windowed_frames']).astype('complex64') print fft_result.shape fft_result_mag = np.absolute(fft_result) fft_result_ang = np.angle(fft_result) HOP_SIZE = 512 for mag, phase in zip(fft_result_mag, fft_result_ang):
def analyze(self, files=[]):
    """Ingest .mod files, cluster their instruments, and generate a song.

    Returns the scipy hierarchical-clustering linkage matrix.

    NOTE(review): 'files=[]' is a mutable default argument; it is only
    read here, but a None default would be safer.
    """
    self.pending.extend((path.relpath(f, self.prefix) for f in files))
    # Collect all songs (was hoping for less memory consumption)
    i = 0  # running count of non-empty instruments gathered so far
    ideal_treble = None
    ideal_bass = None
    ideal_pad = None
    ideal_kick = None
    ideal_hihat = None
    collected_song_samples = []
    while self.pending:
        f = self.pending.pop()
        song = Song(filename=f)
        # Hand-picked "ideal" exemplar instruments from known files.
        # Each index = instruments already collected + count of non-empty
        # instrument slots preceding the chosen slot within this song.
        if path.split(f)[-1] == "song24.mod":
            ideal_treble = len(self.instruments) + len(
                list(filter(None, song.instruments[:5])))
            ideal_bass = len(self.instruments) + len(
                list(filter(None, song.instruments[:2])))
        if path.split(f)[-1] == "bs1.mod":
            ideal_kick = len(self.instruments) + len(
                list(filter(None, song.instruments[:5])))
            ideal_hihat = len(self.instruments) + len(
                list(filter(None, song.instruments[:8])))
        if path.split(f)[-1] == "fucking_disco2.mod":
            ideal_pad = i + 5 - 1
        if path.split(f)[-1] == "jcge2.mod":
            self.ideal_snare = len(self.instruments) + len(
                list(filter(None, song.instruments[:3])))
        if path.split(f)[-1] == 'chiptune_no_139.mod':
            ideal_treble = len(self.instruments) + len(
                list(filter(None, song.instruments[:9])))
            ideal_bass = len(self.instruments) + len(
                list(filter(None, song.instruments[:1])))
            ideal_kick = len(self.instruments) + len(
                list(filter(None, song.instruments[:11])))
            ideal_hihat = len(self.instruments) + len(
                list(filter(None, song.instruments[:16])))
            # NOTE(review): the local 'ideal_snare' is bound ONLY in this
            # branch, yet it is read unconditionally after the loop — a
            # NameError if 'chiptune_no_139.mod' is never processed.
            ideal_snare = ideal_hihat
            ideal_pad = len(self.instruments) + len(
                list(filter(None, song.instruments[:10])))
        self.songs += [song]
        self.instruments.extend(filter(None, song.instruments))
        self.learned += [f]
        # Remember which global instrument indexes came from this song.
        collected_song_samples.append(
            list(range(i, i + len(list(filter(None, song.instruments))))))
        i += len(list(filter(None, song.instruments)))
    self.ideal_sample_indexes.append(ideal_treble)
    self.ideal_sample_indexes.append(ideal_bass)
    self.ideal_sample_indexes.append(ideal_pad)
    self.ideal_sample_indexes.append(ideal_kick)
    self.ideal_sample_indexes.append(ideal_hihat)
    self.ideal_treble = ideal_treble
    self.ideal_bass = ideal_bass
    self.ideal_pad = ideal_pad
    self.ideal_kick = ideal_kick
    self.ideal_hihat = ideal_hihat
    self.ideal_snare = ideal_snare
    self.collected_song_samples = collected_song_samples
    # Assemble vectors: one feature vector per collected instrument.
    instrument_vecs = np.array(
        [instrument.vector for instrument in self.instruments])
    # Standardize axes (scale, centre, then per-axis weights — currently 1s).
    instrument_vecs /= np.std(instrument_vecs, 0)[np.newaxis, :]
    instrument_vecs -= np.mean(instrument_vecs, 0)[np.newaxis, :]
    instrument_vecs *= np.array([1.0, 1.0, 1.0])[np.newaxis, :]
    # Do clustering stuff, group instruments.
    linkage = sch.linkage(instrument_vecs, method='ward')
    groups = self.make_groups(linkage)
    # Assemble fomm's (first-order Markov models) in clusters.
    self.bass_cluster = Cluster(self.instruments, self.ideal_bass,
                                self.bass_cluster)
    self.treb_cluster = Cluster(self.instruments, self.ideal_treble,
                                self.treb_cluster)
    self.kick_cluster = Cluster(self.instruments, self.ideal_kick,
                                self.kick_cluster)
    self.snare_cluster = Cluster(self.instruments, self.ideal_hihat,
                                 self.snare_cluster)
    # Debug helper: pretty-print a 2-D array of floats.
    arrayprint = lambda x: print('\n'.join(
        ('{:4.2f} ' * len(y)).format(*y) for y in x))
    # Find bridging pairs (to construct conditional probs).
    bp2tp = self.bass_cluster.pitch_correlation(self.treb_cluster,
                                                self.basstreble_parings)
    bt2tt = self.bass_cluster.beats_correlation(self.treb_cluster,
                                                self.basstreble_parings)
    bass_sample = self.bass_cluster.sample.sample
    treb_sample = self.treb_cluster.sample.sample
    # NOTE(review): kick material is drawn from the *snare* cluster and
    # the snare sample from the kick/hihat exemplars below — confirm the
    # cross-wiring is intentional.
    kick_sample = self.snare_cluster.sample.sample
    bdpitch = self.snare_cluster.sample.rounded_pitch_num
    snare_sample = self.instruments[self.ideal_kick].sample
    snpitch = self.instruments[self.ideal_hihat].rounded_pitch_num
    def newsamples():
        # Re-roll the bass/treble samples (intended for interactive use).
        self.bass_cluster.new_sample()
        self.treb_cluster.new_sample()
    def makesong():
        # Drive the generator with the clusters' Markov models and samples.
        generator(self.bass_cluster.fomm_pitch, self.bass_cluster.fomm_beats,
                  self.treb_cluster.fomm_pitch, self.treb_cluster.fomm_beats,
                  self.bass_cluster.sample.sample,
                  self.treb_cluster.sample.sample, kick_sample, bdpitch,
                  self.kick_cluster.fomm_beats, snare_sample, snpitch,
                  self.snare_cluster.fomm_beats, bp2tp, bt2tt)
    makesong()
    set_trace()  # drops into the debugger after generation (interactive use)
    return linkage
''' Visualize ODF and RMS adaptive mean, to confirm that it correctly detects high segments usage: TestSegmentationODFandRMS.py path_to_song ''' from song import Song import matplotlib.pyplot as plt import numpy as np import sys from essentia import * from essentia.standard import FrameGenerator s1 = Song(sys.argv[1]) s1.open() s1.openAudio() audio = s1.audio FRAME_SIZE = int(44100 * (60.0 / s1.tempo) / 2) HOP_SIZE = FRAME_SIZE / 2 def adaptive_mean(x, N): return np.convolve(x, [1.0] * int(N), mode='same') / N pool = Pool() for frame in FrameGenerator(audio, frameSize=FRAME_SIZE, hopSize=HOP_SIZE): pool.add('lowlevel.rms', np.average(frame**2)) adaptive_mean_rms = adaptive_mean( pool['lowlevel.rms'],
def mimic_test():
    """Load a known simple module file and report its instrument count."""
    demo = Song(filename='mods/mods/SimpleMods/3_pasar_lypsyl_4.mod')
    print(len(demo.instruments))
def setUp(self):
    """Create a playlist fixture plus two song fixtures for the tests."""
    self.test_pls = Playlist("test")
    self.test_song = Song(*("Tri momi", "Vievska grupa", "none", 0, 180, 320))
    self.song_mariiko = Song(
        *("mari mariiko", "oratnica", "none", 0, 200, 320))
def start_process():
    """Crawl lyrics per mood until MINIMUM_SONGS per mood is reached.

    Reads the mood/song table via Filesystem, fetches lyrics from
    lyricwikia with songlyrics as a fallback, stores hits through
    store_lyric(), and tracks progress in several module-level globals.
    """
    MOODS = ['aggressive', 'angry', 'bittersweet', 'calm', 'depressing',
             'dreamy', 'fun', 'gay', 'happy', 'heavy', 'intense',
             'melancholy', 'playful', 'quiet', 'quirky', 'sad',
             'sentimental', 'sleepy', 'soothing', 'sweet']
    # NOTE(review): 'date' is computed but no longer used — the dated
    # file name below is commented out.
    date = str(datetime.datetime.now()).replace(" ", "_" )
    filesystem = Filesystem(MOODS_SONGS_FILE_NAME, "r+")
    #file_name = filesystem_path+'/'+date+'_moods_songs_lyrics.csv'
    global file_name
    file_name = OUTPUT_SONGS_LYRICS_FILE+'.csv'
    song_list = filesystem.get_songs_list()
    song_lyrics_not_found = list()
    global song_lyrics_list
    song_lyrics_list = {'mood':[], 'title':[], 'artist':[], 'lyric':[],
                        'youtube_video_id':[], 'file':[], 'bow':[]}
    global multi_mood_songs
    multi_mood_songs = {'mood':[], 'title':[], 'artist':[],
                        'youtube_video_id':[]}
    global moods_list_indexes_retrieved
    moods_list_indexes_retrieved = { i : 0 for i in MOODS }
    # Write an (empty) frame so the CSV exists with headers before appending.
    dataframe = pd.DataFrame.from_dict(song_lyrics_list, orient='columns')
    with open(file_name, 'a') as f:
        dataframe.to_csv(f)
    global ids_unique_set
    ids_unique_set = set()
    # NOTE(review): rebound from list to dict here; the list above is dead.
    song_lyrics_not_found = {'mood':[], 'title':[], 'artist':[]}
    global ids_repeated_set
    ids_repeated_set = set()
    global available_dataset, available_downloaded
    available_dataset = []
    available_downloaded = []
    if DATASET_AVAILABE != "":
        available_dataset = dataset.make_audio_table(DATASET_AVAILABE)
        available_downloaded = dataset.make_audio_table(AUDIO_STORAGE_PATH)
    total_songs = len(song_list)
    service = ""
    global query
    query = QueryVideo()
    starting_index=0
    lyr = ""
    COUNTER = 0
    # Round-robin over moods: a mood is re-queued until it has enough songs.
    q = queue.Queue()
    moods_list_indexes_done = { i : 0 for i in MOODS }
    df_song_list = pd.DataFrame.from_dict(song_list, orient='columns')
    MOODS = MOODS[0:]
    for i in range(len(MOODS)):
        q.put(MOODS[i])
    while not q.empty():
        mood = q.get()
        print(mood, end=' ')
        # Rows for this mood only (column 0 holds the mood label).
        df_mood_list = df_song_list.loc[df_song_list[0].isin([mood])]
        current_id = moods_list_indexes_done[mood]
        available_downloaded = dataset.make_audio_table(AUDIO_STORAGE_PATH)
        available_songs = available_downloaded.query('mood == @mood')
        print("len(available_songs)", len(available_songs))
        if current_id < len(df_mood_list[0]) and moods_list_indexes_retrieved[mood] < ( int(MINIMUM_SONGS) - len(available_songs)):
            row = df_mood_list.iloc[current_id]
            q.put(mood)  # still short on this mood: schedule another pass
            song = Song(artist=row[2].strip(), title=row[1].strip())
            lyr = song.lyricwikia()
            if lyr == '':
                # lyricwikia failed; try songlyrics as a fallback.
                lyr = song.songlyrics()
                if lyr != '':
                    service = "songlyrics"
                    store_lyric(lyr, row, query)
                    print("moods_list_indexes_retrieved[mood]", moods_list_indexes_retrieved[mood])
                    COUNTER+=1
                else:
                    ## Here one could add another lyrics source or another
                    ## way to call the service.
                    song_lyrics_not_found['mood'].append(str(row[0]))
                    song_lyrics_not_found['title'].append(str(row[1]))
                    song_lyrics_not_found['artist'].append(str(row[2]))
                    print("not found in both", str(current_id + starting_index )+"/"+str(total_songs))
                    print(row)
                    service = ""
            else:
                service = "lyricwikia"
                store_lyric(lyr, row, query)
                print("moods_list_indexes_retrieved[mood]", moods_list_indexes_retrieved[mood])
                COUNTER+=1
            moods_list_indexes_done[mood]+=1
            print("", str(COUNTER)+"/"+ str(current_id)+"/"+str(total_songs), service)
    print(moods_list_indexes_done)
    # Dead code preserved as-is (a no-op string literal in the original):
    """ if len(song_lyrics_list['mood']) % 5 == 0 and len(song_lyrics_list['mood']) > 4 :
        #ipdb.set_trace()
        #print("", str(index)+"/"+str(total_songs), service, lyr)
        dataframe = pd.DataFrame.from_dict(song_lyrics_list, orient='columns')
        with open(file_name, 'a') as f:
            dataframe.to_csv(f, header=False)
    elif len(song_lyrics_list['mood']) == 1:
        dataframe = pd.DataFrame.from_dict(song_lyrics_list, orient='columns')
        with open(file_name, 'a') as f:
            dataframe.to_csv(f)
    """
    # Reset the global accumulator for the next run.
    song_lyrics_list = {'mood':[], 'title':[], 'artist':[], 'lyric':[],
                        'youtube_video_id':[], 'file':[], 'bow':[]}
def analyze(self, files=[]): self.pending.extend((path.relpath(f, self.prefix) for f in files)) # Collect all songs (was hoping for less memory consumption) i = 0 ideal_treble = None ideal_bass = None ideal_pad = None ideal_kick = None ideal_hihat = None collected_song_samples = [] while self.pending: f = self.pending.pop() # print("Add file:", f) song = Song(filename=f) if path.split(f)[-1] == "song24.mod": ideal_treble = len(self.instruments) + len(list(filter( None, song.instruments[:5]))) ideal_bass = len(self.instruments) + len(list(filter( None, song.instruments[:2]))) if path.split(f)[-1] == "bs1.mod": ideal_kick = len(self.instruments) + len(list(filter( None, song.instruments[:5]))) ideal_hihat = len(self.instruments) + len(list(filter( None, song.instruments[:8]))) #ideal_kick = i + 5 - 1 #ideal_hihat = i + 8 - 1 if path.split(f)[-1] == "fucking_disco2.mod": ideal_pad = i + 5 - 1 if path.split(f)[-1] == "jcge2.mod": self.ideal_snare = len(self.instruments) + len(list(filter( None, song.instruments[:3]))) if path.split(f)[-1] == 'chiptune_no_139.mod': ideal_treble = len(self.instruments) + len(list(filter( None, song.instruments[:9]))) ideal_bass = len(self.instruments) + len(list(filter( None, song.instruments[:1]))) ideal_kick = len(self.instruments) + len(list(filter( None, song.instruments[:11]))) ideal_hihat = len(self.instruments) + len(list(filter( None, song.instruments[:16]))) ideal_snare = ideal_hihat ideal_pad = len(self.instruments) + len(list(filter( None, song.instruments[:10]))) # Idea: Add a bunch of pad samples, so that they don't get mixed in with the other clusters...? 
# if path.split(f)[-1] == "cardiaxx_1.mod": # self.ideal_sample_indexes.append(i + 2 - 1) # for j in range(32): # if song.instruments[j] is None: # print(str(j) + " is none") self.songs += [song] self.instruments.extend(filter(None, song.instruments)) self.learned += [f] collected_song_samples.append(list(range(i, i+len(list( filter(None, song.instruments)))))) # What about a list of dictionaries? i += len(list(filter(None, song.instruments))) # print(collected_song_samples[len(collected_song_samples)-1]) # print("....................") self.ideal_sample_indexes.append(ideal_treble) self.ideal_sample_indexes.append(ideal_bass) self.ideal_sample_indexes.append(ideal_pad) self.ideal_sample_indexes.append(ideal_kick) self.ideal_sample_indexes.append(ideal_hihat) self.ideal_treble = ideal_treble self.ideal_bass = ideal_bass self.ideal_pad = ideal_pad self.ideal_kick = ideal_kick self.ideal_hihat = ideal_hihat self.ideal_snare = ideal_snare self.collected_song_samples = collected_song_samples # Assemble vectors instrument_vecs = np.array([instrument.vector for instrument in self.instruments]) # Standardize axes instrument_vecs /= np.std(instrument_vecs, 0)[np.newaxis, :] instrument_vecs -= np.mean(instrument_vecs, 0)[np.newaxis, :] instrument_vecs *= np.array([1.0,1.0,1.0])[np.newaxis, :] # Do clustering stuff, group instruments linkage = sch.linkage(instrument_vecs, method='ward') groups = self.make_groups(linkage) ######################################################## # Assemble fomm's in clusters self.bass_cluster = Cluster(self.instruments, self.ideal_bass, self.bass_cluster) self.treb_cluster = Cluster(self.instruments, self.ideal_treble, self.treb_cluster) self.kick_cluster = Cluster(self.instruments, self.ideal_kick, self.kick_cluster) self.snare_cluster = Cluster(self.instruments, self.ideal_hihat, self.snare_cluster) # arrayprint = lambda x: print('\n'.join(('{:4.2f} '*len(y)).format(*y) for y in x)) #print('===') # arrayprint(self.bass_cluster.fomm_pitch) 
#print('===') #arrayprint(self.bass_cluster.fomm_beats) # Find bridging pairs (to construct conditional probs) bp2tp = self.bass_cluster.pitch_correlation(self.treb_cluster, self.basstreble_parings) #arrayprint(bp2tp) bt2tt = self.bass_cluster.beats_correlation(self.treb_cluster, self.basstreble_parings) #arrayprint(bt2tt) #self.bass_cluster.new_sample() bass_sample = self.bass_cluster.sample.sample #self.treb_cluster.new_sample() treb_sample = self.treb_cluster.sample.sample # kick_sample = self.instruments[self.ideal_hihat].sample # bdpitch = self.instruments[self.ideal_hihat].rounded_pitch_num kick_sample = self.snare_cluster.sample.sample bdpitch = self.snare_cluster.sample.rounded_pitch_num snare_sample = self.instruments[self.ideal_kick].sample snpitch = self.instruments[self.ideal_hihat].rounded_pitch_num def newsamples(): self.bass_cluster.new_sample() self.treb_cluster.new_sample() def makesong(): generator(self.bass_cluster.fomm_pitch, self.bass_cluster.fomm_beats, self.treb_cluster.fomm_pitch, self.treb_cluster.fomm_beats, self.bass_cluster.sample.sample, self.treb_cluster.sample.sample, kick_sample, bdpitch, self.kick_cluster.fomm_beats, snare_sample, snpitch, self.snare_cluster.fomm_beats, bp2tp, bt2tt ) makesong() set_trace() return linkage
def main():
    """Render the top-50 stock CSV data as a MIDI file."""
    quotes = load_csv('../data/stock-top50-last90-2017-04-10.csv')
    composition = Song('test', quotes)
    composition.generate()
    composition.write('../out/test.mid')
def add_to_list(self, title, artist, year, is_required):
    """Append a new Song built from the given fields to the song list."""
    # NOTE(review): the stored flag is always 'y'; the is_required
    # argument is not consulted here — confirm that is intentional.
    self.list_songs.append(Song(title, artist, year, 'y'))
def setBackgroundMusic(self):
    # Start this screen's track at the player's current music volume from
    # position 0; -1 presumably loops indefinitely (pygame convention —
    # confirm against Song.play).  The return value is kept, apparently
    # as a playback handle/timestamp — verify its meaning in Song.play.
    self.__previousSongTime = Song(PacaneleScreen.SONG_PATH).play(
        self.__musicPlayer.musicVolume, -1, 0)
def setUp(self):
    """Create the Song fixture shared by the tests in this case."""
    fixture = ("Hells Bells", "ACDC", "Back in Black", 5, 312, 320)
    self.test_song = Song(*fixture)
def setUp(self):
    """Create the Song fixture used by every test in this case."""
    fixture = ("Desert rose", "Sting", "Brand New Day", 0.0, "4:45", 8)
    self.song = Song(*fixture)
# One more venue, plus relationship wiring for objects created earlier
# in this script (cities, shows, bands are defined above this chunk).
brooklyn_bowl = Venue(name="Brooklyn Bowl", capacity=600)
# Attach each venue to its city.
nassau_coliseum.city = uniondale
springfield_creamery.city = veneta
msg.city = nyc
barton_hall.city = ithaca
brooklyn_bowl.city = nyc
# Attach each show to its venue.
phish_11_28_03.venue = nassau_coliseum
phish_12_31_95.venue = msg
dead_5_8_77.venue = barton_hall
dead_venetta.venue = springfield_creamery
dead_1_7_79.venue = msg
# Songs — the same Song object may appear on multiple shows.
mikes = Song(name="Mike's Song")
brown_eyed_women = Song(name="Brown Eyed Women")
scarlet_begonias = Song(name="Scarlet Begonias")
fire = Song(name="Fire On the Mountain")
phish_11_28_03.songs.append(mikes)
phish_12_31_95.songs.append(mikes)
dead_5_8_77.songs.extend([brown_eyed_women, scarlet_begonias, fire])
# Annotate one show/song association row directly.
# NOTE(review): filter_by(id=4) assumes a specific insertion order of
# ShowSong rows — fragile if the fixtures above change; confirm.
cornell_scarlet = session.query(ShowSong).filter_by(id=4).one()
cornell_scarlet.length = 675
cornell_scarlet.notes = "famous Cornell Scarlet->Fire"
# Cascading saves: adding the shows/bands persists related objects too.
session.add_all(
    [phish_11_28_03, phish_12_31_95, dead_5_8_77, dead_venetta, dead_1_7_79])
session.add_all([phish, dead, my_hs_garage_band])
def test_add_songs(self):
    """add_songs() appends every song from the given iterable."""
    batch = [Song(), Song("Pretty Fly")]
    self.pl1.add_songs(batch)
    self.assertEqual(len(self.pl1.songs), 2)