def final():
    """Return the requested song's lyrics filtered down to its top-5 words.

    Reads ``{'name': <song/artist query>}`` from the request JSON body,
    fetches the lyrics, and returns a dict with:
      - 'lyrics': every occurrence (order and duplicates preserved) of the
        song's five most frequent words, and
      - 'title':  the song's full title as reported by the lyrics lookup.
    """
    name = request.get_json()['name']  # song/artist query from the client
    song = getlyrics(name)  # dict with 'lyrics' text and 'full_title'
    text = song.get('lyrics')
    # sortwords() yields the top-5 words; a set gives O(1) membership tests
    # instead of scanning a list for every word in the lyrics.
    top_words = set(sortwords(text))
    # Keep only occurrences of the top words, preserving their order.
    finishedwords = [word for word in text.split() if word in top_words]
    return {'lyrics': finishedwords, 'title': song.get('full_title')}
def populate_lyrics(self, artist, title):
    """Fills self.lyrics with the lyrics from the included lyrics package.

    Tries the lyrics package first; on failure falls back to scraping
    azlyrics.com directly. The wx text control is always updated via
    wx.CallAfter, so this is presumably run off the UI thread — TODO confirm.
    """
    self.SetTitle('Lyrics for %s - %s' % (artist, title))
    # Pessimistic flag: only cleared once lyrics were actually obtained.
    error = True
    try:
        # Primary source: the bundled lyrics package.
        self.url = lyricwikiurl(artist, title)
        l = getlyrics(artist, title)
        error = False
    except (IOError, UnicodeEncodeError):
        # Fallback: build an azlyrics.com URL from normalized artist/title.
        raw_title = self.format_string(title)
        # azlyrics URLs drop spaces, spell out '&', and are lowercase.
        raw_artist = unidecode(artist).replace(' ', '').replace('&', 'and').lower()
        if 'feat' in raw_artist:
            # Strip featured-artist suffixes ("feat...") from the slug.
            raw_artist = raw_artist[:raw_artist.index('feat')]
        raw_artist = self.format_string(raw_artist)
        if raw_artist.startswith('the'):
            # azlyrics omits a leading "The" in artist slugs.
            raw_artist = raw_artist[3:]
        self.url = 'http://www.azlyrics.com/lyrics/%s/%s.html' % (
            raw_artist, raw_title)
        try:
            res = requests.get(self.url)
            if res.status_code != 200:
                l = 'No lyrics found.'
            else:
                # NOTE(review): res.content is bytes on Python 3; the str
                # markers below imply this code targets Python 2 — confirm.
                l = res.content
                # Fixed comment azlyrics places immediately before the lyrics.
                start = '<!-- Usage of azlyrics.com content by any third-party lyrics provider is prohibited by our licensing agreement. Sorry about that. -->'
                end = '</div>'
                if start in l:
                    # Keep only the text between the marker and the closing div.
                    l = l[l.index(start) + len(start):].replace('<br>', '')
                    if end in l:
                        l = l[:l.index(end)]
                    # Strip any remaining HTML tags.
                    l = re.sub(r'\<[^>]+\>', '', l)
                    # Trim leading blank lines left by the markup removal.
                    while l.startswith('\n') or l.startswith('\r'):
                        l = l[1:]
                    error = False
                else:
                    l = 'Error in HTML. Place marker not found.'
        # NOTE(review): requests.adapters.ReadTimeoutError is urllib3's
        # exception re-exported by older requests versions — verify it still
        # exists in the pinned requests release.
        except (requests.exceptions.RequestException,
                requests.adapters.ReadTimeoutError) as e:
            l = 'Could not get lyrics from %s: %s.' % (self.url, str(e))
    if error:
        # No lyrics: clear the control and disable the browse button.
        wx.CallAfter(self.lyrics.SetValue, '')
        wx.CallAfter(self.browse.Disable)
    else:
        wx.CallAfter(self.lyrics.SetValue, 'Lyrics URL: %s\n\n' % self.url)
        wx.CallAfter(self.browse.Enable)
    # l is written in both cases: lyrics on success, error text on failure.
    wx.CallAfter(self.lyrics.write, l)
    wx.CallAfter(self.lyrics.SetInsertionPoint, 0)
def populate_lyrics(self, artist, title):
    """Fills self.lyrics with the lyrics from the included lyrics package.

    Falls back to scraping azlyrics.com when the package lookup fails.
    All widget updates go through wx.CallAfter, which suggests this runs
    on a worker thread — TODO confirm.
    """
    self.SetTitle('Lyrics for %s - %s' % (artist, title))
    # Assume failure until lyrics are successfully retrieved.
    error = True
    try:
        # First attempt: the bundled lyrics package.
        self.url = lyricwikiurl(artist, title)
        l = getlyrics(artist, title)
        error = False
    except (IOError, UnicodeEncodeError):
        # Second attempt: construct an azlyrics.com URL by hand.
        raw_title = self.format_string(title)
        # Normalize to the azlyrics slug style: no spaces, '&' -> 'and', lowercase.
        raw_artist = unidecode(artist).replace(' ', '').replace('&', 'and').lower()
        if 'feat' in raw_artist:
            # Drop any "feat..." featured-artist suffix.
            raw_artist = raw_artist[:raw_artist.index('feat')]
        raw_artist = self.format_string(raw_artist)
        if raw_artist.startswith('the'):
            # azlyrics slugs omit a leading "The".
            raw_artist = raw_artist[3:]
        self.url = 'http://www.azlyrics.com/lyrics/%s/%s.html' % (raw_artist,
                                                                  raw_title)
        try:
            res = requests.get(self.url)
            if res.status_code != 200:
                l = 'No lyrics found.'
            else:
                # NOTE(review): on Python 3 res.content is bytes, so the str
                # substring checks below would raise — looks Python-2 only;
                # verify target interpreter.
                l = res.content
                # Literal comment azlyrics emits right before the lyrics body.
                start = '<!-- Usage of azlyrics.com content by any third-party lyrics provider is prohibited by our licensing agreement. Sorry about that. -->'
                end = '</div>'
                if start in l:
                    # Slice out the lyrics between marker and closing div.
                    l = l[l.index(start) + len(start):].replace('<br>', '')
                    if end in l:
                        l = l[:l.index(end)]
                    # Remove leftover HTML tags.
                    l = re.sub(r'\<[^>]+\>', '', l)
                    # Drop leading newlines left behind by tag removal.
                    while l.startswith('\n') or l.startswith('\r'):
                        l = l[1:]
                    error = False
                else:
                    l = 'Error in HTML. Place marker not found.'
        # NOTE(review): requests.adapters.ReadTimeoutError is a urllib3
        # exception re-exported by older requests releases — confirm it is
        # present in the version this project pins.
        except (requests.exceptions.RequestException,
                requests.adapters.ReadTimeoutError) as e:
            l = 'Could not get lyrics from %s: %s.' % (self.url, str(e))
    if error:
        # Failure: blank the text control and disable browsing to the URL.
        wx.CallAfter(self.lyrics.SetValue, '')
        wx.CallAfter(self.browse.Disable)
    else:
        wx.CallAfter(self.lyrics.SetValue, 'Lyrics URL: %s\n\n' % self.url)
        wx.CallAfter(self.browse.Enable)
    # Written unconditionally: lyrics on success, error message on failure.
    wx.CallAfter(self.lyrics.write, l)
    wx.CallAfter(self.lyrics.SetInsertionPoint, 0)
import lyrics
import csv

# Copy the first 11 rows of MoodyLyrics4Q.csv into a new CSV, appending each
# song's lyrics (fetched via the lyrics module) as a fifth column.
# `with` guarantees both handles are flushed and closed (the output file was
# previously never closed, risking truncated data); newline='' is the
# documented way to open files handed to the csv module.
with open('MoodyLyricsFullSmall4Q.csv', 'w', newline='') as wfile, \
        open('MoodyLyrics4Q.csv', newline='') as rfile:
    csv_writer = csv.writer(wfile, delimiter=",")
    csv_reader = csv.reader(rfile, delimiter=",")
    count = 0
    for row in csv_reader:
        try:
            new_row = [row[0], row[1], row[2], row[3],
                       lyrics.getlyrics(row[1], row[2], False)]
        except Exception:
            # Best-effort: a failed lookup gets a blank lyrics cell instead of
            # aborting the export. (Was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit.)
            new_row = [row[0], row[1], row[2], row[3], ' ']
        csv_writer.writerow(new_row)
        count += 1
        if count == 11:
            break
# ly = lyrics.getlyrics("Eminem", "Stan", False)
# print(ly)
# Scrape pages 1 and 2 of the Billboard christian-songs chart (page 0 /
# soup_page0 and `soups` are set up earlier in the file).
soup_page1 = BeautifulSoup(requests.get("http://www.billboard.com/charts/christian-songs?page=1").text)
soup_page2 = BeautifulSoup(requests.get("http://www.billboard.com/charts/christian-songs?page=2").text)
soups.extend([soup_page0, soup_page1, soup_page2])

# Collect (title, artist) pairs from every chart entry on every page.
songs = []
for soup in soups:
    rawRows = soup.select(".song_review header")
    for row in rawRows:
        title, artist = getRnBSongs(row)
        songs.append((title, artist))

vocabulary = {}  # hashmap of word, number of appearances
# `with` ensures the file is flushed and closed; the original used the
# deprecated py2 `file()` constructor and leaked the handle, which could
# lose buffered output.
with open('output_chr.txt', 'w') as unordered_list:
    for title, artist in songs:
        lyrics = getlyrics(artist, title)
        for word in TextBlob(lyrics).words:
            # Count lemmatized, lowercased forms so inflections collapse.
            normalized = Word(word).lemmatize().lower()
            vocabulary[normalized] = vocabulary.get(normalized, 0) + 1
            unordered_list.write(word.encode('utf8') + '\n')

# Write "word,count" rows ordered by ascending frequency.
sorted_v = sorted(vocabulary.items(), key=operator.itemgetter(1))
with open('output_chr.csv', 'w') as f:
    for word, count in sorted_v:
        f.write(word.encode('utf8') + "," + str(count) + "\n")
stiowords = [x.strip(punctuation) for x in stopwords if len(x) > 2] lyrics_list = {} words_list = {} if not os.path.exists(output_words) or not os.path.exists(output_alllyrics): print("Fetching tracks lyrics...") outfile = open(output_alllyrics, "w") for track_item in tracks: try: artist = str(track_item[0].get_artist().name) title = str(track_item[0].get_title()) key = "{0} - {1}".format(artist, title) except: continue try: lyrics_now = lyrics.getlyrics(artist, title) if lyrics_now != '': lyrics_list[key] = lyrics_now # get words alphanumerics words = [x.lower() for x in lyrics_now.split() if \ re.match('^[\w-]+$', x) is not None] # remove stopwords words = [ x for x in words if len(x) > 2 and (not x in stopwords) ] # count frequencies wcount = {} for w in words: wcount[w] = wcount.get(w, 0) + 1 # write words to file outfile.write(" ".join(words))
import pyttsx3  # text-to-speech package
from lyrics import getlyrics  # refers to other document -> getlyrics function
from sortwords import sortwords

# Initialize the speech engine once, up front.
engine = pyttsx3.init()

# Fetch lyrics for the user's query. (Renamed from `file`, which shadowed
# the builtin of the same name.)
result = getlyrics(input('Enter song and artist: '))
lyrics = result.get('lyrics')
# The five most frequent words in the lyrics.
firstfive = sortwords(lyrics)
print(lyrics.split())
for word in lyrics.split():  # split line into words
    if word in firstfive:
        print(word)  # print top 5 words
        engine.say(word)  # queue the word for text-to-speech
engine.runAndWait()  # speak everything queued above
stiowords = [x.strip(punctuation) for x in stopwords if len(x)>2] lyrics_list = {} words_list = {} if not os.path.exists(output_words) or not os.path.exists(output_alllyrics): print("Fetching tracks lyrics...") outfile = open(output_alllyrics, "w") for track_item in tracks: try: artist = str(track_item[0].get_artist().name) title = str(track_item[0].get_title()) key = "{0} - {1}".format(artist, title) except: continue try: lyrics_now = lyrics.getlyrics(artist, title) if lyrics_now != '': lyrics_list[key] = lyrics_now # get words alphanumerics words = [x.lower() for x in lyrics_now.split() if \ re.match('^[\w-]+$', x) is not None] # remove stopwords words = [x for x in words if len(x)>2 and (not x in stopwords)] # count frequencies wcount = {} for w in words: wcount[w] = wcount.get(w, 0) + 1 # write words to file outfile.write(" ".join(words)) outfile.write("\n") # take top N top = sorted(wcount.iteritems(), key=itemgetter(1), \
requests.get( "http://www.billboard.com/charts/christian-songs?page=2").text) soups.extend([soup_page0, soup_page1, soup_page2]) songs = [] for soup in soups: rawRows = soup.select(".song_review header") for row in rawRows: title, artist = getRnBSongs(row) songs.append((title, artist)) vocabulary = {} #hashmap of word, number of appearances unordered_list = file('output_chr.txt', 'w') for title, artist in songs: lyrics = getlyrics(artist, title) for word in TextBlob(lyrics).words: normalized = Word(word).lemmatize().lower() if normalized not in vocabulary: vocabulary[normalized] = 0 vocabulary[normalized] += 1 unordered_list.write(word.encode('utf8') + '\n') sorted_v = sorted(vocabulary.items(), key=operator.itemgetter(1)) f = file('output_chr.csv', 'w') for word, count in sorted_v: f.write(word.encode('utf8') + "," + str(count) + "\n")