Exemplo n.º 1
0
    def search_lyrics(self, query, max_pages=1):
        """Search for *query* and gather lyrics from every kind of hit.

        Song hits are narrowed to the exact track number encoded in the
        URL fragment; artist hits are expanded one album page at a time;
        album hits are parsed wholesale. Results are accumulated into a
        single dict via ``merge_dict``.
        """
        def fetch_soup(page_url):
            # Download a page and return its parsed document tree.
            response = requests.get(page_url)
            return BeautifulSoup(response.text, 'html.parser')

        results = self.search(query, max_pages)
        lyrics = {}

        # Direct song matches: the "#<n>" fragment identifies which
        # track on the album page is the one we want.
        for entry in results["songs"]:
            url, num = results["songs"][entry]["url"].split("#")
            bucket = self._parse_lyrics(fetch_soup(url))
            for song in bucket:
                if bucket[song]["song_number"] == num:
                    lyrics[song] = bucket[song]

        # Artist matches: walk the artist page, then fetch each album's
        # lyrics page (stripping the "#1" track fragment from the URL).
        for entry in results["artists"]:
            bucket = self._parse_artist(fetch_soup(results["artists"][entry]["url"]))
            for album in bucket:
                album_url = bucket[album][0]["url"].replace("#1", "")
                merge_dict(lyrics, self._parse_lyrics(fetch_soup(album_url)))

        # Album matches: the result URL is already a lyrics page.
        for entry in results["albums"]:
            merge_dict(lyrics, self._parse_lyrics(fetch_soup(results["albums"][entry]["url"])))

        return lyrics
Exemplo n.º 2
0
 def search_discography(self, query, max_pages=1):
     """Search for *query* and assemble a discography from the results.

     Every result URL that is not a song page ("/lyrics/") is fetched
     and parsed with ``_parse_artist``; the parsed albums are merged
     into one dict via ``merge_dict``.

     Returns the merged discography dict.
     """
     data = self.search(query, max_pages)
     disco = {}
     for category in data:
         for entry in data[category]:
             url = data[category][entry]["url"]
             # Song pages carry no discography data. Check BEFORE
             # fetching: the original downloaded every page and then
             # silently discarded the ones it never used.
             if "/lyrics/" in url:
                 continue
             r = requests.get(url)
             soup = BeautifulSoup(r.text, 'html.parser')
             merge_dict(disco, self._parse_artist(soup))
     return disco
Exemplo n.º 3
0
 def search(self, query, max_pages=1):
     """Query the site's search endpoint and return the parsed results.

     Fetches the first results page, then up to ``max_pages`` pages in
     total (``max_pages == -1`` means "all available pages"), merging
     each page's entries into one dict with ``merge_dict``.

     Returns the merged results dict with the internal "num_pages" key
     removed.
     """
     r = requests.get(self.search_url, {"q": query})
     soup = BeautifulSoup(r.text, 'html.parser')
     data = self._parse_search(soup)
     # -1 is the sentinel for "fetch every page".
     if max_pages == -1:
         max_pages = data["num_pages"]
     # Fetch pages 2..min(max_pages, num_pages). The original only
     # tested `p > max_pages` AFTER downloading and merging page p, so
     # it always fetched one page more than requested; capping the
     # range up front fixes that off-by-one.
     last_page = min(max_pages, data["num_pages"])
     for p in range(2, last_page + 1):
         r = requests.get(self.search_url, {"q": query, "p": p})
         soup = BeautifulSoup(r.text, 'html.parser')
         bucket = self._parse_search(soup)
         bucket.pop("num_pages")
         merge_dict(data, bucket)
     data.pop("num_pages")
     return data