def parse(self, url, songs):
    soup = self.get(url)
    if not soup:
        return
    # Each anchor's text has the form "Title - Artist".
    for t in soup.find(id="content").select("a"):
        title, artist = t.text.split(' - ')[:2]
        song = Song(artist=artist, title=title)
        songs[hash(song)] = song
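# The parse methods in this section rely on a self.get helper that is not
# shown here. A minimal sketch, assuming it only fetches the page and returns
# a BeautifulSoup tree (or None on failure); the class name Scraper and the
# timeout are illustrative, not the project's actual code.
import logging

import requests
from bs4 import BeautifulSoup

logger = logging.getLogger(__name__)


class Scraper(object):
    def get(self, url):
        # Fetch the page and parse it; callers treat a None return as "skip".
        try:
            response = requests.get(url, timeout=10)
            response.raise_for_status()
            return BeautifulSoup(response.text, "html.parser")
        except requests.RequestException as e:
            logger.error("Cannot fetch %(url)s, %(e)s" % locals())
            return None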
def test_all(self):
    song = Song("Artist", "Title")
    self.assertEqual(song, Song("Artist", "Title"))
    # Assumes a deterministic Song.__hash__ (e.g. a fixed PYTHONHASHSEED).
    self.assertEqual(hash(song), -7098157806416884659)
    self.assertNotEqual(song, Song("Artist", ""))
    self.assertNotEqual(song, 1)
    self.assertEqual(song.tmp_filename(""), "Artist - Title.avi")
    self.assertEqual(song.tmp_filename("/tmp"), "/tmp/Artist - Title.avi")
    self.assertEqual(song.tmp_filename("/tmp/"), "/tmp/Artist - Title.avi")
    self.assertEqual(song.filename(""), "Artist - Title.mp3")
    self.assertEqual(song.filename("/tmp"), "/tmp/Artist - Title.mp3")
    self.assertEqual(song.filename("/tmp/"), "/tmp/Artist - Title.mp3")
    directory = os.path.join(os.path.dirname(__file__), "tmp")
    self.assertIsNone(create_directory(directory))
    self.assertIsNone(remove_and_create_directory(directory))
    os_query("rm -rf %(directory)s" % locals())
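# test_all exercises the Song value object. A minimal sketch of what it
# assumes, reconstructed from the assertions above: equality and hashing on
# (artist, title) plus filename helpers. This is illustrative, not the
# project's actual implementation.
import os


class Song(object):
    def __init__(self, artist, title):
        self.artist = artist
        self.title = title

    def __eq__(self, other):
        return (isinstance(other, Song)
                and (self.artist, self.title) == (other.artist, other.title))

    def __hash__(self):
        # The hard-coded hash in test_all only holds with deterministic
        # string hashing (e.g. a fixed PYTHONHASHSEED).
        return hash((self.artist, self.title))

    def tmp_filename(self, directory):
        # Temporary download, e.g. "Artist - Title.avi".
        return os.path.join(directory, "%s - %s.avi" % (self.artist, self.title))

    def filename(self, directory):
        # Final audio file, e.g. "Artist - Title.mp3".
        return os.path.join(directory, "%s - %s.mp3" % (self.artist, self.title))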
def test_fip(self):
    scraper = FipScraper()
    songs = scraper.parse(
        "http://www.fipradio.fr/archives-antenne?start_date=2015-03-05&start_hour=1"
    )
    self.assertEqual(len(songs), 14)
    self.assertEqual(songs[5], Song("Brigitte", "Embrassez Vous"))
    songs = scraper.scrap(self.ts_beg, self.ts_end)
    self.assertEqual(len(songs), 41)
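# test_fip covers both a single archive page (parse) and a time range (scrap).
# A plausible sketch of scrap, assuming epoch-second timestamps and one archive
# page per hour: the URL template comes from the test above, everything else is
# an assumption and may differ from the project's actual implementation.
import datetime


class FipScraper(Scraper):
    ARCHIVE_URL = ("http://www.fipradio.fr/archives-antenne"
                   "?start_date=%(date)s&start_hour=%(hour)d")

    def scrap(self, ts_beg, ts_end):
        # parse(url) is the FIP parse method shown later in this section,
        # which returns a list of Song objects.
        songs = []
        current = datetime.datetime.fromtimestamp(ts_beg)
        end = datetime.datetime.fromtimestamp(ts_end)
        while current <= end:
            url = self.ARCHIVE_URL % {"date": current.strftime("%Y-%m-%d"),
                                      "hour": current.hour}
            songs.extend(self.parse(url))
            current += datetime.timedelta(hours=1)
        return songs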
def parse(self, url, songs):
    soup = self.get(url)
    if not soup:
        return
    for t in soup.select('div#cest-quoi-ce-titre-results li'):
        try:
            key = t.select(".date")[0].string
            songs[key] = Song(artist=t.select(".artist")[0].string.title(),
                              title=t.select(".title")[0].string.title())
        except Exception as e:
            logger.error("Cannot parse %(t)s, %(e)s" % locals())
def parse(self, url, songs):
    soup = self.get(url)
    if not soup:
        return
    for t in soup.select('div.resultat'):
        try:
            # The part of the div's class name after the underscore is used as
            # the key; artist and title may be bare text or wrapped in a link.
            songs[t['class'][0].split('_')[1]] = Song(
                artist=(t.h2.string or t.h2.a.string).strip(),
                title=(t.h3.string or t.h3.a.string).strip())
        except Exception as e:
            logger.error("Cannot parse %(t)s, %(e)s" % locals())
def parse(self, url):
    # The endpoint returns JSON whose second element embeds an HTML fragment.
    soup = BeautifulSoup(requests.get(url).json()[1]['data'])
    songs = []
    if not soup:
        return songs
    for div in soup.select("div.son"):
        titles = div.select("p.titre_title")
        artists = div.select("p.titre_artiste")
        if titles and artists and titles[0].string != 'FIP ACTUALITE':
            songs.append(Song(title=titles[0].string.title(),
                              artist=artists[0].string.title()))
    return songs
def test_nostalgie(self):
    scraper = NostalgieScraper()
    songs = scraper.scrap(self.ts_beg, self.ts_end)
    self.assertEqual(len(songs), 47)
    self.assertEqual(songs[0], Song("Roger Glover", "Love Is All"))
def test_oui(self):
    scraper = OuiScraper()
    songs = scraper.scrap(self.ts_beg, self.ts_end)
    self.assertEqual(len(songs), 32)
    self.assertEqual(songs[0], Song("Every Time I Die", "The New Black"))
def test_nova(self):
    scraper = NovaScraper()
    songs = scraper.scrap(self.ts_beg, self.ts_end)
    self.assertEqual(len(songs), 45)
    self.assertEqual(songs[0], Song("Sporto Kantes", "Holiday"))