Example #1
 @classmethod
 def setUpClass(cls):
     # Download and parse the Goodreads page for book 68428 once,
     # then share the soup and the scraper with every test in the class.
     id = '68428'
     d = Downloader()
     url = d.goodreads_url(id)
     results = d.download_ram(url)
     cls.soup = BeautifulSoup(results, "html.parser")
     cls.grs = GoodreadsScraper(cls.soup)
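The Downloader class itself does not appear on this page. Below is a minimal sketch of an interface consistent with how the examples call it; the method names and signatures come from the examples, while the URL formats and the urllib-based implementation are assumptions.

 import urllib.parse
 import urllib.request


 class Downloader(object):
     """Hypothetical sketch of the Downloader used in these examples."""

     def goodreads_url(self, book_id):
         # Direct Goodreads book page for a known numeric ID (assumed URL pattern).
         return 'https://www.goodreads.com/book/show/{}'.format(book_id)

     def goodreads_id_query(self, query):
         # Google search restricted to Goodreads, used to discover the ID (assumed URL pattern).
         q = urllib.parse.quote_plus('site:goodreads.com ' + query)
         return 'https://www.google.com/search?q={}'.format(q)

     def download_ram(self, url):
         # Fetch the page and return its HTML as text, keeping it in memory only.
         with urllib.request.urlopen(url) as response:
             return response.read().decode('utf-8', errors='replace')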
Example #2
 def test_id_from_downloads(self):
     # Query Google for the title and check the Goodreads ID scraped from the results.
     query = 'Well of Ascension'
     d = Downloader()
     url = d.goodreads_id_query(query)
     results = d.download_ram(url)
     soup = BeautifulSoup(results, 'html.parser')
     gs = GoogleScraper(soup)
     self.assertEqual(gs.id, '68429')
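The GoogleScraper class is likewise not shown on this page. Here is a minimal sketch of how such a scraper could pull the Goodreads ID out of an already-parsed Google results page, assuming result links of the form goodreads.com/book/show/<id>; only the class name and the id attribute come from the examples, the parsing logic is an assumption.

 import re


 class GoogleScraper(object):
     """Hypothetical sketch: extract a Goodreads book ID from Google results."""

     # Goodreads book pages look like .../book/show/68429.The_Well_of_Ascension
     _ID_PATTERN = re.compile(r'goodreads\.com/book/show/(\d+)')

     def __init__(self, soup):
         self.soup = soup
         self.id = self._find_id()

     def _find_id(self):
         # Scan every link on the results page and return the first Goodreads book ID found.
         for a in self.soup.find_all('a', href=True):
             match = self._ID_PATTERN.search(a['href'])
             if match:
                 return match.group(1)
         return None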
Example #3
 DOWNLOADS = 'downloads'
 LIVE_DOWNLOADS = True  # set to True after testing; keep False while testing to avoid live downloads
 for direc in get_subfolders(DOWNLOADS):
     d = os.path.join(os.getcwd(), DOWNLOADS, direc)
     print()
     ab = Audiobook(d)
     print('Started Processing: {}'.format(ab.title))
     files = get_files(d)
     if len(files) == 1 and '.mp3' in files[0]:
         af_path = os.path.join(d, files[0])
         af = Audiofile(af_path)
         dl = Downloader()
         url = dl.goodreads_id_query(ab.title)
         print('Getting Goodreads ID from Google')
         if LIVE_DOWNLOADS:
             results = dl.download_ram(url)
             soup = BeautifulSoup(results, 'html.parser')
             gs = GoogleScraper(soup)
             ab.id = gs.id
         else:
             ab.id = '18007564'  # ID for The Martian
         print('ID: {}'.format(ab.id))
         url = dl.goodreads_url(ab.id)
         print('Downloading info from Goodreads')
         if LIVE_DOWNLOADS:
             results = dl.download_ram(url)
             soup = BeautifulSoup(results, 'html.parser')
         else:
             # Enable the following line to download the info file once,
             # then comment it out again to test against the local file.
             # results = dl.download_file(url, ab.id + '.html')
             pass
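The get_subfolders and get_files helpers this script leans on are not shown on this page. A minimal sketch of what they could look like is below, assuming they simply list directories and files with os; the names come from the example, the bodies are an assumption.

 import os


 def get_subfolders(path):
     # Hypothetical implementation: names of the immediate subdirectories of path.
     return [name for name in os.listdir(path)
             if os.path.isdir(os.path.join(path, name))]


 def get_files(path):
     # Hypothetical implementation: names of the regular files directly inside path.
     return [name for name in os.listdir(path)
             if os.path.isfile(os.path.join(path, name))]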
Example #4
 def test_download_ram(self):
     # download_ram should return the raw results page with the expected Goodreads ID in it.
     query = 'Well of Ascension'
     d = Downloader()
     url = d.goodreads_id_query(query)
     results = d.download_ram(url)
     self.assertTrue('68429' in results)
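The snippets above are individual methods lifted out of their test classes. For context, here is a sketch of the kind of runnable unittest module they would sit in; the import path and class name are hypothetical placeholders, not the project's real names.

 import unittest

 # Hypothetical import path; substitute the project's real module name.
 from audiobook_scraper import Downloader


 class DownloaderTests(unittest.TestCase):  # hypothetical class name

     def test_download_ram(self):
         # Same check as Example #4, wrapped in a module that unittest can run.
         query = 'Well of Ascension'
         d = Downloader()
         url = d.goodreads_id_query(query)
         results = d.download_ram(url)
         self.assertTrue('68429' in results)


 if __name__ == '__main__':
     unittest.main()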