def run(self):
    # Worker-thread main loop (Python 2): consume (image_url, dump_path, attempt)
    # jobs from the shared queue until the stop event is set.
    while not self.stoprequest.isSet():
        try:
            # Block for at most 50 ms so the loop re-checks stoprequest
            # promptly instead of hanging on an empty queue.
            image_url, dump_path, attempt = self._queue.get(True, 0.05)
        except Queue.Empty:
            continue
        try:
            # Hand the job to the downloader with the attempt counter bumped;
            # NOTE(review): presumably download_image re-enqueues on transient
            # failure up to self.max_retries — confirm in Downloader.
            Downloader.download_image(self._queue, image_url, dump_path, attempt + 1, self.max_retries)
        except exceptions.DownloadError as e:
            print e.msg
            # Record the failed (url, path) pair for later reporting/retry.
            self.downloader.fail_list.append((e.url, e.path))
if LIVE_DOWNLOADS: results = dl.download_ram(url) soup = BeautifulSoup(results, 'html.parser') else: #enable the following line to initially download the #info file. Then comment out for testing on local file #results = dl.download_file(url, ab.id + '.html') soup = BeautifulSoup(open(ab.id + '.html'), "html.parser") grs = GoodreadsScraper(soup) ab.update_from_goodreads(grs) img_name = ab.id + '.' + ab.image_url[-3:] print 'Downloading image file' if LIVE_DOWNLOADS: url = ab.image_url results = dl.download_image(url, img_name) print 'Writing ID3 to file' ab.write_to_id3(af) ab.read_from_id3(af) ab.date_added = datetime.today() ab.local_url = BASE_URL + ab.id + '.mp3' ab.local_image_url = BASE_URL + 'images/{}'.format(img_name) print 'Adding book to Database' db = MYDB('audiobooks.db') db.insert_record('books', ab.__dict__) print "Moving files" move_file(af_path, os.path.join(BOOK_DEST, ab.id + '.mp3')) move_file(img_name, os.path.join(IMG_DEST, img_name)) else: