def NewzNabPlus(book=None, host=None, api_key=None, searchType=None):
    #logger.info('[NewzNabPlus] Searching term [%s] for author [%s] and title [%s] on host [%s] for a [%s] item' % (book['searchterm'], book['authorName'], book['bookName'], host, searchType))
    logger.info('[NewzNabPlus] searchType [%s] with Host [%s] using api [%s] for item [%s]' % (searchType, host, api_key, str(book)))

    results = []

    params = ReturnSearchTypeStructure(api_key, book, searchType)

    if not str(host)[:4] == "http":
        host = 'http://' + host

    URL = host + '/api?' + urllib.urlencode(params)

    try:
        request = urllib2.Request(URL)
        if lazylibrarian.PROXY_HOST:
            request.set_proxy(lazylibrarian.PROXY_HOST, lazylibrarian.PROXY_TYPE)
        request.add_header('User-Agent', USER_AGENT)
        # Cache responses and throttle requests to the provider
        opener = urllib2.build_opener(SimpleCache.CacheHandler(".ProviderCache"),
                                      SimpleCache.ThrottlingProcessor(5))
        resp = opener.open(request)

        try:
            data = ElementTree.parse(resp)
        except (urllib2.URLError, IOError, EOFError), e:
            logger.warn('Error fetching data from %s: %s' % (host, e))
            data = None

    except Exception, e:
        # Typically a 403 or other HTTP error from the provider
        logger.error("Error opening url %s: %s" % (URL, e))
        data = None
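# The request/cache/throttle boilerplate above recurs in every provider and
# Goodreads call below. A minimal sketch of how it could be factored into a
# shared helper (hypothetical refactor, not part of the codebase; it assumes
# the module-level imports and globals already present here):
def _cached_request(url, cache_dir=".ProviderCache", throttle_secs=5):
    # Build a urllib2 request honouring the configured proxy and user agent,
    # then open it through SimpleCache so responses are cached and throttled.
    request = urllib2.Request(url)
    if lazylibrarian.PROXY_HOST:
        request.set_proxy(lazylibrarian.PROXY_HOST, lazylibrarian.PROXY_TYPE)
    request.add_header('User-Agent', USER_AGENT)
    opener = urllib2.build_opener(SimpleCache.CacheHandler(cache_dir),
                                  SimpleCache.ThrottlingProcessor(throttle_secs))
    return opener.open(request)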
def get_author_books(self, authorid=None, authorname=None, refresh=False):
    api_hits = 0
    URL = 'http://www.goodreads.com/author/list/' + authorid + '.xml?' + urllib.urlencode(self.params)

    # Author is loading
    myDB = database.DBConnection()
    controlValueDict = {"AuthorID": authorid}
    newValueDict = {"Status": "Loading"}
    myDB.upsert("authors", newValueDict, controlValueDict)

    try:
        # Cache our request
        request = urllib2.Request(URL)
        if lazylibrarian.PROXY_HOST:
            request.set_proxy(lazylibrarian.PROXY_HOST, lazylibrarian.PROXY_TYPE)
        request.add_header('User-Agent', USER_AGENT)
        opener = urllib2.build_opener(SimpleCache.CacheHandler(".AuthorCache"),
                                      SimpleCache.ThrottlingProcessor(5))
        resp = opener.open(request)
        api_hits += 1
        sourcexml = ElementTree.parse(resp)
    except Exception, e:
        logger.error("Error fetching author info: " + str(e))
def NewzNab(book=None, newznabNumber=None):

    if newznabNumber == "1":
        HOST = lazylibrarian.NEWZNAB_HOST
        logger.info('Searching for %s at: %s' % (book['searchterm'], lazylibrarian.NEWZNAB_HOST))
    if newznabNumber == "2":
        HOST = lazylibrarian.NEWZNAB_HOST2
        logger.info('Searching for %s at: %s' % (book['searchterm'], lazylibrarian.NEWZNAB_HOST2))

    results = []

    if lazylibrarian.EBOOK_TYPE is None:
        params = {
            "t": "book",
            "apikey": lazylibrarian.NEWZNAB_API,
            #"cat": 7020,
            "author": book['searchterm']
        }
    else:
        params = {
            "t": "search",
            "apikey": lazylibrarian.NEWZNAB_API,
            "cat": 7020,
            "q": book['searchterm'],
            "extended": 1,
        }

    if not str(HOST)[:4] == "http":
        HOST = 'http://' + HOST

    URL = HOST + '/api?' + urllib.urlencode(params)

    try:
        request = urllib2.Request(URL)
        if lazylibrarian.PROXY_HOST:
            request.set_proxy(lazylibrarian.PROXY_HOST, lazylibrarian.PROXY_TYPE)
        request.add_header('User-Agent', common.USER_AGENT)
        opener = urllib2.build_opener(SimpleCache.CacheHandler(".ProviderCache"),
                                      SimpleCache.ThrottlingProcessor(5))
        resp = opener.open(request)

        try:
            data = ElementTree.parse(resp)
        except (urllib2.URLError, IOError, EOFError), e:
            logger.warn('Error fetching data from %s: %s' % (lazylibrarian.NEWZNAB_HOST, e))
            data = None
    except Exception, e:
        # Typically a 403 or other HTTP error from the provider
        logger.error("Error opening url %s: %s" % (URL, e))
        data = None
def get_author_books(self, authorid=None):
    URL = 'http://www.goodreads.com/author/list/' + authorid + '.xml?' + urllib.urlencode(self.params)

    try:
        # Cache our request
        request = urllib2.Request(URL)
        opener = urllib2.build_opener(SimpleCache.CacheHandler(".AuthorCache"),
                                      SimpleCache.ThrottlingProcessor(5))
        resp = opener.open(request)
        sourcexml = ElementTree.parse(resp)
    except Exception, e:
        logger.error("Error fetching author info: " + str(e))
def find_author_id(self):
    # The author name belongs in the URL path, escaped with urllib.quote;
    # urllib.urlencode expects a key/value mapping and breaks on a bare string
    URL = 'http://www.goodreads.com/api/author_url/' + urllib.quote(self.name) + '?' + urllib.urlencode(self.params)
    logger.debug("Searching for author with name: %s" % self.name)

    # Cache our request
    request = urllib2.Request(URL)
    opener = urllib2.build_opener(SimpleCache.CacheHandler(".AuthorCache"),
                                  SimpleCache.ThrottlingProcessor(5))
    resp = opener.open(request)

    try:
        sourcexml = ElementTree.parse(resp)
    except Exception, e:
        logger.error("Error fetching authorid: " + str(e))
def find_book(self, bookid=None, queue=None):
    threading.currentThread().name = "GR-ADD-BOOK"
    myDB = database.DBConnection()

    URL = 'https://www.goodreads.com/book/show/' + bookid + '?' + urllib.urlencode(self.params)

    try:
        # Cache our request
        request = urllib2.Request(URL)
        if lazylibrarian.PROXY_HOST:
            request.set_proxy(lazylibrarian.PROXY_HOST, lazylibrarian.PROXY_TYPE)
        request.add_header('User-Agent', USER_AGENT)
        opener = urllib2.build_opener(SimpleCache.CacheHandler(".AuthorCache"),
                                      SimpleCache.ThrottlingProcessor(5))
        resp = opener.open(request)
        sourcexml = ElementTree.parse(resp)
    except Exception, e:
        logger.error("Error fetching book info: " + str(e))
def find_author_id(self):
    URL = 'http://www.goodreads.com/api/author_url/' + urllib.quote(self.name) + '?' + urllib.urlencode(self.params)
    logger.debug("Searching for author with name: %s" % self.name)

    # Cache our request
    request = urllib2.Request(URL)
    if lazylibrarian.PROXY_HOST:
        request.set_proxy(lazylibrarian.PROXY_HOST, lazylibrarian.PROXY_TYPE)
    request.add_header('User-Agent', USER_AGENT)
    opener = urllib2.build_opener(SimpleCache.CacheHandler(".AuthorCache"),
                                  SimpleCache.ThrottlingProcessor(5))
    resp = opener.open(request)

    try:
        sourcexml = ElementTree.parse(resp)
    except Exception, e:
        logger.error("Error fetching authorid: " + str(e) + " from " + str(URL))
def get_author_info(self, authorid=None, authorname=None, refresh=False):
    URL = 'http://www.goodreads.com/author/show/' + authorid + '.xml?' + urllib.urlencode(self.params)

    # Cache our request
    request = urllib2.Request(URL)
    opener = urllib2.build_opener(SimpleCache.CacheHandler(".AuthorCache"),
                                  SimpleCache.ThrottlingProcessor(5))
    resp = opener.open(request)

    try:
        sourcexml = ElementTree.parse(resp)
        rootxml = sourcexml.getroot()
        resultxml = rootxml.find('author')
        author_dict = {}
    except Exception, e:
        logger.error("Error fetching author ID: " + str(e))
def search_tor_book(books=None, mags=None):
    if not lazylibrarian.USE_TOR:
        return
    # rename this thread
    threading.currentThread().name = "SEARCHTORBOOKS"
    myDB = database.DBConnection()
    searchlist = []
    searchlist1 = []

    if books is None:
        # We are performing a backlog search
        searchbooks = myDB.select(
            'SELECT BookID, AuthorName, Bookname from books WHERE Status="Wanted"')

        # Clear cache
        if os.path.exists(".ProviderCache"):
            for f in os.listdir(".ProviderCache"):
                os.unlink("%s/%s" % (".ProviderCache", f))

        # Clear throttling timeouts
        t = SimpleCache.ThrottlingProcessor()
        t.lastRequestTime.clear()
    else:
        # The user has added a new book
        searchbooks = []
        if books != False:
            for book in books:
                searchbook = myDB.select(
                    'SELECT BookID, AuthorName, BookName from books WHERE BookID=? AND Status="Wanted"',
                    [book['bookid']])
                for terms in searchbook:
                    searchbooks.append(terms)

    for searchbook in searchbooks:
        bookid = searchbook[0]
        author = searchbook[1]
        book = searchbook[2]

        dic = {'...': '', '.': ' ', ' & ': ' ', ' = ': ' ', '?': '', '$': 's',
               ' + ': ' ', '"': '', ',': '', '*': '', ':': '', ';': ''}
        dicSearchFormatting = {'.': ' +', ' + ': ' '}

        author = formatter.latinToAscii(formatter.replace_all(author, dic))
        book = formatter.latinToAscii(formatter.replace_all(book, dic))

        # TRY SEARCH TERM just using author name and book type
        author = formatter.latinToAscii(formatter.replace_all(author, dicSearchFormatting))
        searchterm = author + ' ' + book  # + ' ' + lazylibrarian.EBOOK_TYPE
        searchterm = re.sub('[\.\-\/]', ' ', searchterm).encode('utf-8')
        searchterm = re.sub(r'\(.*?\)', '', searchterm).encode('utf-8')
        searchterm = re.sub(r"\s\s+", " ", searchterm)  # strip any double white space
        searchlist.append({
            "bookid": bookid,
            "bookName": searchbook[2],
            "authorName": searchbook[1],
            "searchterm": searchterm.strip()
        })

    if not lazylibrarian.KAT:
        logger.info('No download method is set, use SABnzbd or blackhole')

    counter = 0
    for book in searchlist:
        #print book.keys()
        resultlist = providers.IterateOverTorrentSites(book, 'book')

        # if you can't find the book specifically, you might find it under a general search
        if not resultlist:
            logger.info("Searching for type book failed to find any books...moving to general search")
            resultlist = providers.IterateOverTorrentSites(book, 'general')

        if not resultlist:
            logger.debug("Adding book %s to queue." % book['searchterm'])
        else:
            dictrepl = {'...': '', '.': ' ', ' & ': ' ', ' = ': ' ', '?': '', '$': 's',
                        ' + ': ' ', '"': '', ',': '', '*': '', '(': '', ')': '', '[': '',
                        ']': '', '#': '', '0': '', '1': '', '2': '', '3': '', '4': '',
                        '5': '', '6': '', '7': '', '8': '', '9': '', '\'': '', ':': '',
                        '!': '', '-': '', '\s\s': ' ', ' the ': ' ', ' a ': ' ',
                        ' and ': ' ', ' to ': ' ', ' of ': ' ', ' for ': ' ', ' my ': ' ',
                        ' in ': ' ', ' at ': ' ', ' with ': ' '}
            logger.debug(u'searchterm %s' % book['searchterm'])
            addedCounter = 0

            for tor in resultlist:
                tor_Title = formatter.latinToAscii(
                    formatter.replace_all(str(tor['tor_title']).lower(), dictrepl)).strip()
                tor_Title = re.sub(r"\s\s+", " ", tor_Title)  # remove extra whitespace
                logger.debug(u'torName %s' % tor_Title)

                match_ratio = int(lazylibrarian.MATCH_RATIO)
                tor_Title_match = fuzz.token_sort_ratio(book['searchterm'].lower(), tor_Title)
                logger.debug("Torrent Title Match %: " + str(tor_Title_match))

                if tor_Title_match > match_ratio:
                    logger.info(u'Found Torrent: %s' % tor['tor_title'])
                    addedCounter += 1
                    bookid = book['bookid']
                    tor_Title = (book["authorName"] + ' - ' + book['bookName'] +
                                 ' LL.(' + book['bookid'] + ')').strip()
                    tor_url = tor['tor_url']
                    tor_prov = tor['tor_prov']

                    tor_size_temp = tor['tor_size']  # need to cater for when this is None (Issue 35)
                    if tor_size_temp is None:
                        tor_size_temp = 1000
                    tor_size = str(round(float(tor_size_temp) / 1048576, 2)) + ' MB'

                    controlValueDict = {"NZBurl": tor_url}
                    newValueDict = {
                        "NZBprov": tor_prov,
                        "BookID": bookid,
                        "NZBsize": tor_size,
                        "NZBtitle": tor_Title,
                        "Status": "Skipped"
                    }
                    myDB.upsert("wanted", newValueDict, controlValueDict)

                    snatchedbooks = myDB.action(
                        'SELECT * from books WHERE BookID=? and Status="Snatched"',
                        [bookid]).fetchone()
                    if not snatchedbooks:
                        snatch = DownloadMethod(bookid, tor_prov, tor_Title, tor_url)
                        notifiers.notify_snatch(tor_Title + ' at ' + formatter.now())
                    break

            if addedCounter == 0:
                logger.info("No torrents found for " +
                            (book["authorName"] + ' ' + book['bookName']).strip() +
                            ". Adding book to queue.")
        counter += 1

    # if not books or books==False:
    #     snatched = searchmag.searchmagazines(mags)
    #     for items in snatched:
    #         snatch = DownloadMethod(items['bookid'], items['tor_prov'], items['tor_title'], items['tor_url'])
    #         notifiers.notify_snatch(items['tor_title']+' at '+formatter.now())

    logger.info("Search for Wanted items complete")
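# fuzz.token_sort_ratio (from the fuzzywuzzy library) sorts the tokens of both
# strings before comparing, so word order in a torrent name does not lower the
# score - "Author Title" and "Title Author" compare equal. A minimal sketch of
# the same check in isolation (illustrative only; the 80 default mirrors how
# lazylibrarian.MATCH_RATIO is used above but is not the configured value):
from fuzzywuzzy import fuzz

def title_matches(searchterm, tor_title, match_ratio=80):
    # Returns True when the order-insensitive token match beats the threshold
    return fuzz.token_sort_ratio(searchterm.lower(), tor_title.lower()) > match_ratio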
def find_results(self, authorname=None, queue=None):
    threading.currentThread().name = "GR-SEARCH"
    resultlist = []
    api_hits = 0
    url = urllib.quote_plus(authorname.encode('utf-8'))
    set_url = 'http://www.goodreads.com/search.xml?q=' + url + '&' + urllib.urlencode(self.params)
    logger.info('Now searching GoodReads API with keyword: ' + authorname)
    logger.debug('Searching for %s at: %s' % (authorname, set_url))

    try:
        try:
            # Cache our request
            request = urllib2.Request(set_url)
            if lazylibrarian.PROXY_HOST:
                request.set_proxy(lazylibrarian.PROXY_HOST, lazylibrarian.PROXY_TYPE)
            request.add_header('User-Agent', USER_AGENT)
            opener = urllib2.build_opener(SimpleCache.CacheHandler(".AuthorCache"),
                                          SimpleCache.ThrottlingProcessor(5))
            resp = opener.open(request)
            api_hits += 1
            sourcexml = ElementTree.parse(resp)
        except Exception, e:
            logger.error("Error finding results: " + str(e))

        rootxml = sourcexml.getroot()
        resultxml = rootxml.getiterator('work')
        author_dict = []
        resultcount = 0
        for author in resultxml:
            bookdate = "0001-01-01"
            if author.find('original_publication_year').text is None:
                bookdate = "0000"
            else:
                bookdate = author.find('original_publication_year').text
            authorNameResult = author.find('./best_book/author/name').text
            booksub = ""
            bookpub = ""
            booklang = "en"

            try:
                bookimg = author.find('./best_book/image_url').text
                if bookimg == 'http://www.goodreads.com/assets/nocover/111x148.png':
                    bookimg = 'images/nocover.png'
            except (KeyError, AttributeError):
                bookimg = 'images/nocover.png'

            try:
                bookrate = author.find('average_rating').text
            except KeyError:
                bookrate = 0

            bookpages = '0'
            bookgenre = ''
            bookdesc = ''
            bookisbn = ''
            booklink = 'http://www.goodreads.com/book/show/' + author.find('./best_book/id').text

            if author.find('./best_book/title').text is None:
                bookTitle = ""
            else:
                bookTitle = author.find('./best_book/title').text

            author_fuzz = fuzz.ratio(authorNameResult.lower(), authorname.lower())
            book_fuzz = fuzz.ratio(bookTitle.lower(), authorname.lower())
            try:
                isbn_check = int(authorname[:-1])
                if (len(str(isbn_check)) == 9) or (len(str(isbn_check)) == 12):
                    isbn_fuzz = 100
                else:
                    isbn_fuzz = 0
            except ValueError:
                isbn_fuzz = 0
            highest_fuzz = max(author_fuzz, book_fuzz, isbn_fuzz)

            resultlist.append({
                'authorname': author.find('./best_book/author/name').text,
                'bookid': author.find('./best_book/id').text,
                'authorid': author.find('./best_book/author/id').text,
                'bookname': bookTitle.encode("ascii", "ignore"),
                'booksub': booksub,
                'bookisbn': bookisbn,
                'bookpub': bookpub,
                'bookdate': bookdate,
                'booklang': booklang,
                'booklink': booklink,
                'bookrate': float(bookrate),
                'bookimg': bookimg,
                'bookpages': bookpages,
                'bookgenre': bookgenre,
                'bookdesc': bookdesc,
                'author_fuzz': author_fuzz,
                'book_fuzz': book_fuzz,
                'isbn_fuzz': isbn_fuzz,
                'highest_fuzz': highest_fuzz,
                'num_reviews': float(bookrate)
            })
            resultcount += 1
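# The isbn_fuzz heuristic above drops the last character of the search term
# (the ISBN check digit, which can be an 'X' in ISBN-10) and tests whether the
# remainder is a 9- or 12-digit integer, i.e. whether the user searched for an
# ISBN-10/ISBN-13 rather than an author name. Illustrative behaviour with
# hypothetical inputs:
#   "123456789X"    -> int("123456789") has 9 digits  -> isbn_fuzz = 100
#   "9781234567897" -> int("978123456789") has 12 digits -> isbn_fuzz = 100
#   "0306406152"    -> int("030640615") == 30640615 (8 digits) -> check fails;
#                      the len(str(int(...))) approach loses leading zeros.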
try:
    time.sleep(1)  # sleep 1 second to respect goodreads api terms
    if book.find('isbn13').text is not None:
        BOOK_URL = 'http://www.goodreads.com/book/isbn?isbn=' + \
            book.find('isbn13').text + '&' + urllib.urlencode(self.params)
        logger.debug(u"Book URL: " + str(BOOK_URL))

        try:
            # Cache our request
            request = urllib2.Request(BOOK_URL)
            if lazylibrarian.PROXY_HOST:
                request.set_proxy(lazylibrarian.PROXY_HOST, lazylibrarian.PROXY_TYPE)
            request.add_header('User-Agent', USER_AGENT)
            opener = urllib2.build_opener(SimpleCache.CacheHandler(".AuthorCache"),
                                          SimpleCache.ThrottlingProcessor(5))
            resp = opener.open(request)
        except Exception, e:
            logger.error("Error finding results: " + str(e))

        BOOK_sourcexml = ElementTree.parse(resp)
        BOOK_rootxml = BOOK_sourcexml.getroot()
        bookLanguage = BOOK_rootxml.find('./book/language_code').text
        logger.debug(u"language: " + str(bookLanguage))
    else:
        logger.debug("No ISBN provided, skipping")
        continue
except Exception, e:
def initialize():

    with INIT_LOCK:

        global __INITIALIZED__, FULL_PATH, PROG_DIR, LOGLEVEL, DAEMON, DATADIR, CONFIGFILE, CFG, LOGDIR, \
            HTTP_HOST, HTTP_PORT, HTTP_USER, HTTP_PASS, HTTP_ROOT, HTTP_LOOK, LAUNCH_BROWSER, CACHEDIR, \
            IMP_ONLYISBN, IMP_PREFLANG, SAB_HOST, SAB_PORT, SAB_API, SAB_USER, SAB_PASS, DESTINATION_DIR, \
            DESTINATION_COPY, DOWNLOAD_DIR, SAB_CAT, USENET_RETENTION, BLACKHOLE, BLACKHOLEDIR, GR_API, \
            NZBMATRIX, NZBMATRIX_USER, NZBMATRIX_API, NEWZNAB, NEWZNAB_HOST, NEWZNAB_API, NEWZBIN, \
            NEWZBIN_UID, NEWZBIN_PASS, NEWZNAB2, NEWZNAB_HOST2, NEWZNAB_API2, EBOOK_TYPE

        if __INITIALIZED__:
            return False

        CheckSection('General')
        CheckSection('SABnzbd')

        try:
            HTTP_PORT = check_setting_int(CFG, 'General', 'http_port', 8082)
        except:
            HTTP_PORT = 8082

        if HTTP_PORT < 21 or HTTP_PORT > 65535:
            HTTP_PORT = 8082

        HTTP_HOST = check_setting_str(CFG, 'General', 'http_host', '0.0.0.0')
        HTTP_USER = check_setting_str(CFG, 'General', 'http_user', '')
        HTTP_PASS = check_setting_str(CFG, 'General', 'http_pass', '')
        HTTP_ROOT = check_setting_str(CFG, 'General', 'http_root', '')
        HTTP_LOOK = check_setting_str(CFG, 'General', 'http_look', 'default')
        LAUNCH_BROWSER = bool(check_setting_int(CFG, 'General', 'launch_browser', 1))

        LOGDIR = check_setting_str(CFG, 'General', 'logdir', '')

        IMP_PREFLANG = check_setting_str(CFG, 'General', 'imp_preflang', IMP_PREFLANG)
        IMP_ONLYISBN = bool(check_setting_int(CFG, 'General', 'imp_onlyisbn', 0))

        SAB_HOST = check_setting_str(CFG, 'SABnzbd', 'sab_host', '')
        SAB_PORT = check_setting_str(CFG, 'SABnzbd', 'sab_port', '')
        SAB_USER = check_setting_str(CFG, 'SABnzbd', 'sab_user', '')
        SAB_PASS = check_setting_str(CFG, 'SABnzbd', 'sab_pass', '')
        SAB_API = check_setting_str(CFG, 'SABnzbd', 'sab_api', '')
        SAB_CAT = check_setting_str(CFG, 'SABnzbd', 'sab_cat', '')

        DESTINATION_COPY = bool(check_setting_int(CFG, 'General', 'destination_copy', 0))
        DESTINATION_DIR = check_setting_str(CFG, 'General', 'destination_dir', '')
        DOWNLOAD_DIR = check_setting_str(CFG, 'General', 'download_dir', '')
        BLACKHOLE = bool(check_setting_int(CFG, 'General', 'blackhole', 0))
        BLACKHOLEDIR = check_setting_str(CFG, 'General', 'blackholedir', '')
        USENET_RETENTION = check_setting_str(CFG, 'General', 'usenet_retention', '')

        NZBMATRIX = bool(check_setting_int(CFG, 'NZBMatrix', 'nzbmatrix', 0))
        NZBMATRIX_USER = check_setting_str(CFG, 'NZBMatrix', 'nzbmatrix_user', '')
        NZBMATRIX_API = check_setting_str(CFG, 'NZBMatrix', 'nzbmatrix_api', '')

        NEWZNAB = bool(check_setting_int(CFG, 'Newznab', 'newznab', 0))
        NEWZNAB_HOST = check_setting_str(CFG, 'Newznab', 'newznab_host', '')
        NEWZNAB_API = check_setting_str(CFG, 'Newznab', 'newznab_api', '')
        NEWZNAB2 = bool(check_setting_int(CFG, 'Newznab2', 'newznab2', 0))
        NEWZNAB_HOST2 = check_setting_str(CFG, 'Newznab2', 'newznab_host2', '')
        NEWZNAB_API2 = check_setting_str(CFG, 'Newznab2', 'newznab_api2', '')

        NEWZBIN = bool(check_setting_int(CFG, 'Newzbin', 'newzbin', 0))
        NEWZBIN_UID = check_setting_str(CFG, 'Newzbin', 'newzbin_uid', '')
        NEWZBIN_PASS = check_setting_str(CFG, 'Newzbin', 'newzbin_pass', '')

        EBOOK_TYPE = check_setting_str(CFG, 'General', 'ebook_type', 'epub')
        GR_API = check_setting_str(CFG, 'General', 'gr_api', 'ckvsiSDsuqh7omh74ZZ6Q')

        if not LOGDIR:
            LOGDIR = os.path.join(DATADIR, 'Logs')

        # Put the cache dir in the data dir for now
        CACHEDIR = os.path.join(DATADIR, 'cache')
        if not os.path.exists(CACHEDIR):
            try:
                os.makedirs(CACHEDIR)
            except OSError:
                logger.error('Could not create cachedir. Check permissions of: ' + DATADIR)
        # Create logdir
        if not os.path.exists(LOGDIR):
            try:
                os.makedirs(LOGDIR)
            except OSError:
                if LOGLEVEL:
                    print LOGDIR + ":"
                    print ' Unable to create folder for logs. Only logging to console.'

        # Start the logger, silence console logging if we need to
        logger.lazylibrarian_log.initLogger(loglevel=LOGLEVEL)

        # Clear cache
        if os.path.exists(".ProviderCache"):
            for f in os.listdir(".ProviderCache"):
                os.unlink("%s/%s" % (".ProviderCache", f))

        # Clear throttling timeouts
        t = SimpleCache.ThrottlingProcessor()
        t.lastRequestTime.clear()

        # Initialize the database
        try:
            dbcheck()
        except Exception, e:
            logger.error("Can't connect to the database: %s" % e)

        __INITIALIZED__ = True
        return True
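# initialize() pulls its settings from CFG via the check_setting_* helpers, so
# a minimal config file covering the sections read above might look like the
# sketch below (assuming an INI-style backend; values are illustrative, and
# only General and SABnzbd are pre-created by CheckSection):
#
#   [General]
#   http_port = 8082
#   http_host = 0.0.0.0
#   launch_browser = 1
#   ebook_type = epub
#
#   [SABnzbd]
#   sab_host = localhost
#   sab_port = 8080
#
#   [Newznab]
#   newznab = 1
#   newznab_host = <indexer url>
#   newznab_api = <api key>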
try:
    time.sleep(1)  # sleep 1 second to respect goodreads api terms
    if book.find('isbn13').text is not None:
        BOOK_URL = 'http://www.goodreads.com/book/isbn?isbn=' + \
            book.find('isbn13').text + '&' + urllib.urlencode(self.params)
        logger.debug(u"Book URL: " + str(BOOK_URL))

        try:
            # Cache our request
            request = urllib2.Request(BOOK_URL)
            opener = urllib2.build_opener(SimpleCache.CacheHandler(".AuthorCache"),
                                          SimpleCache.ThrottlingProcessor(5))
            resp = opener.open(request)
        except Exception, e:
            logger.error("Error finding results: " + str(e))

        BOOK_sourcexml = ElementTree.parse(resp)
        BOOK_rootxml = BOOK_sourcexml.getroot()
        bookLanguage = BOOK_rootxml.find('./book/language_code').text
        logger.debug(u"language: " + str(bookLanguage))
    else:
        logger.debug("No ISBN provided, skipping")
        continue
def find_results(self, authorname=None):
    resultlist = []
    logger.info(authorname)
    url = urllib.quote_plus(authorname.encode('utf-8'))
    set_url = 'http://www.goodreads.com/search.xml?q=' + url + '&' + urllib.urlencode(self.params)
    logger.info('Searching for author at: %s' % set_url)

    try:
        try:
            # Cache our request
            request = urllib2.Request(set_url)
            opener = urllib2.build_opener(SimpleCache.CacheHandler(".AuthorCache"),
                                          SimpleCache.ThrottlingProcessor(5))
            resp = opener.open(request)
            sourcexml = ElementTree.parse(resp)
        except Exception, e:
            logger.error("Error finding results: " + str(e))

        rootxml = sourcexml.getroot()
        resultxml = rootxml.getiterator('work')
        author_dict = []
        resultcount = 0
        for author in resultxml:
            bookdate = "0001-01-01"
            if author.find('original_publication_year').text is None:
                bookdate = "0000"
            else:
                bookdate = author.find('original_publication_year').text
            authorNameResult = author.find('./best_book/author/name').text
            booksub = ""
            bookpub = ""
            booklang = "en"

            try:
                bookimg = author.find('./best_book/image_url').text
                if bookimg == 'http://www.goodreads.com/assets/nocover/111x148.png':
                    bookimg = 'images/nocover.png'
            except (KeyError, AttributeError):
                bookimg = 'images/nocover.png'

            try:
                bookrate = author.find('average_rating').text
            except KeyError:
                bookrate = 0

            bookpages = '0'
            bookgenre = ''
            bookdesc = 'Not available'
            bookisbn = author.find('./best_book/id').text

            if author.find('./best_book/title').text is None:
                bookTitle = ""
            else:
                bookTitle = author.find('./best_book/title').text

            resultlist.append({
                'authorname': author.find('./best_book/author/name').text,
                'bookid': author.find('./best_book/id').text,
                'authorid': author.find('./best_book/author/id').text,
                'bookname': bookTitle.encode("ascii", "ignore"),
                'booksub': booksub,
                'bookisbn': bookisbn,
                'bookpub': bookpub,
                'bookdate': bookdate,
                'booklang': booklang,
                'booklink': '/',
                'bookrate': float(bookrate),
                'bookimg': bookimg,
                'bookpages': bookpages,
                'bookgenre': bookgenre,
                'bookdesc': bookdesc
            })
            resultcount += 1