def fetchVolumeData( self, series_id ):

    # before we search online, look in our cache, since we might already
    # have this info
    cvc = ComicVineCacher( )
    cached_volume_result = cvc.get_volume_info( series_id )

    if cached_volume_result is not None:
        return cached_volume_result

    volume_url = self.api_base_url + "/volume/" + CVTypeID.Volume + "-" + str(series_id) + "/?api_key=" + self.api_key + "&field_list=name,id,start_year,publisher,count_of_issues&format=json"

    content = self.getUrlContent(volume_url)
    cv_response = json.loads(content)

    if cv_response[ 'status_code' ] != 1:
        print >> sys.stderr, "Comic Vine query failed with error: [{0}]. ".format( cv_response[ 'error' ] )
        return None

    volume_results = cv_response['results']

    cvc.add_volume_info( volume_results )

    return volume_results
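
# Illustrative usage sketch (not part of the original module): reading the
# volume fields requested by the field_list in fetchVolumeData above.
# 'talker' stands in for an instance of the class these methods belong to,
# and the series id is a made-up placeholder.
def _volume_summary(talker, series_id=12345):
    volume = talker.fetchVolumeData(series_id)
    if volume is None:
        return None
    # the field_list above requests name, id, start_year, publisher, count_of_issues
    return "{0} ({1}) - {2} issues".format(
        volume['name'], volume['start_year'], volume['count_of_issues'])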
def fetchCachedAlternateCoverURLs(self, issue_id):

    # before we search online, look in our cache, since we might already
    # have this info
    cvc = ComicVineCacher()
    url_list = cvc.get_alt_covers(issue_id)

    # the cacher returns None on a miss, so just pass its result through
    return url_list
def fetchIssuesByVolume( self, series_id ):

    # before we search online, look in our cache, since we might already
    # have this info
    cvc = ComicVineCacher( )
    cached_volume_issues_result = cvc.get_volume_issues_info( series_id )

    if cached_volume_issues_result is not None:
        return cached_volume_issues_result

    issues_url = self.api_base_url + "/issues/" + "?api_key=" + self.api_key + "&filter=volume:" + str(series_id) + "&field_list=id,volume,issue_number,name,image,cover_date,site_detail_url,description&format=json"

    content = self.getUrlContent(issues_url)
    cv_response = json.loads(content)

    if cv_response[ 'status_code' ] != 1:
        print >> sys.stderr, "Comic Vine query failed with error: [{0}]. ".format( cv_response[ 'error' ] )
        return None

    limit = cv_response['limit']
    current_result_count = cv_response['number_of_page_results']
    total_result_count = cv_response['number_of_total_results']

    volume_issues_result = cv_response['results']
    page = 1
    offset = 0

    # see if we need to keep asking for more pages...
    while ( current_result_count < total_result_count ):
        page += 1
        offset += cv_response['number_of_page_results']

        content = self.getUrlContent(issues_url + "&offset=" + str(offset))
        cv_response = json.loads(content)

        if cv_response[ 'status_code' ] != 1:
            self.writeLog( "Comic Vine query failed with error: [{0}]. \n".format( cv_response[ 'error' ] ))
            return None

        volume_issues_result.extend( cv_response['results'])
        current_result_count += cv_response['number_of_page_results']

    self.repairUrls( volume_issues_result )

    cvc.add_volume_issues_info( series_id, volume_issues_result )

    return volume_issues_result
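
# Illustrative sketch (not part of the original module): the offset-based
# pagination used in fetchIssuesByVolume above, shown in isolation. It
# assumes a fetch_page(url) helper that returns the decoded Comic Vine JSON
# response; the field names ('number_of_page_results',
# 'number_of_total_results', 'results') come from the responses handled above.
def _collect_all_pages(fetch_page, base_url):
    response = fetch_page(base_url)
    results = response['results']
    fetched = response['number_of_page_results']
    total = response['number_of_total_results']
    offset = 0

    # keep asking for more pages until we have every result
    while fetched < total:
        offset += response['number_of_page_results']
        response = fetch_page(base_url + "&offset=" + str(offset))
        results.extend(response['results'])
        fetched += response['number_of_page_results']

    return results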
def fetchVolumeData( self, series_id ):

    # before we search online, look in our cache, since we might already
    # have this info
    cvc = ComicVineCacher( )
    cached_volume_result = cvc.get_volume_info( series_id )

    if cached_volume_result is not None:
        return cached_volume_result

    volume_url = self.api_base_url + "/volume/" + CVTypeID.Volume + "-" + str(series_id) + "/?api_key=" + self.api_key + "&field_list=name,id,start_year,publisher,count_of_issues&format=json"

    cv_response = self.getCVContent(volume_url)

    volume_results = cv_response['results']

    cvc.add_volume_info( volume_results )

    return volume_results
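
# Hedged sketch (assumption, not the original code): getCVContent is called
# above and in searchForSeries but is not defined in this excerpt. The older
# variants inline the same steps (getUrlContent, json.loads, status_code
# check), so it plausibly centralizes them like this. Since the callers use
# the return value without checking for None, the sketch raises on failure;
# the real method may use its own exception type or handle errors differently.
def getCVContent(self, url):
    # fetch the raw body and decode the Comic Vine JSON envelope
    content = self.getUrlContent(url)
    cv_response = json.loads(content)

    # Comic Vine reports success with status_code == 1
    if cv_response['status_code'] != 1:
        self.writeLog( "Comic Vine query failed with error: [{0}]. \n".format( cv_response['error'] ))
        raise RuntimeError("Comic Vine query failed: " + str(cv_response['error']))

    return cv_response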
def cacheAlternateCoverURLs(self, issue_id, url_list):
    cvc = ComicVineCacher()
    cvc.add_alt_covers(issue_id, url_list)
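
# Illustrative usage sketch (not part of the original module): the
# alternate-cover helpers form a simple write-then-read cache round trip.
# 'talker' stands in for an instance of the class these methods belong to,
# and the issue id and URLs are made-up placeholders.
def _demo_alt_cover_cache(talker):
    talker.cacheAlternateCoverURLs(12345, ["http://example.com/alt-cover-1.jpg",
                                           "http://example.com/alt-cover-2.jpg"])
    # returns the cached list for a known id, or None if nothing was cached
    return talker.fetchCachedAlternateCoverURLs(12345)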
def cacheIssueSelectDetails( self, issue_id, image_url, thumb_url, cover_date, page_url):
    cvc = ComicVineCacher()
    cvc.add_issue_select_details( issue_id, image_url, thumb_url, cover_date, page_url)
def fetchCachedIssueSelectDetails(self, issue_id):

    # before we search online, look in our cache, since we might already
    # have this info
    cvc = ComicVineCacher()
    return cvc.get_issue_select_details(issue_id)
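
# Illustrative sketch (not part of the original module): the usual
# check-cache-first pairing of the two issue-select-details helpers above.
# It assumes a cache miss is reported as None, and 'fetch_details_online' is
# a hypothetical stand-in for whatever code actually queries Comic Vine for
# these fields.
def _issue_select_details_cached(talker, issue_id, fetch_details_online):
    details = talker.fetchCachedIssueSelectDetails(issue_id)
    if details is not None:
        return details

    # cache miss: fetch online, then store the result for next time
    image_url, thumb_url, cover_date, page_url = fetch_details_online(issue_id)
    talker.cacheIssueSelectDetails(issue_id, image_url, thumb_url, cover_date, page_url)
    return (image_url, thumb_url, cover_date, page_url)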
def searchForSeries(self, series_name, callback=None, refresh_cache=False):

    # remove cruft from the search string
    series_name = utils.removearticles(series_name).lower().strip()

    # before we search online, look in our cache, since we might have
    # done this same search recently
    cvc = ComicVineCacher()
    if not refresh_cache:
        cached_search_results = cvc.get_search_results(series_name)

        if len(cached_search_results) > 0:
            return cached_search_results

    original_series_name = series_name

    # We need to make the series name into an "AND"ed query list
    query_word_list = series_name.split()
    and_list = ['AND'] * (len(query_word_list) - 1)
    and_list.append('')
    # zipper up the two lists
    query_list = zip(query_word_list, and_list)
    # flatten the list
    query_list = [item for sublist in query_list for item in sublist]
    # convert back to a string
    query_string = " ".join(query_list).strip()

    query_string = urllib.quote_plus(query_string.encode("utf-8"))

    search_url = self.api_base_url + "/search/?api_key=" + self.api_key + "&format=json&resources=volume&query=" + \
        query_string + \
        "&field_list=name,id,start_year,publisher,image,description,count_of_issues"

    cv_response = self.getCVContent(search_url + "&page=1")

    search_results = list()

    # see http://api.comicvine.com/documentation/#handling_responses
    limit = cv_response['limit']
    current_result_count = cv_response['number_of_page_results']
    total_result_count = cv_response['number_of_total_results']

    if callback is None:
        self.writeLog(
            "Found {0} of {1} results\n".format(
                cv_response['number_of_page_results'],
                cv_response['number_of_total_results']))
    search_results.extend(cv_response['results'])
    page = 1

    if callback is not None:
        callback(current_result_count, total_result_count)

    # see if we need to keep asking for more pages...
    while (current_result_count < total_result_count):
        if callback is None:
            self.writeLog(
                "getting another page of results {0} of {1}...\n".format(
                    current_result_count,
                    total_result_count))
        page += 1

        cv_response = self.getCVContent(search_url + "&page=" + str(page))

        search_results.extend(cv_response['results'])
        current_result_count += cv_response['number_of_page_results']
        if callback is not None:
            callback(current_result_count, total_result_count)

    # cache these search results
    cvc.add_search_results(original_series_name, search_results)

    return search_results
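
# Worked example (illustrative, not part of the original module): the
# "AND"ed query construction used in searchForSeries above, pulled out so the
# transformation is easy to see on a sample search string.
def _build_and_query(series_name):
    query_word_list = series_name.split()
    and_list = ['AND'] * (len(query_word_list) - 1)
    and_list.append('')
    # interleave the words with the ANDs, then flatten back into a string
    query_list = zip(query_word_list, and_list)
    query_list = [item for sublist in query_list for item in sublist]
    return " ".join(query_list).strip()

# _build_and_query("amazing spider man") -> "amazing AND spider AND man"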
def clearCache(self):
    ImageFetcher().clearCache()
    ComicVineCacher().clearCache()
    QtGui.QMessageBox.information(self, self.name, "Cache has been cleared.")
def searchForSeries( self, series_name, callback=None, refresh_cache=False ):

    # remove cruft from the search string
    series_name = utils.removearticles( series_name ).lower().strip()

    # before we search online, look in our cache, since we might have
    # done this same search recently
    cvc = ComicVineCacher( )
    if not refresh_cache:
        cached_search_results = cvc.get_search_results( series_name )

        if len(cached_search_results) > 0:
            return cached_search_results

    original_series_name = series_name

    series_name = urllib.quote_plus(series_name.encode("utf-8"))

    search_url = self.api_base_url + "/search/?api_key=" + self.api_key + "&format=json&resources=volume&query=" + series_name + "&field_list=name,id,start_year,publisher,image,description,count_of_issues"

    content = self.getUrlContent(search_url + "&page=1")
    cv_response = json.loads(content)

    if cv_response[ 'status_code' ] != 1:
        self.writeLog( "Comic Vine query failed with error: [{0}]. \n".format( cv_response[ 'error' ] ))
        return None

    search_results = list()

    # see http://api.comicvine.com/documentation/#handling_responses
    limit = cv_response['limit']
    current_result_count = cv_response['number_of_page_results']
    total_result_count = cv_response['number_of_total_results']

    if callback is None:
        self.writeLog( "Found {0} of {1} results\n".format( cv_response['number_of_page_results'], cv_response['number_of_total_results']))
    search_results.extend( cv_response['results'])
    page = 1

    if callback is not None:
        callback( current_result_count, total_result_count )

    # see if we need to keep asking for more pages...
    while ( current_result_count < total_result_count ):
        if callback is None:
            self.writeLog("getting another page of results {0} of {1}...\n".format( current_result_count, total_result_count))
        page += 1

        content = self.getUrlContent(search_url + "&page=" + str(page))
        cv_response = json.loads(content)

        if cv_response[ 'status_code' ] != 1:
            self.writeLog( "Comic Vine query failed with error: [{0}]. \n".format( cv_response[ 'error' ] ))
            return None

        search_results.extend( cv_response['results'])
        current_result_count += cv_response['number_of_page_results']

        if callback is not None:
            callback( current_result_count, total_result_count )

    # cache these search results
    cvc.add_search_results( original_series_name, search_results )

    return search_results