def clearCache(self):
    """Purge both the image-fetcher and Comic Vine caches, then tell the user."""
    for cache in (ImageFetcher(), ComicVineCacher()):
        cache.clearCache()
    QtGui.QMessageBox.information(
        self, self.name, "Cache has been cleared.")
def cacheAlternateCoverURLs(self, issue_id, url_list):
    """Store alternate-cover URLs for *issue_id* in the local Comic Vine cache."""
    ComicVineCacher().add_alt_covers(issue_id, url_list)
def fetchCachedIssueSelectDetails(self, issue_id):
    """Return issue-select details for *issue_id* from the local cache.

    Checked before any online lookup, since the info may already be here.
    """
    return ComicVineCacher().get_issue_select_details(issue_id)
def cacheIssueSelectDetails(self, issue_id, image_url, thumb_url, cover_date, page_url):
    """Persist issue-select details for *issue_id* in the local Comic Vine cache."""
    ComicVineCacher().add_issue_select_details(
        issue_id, image_url, thumb_url, cover_date, page_url)
def searchForSeries(self, series_name, callback=None, refresh_cache=False):
    """Search Comic Vine for volumes matching *series_name*.

    The search term is normalized (leading articles removed, lowercased,
    stripped) and looked up in the local cache first; a cached hit is
    returned immediately unless *refresh_cache* is True.  Otherwise all
    result pages are fetched from the API, cached under the normalized
    term, and returned as a list of result dicts.

    *callback*, when given, is called as callback(current, total) after
    each page instead of writing progress to the log.
    """
    # Remove cruft from the search string before cache lookup and query.
    series_name = utils.removearticles(series_name).lower().strip()

    # Before we search online, look in our cache, since we might have
    # done this same search recently.
    cvc = ComicVineCacher()
    if not refresh_cache:
        cached_search_results = cvc.get_search_results(series_name)
        if len(cached_search_results) > 0:
            return cached_search_results

    original_series_name = series_name

    # Comic Vine expects an "AND"ed query: "word1 AND word2 AND word3".
    # (Equivalent to the old zip/flatten dance, minus the unused cruft.)
    query_string = " AND ".join(series_name.split())
    query_string = urllib.quote_plus(query_string.encode("utf-8"))

    search_url = (self.api_base_url + "/search/?api_key=" + self.api_key +
                  "&format=json&resources=volume&query=" + query_string +
                  "&field_list=name,id,start_year,publisher,image,description,count_of_issues")
    cv_response = self.getCVContent(search_url + "&page=1")

    search_results = list()

    # see http://api.comicvine.com/documentation/#handling_responses
    current_result_count = cv_response['number_of_page_results']
    total_result_count = cv_response['number_of_total_results']

    if callback is None:
        self.writeLog("Found {0} of {1} results\n".format(
            current_result_count, total_result_count))
    search_results.extend(cv_response['results'])
    page = 1

    if callback is not None:
        callback(current_result_count, total_result_count)

    # Keep asking for more pages until we have everything.
    while current_result_count < total_result_count:
        if callback is None:
            self.writeLog(
                "getting another page of results {0} of {1}...\n".format(
                    current_result_count, total_result_count))
        page += 1

        cv_response = self.getCVContent(search_url + "&page=" + str(page))
        page_result_count = cv_response['number_of_page_results']
        if page_result_count == 0:
            # Defensive: an empty page would otherwise loop forever.
            break
        search_results.extend(cv_response['results'])
        current_result_count += page_result_count

        if callback is not None:
            callback(current_result_count, total_result_count)

    # Cache these search results under the normalized term.
    cvc.add_search_results(original_series_name, search_results)

    return search_results