def get_image_url(origin_url):
    """Resolve the URL of the largest available rendition of a Flickr photo.

    Looks up the photo id behind *origin_url*, asks the Flickr getSizes API
    for every available size, and returns the source URL of the widest one.
    """
    photo_id = FlickrDownloader.get_photo_id(origin_url)
    sizes_call = (
        "https://api.flickr.com/services/rest/?method=flickr.photos.getSizes"
        "&api_key=%s&photo_id=%s&format=json&nojsoncallback=1"
        % (Util.unxor(HASH, API_KEY), photo_id)
    )
    available = Util.fetch_json(sizes_call)["sizes"]["size"]
    # Pick the entry with the greatest pixel width ("width" arrives as a string).
    widest = max(available, key=lambda entry: int(entry["width"]))
    return widest["source"]
def on_image_set_as_wallpaper(self, img, meta):
    """Report an Unsplash download the first time an image becomes wallpaper.

    Unsplash requires hitting the photo's download-location endpoint when a
    photo is actually used; once reported, a flag is persisted in the image
    metadata so the call is made at most once per image.
    """
    extra = meta.get("extraData", None)
    if not extra:
        return

    location = extra.get("unsplashDownloadLocation")
    already_reported = extra.get("unsplashDownloadReported")
    if location and not already_reported:
        report_url = "{}?client_id={}".format(
            location,
            Util.unxor(UnsplashDownloader.HASH, UnsplashDownloader.API_KEY),
        )
        Util.fetch(report_url)
        # extra aliases meta["extraData"], so this mutates the stored metadata too.
        extra["unsplashDownloadReported"] = True
        Util.write_metadata(img, meta)
def count_results(self):
    """Return the total number of Flickr photos matching self.params.

    Raises Exception when Flickr answers with a non-ok status.
    """
    url = (
        "https://api.flickr.com/services/rest/?method=flickr.photos.search"
        "&api_key=%s&per_page=20&tag_mode=all&format=json&nojsoncallback=1"
        % Util.unxor(HASH, API_KEY)
    )
    # NOTE(review): values are appended without URL-encoding — presumably the
    # caller stores them pre-quoted in self.params; verify at the call site.
    for name, value in self.params.items():
        url = url + "&" + name + "=" + value

    response = FlickrDownloader.fetch(url)
    if response["stat"] != "ok":
        raise Exception("Flickr returned error message: " + response["message"])
    return int(response["photos"]["total"])
def get_extra_metadata(origin_url):
    """Fetch headline, description, author info and tags for a Flickr photo.

    Calls the flickr.photos.getInfo API for the photo behind *origin_url*
    and maps the response into the downloader's metadata dict shape.
    """
    photo_id = FlickrDownloader.get_photo_id(origin_url)
    info_call = (
        "https://api.flickr.com/services/rest/?method=flickr.photos.getInfo"
        "&api_key=%s&photo_id=%s&format=json&nojsoncallback=1"
        % (Util.unxor(HASH, API_KEY), photo_id)
    )
    photo = Util.fetch_json(info_call)["photo"]
    return {
        "headline": photo["title"]["_content"],
        "description": photo["description"]["_content"],
        "author": photo["owner"]["realname"],
        "authorURL": "https://www.flickr.com/photos/%s" % photo["owner"]["nsid"],
        "keywords": [tag["_content"] for tag in photo["tags"]["tag"]],
    }
def fill_queue(self):
    """Build a fresh download queue from a random page of the Flickr search.

    Runs the search once to learn the page count, picks a random page,
    re-fetches it with size/metadata extras, and collects photos preferring
    larger sizes. At most half of a large page is kept so revisiting the
    same page later still yields new images.
    """
    queue = []

    search_url = (
        "https://api.flickr.com/services/rest/?method=flickr.photos.search"
        "&api_key=%s&per_page=500&tag_mode=all&format=json&nojsoncallback=1"
        % Util.unxor(HASH, API_KEY)
    )
    for name, value in self.params.items():
        search_url = search_url + "&" + name + "=" + value

    resp = FlickrDownloader.fetch(search_url)
    if resp["stat"] != "ok":
        raise Exception("Flickr returned error message: " + resp["message"])

    pages = int(resp["photos"]["pages"])
    if pages < 1:
        # NOTE(review): returns None here rather than an empty list — confirm
        # that callers of fill_queue tolerate a None result.
        return

    page = random.randint(1, pages)
    logger.info(lambda: "%d pages in the search results, using page %d" % (pages, page))

    page_url = (
        search_url
        + "&extras=owner_name,description,tags,o_dims,url_o,url_k,url_h,url_l&page="
        + str(page)
    )
    resp = FlickrDownloader.fetch(page_url)
    if resp["stat"] != "ok":
        raise Exception("Flickr returned error message: " + resp["message"])

    used = set(entry[0] for entry in queue)
    # Walk size suffixes largest-first; stop once the queue is big enough.
    for suffix in ("o", "k", "h", "l"):
        self.process_photos_in_response(queue, resp, suffix, used)
        if len(queue) > 20:
            break

    random.shuffle(queue)
    if len(queue) >= 20:
        # only use randomly half the images from the page -
        # if we ever hit that same page again, we'll still have what to download
        queue = queue[: len(queue) // 2]

    return queue
def obtain_userid(url):
    """Resolve the Flickr user_id behind a profile/photostream URL.

    Returns a (success, message, user_id) triple; user_id is None when the
    lookup fails or an exception occurs.
    """
    try:
        logger.info(lambda: "Fetching flickr user_id from URL: " + url)
        lookup_call = (
            "https://api.flickr.com/services/rest/?method=flickr.urls.lookupUser"
            "&api_key=%s&url=%s&format=json&nojsoncallback=1"
            % (Util.unxor(HASH, API_KEY), urllib.parse.quote_plus(url))
        )
        resp = FlickrDownloader.fetch(lookup_call)
        if resp["stat"] != "ok":
            logger.info(lambda: "Oops " + resp["message"])
            return False, resp["message"], None
        logger.info(lambda: "Found " + resp["user"]["id"])
        return True, "ok", resp["user"]["id"]
    except Exception:
        logger.exception(lambda: "Exception while checking Flickr user")
        return (
            False,
            "Exception while checking user. Please run with -v and check log.",
            None,
        )
def get_unsplash_api_url(self):
    """Compose the Unsplash random-photos API URL for this downloader.

    Requests 30 random photos, appending the landscape-orientation filter
    only when the user has enabled landscape-only images.
    """
    client_id = Util.unxor(UnsplashDownloader.HASH, UnsplashDownloader.API_KEY)
    if self.get_variety().options.use_landscape_enabled:
        orientation = "&orientation=landscape"
    else:
        orientation = ""
    return "https://api.unsplash.com/photos/random?count=30&client_id={}{}".format(
        client_id, orientation
    )