Code Example #1
 def get_image_url(origin_url):
     photo_id = FlickrDownloader.get_photo_id(origin_url)
     call = 'https://api.flickr.com/services/rest/?method=flickr.photos.getSizes&api_key=%s&photo_id=%s&format=json&nojsoncallback=1' % \
            (API_KEY, photo_id)
     resp = Util.fetch_json(call)
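     # pick the largest of the available sizes by pixel width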
     s = max(resp['sizes']['size'], key=lambda size: int(size['width']))
     return s['source']
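
Every example on this page goes through Util.fetch_json. Variety's actual helper is not reproduced here; the following is a minimal sketch of such a wrapper, assuming it is built on requests (the real signature and error handling may differ):

    import requests

    def fetch_json(url, data=None, timeout=20):
        # Hypothetical sketch: POST when form data is supplied, GET otherwise.
        if data is not None:
            response = requests.post(url, data=data, timeout=timeout)
        else:
            response = requests.get(url, timeout=timeout)
        # Surface 4xx/5xx responses as requests.exceptions.HTTPError,
        # which is what Code Example #3 below catches.
        response.raise_for_status()
        return response.json()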
Code Example #2
    def fill_queue(self):
        logger.info(lambda: "Filling Bing queue from " + self.location)

        s = Util.fetch_json(BingDownloader.BING_JSON_URL)
        for item in s['images']:
            try:
                image_url = 'https://www.bing.com' + item['url']
                filename = item['url'].split('/')[-1]
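                # keep the filename up to the locale suffix (the part starting at "_EN")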
                name = filename[0:filename.find('_EN')]
                src_url = 'https://www.bing.com/gallery/#images/%s' % name
                try:
                    date = datetime.strptime(item['startdate'], '%Y%m%d').strftime('%Y-%m-%d')
                except Exception:
                    date = item['startdate']
                extra_metadata = {
                    'sourceType': 'bing',
                    'sfwRating': 100,
                    'headline': 'Bing Photo of the Day, %s' % date,
                    'description': item['copyright'],
                }
                self.queue.append((src_url, image_url, extra_metadata))
            except Exception:
                logger.exception(lambda: "Could not process an item in the Bing json result")

        random.shuffle(self.queue)
        logger.info(lambda: "Bing queue populated with %d URLs" % len(self.queue))
Code Example #3
 def ajax(self, url, data, error_msg_handler):
     try:
         return Util.fetch_json(url, data)
     except requests.exceptions.HTTPError as e:
         logger.exception(lambda: 'HTTPError for ' + url)
         error_msg_handler(_('Oops, server returned error (%s)') % e.response.status_code)
         raise
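
A hypothetical call site for this helper; downloader and show_error are illustrative names, not part of variety:

    def show_error(message):
        # stand-in handler; a real UI would surface the message in a dialog
        print("Error:", message)

    try:
        result = downloader.ajax("https://api.example.com/items", {"q": "test"}, show_error)
    except requests.exceptions.HTTPError:
        result = None  # show_error has already reported the failure to the user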
Code Example #4
    def fill_queue(self):
        logger.info(lambda: "Reddit URL: " + self.config)

        queue = []
        json_url = RedditDownloader.build_json_url(self.config)
        s = Util.fetch_json(json_url)
        for item in s["data"]["children"]:
            try:
                data = item["data"]
                image_url = data["url"]
                if re.match(r"^http(s)?://imgur\.com/\w+$", image_url):
                    image_url = image_url.replace("://", "://i.") + ".jpg"

                if image_url.lower().endswith((".jpg", ".jpeg", ".png")):
                    src_url = "https://www.reddit.com" + data["permalink"]
                    extra_metadata = {"sourceType": "reddit"}
                    if data["over_18"]:
                        extra_metadata["sfwRating"] = 0
                        if self.is_safe_mode_enabled():
                            continue
                    queue.append((src_url, image_url, extra_metadata))
            except Exception:
                logger.exception(lambda: "Could not process an item in the Reddit json result")

        random.shuffle(queue)
        return queue
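
RedditDownloader.build_json_url is not shown on this page. A plausible sketch, assuming it simply requests Reddit's JSON rendering of the configured listing (the real implementation may add sorting or limit parameters):

    def build_json_url(url):
        # Hypothetical: Reddit serves a JSON feed of any listing at <url>/.json
        return url.rstrip("/") + "/.json?limit=100"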
Code Example #5
    def download_queue_item(self, queue_item):
        if not self.api_url:
            return self.legacy_downloader.download_queue_item(queue_item)

        wallpaper_url = queue_item["url"]
        logger.info(lambda: "Wallpaper URL: " + wallpaper_url)

        src_url = queue_item["path"]
        logger.info(lambda: "Image src URL: " + src_url)

        extra_metadata = {}
        try:
            wallpaper_info = Util.fetch_json(
                WALLPAPER_INFO_URL % urllib.parse.quote(queue_item["id"])
            )
            extra_metadata["keywords"] = [tag["name"] for tag in wallpaper_info["data"]["tags"]]
        except Exception:
            pass

        try:
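            # map Wallhaven purity levels onto variety's 0-100 sfwRating scale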
            purity = queue_item["purity"]
            sfw_rating = {"sfw": 100, "sketchy": 50, "nsfw": 0}[purity]
            extra_metadata["sfwRating"] = sfw_rating

            if self.is_safe_mode_enabled() and sfw_rating < 100:
                logger.info(
                    lambda: "Skipping non-safe download from Wallhaven. "
                    "Is the source %s suitable for Safe mode?" % self.config
                )
                return None
        except Exception:
            pass

        return self.save_locally(wallpaper_url, src_url, extra_metadata=extra_metadata)
Code Example #6
    def fill_queue(self):
        queue = []
        s = Util.fetch_json(BingDownloader.BING_JSON_URL)
        for item in s["images"]:
            try:
                if not item["wp"]:
                    # not marked as a wallpaper
                    continue

                image_url = "https://www.bing.com" + item["url"]
                filename = item["url"].split("/")[-1]
                name = filename[0 : filename.find("_EN")]
                src_url = "https://www.bing.com/gallery/#images/%s" % name
                try:
                    date = datetime.strptime(item["startdate"], "%Y%m%d").strftime("%Y-%m-%d")
                except Exception:
                    date = item["startdate"]
                extra_metadata = {
                    "sourceType": "bing",
                    "sfwRating": 100,
                    "headline": "Bing Photo of the Day, %s" % date,
                    "description": item["copyright"],
                }
                queue.append((src_url, image_url, extra_metadata))
            except Exception:
                logger.exception(lambda: "Could not process an item in the Bing json result")

        random.shuffle(queue)
        return queue
Code Example #7
    def fill_queue(self):
        logger.info(lambda: "Reddit URL: " + self.location)

        json_url = RedditDownloader.build_json_url(self.location)
        s = Util.fetch_json(json_url)
        for item in s['data']['children']:
            try:
                data = item['data']
                image_url = data['url']
                if re.match(r'^http(s)?://imgur\.com/\w+$', image_url):
                    image_url = image_url.replace('://', '://i.') + '.jpg'

                if image_url.lower().endswith(('.jpg', '.jpeg', '.png')):
                    src_url = 'https://www.reddit.com' + data['permalink']
                    extra_metadata = {'sourceType': 'reddit'}
                    if data['over_18']:
                        extra_metadata['sfwRating'] = 0
                        if self.parent and self.parent.options.safe_mode:
                            continue
                    self.queue.append((src_url, image_url, extra_metadata))
            except Exception:
                logger.exception(lambda: "Could not process an item in the Reddit json result")

        random.shuffle(self.queue)
        logger.info(lambda: "Reddit queue populated with %d URLs" % len(self.queue))
Code Example #8
 def get_image_url(origin_url):
     photo_id = FlickrDownloader.get_photo_id(origin_url)
     call = (
         "https://api.flickr.com/services/rest/?method=flickr.photos.getSizes&api_key=%s&photo_id=%s&format=json&nojsoncallback=1"
         % (Util.unxor(HASH, API_KEY), photo_id))
     resp = Util.fetch_json(call)
     s = max(resp["sizes"]["size"], key=lambda size: int(size["width"]))
     return s["source"]
Code Example #9
    def fill_queue(self):
        response = Util.fetch_json("https://api.desktoppr.co/1/wallpapers/random")

        if response["response"]["review_state"] != "safe":
            logger.info(lambda: "Non-safe image returned by Desktoppr, skipping")
            return None

        origin_url = response["response"]["url"]
        image_url = response["response"]["image"]["url"]
        return [QueueItem(origin_url, image_url, {})]
Code Example #10
    def search(self, page=None):
        if not self.api_url:
            return self.legacy_downloader.search(page)

        url = self.api_url
        if page:
            url = url + ("&" if "?" in self.api_url else "?") + "page=" + str(page)
        logger.info(lambda: "Performing wallhaven search: url=%s" % url)
        response = Util.fetch_json(url)
        count = response["meta"]["total"]
        return response, count
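
A hypothetical use of the returned pair; downloader is an illustrative name for a configured instance, and the per-item fields assume the Wallhaven API's usual data/meta layout:

    response, count = downloader.search(page=1)
    print("total results:", count)
    for wallpaper in response["data"]:  # assuming each entry carries id and url fields
        print(wallpaper["id"], wallpaper["url"])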
Code Example #11
 def validate(self, config):
     try:
         url = self.UnsplashConfigurableDownloader(self, config).get_unsplash_api_url()
         data = Util.fetch_json(url)
         valid = "errors" not in data
         return config, None if valid else _("No images found")
     except UnsupportedConfig:
         return config, _("We do not support this type of URL")
     except Exception as e:
         if isinstance(e, HTTPError) and e.response.status_code == 404:
             return config, _("No images found")
         else:
             return config, _("Oops, this didn't work. Is the remote service up?")
Code Example #12
    def download_one(self):
        logger.info(lambda: "Downloading a random image from desktoppr.co")

        response = Util.fetch_json(self.location)

        if response["response"]["review_state"] != "safe":
            logger.info(lambda: "Non-safe image returned by Desktoppr, skipping")
            return None

        origin_url = response["response"]["url"]
        image_url = response["response"]["image"]["url"]

        return self.save_locally(origin_url, image_url)
Code Example #13
 def get_extra_metadata(origin_url):
     photo_id = FlickrDownloader.get_photo_id(origin_url)
     call = 'https://api.flickr.com/services/rest/?method=flickr.photos.getInfo&api_key=%s&photo_id=%s&format=json&nojsoncallback=1' % \
            (API_KEY, photo_id)
     resp = Util.fetch_json(call)
     ph = resp['photo']
     extra_meta = {
         'headline': ph['title']['_content'],
         'description': ph['description']['_content'],
         'author': ph['owner']['realname'],
         'authorURL': 'https://www.flickr.com/photos/%s' % ph['owner']['nsid'],
         'keywords': [x['_content'] for x in ph['tags']['tag']],
     }
     return extra_meta
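
A hypothetical call, with an illustrative shape for the returned dictionary (the URL and all field values are made up):

    meta = FlickrDownloader.get_extra_metadata(
        "https://www.flickr.com/photos/someuser/12345678901/")
    # meta would resemble:
    # {'headline': 'Sunset over the bay',
    #  'description': 'Taken from the pier at dusk.',
    #  'author': 'Jane Doe',
    #  'authorURL': 'https://www.flickr.com/photos/12345678@N00',
    #  'keywords': ['sunset', 'ocean']}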
Code Example #14
 def get_extra_metadata(origin_url):
     photo_id = FlickrDownloader.get_photo_id(origin_url)
     call = (
         "https://api.flickr.com/services/rest/?method=flickr.photos.getInfo&api_key=%s&photo_id=%s&format=json&nojsoncallback=1"
         % (Util.unxor(HASH, API_KEY), photo_id))
     resp = Util.fetch_json(call)
     ph = resp["photo"]
     extra_meta = {
         "headline": ph["title"]["_content"],
         "description": ph["description"]["_content"],
         "author": ph["owner"]["realname"],
         "authorURL":
         "https://www.flickr.com/photos/%s" % ph["owner"]["nsid"],
         "keywords": [x["_content"] for x in ph["tags"]["tag"]],
     }
     return extra_meta
Code Example #15
    def get_random(self):
        dict_dict = Util.fetch_json(
            "https://api.urbandictionary.com/v0/random")

        def _clean(s):
            return s.strip().replace("[", "").replace("]", "")

        result = []
        for entry in dict_dict["list"]:
            word = entry["word"]
            definition = _clean(entry["definition"])
            example = _clean(entry["example"])
            quote = ('"{}"'.format(word) + "\n\n" + definition +
                     ("\n\nExample:\n{}".format(example) if example else ""))

            result.append({
                "quote": quote,
                "author": entry["author"],
                "sourceName": "Urban Dictionary",
                "link": entry["permalink"],
            })

        return result
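
A hypothetical consumer of the returned list; quote_source is an illustrative name for an instance of this class:

    for item in quote_source.get_random():
        print(item["quote"])
        print("-- %s (%s)" % (item["author"], item["link"]))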
Code Example #16
 def fill_queue(self):
     queue = Util.fetch_json(DATA_URL)
     images = queue["result"]["pageContext"]["node"]["data"]["content"][
         "images"]
     return images
Code Example #17
 def fetch(call):
     logger.info(lambda: "Making flickr API call: " + call)
     return Util.fetch_json(call)
Code Example #18
 def fill_queue(self):
     queue = Util.fetch_json(DATA_URL)
     random.shuffle(queue)
     return queue
Code Example #19
 def fill_queue(self):
     manifest = Util.fetch_json(MANIFEST_URL)
     queue = manifest["wallpaper_list"]
     self.tags = manifest["tags"]
     random.shuffle(queue)
     return queue
Code Example #20
 def search(self, _from, _to):
     url = PanoramioDownloader.API_URL % (_from, _to, self.minx, self.miny, self.maxx, self.maxy)
     logger.info(lambda: "Performing Panoramio API call: url=%s" % url)
     return Util.fetch_json(url)
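
A hypothetical call fetching the first 20 results for the downloader's bounding box; downloader is an illustrative name, and the response shape depends on the Panoramio API (the service has since been discontinued, so this example is historical):

    photos_json = downloader.search(0, 20)  # assuming minx/miny/maxx/maxy are set on the instance
    for photo in photos_json.get("photos", []):
        print(photo)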