Example 1
    def download(self,
                 size=SIZE_XLARGE,
                 thumbnail=False,
                 wait=60,
                 asynchronous=False):
        """ Downloads this image to cache.
        
        Calling the download() method instantiates an asynchronous URLAccumulator
        that will fetch the image's URL from Flickr.
        A second process then downloads the file at the retrieved URL.
        
        Once it is done downloading, this image will have its path property
        set to an image file in the cache.
        
        """

        if thumbnail == True: size = SIZE_THUMBNAIL  # backwards compatibility
        self._size = disambiguate_size(size)
        self._wait = wait
        self._asynchronous = asynchronous

        url = "http://api.flickr.com/services/rest/?method=flickr.photos.getSizes"
        url += "&photo_id=" + self.id
        url += "&api_key=" + API_KEY
        URLAccumulator.__init__(self, url, wait, asynchronous, "flickr",
                                ".xml", 2)

        if not asynchronous:
            return self.path
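In synchronous mode (asynchronous=False, the default) the call blocks until the file is cached and returns self.path. A minimal usage sketch, assuming img is an image object exposing the method above (the variable name is illustrative; the asynchronous case is shown in the polling sketch after Example 13):

    # Blocking call: returns the local path of the cached image file.
    path = img.download(size=SIZE_XLARGE)

    # Passing thumbnail=True is the backwards-compatible way to request SIZE_THUMBNAIL.
    path = img.download(thumbnail=True)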
Example 2
    def __init__(self, q, page=0, wait=10, asynchronous=False, cached=True):

        """ Parses color themes from Adobe Kuler.

        Valid queries are "popular", "rating",
        a theme id as an integer, or a search string.

        """

        if cached:
            cache = "kuler"
        else:
            cache = None

        # Requests for search, popular, rating and id use different URLs.
        url  = "http://kuler.adobe.com/kuler/services/"
        self.id_string = url + "theme/get.cfm?themeId="
        if isinstance(q, int):
            url  = self.id_string + str(q)
        elif q in ["popular", "rating"]:
            url += "theme/getList.cfm?listType="+q
            url += "&startIndex="+str(page*30)+"&itemsPerPage=30"
        else:
            url += "search/get.cfm?searchQuery="+quote(q)
            url += "&startIndex="+str(page*30)+"&itemsPerPage=30"

        # Refresh cached results every day
        # for highest rating or popular requests.
        if q in ["popular", "rating"]:
            if cached and Cache(cache).age(url) > 0:
                Cache(cache).remove(url)

        URLAccumulator.__init__(self, url, wait, asynchronous, cache, type=".xml", throttle=3)
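A few illustrative constructor calls; the class name KulerSearch is assumed and not part of the snippet. page is zero-based and maps to blocks of 30 themes (startIndex = page * 30), and "popular"/"rating" listings are refetched once the cached copy is older than a day:

    popular = KulerSearch("popular")          # 30 most popular themes
    rated   = KulerSearch("rating", page=1)   # themes 31-60 ranked by rating
    single  = KulerSearch(24198)              # one theme, fetched by id
    found   = KulerSearch("sunset")           # free-text search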
Example 3
    def __init__(self, url, wait=10, asynchronous=False, cached=True):

        if cached:
            cache = "html"
        else:
            cache = None
        URLAccumulator.__init__(self, url, wait, asynchronous, cache)
Example 4
    def __init__(self, q, start=1, count=100, wait=10, asynchronous=False, cached=True, 
                 sort=SORT_RELEVANCE, match=MATCH_ANY):

        try: q = q.encode("utf-8")
        except:
            pass

        if cached: 
            cache = "flickr"
        else:
            cache = None
        
        url  = "http://api.flickr.com/services/rest/?method="
        if q == "recent":
            url += "flickr.photos.getRecent"
        else:
            url += "flickr.photos.search"
        if isinstance(q, (list, tuple)):
            q = [quote_plus(q) for q in q]
            q = ",".join(q)
            url += "&tags=" + quote_plus(q)
            url += "&tag_mode=" + match
        else:
            url += "&text=" + quote_plus(q)
        url += "&page=" + str(start)
        url += "&per_page=" + str(count)
        url += "&sort=" + disambiguate_sort(sort)
        url += "&api_key=" + API_KEY
        
        URLAccumulator.__init__(self, url, wait, asynchronous, cache, ".xml", 1)
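The query argument steers the request: the literal string "recent" switches to flickr.photos.getRecent, a list or tuple becomes a comma-joined &tags= query with the chosen tag_mode, and any other string is a full-text &text= search. Illustrative calls, with the class name FlickrSearch assumed:

    recent = FlickrSearch("recent", count=20)                      # flickr.photos.getRecent
    tagged = FlickrSearch(["sunset", "beach"], match=MATCH_ANY)    # tag search, tags joined with commas
    texts  = FlickrSearch("northern lights", sort=SORT_RELEVANCE)  # full-text &text= search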
Example 5
    def download(self, size=SIZE_XLARGE, thumbnail=False, wait=60, asynchronous=False):
        
        """ Downloads this image to cache.
        
        Calling the download() method instantiates an asynchronous URLAccumulator
        that will fetch the image's URL from Flickr.
        A second process then downloads the file at the retrieved URL.
        
        Once it is done downloading, this image will have its path property
        set to an image file in the cache.
        
        """
        
        if thumbnail == True: size = SIZE_THUMBNAIL # backwards compatibility
        self._size = disambiguate_size(size)
        self._wait = wait
        self._asynchronous = asynchronous

        url  = "http://api.flickr.com/services/rest/?method=flickr.photos.getSizes"
        url += "&photo_id=" + self.id
        url += "&api_key=" + API_KEY
        URLAccumulator.__init__(self, url, wait, asynchronous, "flickr", ".xml", 2)

        if not asynchronous:
            return self.path
Example 6
    def __init__(self, url, wait=10, asynchronous=False, cached=True):

        if cached:
            cache = "html"
        else:
            cache = None
        URLAccumulator.__init__(self, url, wait, asynchronous, cache)
Example 7
    def __init__(self, q, page=0, wait=10, asynchronous=False, cached=True):

        """ Parses color themes from colr.org.

        Valid queries are "latest", "random",
        a scheme id as an integer, or a tag string.

        """

        if cached:
            cache = "colr"
        else:
            cache = None

        url  = "http://www.colr.org/json/"
        if isinstance(q, int):
            url += "scheme/" + str(q)
        elif q in ["latest", "random"]:
            url += "scheme/" + q
        else:
            url += "tag/" + quote(q)

        # Refresh cached results every day
        # for latest requests.
        if q == "latest":
            if cached and Cache(cache).age(url) > 0:
                Cache(cache).remove(url)
        if q == "random":
            Cache(cache).remove(url)

        URLAccumulator.__init__(self, url, wait, asynchronous, cache, type=".xml", throttle=3)
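Illustrative calls against the colr.org JSON service, with the class name ColrSearch assumed; "latest" results are refreshed daily and "random" is never served from cache:

    latest = ColrSearch("latest")   # /json/scheme/latest, cache refreshed daily
    random = ColrSearch("random")   # /json/scheme/random, cache entry always removed
    scheme = ColrSearch(19)         # /json/scheme/19, one scheme by id
    tagged = ColrSearch("blue")     # /json/tag/blue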
Example 8
    def __init__(self, eq, wait=10, asynchronous=False):

        url = "http://www.forkosh.dreamhost.com/mimetex.cgi?" + quote(eq)
        URLAccumulator.__init__(self,
                                url,
                                wait,
                                asynchronous,
                                "mimetex",
                                type=".gif",
                                throttle=1)
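The equation is URL-quoted and handed to the public mimetex CGI, which renders it as a GIF. A hedged usage sketch, with the class name MimeTeX assumed:

    formula = MimeTeX("E = mc^2")
    # Once the accumulator finishes, formula.path points to the cached .gif file.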
Example 9
    def __init__(self, eq, type="png", dpi=120, color="", wait=10, asynchronous=False):

        eq = "\\"+type+" "+eq
        eq = "\dpi{"+str(dpi)+"} " + eq
        if color:
            eq = "\usepackage{color} \color{"+color+"} " + eq

        print eq
        url = "http://www.forkosh.dreamhost.com/mathtex.cgi?"+quote(eq)
        URLAccumulator.__init__(self, url, wait, asynchronous, "mathtex", type="."+type, throttle=1)
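mathtex takes its options as directives prefixed to the expression itself; the constructor builds them from the type, dpi and color arguments before quoting the string. For example (class name MathTeX assumed):

    formula = MathTeX("E = mc^2", type="gif", dpi=200, color="red")
    # The string sent to mathtex.cgi becomes:
    #   \usepackage{color} \color{red} \dpi{200} \gif E = mc^2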
Example 10
    def __init__(self, q, author=False, max=100, wait=10, asynchronous=False, cached=True):

        if cached:
            cache = "morguefile"
        else:
            cache = None

        arg = "terms"
        if author == True: arg = "author"
        url = "http://morguefile.com/archive/archivexml.php"
        url += "?" + arg + "=" + quote_plus(q) + "&archive_max_image=" + str(max)
        URLAccumulator.__init__(self, url, wait, asynchronous, cache, ".xml", 1)
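The morgueFile archive is queried either by search terms or, with author=True, by contributor name, and max caps the number of records in the XML response. A sketch with the class name MorgueFileSearch assumed:

    clouds = MorgueFileSearch("clouds", max=50)         # ?terms=clouds&archive_max_image=50
    byname = MorgueFileSearch("someuser", author=True)  # ?author=someuser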
Example 11
    def __init__(self, url, wait=10, asynchronous=False, cached=True, none=""):

        self._feed = None
        self._none = none

        if cached:
            cache = "newsfeed"
        else:
            cache = None

        # Refresh cached news results every day.
        if cached and Cache(cache).age(url) > 0:
            Cache(cache).remove(url)

        URLAccumulator.__init__(self, url, wait, asynchronous, cache, ".xml")
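Feeds are cached under "newsfeed" and a cached copy older than a day is discarded before the request, so each feed is refetched at most daily. A hedged sketch, assuming the class is exposed as Newsfeed and that none is the placeholder returned for missing feed fields:

    feed = Newsfeed("http://news.example.com/rss.xml", none="n/a")
    # With asynchronous=False the constructor blocks until the XML is cached.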
Example 12
    def __init__(self, url, wait=10, asynchronous=False, cached=True, none=""):

        self._feed = None
        self._none = none

        if cached:
            cache = "newsfeed"
        else:
            cache = None

        # Refresh cached news results every day.
        if cached and Cache(cache).age(url) > 0:
            Cache(cache).remove(url)

        URLAccumulator.__init__(self, url, wait, asynchronous, cache, ".xml")
Example 13
    def _done(self):

        done = URLAccumulator._done(self)
        if self._download:
            if self._download.done:
                # Step three: set the path to the cached image.
                self.path = self._download._cache.hash(self._download.url)
            return done and self._download.done
        else:
            return done
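This override ties the two accumulators together: the metadata request and the second download that fetches the actual file, and it is also where path is finally set ("step three"). A caller in asynchronous mode would poll the combined flag, roughly like this (the img object is illustrative, and done is assumed to be the public counterpart of _done() provided by URLAccumulator):

    import time

    img.download(asynchronous=True)
    while not img.done:   # stays False until both downloads have finished
        time.sleep(0.1)
    print img.path        # set once the image file is in the cache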
Example 14
    def _done(self):

        done = URLAccumulator._done(self)
        if self._download:
            if self._download.done:
                # Step three: set the path to the cached image.
                self.path = self._download._cache.hash(self._download.url)
            return done and self._download.done
        else:
            return done
Example 15
    def __init__(self,
                 q,
                 start=1,
                 count=10,
                 service=YAHOO_SEARCH,
                 context=None,
                 wait=10,
                 asynchronous=False,
                 cached=True):
        """ Searches Yahoo for the given query.
    
        By default, return cached results whenever possible.
        Otherwise, go online and update the local cache.
        The number of results is limited to count and starts at the given index.
    
        The returned results depend on the service used: 
        web pages, images, news, spelling suggestion or contextual links.
    
        """

        self.query = q
        self.service = service

        if cached:
            cache = "yahoo"
        else:
            cache = None

        url = "http://search.yahooapis.com/"
        if service == YAHOO_SEARCH and context == None:
            url += "WebSearchService/V1/webSearch?"
        if service == YAHOO_SEARCH and context != None:
            url += "WebSearchService/V1/contextSearch?"
        if service == YAHOO_IMAGES: url += "ImageSearchService/V1/imageSearch?"
        if service == YAHOO_NEWS: url += "NewsSearchService/V1/newsSearch?"
        if service == YAHOO_SPELLING:
            url += "WebSearchService/V1/spellingSuggestion?"
        arg = urllib.urlencode(
            (("appid", YAHOO_ID), ("query", q), ("start", start),
             ("results", count), ("context", unicode(context))))

        url += arg
        URLAccumulator.__init__(self, url, wait, asynchronous, cache, ".xml")
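The service argument selects the REST endpoint, and supplying a context string switches plain web search to contextual search. Illustrative calls, with the class name YahooSearch assumed and a valid YAHOO_ID configured:

    web    = YahooSearch("nodebox", count=10)              # WebSearchService/V1/webSearch
    images = YahooSearch("nodebox", service=YAHOO_IMAGES)  # ImageSearchService/V1/imageSearch
    spell  = YahooSearch("teh", service=YAHOO_SPELLING)    # spellingSuggestion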
Example 16
    def __init__(self,
                 q,
                 start=0,
                 service=GOOGLE_SEARCH,
                 size="",
                 wait=10,
                 asynchronous=False,
                 cached=True):
        """ Searches Google for the given query.
    
        By default, return cached results whenever possible.
        Otherwise, go online and update the local cache.
        The number of results is limited to 8 and starts at the given index.
        You can only return up to 32 results.
    
        The returned results depend on the service used: 
        web pages, images, news or blogs.
    
        """

        self.query = q
        self.service = service

        if cached:
            cache = "google"
        else:
            cache = None
        url = "http://search.yahooapis.com/"
        url = "http://ajax.googleapis.com/ajax/services/search/"
        if service == GOOGLE_SEARCH: url += "web?"
        if service == GOOGLE_IMAGES:
            url += "images?"
        if service == GOOGLE_NEWS: url += "news?"
        if service == GOOGLE_BLOGS: url += "blogs?"
        arg = urllib.urlencode(
            (("v", 1.0), ("q", q), ("start", start), ("rsz", "large"),
             ("key", GOOGLE_ID), ("imgsz", disambiguate_size(size))))

        url += arg
        URLAccumulator.__init__(self, url, wait, asynchronous, cache, ".txt")
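The Google AJAX Search API behaves differently from Yahoo's: rsz=large fixes each response at 8 results and only the first 32 are reachable, so paging means bumping start in steps of 8. Illustrative calls, with the class name GoogleSearch assumed and a valid GOOGLE_ID configured:

    first  = GoogleSearch("nodebox")                         # first page of 8 results
    second = GoogleSearch("nodebox", start=8)                # next 8 results
    images = GoogleSearch("nodebox", service=GOOGLE_IMAGES)  # image search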
Example 17
    def __init__(
        self, q, start=1, count=10, service=YAHOO_SEARCH, context=None, wait=10, asynchronous=False, cached=True
    ):

        """ Searches Yahoo for the given query.
    
        By default, return cached results whenever possible.
        Otherwise, go online and update the local cache.
        The number of results is limited to count and starts at the given index.
    
        The returned results depend on the service used: 
        web pages, images, news, spelling suggestion or contextual links.
    
        """

        self.query = q
        self.service = service

        if cached:
            cache = "yahoo"
        else:
            cache = None

        url = "http://search.yahooapis.com/"
        if service == YAHOO_SEARCH and context == None:
            url += "WebSearchService/V1/webSearch?"
        if service == YAHOO_SEARCH and context != None:
            url += "WebSearchService/V1/contextSearch?"
        if service == YAHOO_IMAGES:
            url += "ImageSearchService/V1/imageSearch?"
        if service == YAHOO_NEWS:
            url += "NewsSearchService/V1/newsSearch?"
        if service == YAHOO_SPELLING:
            url += "WebSearchService/V1/spellingSuggestion?"
        arg = urllib.urlencode(
            (("appid", YAHOO_ID), ("query", q), ("start", start), ("results", count), ("context", unicode(context)))
        )

        url += arg
        URLAccumulator.__init__(self, url, wait, asynchronous, cache, ".xml")
Example 18
    def __init__(self,
                 q,
                 start=1,
                 count=100,
                 wait=10,
                 asynchronous=False,
                 cached=True,
                 sort=SORT_RELEVANCE,
                 match=MATCH_ANY):

        try:
            q = q.encode("utf-8")
        except:
            pass

        if cached:
            cache = "flickr"
        else:
            cache = None

        url = "http://api.flickr.com/services/rest/?method="
        if q == "recent":
            url += "flickr.photos.getRecent"
        else:
            url += "flickr.photos.search"
        if isinstance(q, (list, tuple)):
            q = [quote_plus(q) for q in q]
            q = ",".join(q)
            url += "&tags=" + quote_plus(q)
            url += "&tag_mode=" + match
        else:
            url += "&text=" + quote_plus(q)
        url += "&page=" + str(start)
        url += "&per_page=" + str(count)
        url += "&sort=" + disambiguate_sort(sort)
        url += "&api_key=" + API_KEY

        URLAccumulator.__init__(self, url, wait, asynchronous, cache, ".xml",
                                1)
Example 19
    def __init__(self,
                 eq,
                 type="png",
                 dpi=120,
                 color="",
                 wait=10,
                 asynchronous=False):

        eq = "\\" + type + " " + eq
        eq = "\dpi{" + str(dpi) + "} " + eq
        if color:
            eq = "\usepackage{color} \color{" + color + "} " + eq

        print eq
        url = "http://www.forkosh.dreamhost.com/mathtex.cgi?" + quote(eq)
        URLAccumulator.__init__(self,
                                url,
                                wait,
                                asynchronous,
                                "mathtex",
                                type="." + type,
                                throttle=1)
Example 20
    def __init__(self, q, page=0, wait=10, asynchronous=False, cached=True):
        """ Parses color themes from Adobe Kuler.
        
        Valid queries are "popular", "rating", 
        a theme id as an integer, or a search string.
        
        """

        if cached:
            cache = "kuler"
        else:
            cache = None

        # Requests for search, popular, rating and id use different URLs.
        url = "http://kuler.adobe.com/kuler/services/"
        self.id_string = url + "theme/get.cfm?themeId="
        if isinstance(q, int):
            url = self.id_string + str(q)
        elif q in ["popular", "rating"]:
            url += "theme/getList.cfm?listType=" + q
            url += "&startIndex=" + str(page * 30) + "&itemsPerPage=30"
        else:
            url += "search/get.cfm?searchQuery=" + quote(q)
            url += "&startIndex=" + str(page * 30) + "&itemsPerPage=30"

        # Refresh cached results every day
        # for highest rating or popular requests.
        if q in ["popular", "rating"]:
            if cached and Cache(cache).age(url) > 0:
                Cache(cache).remove(url)

        URLAccumulator.__init__(self,
                                url,
                                wait,
                                asynchronous,
                                cache,
                                type=".xml",
                                throttle=3)
Example 21
    def __init__(self, q, start=0, service=GOOGLE_SEARCH, size="",
                 wait=10, asynchronous=False, cached=True):

        """ Searches Google for the given query.

        By default, return cached results whenever possible.
        Otherwise, go online and update the local cache.
        The number of results is limited to 8 and starts at the given index.
        You can only return up to 32 results.

        The returned results depend on the service used:
        web pages, images, news or blogs.

        """

        self.query = q
        self.service = service

        if cached:
            cache = "google"
        else:
            cache = None
        url = "http://search.yahooapis.com/"
        url = "http://ajax.googleapis.com/ajax/services/search/"
        if service == GOOGLE_SEARCH : url += "web?"
        if service == GOOGLE_IMAGES :
            url += "images?"
        if service == GOOGLE_NEWS   : url += "news?"
        if service == GOOGLE_BLOGS  : url += "blogs?"
        arg = urllib.urlencode((("v", 1.0),
                                ("q", q),
                                ("start", start),
                                ("rsz", "large"),
                                ("key", GOOGLE_ID),
                                ("imgsz", disambiguate_size(size))))

        url += arg
        URLAccumulator.__init__(self, url, wait, asynchronous, cache, ".txt")
Example 22
    def download(self, size=SIZE_LARGE, thumbnail=False, wait=60, asynchronous=False):

        """ Downloads this image to cache.

        Calling the download() method instantiates an asynchronous URLAccumulator.
        Once it is done downloading, this image will have its path property
        set to an image file in the cache.

        """

        if thumbnail == True: size = SIZE_THUMBNAIL # backwards compatibility
        self._size = disambiguate_size(size)
        if self._size != SIZE_THUMBNAIL:
            url = self.url.replace("thumbnails", "lowrez")
        else:
            url = self.url

        cache = "morguefile"
        extension = os.path.basename(self.url)[-4:]
        URLAccumulator.__init__(self, url, wait, asynchronous, cache, extension, 2)

        if not asynchronous:
            return self.path
Example 23
    def __init__(self, q, page=0, wait=10, asynchronous=False, cached=True):
        """ Parses color themes from Adobe Kuler.
        
        Valid queries are "popular", "rating", 
        a theme id as an integer, or a search string.
        
        """

        if cached:
            cache = "colr"
        else:
            cache = None

        url = "http://www.colr.org/json/"
        if isinstance(q, int):
            url += "scheme/" + str(q)
        elif q in ["latest", "random"]:
            url += "scheme/" + q
        else:
            url += "tag/" + quote(q)

        # Refresh cached results every day
        # for latest requests.
        if q == "latest":
            if cached and Cache(cache).age(url) > 0:
                Cache(cache).remove(url)
        if q == "random":
            Cache(cache).remove(url)

        URLAccumulator.__init__(self,
                                url,
                                wait,
                                asynchronous,
                                cache,
                                type=".xml",
                                throttle=3)
Example 24
    def load(self, data):

        # Step one: fetch the image location from the Flickr API.
        if self.url.startswith("http://api.flickr.com"):
            dom = parseString(data)
            for e in dom.getElementsByTagName("size"):
                self.url = e.getAttribute("source")
                label = e.getAttribute("label")
                # We pick the requested size.
                if label == self._size: break

            # Step two: we know where the image is located,
            # now start downloading it.
            extension = os.path.basename(self.url)[-4:]
            self._download = URLAccumulator(self.url, self._wait,
                                            self._asynchronous, "flickr",
                                            extension, 2)
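Step one relies on xml.dom.minidom: the flickr.photos.getSizes response lists one <size> element per available rendition, and the loop keeps the source attribute of the entry whose label matches the requested size. A stripped-down illustration of that parse (the XML snippet is made up for the example):

    from xml.dom.minidom import parseString

    data = '<sizes><size label="Medium" source="http://farm1.static.flickr.com/example_m.jpg"/></sizes>'
    dom = parseString(data)
    for e in dom.getElementsByTagName("size"):
        if e.getAttribute("label") == "Medium":
            url = e.getAttribute("source")  # direct image URL used for the second download
            break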
Example 25
    def __init__(self, eq, wait=10, asynchronous=False):

        url = "http://www.forkosh.dreamhost.com/mimetex.cgi?"+quote(eq)
        URLAccumulator.__init__(self, url, wait, asynchronous, "mimetex", type=".gif", throttle=1)