Example #1
    """
    if 'env' not in web.ctx:
        delegate.fakeload()
    
    q = {"bot": False, "limit": 100}
    changes = web.ctx.site.recentchanges(q)
    
    def render(c):
        t = get_template("recentchanges/" + c.kind + "/message") or get_template("recentchanges/default/message")
        return t(c)

    messages = [render(c) for c in changes]
    messages = [m for m in messages if str(m.get("ignore", "false")).lower() != "true"]
    return messages
    
_get_recent_changes = web.memoize(_get_recent_changes, expires=5*60, background=True)
_get_recent_changes2 = web.memoize(_get_recent_changes2, expires=5*60, background=True)

@public
def get_random_recent_changes(n):
    if "recentchanges_v2" in web.ctx.get("features", []):
        changes = _get_recent_changes2()
    else:
        changes = _get_recent_changes()
    
    if len(changes) > n:
        return random.sample(changes, n)  
    else:
        return changes
        
def _get_blog_feeds():
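All of these snippets follow the same pattern: wrap an expensive query in a plain function, then rebind the name through web.memoize so repeated calls within the expiry window return a cached result. Below is a minimal sketch of such an expiring memoizer; it is illustrative only and not web.py's actual implementation (the background=True flag in the snippets above suggests the real one can also refresh the cached value asynchronously).

import time
import threading

def memoize_with_expiry(f, expires=60):
    # Illustrative sketch only: cache f's results keyed by positional
    # arguments, and recompute once an entry is older than `expires` seconds.
    cache = {}              # args -> (timestamp, value)
    lock = threading.Lock()

    def g(*args):
        now = time.time()
        with lock:
            entry = cache.get(args)
        if entry is not None and now - entry[0] < expires:
            return entry[1]
        value = f(*args)
        with lock:
            cache[args] = (now, value)
        return value

    return g

# Usage mirrors the snippets above:
# _get_recent_changes = memoize_with_expiry(_get_recent_changes, expires=5 * 60)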
Example #2
            (c.author and c.author.type.key == '/type/delete'))

    def render(c):
        t = get_template("recentchanges/" + c.kind + "/message"
                         ) or get_template("recentchanges/default/message")
        return t(c)

    messages = [render(c) for c in changes if not is_ignored(c)]
    messages = [
        m for m in messages if str(m.get("ignore", "false")).lower() != "true"
    ]
    return messages


_get_recent_changes = web.memoize(_get_recent_changes,
                                  expires=5 * 60,
                                  background=True)
_get_recent_changes2 = web.memoize(_get_recent_changes2,
                                   expires=5 * 60,
                                   background=True)


@public
def get_random_recent_changes(n):
    if "recentchanges_v2" in web.ctx.get("features", []):
        changes = _get_recent_changes2()
    else:
        changes = _get_recent_changes()

    _changes = random.sample(changes, n) if len(changes) > n else changes
    for i, change in enumerate(_changes):
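Note the guard around random.sample in this revision: random.sample(population, k) raises ValueError when k exceeds the population size, so when fewer than n changes are available the whole list is returned instead. A quick illustration:

import random

changes = ["edit-1", "edit-2"]
n = 5
# random.sample(changes, n) would raise ValueError (sample larger than
# population), so the guarded form falls back to the full list:
picked = random.sample(changes, n) if len(changes) > n else changes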
Example #3
    return d

def _get_metadata(itemid):
    """Returns metadata by querying the archive.org metadata API.
    """
    url = "http://www.archive.org/metadata/%s" % itemid
    try:
        stats.begin("archive.org", url=url)
        text = urllib2.urlopen(url).read()
        stats.end()
        return simplejson.loads(text)
    except (IOError, ValueError):
        return None

# cache the results in memcache for a minute
_get_metadata = web.memoize(_get_metadata, expires=60)

def locate_item(itemid):
    """Returns (hostname, path) for the item.
    """
    # _get_metadata returns None on failure, so fall back to an empty dict.
    d = _get_metadata(itemid) or {}
    return d.get('server'), d.get('dir')

def edition_from_item_metadata(itemid, metadata):
    """Converts the item metadata into a form suitable to be used as edition
    in Open Library.

    This is used to show fake edition pages like '/books/ia:foo00bar' when
    that item is not yet imported into Open Library.
    """
    if ItemEdition.is_valid_item(itemid, metadata):
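This snippet targets Python 2 (urllib2, simplejson); Example #5 below shows the same fetch pattern ported to Python 3. A rough Python 3 sketch of _get_metadata, assuming stdlib json in place of simplejson and omitting the stats instrumentation:

import json
import urllib.request

def _get_metadata(itemid):
    # Python 3 sketch of the urllib2-based fetch above (illustrative).
    url = "http://www.archive.org/metadata/%s" % itemid
    try:
        with urllib.request.urlopen(url) as response:
            return json.loads(response.read())
    except (IOError, ValueError):
        return None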
Example #4
import simplejson
import urllib2

def wget(url):
    return urllib2.urlopen(url).read()

def get_ol_dumps():
    """Get list of all archive.org items in the in the ol_exports collection uploaded of archive.org staff."""
    url = 'http://www.archive.org/advancedsearch.php?q=(ol_dump_*+OR+ol_cdump_*)+AND+collection:ol_exports&fl[]=identifier&output=json&rows=1000'
    
    d = simplejson.loads(wget(url))
    return sorted(doc['identifier'] for doc in d['response']['docs'])
    
# cache the result for half an hour
get_ol_dumps = web.memoize(get_ol_dumps, 30*60, background=True)
#public(get_ol_dumps)

def download_url(item, filename):
    return "http://www.archive.org/download/%s/%s" % (item, filename)

class ol_dump_latest(delegate.page):
    path = "/data/ol_dump(|_authors|_editions|_works|_deworks)_latest.txt.gz"
    def GET(self, prefix):
        items = [item for item in get_ol_dumps() if item.startswith("ol_dump")]
        if not items:
            raise web.notfound()
            
        item = items[-1]
        filename = item.replace("dump", "dump" + prefix) + ".txt.gz"
        raise web.found(download_url(item, filename))
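The GET handler picks the latest dump item and splices the URL prefix into its name before redirecting. A worked example of the filename rewrite, using a hypothetical item identifier:

item = "ol_dump_2008-09-29"  # hypothetical identifier
prefix = "_authors"          # captured from the URL path
filename = item.replace("dump", "dump" + prefix) + ".txt.gz"
# filename == "ol_dump_authors_2008-09-29.txt.gz"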
Example #5
IA_BASE_URL = config.get('ia_base_url')


def wget(url):
    return urllib.request.urlopen(url).read()


def get_ol_dumps():
    """Get list of all archive.org items in the ol_exports collection uploaded by archive.org staff."""
    url = IA_BASE_URL + '/advancedsearch.php?q=(ol_dump+OR+ol_cdump)+AND+collection:ol_exports&fl[]=identifier&output=json&rows=1000'
    d = simplejson.loads(wget(url))
    return sorted(doc['identifier'] for doc in d['response']['docs'])


# cache the result for half an hour
get_ol_dumps = web.memoize(get_ol_dumps, 30 * 60, background=True)
#public(get_ol_dumps)


def download_url(item, filename):
    return "%s/download/%s/%s" % (IA_BASE_URL, item, filename)


class ol_dump_latest(delegate.page):
    path = "/data/ol_dump(|_authors|_editions|_works|_deworks)_latest.txt.gz"

    def GET(self, prefix):
        items = [item for item in get_ol_dumps() if item.startswith("ol_dump")]
        if not items:
            raise web.notfound()
Example #6
    result = result[:50]

    def process_thing(thing):
        t = web.storage()
        for k in ["key", "title", "name", "displayname"]:
            t[k] = thing[k]
        t['type'] = web.storage(key=thing.type.key)
        return t
    
    for r in result:
        r.author = r.author and process_thing(r.author)
        r.thing = process_thing(site.get(r.key, r.revision))
        
    return result
    
_get_recent_changes = web.memoize(_get_recent_changes, expires=5*60, background=True)

@public
def get_random_recent_changes(n):
    changes = _get_recent_changes()
    # random.sample raises ValueError if n exceeds len(changes), so guard it
    # (as the later revisions in Examples #1 and #2 do).
    return random.sample(changes, n) if len(changes) > n else changes

@public
def sprintf(s, *a, **kw):
    args = kw or a
    if args:
        return s % args
    else:
        return s
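# Illustrative calls: positional args use %s-style formatting, keyword
# args use %(name)s-style formatting.
#   sprintf("Hello, %s!", "world")           -> "Hello, world!"
#   sprintf("Hello, %(name)s!", name="web")  -> "Hello, web!"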
        
def _get_blog_feeds():