def generic_carousel(query=None, subject=None, work_id=None, _type=None,
                     sorts=None, limit=None, timeout=None):
    """Fetch carousel books through memcache, recomputing on a cache miss.

    Returns a storified list of books, or the falsy cached value as-is.
    """
    cache_key = 'home.ia_carousel_books'
    memoized_fetch = cache.memcache_memoize(
        get_ia_carousel_books, cache_key,
        timeout=timeout or cache.DEFAULT_CACHE_LIFETIME)
    books = memoized_fetch(query=query, subject=subject, work_id=work_id,
                           _type=_type, sorts=sorts, limit=limit)
    if not books:
        # Cache held nothing useful -- force a recompute and re-store.
        books = memoized_fetch.update(query=query, subject=subject,
                                      work_id=work_id, _type=_type,
                                      sorts=sorts, limit=limit)[0]
    return storify(books) if books else books
def readonline_carousel(id="read-carousel"):
    """Render the "read online" carousel from random ebooks.

    At most 120 books are shown. Returns None (renders nothing) when the
    data cannot be computed.
    """
    try:
        data = random_ebooks()
        if len(data) > 120:
            data = random.sample(data, 120)
        return render_template("books/carousel", storify(data), id=id)
    except Exception:
        # Was a silent `return None`; log the failure like the other
        # carousel helpers in this module so breakage is visible.
        logger.error("Failed to compute data for readonline_carousel",
                     exc_info=True)
        return None
def render_returncart(limit=60, randomize=True):
    """Render the returncart carousel (recently returned books)."""
    # Over-fetch so there is still enough left after trimming to `limit`.
    books = get_returncart(limit * 5)
    if randomize:
        random.shuffle(books)
    books = books[:limit]
    return render_template("books/carousel", storify(books),
                           id="returncart_carousel")
def carousel_from_list(key, randomize=False, limit=60):
    """Render a carousel from the editions of the list at `key`.

    Args:
        key: list key, e.g. "/people/foo/lists/OL1L"; the carousel's CSS id
            is derived from the last path segment.
        randomize: shuffle editions before trimming.
        limit: maximum number of books shown.
    """
    # Renamed local from `id` to `css_id`: avoids shadowing the builtin
    # and matches the other revision of this function in the module.
    css_id = key.split("/")[-1] + "_carousel"
    data = format_list_editions(key)
    if randomize:
        random.shuffle(data)
    data = data[:limit]
    return render_template("books/carousel", storify(data), id=css_id)
def generic_carousel(query=None, subject=None, work_id=None, _type=None,
                     sorts=None, limit=None, timeout=None):
    """Return storified carousel books, memoized in memcache."""
    key = 'home.ia_carousel_books'
    fetch = cache.memcache_memoize(
        get_ia_carousel_books,
        key,
        timeout=timeout or cache.DEFAULT_CACHE_LIFETIME,
    )
    return storify(fetch(query=query, subject=subject, work_id=work_id,
                         _type=_type, sorts=sorts, limit=limit))
def readonline_carousel(id="read-carousel"):
    """Render a carousel of up to 120 random readable ebooks."""
    try:
        ebooks = random_ebooks()
        if len(ebooks) > 120:
            ebooks = random.sample(ebooks, 120)
        return render_template("books/carousel", storify(ebooks), id=id)
    except Exception:
        logger.error("Failed to compute data for readonline_carousel",
                     exc_info=True)
        return None
def sponsor_me_carousel():
    """Return up to 60 random ebooks, storified, for the sponsorship carousel.

    Returns None when the data cannot be computed.
    """
    try:
        selection = random_ebooks()
        if len(selection) > 60:
            selection = random.sample(selection, 60)
        return storify(selection)
    except Exception:
        logger.error("Failed to compute data for sponsor_me_carousel",
                     exc_info=True)
        return None
def render_returncart(limit=60, randomize=True):
    """Render the returncart carousel, hiding in-library-only books
    when the visitor is not in a participating library."""
    data = get_returncart(limit * 5)
    if not inlibrary.get_library():
        # Visitor is outside a participating library: drop books that can
        # only be borrowed in-library.
        data = [book for book in data if 'inlibrary_borrow_url' not in book]
    if randomize:
        random.shuffle(data)
    return render_template("books/carousel", storify(data[:limit]),
                           id="returncart_carousel")
def loans_carousel(loans=None, cssid="loans_carousel", pixel="CarouselLoans"):
    """Generates 'Your Loans' carousel on home page"""
    if not loans:
        return ''
    books = [
        format_book_data(edition)
        for edition in (web.ctx.site.get(loan['book']) for loan in loans)
        if edition
    ]
    if not books:
        return ''
    return render_template('books/carousel', storify(books),
                           id=cssid, pixel=pixel, loans=True)
def process_change(change):
    # Wrap a raw change dict into a Changeset and decorate it with the
    # attributes the history templates expect.
    # NOTE(review): `page`, `first`, and `get_comment` are free variables --
    # this function presumably lives inside an enclosing scope that defines
    # them; confirm before moving it.
    change = Changeset.create(web.ctx.site, storify(change))
    change.thing = page
    change.key = page.key
    # Revision of *this* page within the changeset (a changeset may touch
    # several documents).
    change.revision = first(c.revision for c in change.changes if c.key == page.key)
    change.created = change.timestamp
    # Give templates dict-style access and a lazy comment accessor.
    change.get = change.__dict__.get
    change.get_comment = lambda: get_comment(change)
    change.machine_comment = change.data.get("machine_comment")
    return change
def readonline_carousel(id="read-carousel"):
    """Return template code for books pulled from search engine.

    TODO: If problems, use stock list.
    """
    try:
        books = random_ebooks()
        if len(books) > 120:
            books = random.sample(books, 120)
        return render_template("books/carousel", storify(books), id=id)
    except Exception:
        logger.error("Failed to compute data for readonline_carousel",
                     exc_info=True)
        return None
def carousel_from_list(key, randomize=False, limit=60):
    """Render a carousel of the (optionally shuffled) editions in list `key`,
    annotated with checked-out status."""
    css_id = key.split("/")[-1] + "_carousel"
    editions = format_list_editions(key)
    if randomize:
        random.shuffle(editions)
    editions = editions[:limit]
    # Mark which editions are currently checked out before rendering.
    add_checkedout_status(editions)
    return render_template("books/carousel", storify(editions),
                           id=css_id, pixel="CarouselList")
def loans_carousel(loans=None, cssid="loans_carousel", pixel="CarouselLoans"):
    """Generates 'Your Loans' carousel on home page"""
    if not loans:
        return ''
    books = []
    for loan in loans:
        edition = web.ctx.site.get(loan['book'])
        if edition:
            books.append(format_book_data(edition))
    if not books:
        return ''
    return render_template(
        'books/carousel', storify(books), id=cssid, pixel=pixel, loans=True)
def render_returncart(limit=60, randomize=True):
    """Render the returncart carousel of recently returned books."""
    candidates = get_returncart(limit * 5)
    # Remove all inlibrary books unless we're in a participating library.
    if not inlibrary.get_library():
        candidates = [d for d in candidates
                      if 'inlibrary_borrow_url' not in d]
    if randomize:
        random.shuffle(candidates)
    candidates = candidates[:limit]
    return render_template("books/carousel", storify(candidates),
                           id="returncart_carousel")
def readonline_carousel():
    """Return template code for books pulled from search engine.

    TODO: If problems, use stock list.
    """
    try:
        sample = random_ebooks()
        if len(sample) > 60:
            sample = random.sample(sample, 60)
        return storify(sample)
    except Exception:
        logger.error("Failed to compute data for readonline_carousel",
                     exc_info=True)
        return None
def POST(self):
    """Accept pushed document versions and fire 'on_new_version' hooks.

    Only reachable from localhost or the local /24 subnet.
    """
    global local_ip
    if local_ip is None:
        # Resolve once and cache at module level.
        local_ip = socket.gethostbyname(socket.gethostname())
    same_subnet = web.ctx.ip.rsplit(".", 1)[0] == local_ip.rsplit(".", 1)[0]
    if web.ctx.ip != "127.0.0.1" and not same_subnet:
        raise Forbidden("Allowed only in the local network.")
    docs = simplejson.loads(web.data())
    if not isinstance(docs, list):
        docs = [docs]
    for doc in docs:
        thing = client.Thing(web.ctx.site, doc['key'], client.storify(doc))
        client._run_hooks('on_new_version', thing)
def readonline_carousel():
    """Return random ebooks that are actually readable right now.

    TODO: If problems, use stock list.
    """
    try:
        books = random_ebooks()
        if len(books) > 30:
            # Availability data only exists after add_availability(), so the
            # readability filter stays inside this branch.
            books = lending.add_availability(random.sample(books, 30))
            books = [b for b in books if b['availability']['is_readable']]
        return storify(books)
    except Exception:
        logger.error("Failed to compute data for readonline_carousel",
                     exc_info=True)
        return None
def POST(self):
    """Accept pushed document versions, run 'on_new_version' hooks, ack 'ok'.

    Only reachable from localhost or the local /24 subnet.
    """
    global local_ip
    if local_ip is None:
        local_ip = socket.gethostbyname(socket.gethostname())
    if (web.ctx.ip != '127.0.0.1'
            and web.ctx.ip.rsplit('.', 1)[0] != local_ip.rsplit('.', 1)[0]):
        raise Forbidden('Allowed only in the local network.')
    payload = json.loads(web.data())
    items = payload if isinstance(payload, list) else [payload]
    for item in items:
        thing = client.Thing(web.ctx.site, item['key'], client.storify(item))
        client._run_hooks('on_new_version', thing)
    return delegate.RawText('ok')
def readonline_carousel(cssid='classics_carousel', pixel="CarouselClassics"):
    """Return template code for books pulled from search engine.

    TODO: If problems, use stock list.
    """
    try:
        pool = random_ebooks()
        if len(pool) > 120:
            pool = random.sample(pool, 120)
        return render_template("books/carousel", storify(pool),
                               id=cssid, pixel=pixel)
    except Exception:
        logger.error("Failed to compute data for readonline_carousel",
                     exc_info=True)
        return None
def get_cached_sponsorable_editions():
    """Return sponsorable editions, storified and memcached for one hour."""
    memoized = cache.memcache_memoize(
        get_cachable_sponsorable_editions,
        "books.sponsorable_editions",
        timeout=dateutil.HOUR_SECS,
    )
    return storify(memoized())
def render_returncart(limit=60, randomize=True):
    """Render up to `limit` returncart books, shuffled by default."""
    pool = get_returncart(limit * 5)  # over-fetch, then trim
    if randomize:
        random.shuffle(pool)
    pool = pool[:limit]
    return render_template(
        "books/carousel", storify(pool), id="returncart_carousel")
def popular_carousel(available_limit=30, waitlist_limit=18, loan_check_batch_size=100):
    """Renders a carousel of popular editions, which are available for reading
    or borrowing, from user lists (borrowable or downloadable; excludes daisy
    only).

    Args:
        available_limit (int) - Load the popular carousel with how many items?
            (preferably divisible by 6; number of books shown per page)
        waitlist_limit (int) - limit waitlist to how many books
        loan_check_batch_size (int) - Bulk submits this many archive.org
            itemids at a time to see if they are available to be borrowed
            (only considers waitinglist and bookreader borrows, no acs4)

    Selected Lists:
        popular.popular is a mapping of OL ids to archive.org identifiers for
        popular book editions coming from the following OL lists:

        /people/mekBot/lists/OL104041L is a manually curated collection of
        popular available books which was constructed by looking at the
        goodreads (http://www.goodreads.com/list/show/1.Best_Books_Ever)
        Best Ever list. Because this list is more highly curated and has more
        overall recognizable and popular books, we prioritize drawing from
        this list (shuffled) first and then fall back to other lists as this
        one is depleted (i.e. all books become unavailable for checking out).

        /people/openlibrary/lists/OL104411L comes from the "top 2000+ most
        requested print disabled eBooks in California" displayed from the
        /lists page.

    Popular List Construction:
        https://github.com/internetarchive/openlibrary/pull/406#issuecomment-268090607
        The expensive part about automatically checking the list seeds above
        for availability is that there's no apparent easy way to get ocaids
        for a collection of editions at once. Thus, web.ctx.site.get needs to
        be used on each Edition (which is expensive) before a batch of
        editions can be checked for availability. If we had the ocaids of
        list seeds upfront and could query them in bulk, this would eliminate
        the problem.

        As a work-around, we periodically create a flatfile cache of the
        above list.seed keys mapped ahead of time to their ocaids (i.e.
        `popular.popular`). For steps on (re)generating `popular.popular`,
        see: data.py popular.generate_popular_list(). Ideally, solr should be
        used as a cache instead of the hard-coded `popular.popular`.

    Returns:
        a tuple (available_books, waitlisted_books) of storified lists
    """
    available_books = []
    waitlisted_books = []
    seeds = popular.popular
    # Consume seeds batch-by-batch until enough available books are found.
    while seeds and len(available_books) < available_limit:
        batch = seeds[:loan_check_batch_size]
        seeds = seeds[loan_check_batch_size:]
        random.shuffle(batch)
        # One bulk availability query per batch of archive.org identifiers.
        responses = lending.is_borrowable([seed[0] for seed in batch])
        for seed in batch:
            ocaid, key = seed
            # Already full: skip remaining seeds (waitlist also stops filling).
            if len(available_books) == available_limit:
                continue
            book_data = web.ctx.site.get(key)
            if book_data:
                book = format_book_data(book_data)
                if ocaid not in responses:
                    # If book is not accounted for, err on the side of inclusion
                    available_books.append(book)
                elif 'status' in responses[ocaid]:
                    if responses[ocaid]['status'] == 'available':
                        available_books.append(book)
                    elif len(waitlisted_books) < waitlist_limit:
                        waitlisted_books.append(book)
    return storify(available_books), storify(waitlisted_books)
def popular_carousel(available_limit=30, waitlist_limit=18, loan_check_batch_size=100):
    """Collect popular editions which are available for reading or borrowing.

    Seeds come from `popular.popular`, a pre-generated flatfile mapping of
    (ocaid, OL key) pairs drawn from curated OL lists (see
    popular.generate_popular_list() in data.py). Seeds are consumed in
    batches: each batch is bulk-checked with lending.is_borrowable(), then
    each edition is fetched and sorted into the available or waitlisted
    bucket.

    Args:
        available_limit (int): how many available books to collect
            (preferably divisible by 6, the number shown per page).
        waitlist_limit (int): maximum number of waitlisted books.
        loan_check_batch_size (int): how many archive.org itemids to submit
            per bulk availability check.

    Returns:
        Tuple of storified lists: (available_books, waitlisted_books).
    """
    available_books = []
    waitlisted_books = []
    seeds = popular.popular
    while seeds and len(available_books) < available_limit:
        batch = seeds[:loan_check_batch_size]
        seeds = seeds[loan_check_batch_size:]
        random.shuffle(batch)
        responses = lending.is_borrowable([seed[0] for seed in batch])
        for seed in batch:
            ocaid, key = seed
            if len(available_books) == available_limit:
                continue
            book_data = web.ctx.site.get(key)
            if not book_data:
                # Fix: site.get() returns None for missing/deleted editions;
                # previously format_book_data(None) would raise. The other
                # revision of this function already guards against this.
                continue
            book = format_book_data(book_data)
            if ocaid not in responses:
                # If book is not accounted for, err on the side of inclusion
                available_books.append(book)
            elif 'status' in responses[ocaid]:
                if responses[ocaid]['status'] == 'available':
                    available_books.append(book)
                elif len(waitlisted_books) < waitlist_limit:
                    waitlisted_books.append(book)
    return storify(available_books), storify(waitlisted_books)