def cdb_copy_to_library(ctx, rd, target_library_id, library_id):
    db_src = get_db(ctx, rd, library_id)
    db_dest = get_db(ctx, rd, target_library_id)
    if ctx.restriction_for(rd, db_src) or ctx.restriction_for(rd, db_dest):
        raise HTTPForbidden('Cannot use the copy to library interface with a user who has per library restrictions')
    data = load_payload_data(rd)
    try:
        book_ids = {int(x) for x in data['book_ids']}
        move_books = bool(data.get('move', False))
        preserve_date = bool(data.get('preserve_date', True))
        duplicate_action = data.get('duplicate_action') or 'add'
        automerge_action = data.get('automerge_action') or 'overwrite'
    except Exception:
        raise HTTPBadRequest('Invalid encoded data, must be of the form: {book_ids: [id1, id2, ..]}')
    if duplicate_action not in ('add', 'add_formats_to_existing', 'ignore'):
        raise HTTPBadRequest('duplicate_action must be one of: add, add_formats_to_existing, ignore')
    if automerge_action not in ('overwrite', 'ignore', 'new record'):
        raise HTTPBadRequest('automerge_action must be one of: overwrite, ignore, new record')
    response = {}
    identical_books_data = None
    if duplicate_action != 'add':
        identical_books_data = db_dest.data_for_find_identical_books()
    to_remove = set()
    from calibre.db.copy_to_library import copy_one_book
    for book_id in book_ids:
        try:
            rdata = copy_one_book(
                book_id, db_src, db_dest,
                duplicate_action=duplicate_action, automerge_action=automerge_action,
                preserve_uuid=move_books, preserve_date=preserve_date,
                identical_books_data=identical_books_data)
            if move_books:
                to_remove.add(book_id)
            response[book_id] = {'ok': True, 'payload': rdata}
        except Exception:
            import traceback
            response[book_id] = {'ok': False, 'payload': traceback.format_exc()}
    if to_remove:
        db_src.remove_books(to_remove, permanent=True)
    return response

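# Illustrative request payload for cdb_copy_to_library (a sketch derived from
# the parsing above, not part of the server code). Only 'book_ids' is
# required; every other key falls back to the default shown:
#
#     {
#         'book_ids': [1, 2, 3],
#         'move': False,                    # True also deletes the books from the source library
#         'preserve_date': True,
#         'duplicate_action': 'add',        # or 'add_formats_to_existing' / 'ignore'
#         'automerge_action': 'overwrite',  # or 'ignore' / 'new record'
#     }
#
# The response maps each book id to {'ok': True, 'payload': <copy result>} or,
# on failure, {'ok': False, 'payload': <traceback text>}.
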
def cdb_set_fields(ctx, rd, book_id, library_id):
    db = get_db(ctx, rd, library_id)
    if ctx.restriction_for(rd, db):
        raise HTTPForbidden('Cannot use the set fields interface with a user who has per library restrictions')
    data = load_payload_data(rd)
    try:
        changes, loaded_book_ids = data['changes'], frozenset(map(int, data.get('loaded_book_ids', ())))
        all_dirtied = bool(data.get('all_dirtied'))
        if not isinstance(changes, dict):
            raise TypeError('changes must be a dict')
    except Exception:
        raise HTTPBadRequest(
            '''Data must be of the form {'changes': {'title': 'New Title', ...}, 'loaded_book_ids':[book_id1, book_id2, ...]'}''')
    dirtied = set()
    cdata = changes.pop('cover', False)
    if cdata is not False:
        if cdata is not None:
            try:
                cdata = from_base64_bytes(cdata.split(',', 1)[-1])
            except Exception:
                raise HTTPBadRequest('Cover data is not valid base64 encoded data')
            try:
                fmt = what(None, cdata)
            except Exception:
                fmt = None
            if fmt not in ('jpeg', 'png'):
                raise HTTPBadRequest('Cover data must be either JPEG or PNG')
        dirtied |= db.set_cover({book_id: cdata})
    for field, value in iteritems(changes):
        dirtied |= db.set_field(field, {book_id: value})
    ctx.notify_changes(db.backend.library_path, metadata(dirtied))
    all_ids = dirtied if all_dirtied else (dirtied & loaded_book_ids)
    all_ids |= {book_id}
    return {bid: book_as_json(db, bid) for bid in all_ids}

def cdb_set_fields(ctx, rd, book_id, library_id):
    db = get_db(ctx, rd, library_id)
    if ctx.restriction_for(rd, db):
        raise HTTPForbidden('Cannot use the set fields interface with a user who has per library restrictions')
    data = load_payload_data(rd)
    try:
        changes, loaded_book_ids = data['changes'], frozenset(map(int, data.get('loaded_book_ids', ())))
        all_dirtied = bool(data.get('all_dirtied'))
        if not isinstance(changes, dict):
            raise TypeError('changes must be a dict')
    except Exception:
        raise HTTPBadRequest(
            '''Data must be of the form {'changes': {'title': 'New Title', ...}, 'loaded_book_ids':[book_id1, book_id2, ...]'}''')
    dirtied = set()
    cdata = changes.pop('cover', False)
    if cdata is not False:
        if cdata is not None:
            try:
                cdata = standard_b64decode(cdata.split(',', 1)[-1].encode('ascii'))
            except Exception:
                raise HTTPBadRequest('Cover data is not valid base64 encoded data')
            try:
                fmt = what(None, cdata)
            except Exception:
                fmt = None
            if fmt not in ('jpeg', 'png'):
                raise HTTPBadRequest('Cover data must be either JPEG or PNG')
        dirtied |= db.set_cover({book_id: cdata})
    for field, value in iteritems(changes):
        dirtied |= db.set_field(field, {book_id: value})
    ctx.notify_changes(db.backend.library_path, metadata(dirtied))
    all_ids = dirtied if all_dirtied else (dirtied & loaded_book_ids)
    all_ids |= {book_id}
    return {bid: book_as_json(db, bid) for bid in all_ids}

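# Illustrative request payload for cdb_set_fields (a sketch based on the
# parsing above; which metadata fields are accepted depends on the library):
#
#     {
#         'changes': {
#             'title': 'New Title',
#             'cover': 'data:image/jpeg;base64,<base64 data>',  # or None to remove the cover
#         },
#         'loaded_book_ids': [1, 2, 3],   # extra books whose JSON is returned if dirtied
#         'all_dirtied': False,           # True returns JSON for every dirtied book
#     }
#
# The 'cover' value may carry a data-URL style prefix; everything up to the
# first comma is discarded before base64 decoding, and the decoded bytes must
# be JPEG or PNG.
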
def get_annotations(ctx, rd, library_id, which):
    '''
    Get annotations and last read position data for the specified books, where
    which is of the form: book_id1-fmt1_book_id2-fmt2,...
    '''
    db = get_db(ctx, rd, library_id)
    user = rd.username or '*'
    ans = {}
    allowed_book_ids = ctx.allowed_book_ids(rd, db)
    for item in which.split('_'):
        book_id, fmt = item.partition('-')[::2]
        try:
            book_id = int(book_id)
        except Exception:
            continue
        if book_id not in allowed_book_ids:
            continue
        key = '{}:{}'.format(book_id, fmt)
        ans[key] = {
            'last_read_positions': db.get_last_read_positions(book_id, fmt, user),
            'annotations_map': db.annotations_map_for_book(
                book_id, fmt, user_type='web', user=user) if user else {}
        }
    return ans

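# Illustrative value of ``which`` (derived from the parsing above): items are
# separated by '_' and each item is '<book_id>-<fmt>', e.g.
#
#     '10-EPUB_23-PDF'
#
# which yields the response keys '10:EPUB' and '23:PDF'. Items with a
# non-integer book id, or a book id outside the allowed set, are silently skipped.
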
def cdb_add_book(ctx, rd, job_id, add_duplicates, filename, library_id):
    '''
    Add a file as a new book. The file contents must be in the body of the request.

    The response will also have the title/authors/languages read from the
    metadata of the file/filename. It will contain a `book_id` field
    specifying the id of the newly added book, or if add_duplicates is not
    specified and a duplicate was found, no book_id will be present. It will
    also return the value of `job_id` as the `id` field and `filename` as the
    `filename` field.
    '''
    db = get_db(ctx, rd, library_id)
    if ctx.restriction_for(rd, db):
        raise HTTPForbidden('Cannot use the add book interface with a user who has per library restrictions')
    if not filename:
        raise HTTPBadRequest('An empty filename is not allowed')
    sfilename = sanitize_file_name_unicode(filename)
    fmt = os.path.splitext(sfilename)[1]
    fmt = fmt[1:] if fmt else None
    if not fmt:
        raise HTTPBadRequest('A filename with no extension is not allowed')
    if isinstance(rd.request_body_file, BytesIO):
        raise HTTPBadRequest('A request body containing the file data must be specified')
    add_duplicates = add_duplicates in ('y', '1')
    path = os.path.join(rd.tdir, sfilename)
    rd.request_body_file.name = path
    rd.request_body_file.seek(0)
    mi = get_metadata(rd.request_body_file, stream_type=fmt, use_libprs_metadata=True)
    rd.request_body_file.seek(0)
    ids, duplicates = db.add_books([(mi, {fmt: rd.request_body_file})], add_duplicates=add_duplicates)
    ans = {'title': mi.title, 'authors': mi.authors, 'languages': mi.languages, 'filename': filename, 'id': job_id}
    if ids:
        ans['book_id'] = ids[0]
        books_added(ids)
    return ans

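# Illustrative response for cdb_add_book (a sketch based on the code above;
# the format is taken from the extension of ``filename`` and the file contents
# are read from the raw request body):
#
#     {
#         'title': 'Some Title', 'authors': ['Some Author'], 'languages': ['eng'],
#         'filename': 'some-title.epub', 'id': <job_id>,
#         'book_id': 123,   # only present when a book was actually added
#     }
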
def books_in(ctx, rd, encoded_category, encoded_item, library_id):
    '''
    Return the books (as list of ids) present in the specified category.

    Optional: ?num=100&offset=0&sort=title&sort_order=asc&get_additional_fields=
    '''
    db = get_db(ctx, rd, library_id)
    with db.safe_read_lock:
        try:
            dname, ditem = map(decode_name, (encoded_category, encoded_item))
        except:
            raise HTTPNotFound('Invalid encoded param: %r (%r)' % (encoded_category, encoded_item))
        num, offset = get_pagination(rd.query)
        sort, sort_order = rd.query.get('sort', 'title'), rd.query.get('sort_order')
        sort_order = ensure_val(sort_order, 'asc', 'desc')
        sfield = sanitize_sort_field_name(db.field_metadata, sort)
        if sfield not in db.field_metadata.sortable_field_keys():
            raise HTTPNotFound('%s is not a valid sort field'%sort)

        if dname in ('allbooks', 'newest'):
            ids = ctx.allowed_book_ids(rd, db)
        elif dname == 'search':
            try:
                ids = ctx.search(rd, db, 'search:"%s"'%ditem)
            except Exception:
                raise HTTPNotFound('Search: %r not understood'%ditem)
        else:
            try:
                cid = int(ditem)
            except Exception:
                raise HTTPNotFound('Category id %r not an integer'%ditem)
            if dname == 'news':
                dname = 'tags'
            ids = db.get_books_for_category(dname, cid).intersection(ctx.allowed_book_ids(rd, db))

        ids = db.multisort(fields=[(sfield, sort_order == 'asc')], ids_to_sort=ids)
        total_num = len(ids)
        ids = ids[offset:offset+num]

        result = {
            'total_num': total_num, 'sort_order':sort_order,
            'offset':offset, 'num':len(ids), 'sort':sort,
            'base_url':ctx.url_for(books_in, encoded_category=encoded_category, encoded_item=encoded_item, library_id=db.server_library_id),
            'book_ids':ids
        }

        get_additional_fields = rd.query.get('get_additional_fields')
        if get_additional_fields:
            additional_fields = {}
            for field in get_additional_fields.split(','):
                field = field.strip()
                if field:
                    flist = additional_fields[field] = []
                    for id_ in ids:
                        flist.append(db.field_for(field, id_, default_value=None))
            if additional_fields:
                result['additional_fields'] = additional_fields
        return result

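# Illustrative use of the get_additional_fields query parameter (derived from
# the code above): ?get_additional_fields=title,authors adds
#
#     'additional_fields': {'title': [...], 'authors': [...]}
#
# to the result, with one value per returned book id, in the same order as
# 'book_ids'.
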
def cdb_set_cover(ctx, rd, book_id, library_id):
    db = get_db(ctx, rd, library_id)
    if ctx.restriction_for(rd, db):
        raise HTTPForbidden('Cannot use the set cover interface with a user who has per library restrictions')
    rd.request_body_file.seek(0)
    dirtied = db.set_cover({book_id: rd.request_body_file})
    ctx.notify_changes(db.backend.library_path, metadata(dirtied))
    return tuple(dirtied)

def categories(ctx, rd, library_id):
    '''
    Return the list of top-level categories as a list of dictionaries. Each
    dictionary is of the form::

        {
        'name': Display Name,
        'url': URL that gives the JSON object corresponding to all entries in this category,
        'icon': URL to icon of this category,
        'is_category': False for the All Books and Newest categories, True for everything else
        }
    '''
    db = get_db(ctx, rd, library_id)
    with db.safe_read_lock:
        ans = {}
        categories = ctx.get_categories(rd, db)
        category_meta = db.field_metadata
        library_id = db.server_library_id

        def getter(x):
            return category_meta[x]['name']

        displayed_custom_fields = custom_fields_to_display(db)

        for category in sorted(categories, key=lambda x: sort_key(getter(x))):
            if len(categories[category]) == 0:
                continue
            if category in ('formats', 'identifiers'):
                continue
            meta = category_meta.get(category, None)
            if meta is None:
                continue
            if category_meta.is_ignorable_field(category) and \
                    category not in displayed_custom_fields:
                continue
            display_name = meta['name']
            if category.startswith('@'):
                category = category.partition('.')[0]
                display_name = category[1:]
            url = force_unicode(category)
            icon = category_icon(category, meta)
            ans[url] = (display_name, icon)

        ans = [{'url':k, 'name':v[0], 'icon':v[1], 'is_category':True}
               for k, v in ans.iteritems()]
        ans.sort(key=lambda x: sort_key(x['name']))
        for name, url, icon in [
                (_('All books'), 'allbooks', 'book.png'),
                (_('Newest'), 'newest', 'forward.png'),
        ]:
            ans.insert(0, {'name':name, 'url':url, 'icon':icon, 'is_category':False})

        for c in ans:
            c['url'] = ctx.url_for(globals()['category'], encoded_name=encode_name(c['url']), library_id=library_id)
            c['icon'] = ctx.url_for(get_icon, which=c['icon'])

        return ans

def get(ctx, rd, what, book_id, library_id):
    book_id, rest = book_id.partition('_')[::2]
    try:
        book_id = int(book_id)
    except Exception:
        raise HTTPNotFound('Book with id %r does not exist' % book_id)
    db = get_db(ctx, rd, library_id)
    if db is None:
        raise HTTPNotFound('Library %r not found' % library_id)
    with db.safe_read_lock:
        if not ctx.has_id(rd, db, book_id):
            raise BookNotFound(book_id, db)
        library_id = db.server_library_id  # in case library_id was None
        if what == 'thumb':
            sz = rd.query.get('sz')
            w, h = 60, 80
            if sz is None:
                if rest:
                    try:
                        w, h = map(int, rest.split('_'))
                    except Exception:
                        pass
            elif sz == 'full':
                w = h = None
            elif 'x' in sz:
                try:
                    w, h = map(int, sz.partition('x')[::2])
                except Exception:
                    pass
            else:
                try:
                    w = h = int(sz)
                except Exception:
                    pass
            return cover(ctx, rd, library_id, db, book_id, width=w, height=h)
        elif what == 'cover':
            return cover(ctx, rd, library_id, db, book_id)
        elif what == 'opf':
            mi = db.get_metadata(book_id, get_cover=False)
            rd.outheaders['Content-Type'] = 'application/oebps-package+xml; charset=UTF-8'
            rd.outheaders['Last-Modified'] = http_date(timestampfromdt(mi.last_modified))
            return metadata_to_opf(mi)
        elif what == 'json':
            from calibre.srv.ajax import book_to_json
            data, last_modified = book_to_json(ctx, rd, db, book_id)
            rd.outheaders['Last-Modified'] = http_date(timestampfromdt(last_modified))
            return json(ctx, rd, get, data)
        else:
            try:
                return book_fmt(ctx, rd, library_id, db, book_id, what.lower())
            except NoSuchFormat:
                raise HTTPNotFound('No %s format for the book %r' % (what.lower(), book_id))

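# Illustrative thumbnail sizing for what == 'thumb' (derived from the parsing
# above):
#
#     ?sz=full     -> original cover size (width and height left unset)
#     ?sz=300x400  -> width 300, height 400
#     ?sz=150      -> width 150, height 150
#     no sz        -> default 60x80, unless the book_id path segment has the
#                     form '<book_id>_<w>_<h>', e.g. '123_200_300' -> 200x300
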
def books(ctx, rd, library_id):
    '''
    Return the metadata for the books as a JSON dictionary.

    Query parameters: ?ids=all&category_urls=true&id_is_uuid=false&device_for_template=None

    If category_urls is true the returned dictionary also contains a
    mapping of category (field) names to URLs that return the list of books in
    the given category.

    If id_is_uuid is true then the book_id is assumed to be a book uuid instead.
    '''
    db = get_db(ctx, rd, library_id)
    with db.safe_read_lock:
        id_is_uuid = rd.query.get('id_is_uuid', 'false')
        ids = rd.query.get('ids')
        if ids is None or ids == 'all':
            ids = db.all_book_ids()
        else:
            ids = ids.split(',')
            if id_is_uuid == 'true':
                ids = {db.lookup_by_uuid(x) for x in ids}
                ids.discard(None)
            else:
                try:
                    ids = {int(x) for x in ids}
                except Exception:
                    raise HTTPNotFound('ids must be a comma separated list of integers')
        last_modified = None
        category_urls = rd.query.get('category_urls', 'true').lower() == 'true'
        device_compatible = rd.query.get('device_compatible', 'false').lower() == 'true'
        device_for_template = rd.query.get('device_for_template', None)
        ans = {}
        allowed_book_ids = ctx.allowed_book_ids(rd, db)
        for book_id in ids:
            if book_id not in allowed_book_ids:
                ans[book_id] = None
                continue
            data, lm = book_to_json(
                ctx, rd, db, book_id,
                get_category_urls=category_urls,
                device_compatible=device_compatible,
                device_for_template=device_for_template)
            last_modified = lm if last_modified is None else max(lm, last_modified)
            ans[book_id] = data
        if last_modified is not None:
            rd.outheaders['Last-Modified'] = http_date(timestampfromdt(last_modified))
        return ans

def cdb_delete_book(ctx, rd, book_ids, library_id):
    db = get_db(ctx, rd, library_id)
    if ctx.restriction_for(rd, db):
        raise HTTPForbidden('Cannot use the delete book interface with a user who has per library restrictions')
    try:
        ids = {int(x) for x in book_ids.split(',')}
    except Exception:
        raise HTTPBadRequest('invalid book_ids: {}'.format(book_ids))
    db.remove_books(ids)
    books_deleted(ids)
    return {}

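# Illustrative path component for cdb_delete_book (derived from the parsing
# above): book_ids is a comma separated list of integer ids, e.g. '10,11,12',
# which is passed to db.remove_books(); the response body is an empty object.
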
def search(ctx, rd, library_id):
    '''
    Return the books (as list of ids) matching the specified search query.

    Optional: ?num=100&offset=0&sort=title&sort_order=asc&query=
    '''
    db = get_db(ctx, rd, library_id)
    query = rd.query.get('query')
    num, offset = get_pagination(rd.query)
    with db.safe_read_lock:
        return search_result(ctx, rd, db, query, num, offset,
                             rd.query.get('sort', 'title'), rd.query.get('sort_order', 'asc'))

def cdb_set_fields(ctx, rd, book_id, library_id):
    db = get_db(ctx, rd, library_id)
    if ctx.restriction_for(rd, db):
        raise HTTPForbidden('Cannot use the set fields interface with a user who has per library restrictions')
    raw = rd.read()
    ct = rd.inheaders.get('Content-Type', all=True)
    ct = {x.lower().partition(';')[0] for x in ct}
    try:
        if MSGPACK_MIME in ct:
            data = msgpack_loads(raw)
        elif 'application/json' in ct:
            data = json_loads(raw)
        else:
            raise HTTPBadRequest('Only JSON or msgpack requests are supported')
    except Exception:
        raise HTTPBadRequest('Invalid encoded data')
    try:
        changes, loaded_book_ids = data['changes'], frozenset(map(int, data.get('loaded_book_ids', ())))
        all_dirtied = bool(data.get('all_dirtied'))
        if not isinstance(changes, dict):
            raise TypeError('changes must be a dict')
    except Exception:
        raise HTTPBadRequest(
            '''Data must be of the form {'changes': {'title': 'New Title', ...}, 'loaded_book_ids':[book_id1, book_id2, ...]'}''')
    dirtied = set()
    cdata = changes.pop('cover', False)
    if cdata is not False:
        if cdata is not None:
            try:
                cdata = standard_b64decode(cdata.split(',', 1)[-1].encode('ascii'))
            except Exception:
                raise HTTPBadRequest('Cover data is not valid base64 encoded data')
            try:
                fmt = what(None, cdata)
            except Exception:
                fmt = None
            if fmt not in ('jpeg', 'png'):
                raise HTTPBadRequest('Cover data must be either JPEG or PNG')
        dirtied |= db.set_cover({book_id: cdata})
    for field, value in changes.iteritems():
        dirtied |= db.set_field(field, {book_id: value})
    ctx.notify_changes(db.backend.library_path, metadata(dirtied))
    all_ids = dirtied if all_dirtied else (dirtied & loaded_book_ids)
    all_ids |= {book_id}
    return {bid: book_as_json(db, bid) for bid in all_ids}

def set_last_read_position(ctx, rd, library_id, book_id, fmt):
    db = get_db(ctx, rd, library_id)
    user = rd.username or None
    allowed_book_ids = ctx.allowed_book_ids(rd, db)
    if book_id not in allowed_book_ids:
        raise HTTPNotFound('No book with id {} found'.format(book_id))
    try:
        data = jsonlib.load(rd.request_body_file)
        device, cfi, pos_frac = data['device'], data['cfi'], data['pos_frac']
    except Exception:
        raise HTTPNotFound('Invalid data')
    db.set_last_read_position(
        book_id, fmt, user=user, device=device, cfi=cfi or None, pos_frac=pos_frac)

def set_last_read_position(ctx, rd, library_id, book_id, fmt):
    db = get_db(ctx, rd, library_id)
    user = rd.username or None
    if not ctx.has_id(rd, db, book_id):
        raise BookNotFound(book_id, db)
    try:
        data = jsonlib.load(rd.request_body_file)
        device, cfi, pos_frac = data['device'], data['cfi'], data['pos_frac']
    except Exception:
        raise HTTPNotFound('Invalid data')
    db.set_last_read_position(
        book_id, fmt, user=user, device=device, cfi=cfi or None, pos_frac=pos_frac)
    rd.outheaders['Content-type'] = 'text/plain'
    return b''

def search(ctx, rd, library_id):
    '''
    Return the books matching the specified search query.

    The returned object is a dict with the field book_ids which is a list of
    matched book ids. For all the other fields in the object, see
    :func:`search_result`.

    Optional: ?num=100&offset=0&sort=title&sort_order=asc&query=&vl=
    '''
    db = get_db(ctx, rd, library_id)
    query = rd.query.get('query')
    num, offset = get_pagination(rd.query)
    with db.safe_read_lock:
        return search_result(
            ctx, rd, db, query, num, offset, rd.query.get('sort', 'title'),
            rd.query.get('sort_order', 'asc'), rd.query.get('vl') or '')

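# Illustrative request (a sketch; the handler just reads rd.query and delegates
# to search_result()). The query uses calibre's search syntax and 'vl'
# optionally names a virtual library:
#
#     ?query=title:dune&num=50&offset=0&sort=pubdate&sort_order=desc&vl=
#
# With no query parameter, search_result() receives query=None.
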
def set_last_read_position(ctx, rd, library_id, book_id, fmt):
    db = get_db(ctx, rd, library_id)
    user = rd.username or None
    allowed_book_ids = ctx.allowed_book_ids(rd, db)
    if book_id not in allowed_book_ids:
        raise HTTPNotFound('No book with id {} found'.format(book_id))
    try:
        data = jsonlib.load(rd.request_body_file)
        device, cfi, pos_frac = data['device'], data['cfi'], data['pos_frac']
    except Exception:
        raise HTTPNotFound('Invalid data')
    db.set_last_read_position(
        book_id, fmt, user=user, device=device, cfi=cfi or None, pos_frac=pos_frac)
    rd.outheaders['Content-type'] = 'text/plain'
    return b''

def update_annotations(ctx, rd, library_id, book_id, fmt):
    db = get_db(ctx, rd, library_id)
    user = rd.username or '*'
    if not ctx.has_id(rd, db, book_id):
        raise BookNotFound(book_id, db)
    try:
        amap = jsonlib.load(rd.request_body_file)
    except Exception:
        raise HTTPNotFound('Invalid data')
    alist = []
    for val in itervalues(amap):
        if val:
            alist.extend(val)
    db.merge_annotations_for_book(book_id, fmt, alist, user_type='web', user=user)
    return b''

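# Illustrative request body for update_annotations (a sketch based on the code
# above): a JSON object whose values are lists of annotation objects, for
# example keyed by annotation type. The keys themselves are ignored by this
# handler; all non-empty lists are concatenated and merged into the stored
# annotations for the given book, format and user:
#
#     {'highlight': [<annotation>, ...], 'bookmark': [<annotation>, ...]}
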
def books(ctx, rd, library_id):
    '''
    Return the metadata for the books as a JSON dictionary.

    Query parameters: ?ids=all&category_urls=true&id_is_uuid=false&device_for_template=None

    If category_urls is true the returned dictionary also contains a
    mapping of category (field) names to URLs that return the list of books in
    the given category.

    If id_is_uuid is true then the book_id is assumed to be a book uuid instead.
    '''
    db = get_db(ctx, rd, library_id)
    with db.safe_read_lock:
        id_is_uuid = rd.query.get('id_is_uuid', 'false')
        ids = rd.query.get('ids')
        if ids is None or ids == 'all':
            ids = db.all_book_ids()
        else:
            ids = ids.split(',')
            if id_is_uuid == 'true':
                ids = {db.lookup_by_uuid(x) for x in ids}
                ids.discard(None)
            else:
                try:
                    ids = {int(x) for x in ids}
                except Exception:
                    raise HTTPNotFound('ids must be a comma separated list of integers')
        last_modified = None
        category_urls = rd.query.get('category_urls', 'true').lower() == 'true'
        device_compatible = rd.query.get('device_compatible', 'false').lower() == 'true'
        device_for_template = rd.query.get('device_for_template', None)
        ans = {}
        restricted_to = ctx.allowed_book_ids(rd, db)
        for book_id in ids:
            if book_id not in restricted_to:
                ans[book_id] = None
                continue
            data, lm = book_to_json(
                ctx, rd, db, book_id,
                get_category_urls=category_urls,
                device_compatible=device_compatible,
                device_for_template=device_for_template)
            last_modified = lm if last_modified is None else max(lm, last_modified)
            ans[book_id] = data
        if last_modified is not None:
            rd.outheaders['Last-Modified'] = http_date(timestampfromdt(last_modified))
        return ans

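# Illustrative queries for books() (derived from the parsing above):
#
#     ?ids=all                          -> metadata for every book in the library
#     ?ids=10,11,12                     -> metadata for those ids; ids outside the
#                                          allowed set map to None in the response
#     ?ids=<uuid1>,<uuid2>&id_is_uuid=true
#                                       -> the ids are resolved as book uuids first
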
def get_last_read_position(ctx, rd, library_id, which):
    '''
    Get last read position data for the specified books, where which is of the
    form: book_id1-fmt1_book_id2-fmt2,...
    '''
    db = get_db(ctx, rd, library_id)
    user = rd.username or None
    ans = {}
    allowed_book_ids = ctx.allowed_book_ids(rd, db)
    for item in which.split('_'):
        book_id, fmt = item.partition('-')[::2]
        try:
            book_id = int(book_id)
        except Exception:
            continue
        if book_id not in allowed_book_ids:
            continue
        key = '{}:{}'.format(book_id, fmt)
        ans[key] = db.get_last_read_positions(book_id, fmt, user)
    return ans

def book(ctx, rd, book_id, library_id):
    '''
    Return the metadata of the book as a JSON dictionary.

    Query parameters: ?category_urls=true&id_is_uuid=false&device_for_template=None

    If category_urls is true the returned dictionary also contains a
    mapping of category (field) names to URLs that return the list of books in
    the given category.

    If id_is_uuid is true then the book_id is assumed to be a book uuid instead.
    '''
    db = get_db(ctx, rd, library_id)
    with db.safe_read_lock:
        id_is_uuid = rd.query.get('id_is_uuid', 'false')
        oid = book_id
        if id_is_uuid == 'true':
            book_id = db.lookup_by_uuid(book_id)
        else:
            try:
                book_id = int(book_id)
                if not db.has_id(book_id):
                    book_id = None
            except Exception:
                book_id = None
        if book_id is None or not ctx.has_id(rd, db, book_id):
            raise BookNotFound(oid, db)
        category_urls = rd.query.get('category_urls', 'true').lower()
        device_compatible = rd.query.get('device_compatible', 'false').lower()
        device_for_template = rd.query.get('device_for_template', None)
        data, last_modified = book_to_json(
            ctx, rd, db, book_id,
            get_category_urls=category_urls == 'true',
            device_compatible=device_compatible == 'true',
            device_for_template=device_for_template)
        rd.outheaders['Last-Modified'] = http_date(timestampfromdt(last_modified))
        return data

def get_last_read_position(ctx, rd, library_id, which):
    '''
    Get last read position data for the specified books, where which is of the
    form: book_id1-fmt1_book_id2-fmt2,...
    '''
    db = get_db(ctx, rd, library_id)
    user = rd.username or None
    if not user:
        raise HTTPNotFound('login required for sync')
    ans = {}
    allowed_book_ids = ctx.allowed_book_ids(rd, db)
    for item in which.split('_'):
        book_id, fmt = item.partition('-')[::2]
        try:
            book_id = int(book_id)
        except Exception:
            continue
        if book_id not in allowed_book_ids:
            continue
        key = f'{book_id}:{fmt}'
        ans[key] = db.get_last_read_positions(book_id, fmt, user)
    return ans

def cdb_set_fields(ctx, rd, book_id, library_id):
    db = get_db(ctx, rd, library_id)
    if ctx.restriction_for(rd, db):
        raise HTTPForbidden('Cannot use the set fields interface with a user who has per library restrictions')
    raw = rd.read()
    ct = rd.inheaders.get('Content-Type', all=True)
    ct = {x.lower().partition(';')[0] for x in ct}
    try:
        if MSGPACK_MIME in ct:
            data = msgpack_loads(raw)
        elif 'application/json' in ct:
            data = json_loads(raw)
        else:
            raise HTTPBadRequest('Only JSON or msgpack requests are supported')
        changes, loaded_book_ids = data['changes'], frozenset(map(int, data['loaded_book_ids']))
    except Exception:
        raise HTTPBadRequest('Invalid encoded data')
    dirtied = set()
    for field, value in changes.iteritems():
        dirtied |= db.set_field(field, {book_id: value})
    metadata(dirtied)
    return {bid: book_as_json(db, bid) for bid in (dirtied & loaded_book_ids) | {book_id}}

def book(ctx, rd, book_id, library_id):
    '''
    Return the metadata of the book as a JSON dictionary.

    Query parameters: ?category_urls=true&id_is_uuid=false&device_for_template=None

    If category_urls is true the returned dictionary also contains a
    mapping of category (field) names to URLs that return the list of books in
    the given category.

    If id_is_uuid is true then the book_id is assumed to be a book uuid instead.
    '''
    db = get_db(ctx, rd, library_id)
    with db.safe_read_lock:
        id_is_uuid = rd.query.get('id_is_uuid', 'false')
        oid = book_id
        if id_is_uuid == 'true':
            book_id = db.lookup_by_uuid(book_id)
        else:
            try:
                book_id = int(book_id)
                if not db.has_id(book_id):
                    book_id = None
            except Exception:
                book_id = None
        if book_id is None or book_id not in ctx.allowed_book_ids(rd, db):
            raise HTTPNotFound('Book with id %r does not exist' % oid)
        category_urls = rd.query.get('category_urls', 'true').lower()
        device_compatible = rd.query.get('device_compatible', 'false').lower()
        device_for_template = rd.query.get('device_for_template', None)
        data, last_modified = book_to_json(
            ctx, rd, db, book_id,
            get_category_urls=category_urls == 'true',
            device_compatible=device_compatible == 'true',
            device_for_template=device_for_template)
        rd.outheaders['Last-Modified'] = http_date(timestampfromdt(last_modified))
        return data

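# Illustrative values for the book_id argument of book() (derived from the
# code above):
#
#     '123'                         -> numeric id; must exist in the database and
#                                      be visible to the requesting user
#     '<uuid>' with ?id_is_uuid=true -> resolved via db.lookup_by_uuid()
#
# Any lookup failure raises HTTPNotFound reporting the original identifier.
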
def cdb_set_fields(ctx, rd, book_id, library_id):
    db = get_db(ctx, rd, library_id)
    if ctx.restriction_for(rd, db):
        raise HTTPForbidden('Cannot use the set fields interface with a user who has per library restrictions')
    raw = rd.read()
    ct = rd.inheaders.get('Content-Type', all=True)
    ct = {x.lower().partition(';')[0] for x in ct}
    try:
        if MSGPACK_MIME in ct:
            data = msgpack_loads(raw)
        elif 'application/json' in ct:
            data = json_loads(raw)
        else:
            raise HTTPBadRequest('Only JSON or msgpack requests are supported')
        changes, loaded_book_ids = data['changes'], frozenset(map(int, data['loaded_book_ids']))
    except Exception:
        raise HTTPBadRequest('Invalid encoded data')
    dirtied = set()
    cdata = changes.pop('cover', False)
    if cdata is not False:
        if cdata is not None:
            try:
                cdata = standard_b64decode(cdata.split(',', 1)[-1].encode('ascii'))
            except Exception:
                raise HTTPBadRequest('Cover data is not valid base64 encoded data')
            try:
                fmt = what(None, cdata)
            except Exception:
                fmt = None
            if fmt not in ('jpeg', 'png'):
                raise HTTPBadRequest('Cover data must be either JPEG or PNG')
        dirtied |= db.set_cover({book_id: cdata})
    for field, value in changes.iteritems():
        dirtied |= db.set_field(field, {book_id: value})
    ctx.notify_changes(db.backend.library_path, metadata(dirtied))
    return {bid: book_as_json(db, bid) for bid in (dirtied & loaded_book_ids) | {book_id}}

def category(ctx, rd, encoded_name, library_id):
    '''
    Return a dictionary describing the category specified by name.

    Optional: ?num=100&offset=0&sort=name&sort_order=asc

    The dictionary looks like::

        {
        'category_name': Category display name,
        'base_url': Base URL for this category,
        'total_num': Total number of items in this category,
        'offset': The offset for the items returned in this result,
        'num': The number of items returned in this result,
        'sort': How the returned items are sorted,
        'sort_order': asc or desc
        'subcategories': List of sub categories of this category.
        'items': List of items in this category,
        }

    Each subcategory is a dictionary of the same form as those returned by
    /ajax/categories

    Each item is a dictionary of the form::

        {
        'name': Display name,
        'average_rating': Average rating for books in this item,
        'count': Number of books in this item,
        'url': URL to get list of books in this item,
        'has_children': If True this item contains sub categories, look for an
            entry corresponding to this item in subcategories in the main
            dictionary,
        }

    :param sort: How to sort the returned items. Choices are: name, rating,
                 popularity
    :param sort_order: asc or desc

    To learn how to create subcategories see
    https://manual.calibre-ebook.com/sub_groups.html
    '''
    db = get_db(ctx, rd, library_id)
    with db.safe_read_lock:
        num, offset = get_pagination(rd.query)
        sort, sort_order = rd.query.get('sort'), rd.query.get('sort_order')
        sort = ensure_val(sort, 'name', 'rating', 'popularity')
        sort_order = ensure_val(sort_order, 'asc', 'desc')
        try:
            dname = decode_name(encoded_name)
        except:
            raise HTTPNotFound('Invalid encoding of category name %r'%encoded_name)
        base_url = ctx.url_for(globals()['category'], encoded_name=encoded_name, library_id=db.server_library_id)

        if dname in ('newest', 'allbooks'):
            sort, sort_order = 'timestamp', 'desc'
            rd.query['sort'], rd.query['sort_order'] = sort, sort_order
            return books_in(ctx, rd, encoded_name, encode_name('0'), library_id)

        fm = db.field_metadata
        categories = ctx.get_categories(rd, db)
        hierarchical_categories = db.pref('categories_using_hierarchy', ())

        subcategory = dname
        toplevel = subcategory.partition('.')[0]
        if toplevel == subcategory:
            subcategory = None
        if toplevel not in categories or toplevel not in fm:
            raise HTTPNotFound('Category %r not found'%toplevel)

        # Find items and sub categories
        subcategories = []
        meta = fm[toplevel]
        item_names = {}
        children = set()

        if meta['kind'] == 'user':
            fullname = ((toplevel + '.' + subcategory) if subcategory is not None else toplevel)
            try:
                # User categories cannot be applied to books, so this is the
                # complete set of items, no need to consider sub categories
                items = categories[fullname]
            except:
                raise HTTPNotFound('User category %r not found'%fullname)

            parts = fullname.split('.')
            for candidate in categories:
                cparts = candidate.split('.')
                if len(cparts) == len(parts)+1 and cparts[:-1] == parts:
                    subcategories.append({'name':cparts[-1],
                                          'url':candidate,
                                          'icon':category_icon(toplevel, meta)})

            category_name = toplevel[1:].split('.')
            # When browsing by user categories we ignore hierarchical normal
            # columns, so children can be empty

        elif toplevel in hierarchical_categories:
            items = []

            category_names = [x.original_name.split('.') for x in categories[toplevel]
                              if '.' in x.original_name]

            if subcategory is None:
                children = set(x[0] for x in category_names)
                category_name = [meta['name']]
                items = [x for x in categories[toplevel] if '.' not in x.original_name]
            else:
                subcategory_parts = subcategory.split('.')[1:]
                category_name = [meta['name']] + subcategory_parts

                lsp = len(subcategory_parts)
                children = set('.'.join(x) for x in category_names
                               if len(x) == lsp+1 and x[:lsp] == subcategory_parts)
                items = [x for x in categories[toplevel] if x.original_name in children]
                item_names = {x:x.original_name.rpartition('.')[-1] for x in items}
                # Only mark the subcategories that have children themselves as
                # subcategories
                children = set('.'.join(x[:lsp+1]) for x in category_names
                               if len(x) > lsp+1 and x[:lsp] == subcategory_parts)

            subcategories = [{'name':x.rpartition('.')[-1],
                              'url':toplevel+'.'+x,
                              'icon':category_icon(toplevel, meta)} for x in children]
        else:
            items = categories[toplevel]
            category_name = meta['name']

        for x in subcategories:
            x['url'] = ctx.url_for(globals()['category'], encoded_name=encode_name(x['url']), library_id=db.server_library_id)
            x['icon'] = ctx.url_for(get_icon, which=x['icon'])
            x['is_category'] = True

        sort_keygen = {
            'name': lambda x: sort_key(x.sort if x.sort else x.original_name),
            'popularity': lambda x: x.count,
            'rating': lambda x: x.avg_rating
        }
        items.sort(key=sort_keygen[sort], reverse=sort_order == 'desc')
        total_num = len(items)
        items = items[offset:offset+num]
        items = [{
            'name':item_names.get(x, x.original_name),
            'average_rating': x.avg_rating,
            'count': x.count,
            'url': ctx.url_for(books_in,
                               encoded_category=encode_name(x.category if x.category else toplevel),
                               encoded_item=encode_name(x.original_name if x.id is None else unicode(x.id)),
                               library_id=db.server_library_id),
            'has_children': x.original_name in children,
        } for x in items]

        return {
            'category_name': category_name,
            'base_url': base_url,
            'total_num': total_num,
            'offset':offset, 'num':len(items), 'sort':sort,
            'sort_order':sort_order,
            'subcategories':subcategories,
            'items':items,
        }

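# Illustrative values of the decoded category name (a sketch of the branching
# above, not additional server behaviour):
#
#     'allbooks' / 'newest' -> delegated to books_in(), sorted by timestamp desc
#     'tags'                -> a top-level category; for hierarchical columns only
#                              items without a '.' in their name are listed
#     'tags.History'        -> drill-down into a hierarchical column; items are
#                              the direct children named 'History.<child>'
#     '@Group.Sub'          -> a user category (kind == 'user'); its sub categories
#                              are the user categories nested one level deeper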