def open_for_write(fname):
    """Open *fname* for shared read/write, creating its parent directory on demand."""
    try:
        return share_open(fname, 'w+b')
    except EnvironmentError:
        # Most likely the parent directory does not exist yet; create it and
        # retry once. Creation errors are ignored (e.g. lost race with another
        # creator) -- the retry below will surface any real problem.
        try:
            os.makedirs(os.path.dirname(fname))
        except EnvironmentError:
            pass
        return share_open(fname, 'w+b')
def icon(ctx, rd, which):
    """Serve the image resource named *which*, scaled to ``sz`` pixels and
    cached under the server temp dir, or the full-size original when
    ``sz=full``. Raises HTTPNotFound for unknown/escaping paths."""
    sz = rd.query.get("sz")
    if sz != "full":
        try:
            sz = int(rd.query.get("sz", 48))
        except Exception:
            sz = 48
    if which in {"", "_"}:
        raise HTTPNotFound()
    if which.startswith("_"):
        # Leading underscore selects user-installed toolbar icons
        base = os.path.join(config_dir, "tb_icons")
        path = os.path.abspath(os.path.join(base, *which[1:].split("/")))
        if not path.startswith(base) or ":" in which:
            raise HTTPNotFound("Naughty, naughty!")
    else:
        base = P("images", allow_user_override=False)
        path = os.path.abspath(os.path.join(base, *which.split("/")))
        if not path.startswith(base) or ":" in which:
            raise HTTPNotFound("Naughty, naughty!")
        # Re-resolve through P() so user overrides of images are honored
        path = os.path.relpath(path, base).replace(os.sep, "/")
        path = P("images/" + path)
    if sz == "full":
        try:
            return share_open(path, "rb")
        except EnvironmentError:
            raise HTTPNotFound()
    with lock:
        tdir = os.path.join(rd.tdir, "icons")
        cached = os.path.join(tdir, "%d-%s.png" % (sz, which))
        try:
            return share_open(cached, "rb")
        except EnvironmentError:
            pass
        try:
            src = share_open(path, "rb")
        except EnvironmentError:
            raise HTTPNotFound()
        with src:
            img = Image()
            img.load(src.read())
        width, height = img.size
        scaled, width, height = fit_image(width, height, sz, sz)
        if scaled:
            img.size = (width, height)
        try:
            ans = share_open(cached, "w+b")
        except EnvironmentError:
            # FIX: `which` may contain '/', so `cached` can live in a
            # subdirectory of tdir; os.mkdir(tdir) alone could never create
            # it. Create the full parent chain of the cache file instead.
            try:
                os.makedirs(os.path.dirname(cached))
            except EnvironmentError:
                pass
            ans = share_open(cached, "w+b")
        ans.write(img.export("png"))
        ans.seek(0)
        return ans
def icon(ctx, rd, which):
    'Serve the image resource *which*, scaled and cached unless sz == "full".'
    sz = rd.query.get('sz')
    if sz != 'full':
        try:
            sz = int(rd.query.get('sz', 48))
        except Exception:
            sz = 48
    if which in {'', '_'}:
        raise HTTPNotFound()
    # A leading underscore selects user toolbar icons; everything else comes
    # from the bundled images. Both branches reject path escapes.
    is_tb_icon = which.startswith('_')
    rel = which[1:] if is_tb_icon else which
    if is_tb_icon:
        base = os.path.join(config_dir, 'tb_icons')
    else:
        base = P('images', allow_user_override=False)
    path = os.path.abspath(os.path.join(base, *rel.split('/')))
    if not path.startswith(base) or ':' in which:
        raise HTTPNotFound('Naughty, naughty!')
    if not is_tb_icon:
        # Re-resolve via P() so user overrides are honored
        path = P('images/' + os.path.relpath(path, base).replace(os.sep, '/'))
    if sz == 'full':
        try:
            return share_open(path, 'rb')
        except EnvironmentError:
            raise HTTPNotFound()
    with lock:
        tdir = os.path.join(rd.tdir, 'icons')
        cached = os.path.join(tdir, '%d-%s.png' % (sz, which))
        try:
            return share_open(cached, 'rb')
        except EnvironmentError:
            pass
        try:
            src = share_open(path, 'rb')
        except EnvironmentError:
            raise HTTPNotFound()
        with src:
            img = Image()
            img.load(src.read())
        width, height = img.size
        scaled, width, height = fit_image(width, height, sz, sz)
        if scaled:
            img.size = (width, height)
        try:
            ans = share_open(cached, 'w+b')
        except EnvironmentError:
            try:
                os.mkdir(tdir)
            except EnvironmentError:
                pass
            ans = share_open(cached, 'w+b')
        ans.write(img.export('png'))
        ans.seek(0)
        return ans
def icon(ctx, rd, which):
    """Serve the image resource named *which*, scaled to ``sz`` pixels and
    cached in the server temp dir, or the original bytes when ``sz=full``."""
    sz = rd.query.get('sz')
    if sz != 'full':
        try:
            sz = int(rd.query.get('sz', 48))
        except Exception:
            sz = 48
    if which in {'', '_'}:
        raise HTTPNotFound()
    if which.startswith('_'):
        # Leading underscore selects user-installed toolbar icons
        base = os.path.join(config_dir, 'tb_icons')
        path = os.path.abspath(os.path.join(base, *which[1:].split('/')))
        if not path.startswith(base) or ':' in which:
            raise HTTPNotFound('Naughty, naughty!')
    else:
        base = P('images', allow_user_override=False)
        path = os.path.abspath(os.path.join(base, *which.split('/')))
        if not path.startswith(base) or ':' in which:
            raise HTTPNotFound('Naughty, naughty!')
        # Re-resolve through P() so user overrides of images are honored
        path = os.path.relpath(path, base).replace(os.sep, '/')
        path = P('images/' + path)
    if sz == 'full':
        try:
            return share_open(path, 'rb')
        except EnvironmentError:
            raise HTTPNotFound()
    with lock:
        tdir = os.path.join(rd.tdir, 'icons')
        cached = os.path.join(tdir, '%d-%s.png' % (sz, which))
        try:
            return share_open(cached, 'rb')
        except EnvironmentError:
            pass
        try:
            src = share_open(path, 'rb')
        except EnvironmentError:
            raise HTTPNotFound()
        with src:
            idata = src.read()
            img = image_from_data(idata)
        scaled, width, height = fit_image(img.width(), img.height(), sz, sz)
        if scaled:
            idata = scale_image(img, width, height, as_png=True)[-1]
        try:
            ans = share_open(cached, 'w+b')
        except EnvironmentError:
            # FIX: `which` may contain '/', so `cached` can live in a
            # subdirectory of tdir; os.mkdir(tdir) alone could never create
            # it. Create the full parent chain of the cache file instead.
            try:
                os.makedirs(os.path.dirname(cached))
            except EnvironmentError:
                pass
            ans = share_open(cached, 'w+b')
        ans.write(idata)
        ans.seek(0)
        return ans
def create_file_copy(ctx, rd, prefix, library_id, book_id, ext, mtime, copy_func, extra_etag_data=''):
    ''' We cannot copy files directly from the library folder to the output
    socket, as this can potentially lock the library for an extended period. So
    instead we copy out the data from the library folder into a temp folder. We
    make sure to only do this copy once, using the previous copy, if there have
    been no changes to the data for the file since the last copy. '''
    global rename_counter
    # Avoid too many items in a single directory for performance
    base = os.path.join(rd.tdir, 'fcache', (('%x' % book_id)[-3:]))
    if iswindows:
        base = '\\\\?\\' + os.path.abspath(base)  # Ensure fname is not too long for windows' API
    bname = '%s-%s-%x.%s' % (prefix, library_id, book_id, ext)
    if '\\' in bname or '/' in bname:
        # library_id/ext are interpolated into a filename; refuse path escapes
        raise ValueError('File components must not contain path separators')
    fname = os.path.join(base, bname)
    used_cache = 'no'
    with lock:
        # Staleness is tracked via the in-memory `mtimes` map keyed by bname.
        # NOTE(review): if the cache file is removed externally while its
        # mtimes entry survives, the rb open below would fail -- presumably
        # tdir and mtimes share a lifetime; confirm.
        previous_mtime = mtimes.get(bname)
        if previous_mtime is None or previous_mtime < mtime:
            if previous_mtime is not None:
                # File exists and may be open, so we cannot change its
                # contents, as that would lead to corrupted downloads in any
                # clients that are currently downloading the file.
                if iswindows:
                    # On windows in order to re-use bname, we have to rename it
                    # before deleting it
                    rename_counter += 1
                    dname = os.path.join(base, '_%x' % rename_counter)
                    atomic_rename(fname, dname)
                    os.remove(dname)
                else:
                    os.remove(fname)
            try:
                ans = share_open(fname, 'w+b')
            except EnvironmentError:
                # Parent directory probably missing: create it and retry once
                try:
                    os.makedirs(base)
                except EnvironmentError:
                    pass
                ans = share_open(fname, 'w+b')
            mtimes[bname] = mtime
            copy_func(ans)
            ans.seek(0)
        else:
            # Cache hit: serve the previously copied bytes
            ans = share_open(fname, 'rb')
            used_cache = 'yes'
    if ctx.testing:
        # Expose cache behavior to the test-suite via response headers
        rd.outheaders['Used-Cache'] = used_cache
        rd.outheaders['Tempfile'] = hexlify(fname.encode('utf-8'))
    return rd.filesystem_file_with_custom_etag(ans, prefix, library_id, book_id, mtime, extra_etag_data)
def view_server_logs(self):
    # Show a modeless dialog with the content-server error and access logs,
    # plus a button to clear them. Reads both log files best-effort.
    from calibre.srv.embedded import log_paths
    log_error_file, log_access_file = log_paths()
    d = QDialog(self)
    d.resize(QSize(800, 600))
    layout = QVBoxLayout()
    d.setLayout(layout)
    layout.addWidget(QLabel(_('Error log:')))
    el = QPlainTextEdit(d)
    layout.addWidget(el)
    try:
        # 'replace' keeps the viewer working even on partially corrupt logs
        el.setPlainText(
            share_open(log_error_file, 'rb').read().decode('utf8', 'replace')
        )
    except EnvironmentError:
        el.setPlainText(_('No error log found'))
    layout.addWidget(QLabel(_('Access log:')))
    al = QPlainTextEdit(d)
    layout.addWidget(al)
    try:
        al.setPlainText(
            share_open(log_access_file, 'rb').read().decode('utf8', 'replace')
        )
    except EnvironmentError:
        al.setPlainText(_('No access log found'))
    loc = QLabel(_('The server log files are in: {}').format(os.path.dirname(log_error_file)))
    loc.setWordWrap(True)
    layout.addWidget(loc)
    bx = QDialogButtonBox(QDialogButtonBox.Ok)
    layout.addWidget(bx)
    bx.accepted.connect(d.accept)
    b = bx.addButton(_('&Clear logs'), bx.ActionRole)

    def clear_logs():
        # Refuse to clear while the server is live: it holds the log handles
        if getattr(self.server, 'is_running', False):
            return error_dialog(d, _('Server running'), _(
                'Cannot clear logs while the server is running. First stop the server.'), show=True)
        if self.server:
            # Server object exists but is stopped: clear via its log objects
            self.server.access_log.clear()
            self.server.log.clear()
        else:
            # No server object: remove the files directly, ignoring "missing"
            for x in (log_error_file, log_access_file):
                try:
                    os.remove(x)
                except EnvironmentError as err:
                    if err.errno != errno.ENOENT:
                        raise
        el.setPlainText(''), al.setPlainText('')

    b.clicked.connect(clear_logs)
    d.show()
def set_output(self):
    """Open self.filename for appending and record the current write position."""
    if ispy3:
        if iswindows:
            self.stream = share_open(self.filename, 'ab')
        else:
            # see https://bugs.python.org/issue27805
            flags = os.O_WRONLY | os.O_APPEND | os.O_CREAT | os.O_CLOEXEC
            self.stream = open(os.open(self.filename, flags), 'wb')
    else:
        bufsize = -1 if iswindows else 1  # line buffered
        self.stream = share_open(self.filename, 'ab', bufsize)
    try:
        self.current_pos = self.stream.tell()
    except EnvironmentError:
        # Happens if filename is /dev/stdout for example
        self.current_pos = 0
        self.max_size = None
def parse(self):
    """Read and parse the config file, migrating any legacy pickled file found
    at the extension-less path."""
    src = u''
    migrate = False
    path = self.config_file_path
    if os.path.exists(path):
        with ExclusiveFile(path) as f:
            try:
                src = f.read().decode('utf-8')
            except ValueError:
                print("Failed to parse", path)
                traceback.print_exc()
    if not src:
        # Fall back to the legacy file (same path minus extension)
        path = path.rpartition('.')[0]
        from calibre.utils.shared_file import share_open
        try:
            with share_open(path, 'rb') as f:
                src = f.read().decode('utf-8')
        except Exception:
            pass
        else:
            migrate = bool(src)
    ans = self.option_set.parse_string(src)
    if migrate:
        # Persist the migrated options in the new serialization format
        new_src = self.option_set.serialize(ans, ignore_unserializable=True)
        with ExclusiveFile(self.config_file_path) as f:
            f.seek(0)
            f.truncate()
            f.write(new_src)
    return ans
def parse(self):
    """Read and parse the config file, migrating any legacy pickled file found
    at the extension-less path."""
    src = ''
    migrate = False
    path = self.config_file_path
    with suppress(FileNotFoundError):
        src_bytes = read_data(path)
        try:
            src = src_bytes.decode('utf-8')
        except ValueError:
            print("Failed to parse", path)
            traceback.print_exc()
    if not src:
        # Fall back to the legacy file (same path minus extension)
        path = path.rpartition('.')[0]
        from calibre.utils.shared_file import share_open
        try:
            with share_open(path, 'rb') as f:
                src = f.read().decode('utf-8')
        except Exception:
            pass
        else:
            migrate = bool(src)
    ans = self.option_set.parse_string(src)
    if migrate:
        # Persist the migrated options in the new serialization format
        new_src = self.option_set.serialize(ans, ignore_unserializable=True)
        commit_data(self.config_file_path, new_src)
    return ans
def convert_book(path_to_ebook, opf_path, cover_path, output_fmt, recs):
    """Convert *path_to_ebook* to *output_fmt* in the book's own directory,
    writing '|||'-terminated progress lines to a 'status' file there."""
    from calibre.customize.conversion import OptionRecommendation
    from calibre.ebooks.conversion.plumber import Plumber
    from calibre.utils.logging import Log
    recs.append(('verbose', 2, OptionRecommendation.HIGH))
    recs.append(
        ('read_metadata_from_opf', opf_path, OptionRecommendation.HIGH))
    if cover_path:
        recs.append(('cover', cover_path, OptionRecommendation.HIGH))
    log = Log()
    os.chdir(os.path.dirname(path_to_ebook))
    status_file = share_open('status', 'wb')

    def notification(percent, msg=''):
        # FIX: default msg='' for consistency with the other convert_book
        # variant, so progress callbacks that supply only a percentage work.
        status_file.write('{}:{}|||\n'.format(percent, msg).encode('utf-8'))
        status_file.flush()

    output_path = os.path.abspath('output.' + output_fmt.lower())
    plumber = Plumber(path_to_ebook, output_path, log,
                      report_progress=notification, override_input_metadata=True)
    plumber.merge_ui_recommendations(recs)
    try:
        plumber.run()
    finally:
        # FIX: the status file was previously never closed (leaked handle)
        status_file.close()
def parse(self):
    """Parse the on-disk configuration, falling back to (and migrating from)
    the legacy extension-less file when the main file is absent or empty."""
    migrate = False
    src = ''
    path = self.config_file_path
    if os.path.exists(path):
        with ExclusiveFile(path) as f:
            try:
                src = f.read().decode('utf-8')
            except ValueError:
                print("Failed to parse", path)
                traceback.print_exc()
    if not src:
        from calibre.utils.shared_file import share_open
        legacy = path.rpartition('.')[0]
        try:
            with share_open(legacy, 'rb') as f:
                src = f.read().decode('utf-8')
        except Exception:
            pass
        else:
            migrate = bool(src)
    ans = self.option_set.parse_string(src)
    if migrate:
        # Re-serialize the migrated options into the current format
        new_src = self.option_set.serialize(ans, ignore_unserializable=True)
        with ExclusiveFile(self.config_file_path) as f:
            f.seek(0)
            f.truncate()
            f.write(new_src)
    return ans
def set_output(self):
    """Open self.filename for appending and record the current write position."""
    bufsize = -1 if iswindows else 1  # line buffered
    self.stream = share_open(self.filename, 'ab', bufsize)
    try:
        self.current_pos = self.stream.tell()
    except EnvironmentError:
        # Happens if filename is /dev/stdout for example
        self.current_pos = 0
        self.max_size = None
def icon(ctx, rd, which):
    'Serve the image resource *which*, scaled and cached unless sz == "full".'
    sz = rd.query.get('sz')
    if sz != 'full':
        try:
            sz = int(rd.query.get('sz', 48))
        except Exception:
            sz = 48
    if which in {'', '_'}:
        raise HTTPNotFound()
    # Leading underscore selects user toolbar icons; anything else resolves
    # against the bundled images. Both forms reject path escapes.
    is_tb_icon = which.startswith('_')
    rel = which[1:] if is_tb_icon else which
    if is_tb_icon:
        base = os.path.join(config_dir, 'tb_icons')
    else:
        base = P('images', allow_user_override=False)
    path = os.path.abspath(os.path.join(base, *rel.split('/')))
    if not path.startswith(base) or ':' in which:
        raise HTTPNotFound('Naughty, naughty!')
    if not is_tb_icon:
        # Re-resolve via P() so user overrides are honored
        path = P('images/' + os.path.relpath(path, base).replace(os.sep, '/'))
    if sz == 'full':
        try:
            return share_open(path, 'rb')
        except EnvironmentError:
            raise HTTPNotFound()
    with lock:
        cached = os.path.join(rd.tdir, 'icons', '%d-%s.png' % (sz, which))
        try:
            return share_open(cached, 'rb')
        except EnvironmentError:
            pass
        try:
            src = share_open(path, 'rb')
        except EnvironmentError:
            raise HTTPNotFound()
        with src:
            idata = src.read()
            img = image_from_data(idata)
        scaled, width, height = fit_image(img.width(), img.height(), sz, sz)
        if scaled:
            idata = scale_image(img, width, height, as_png=True)[-1]
        ans = open_for_write(cached)
        ans.write(idata)
        ans.seek(0)
        return ans
def get_data(name):
    """Return (bytes, mime_type) for the named book file, or (None, None)
    when the file is unknown or unreadable."""
    path = get_path_for_name(name)
    if path is not None:
        try:
            with share_open(path, 'rb') as f:
                return f.read(), guess_type(name)
        except EnvironmentError as err:
            prints('Failed to read from book file: {} with error: {}'.format(name, as_unicode(err)))
    return None, None
def set_output(self):
    """Open self.filename for text-mode appending; disable rotation for
    non-seekable targets."""
    if iswindows:
        self.stream = share_open(self.filename, 'a', newline='')
    else:
        # see https://bugs.python.org/issue27805
        fd = os.open(self.filename, os.O_WRONLY | os.O_APPEND | os.O_CREAT | os.O_CLOEXEC)
        self.stream = open(fd, 'w')
    try:
        self.stream.tell()
    except OSError:
        # Happens if filename is /dev/stdout for example
        self.max_size = None
def set_output(self):
    """Open self.filename for appending and remember where writes begin."""
    if not ispy3:
        # py2: share_open handles append mode on all platforms
        self.stream = share_open(self.filename, 'ab', -1 if iswindows else 1)  # line buffered
    elif iswindows:
        self.stream = share_open(self.filename, 'ab')
    else:
        # see https://bugs.python.org/issue27805
        self.stream = open(
            os.open(
                self.filename,
                os.O_WRONLY | os.O_APPEND | os.O_CREAT | os.O_CLOEXEC), 'wb')
    try:
        self.current_pos = self.stream.tell()
    except EnvironmentError:
        # Happens if filename is /dev/stdout for example
        self.current_pos = 0
        self.max_size = None
def get_data(name):
    """Return (bytes, mime_type) for the named book file, or (None, None)
    when the file is unknown or unreadable."""
    path = get_path_for_name(name)
    if path is not None:
        try:
            with share_open(path, 'rb') as f:
                return f.read(), guess_type(name)
        except OSError as err:
            prints(
                f'Failed to read from book file: {name} with error: {as_unicode(err)}'
            )
    return None, None
def current_status(self):
    """Return (percent, message) from the newest '|||'-terminated line of the
    status file in self.tdir, or (0, '') if none is available."""
    try:
        with share_open(os.path.join(self.tdir, 'status'), 'rb') as f:
            lines = f.read().decode('utf-8').splitlines()
    except Exception:
        lines = ()
    # Scan newest-first for a complete (terminator-suffixed) progress record
    for line in reversed(lines):
        if line.endswith('|||'):
            p, msg = line.partition(':')[::2]
            return float(p), msg[:-3]
    return 0, ''
def static(ctx, rd, what):
    'Serve a static content-server resource, rejecting path escapes.'
    if not what:
        raise HTTPNotFound()
    base = P('content-server', allow_user_override=False)
    candidate = os.path.abspath(os.path.join(base, *what.split('/')))
    if not candidate.startswith(base) or ':' in what:
        raise HTTPNotFound('Naughty, naughty!')
    # Re-resolve via P() so user overrides of resources are honored
    rel = os.path.relpath(candidate, base).replace(os.sep, '/')
    try:
        return share_open(P('content-server/' + rel), 'rb')
    except EnvironmentError:
        raise HTTPNotFound()
def static(ctx, rd, what):
    "Serve a static content-server resource, rejecting path escapes."
    if not what:
        raise HTTPNotFound()
    base = P("content-server", allow_user_override=False)
    resolved = os.path.abspath(os.path.join(base, *what.split("/")))
    bad = not resolved.startswith(base) or ":" in what
    if bad:
        raise HTTPNotFound("Naughty, naughty!")
    # Re-resolve via P() so user overrides of resources are honored
    resolved = os.path.relpath(resolved, base).replace(os.sep, "/")
    try:
        return share_open(P("content-server/" + resolved), "rb")
    except EnvironmentError:
        raise HTTPNotFound()
def read_old_serialized_representation(self):
    """Best-effort load of the legacy pickled state file (file_path minus its
    extension); returns a copy of the dict, or {} on any failure."""
    from calibre.utils.serialize import pickle_loads
    from calibre.utils.shared_file import share_open
    path = self.file_path.rpartition('.')[0]
    try:
        with share_open(path, 'rb') as f:
            raw = f.read()
    except EnvironmentError:
        raw = b''
    try:
        return pickle_loads(raw).copy()
    except Exception:
        return {}
def convert_book(path_to_ebook, opf_path, cover_path, output_fmt, recs):
    """Convert *path_to_ebook* to *output_fmt* in the book's own directory,
    writing '|||'-terminated progress lines to a 'status' file there."""
    from calibre.customize.conversion import OptionRecommendation
    from calibre.ebooks.conversion.plumber import Plumber
    from calibre.utils.logging import Log
    recs.append(('verbose', 2, OptionRecommendation.HIGH))
    recs.append(('read_metadata_from_opf', opf_path, OptionRecommendation.HIGH))
    if cover_path:
        recs.append(('cover', cover_path, OptionRecommendation.HIGH))
    log = Log()
    os.chdir(os.path.dirname(path_to_ebook))
    status_file = share_open('status', 'wb')

    def notification(percent, msg=''):
        status_file.write('{}:{}|||\n'.format(percent, msg).encode('utf-8'))
        status_file.flush()

    output_path = os.path.abspath('output.' + output_fmt.lower())
    plumber = Plumber(path_to_ebook, output_path, log,
                      report_progress=notification, override_input_metadata=True)
    plumber.merge_ui_recommendations(recs)
    try:
        plumber.run()
    finally:
        # FIX: the status file was previously never closed (leaked handle)
        status_file.close()
def test_get(self):  # {{{
    'Test /get'
    # End-to-end test of the /get endpoint: formats (with and without
    # metadata update), covers/thumbnails, the temp-file cache, and
    # opf/json metadata serving including gzip transfer encoding.
    with self.create_server() as server:
        db = server.handler.router.ctx.library_broker.get(None)
        conn = server.connect()

        def get(what, book_id, library_id=None, q=''):
            # Issue GET /get/<what>/<book_id>[/<library_id>][?q]
            q = ('?' + q) if q else q
            conn.request(
                'GET', '/get/%s/%s' % (what, book_id) + (('/' + library_id) if library_id else '') + q)
            r = conn.getresponse()
            return r, r.read()

        # Test various invalid parameters
        def bad(*args):
            r, data = get(*args)
            self.ae(r.status, http_client.NOT_FOUND)
        bad('xxx', 1)
        bad('fmt1', 10)
        bad('fmt1', 1, 'zzzz')
        bad('fmt1', 'xx')

        # Test simple fetching of format without metadata update
        r, data = get('fmt1', 1, db.server_library_id)
        self.ae(data, db.format(1, 'fmt1'))
        self.assertIsNotNone(r.getheader('Content-Disposition'))
        self.ae(r.getheader('Used-Cache'), 'no')
        r, data = get('fmt1', 1)
        self.ae(data, db.format(1, 'fmt1'))
        self.ae(r.getheader('Used-Cache'), 'yes')

        # Test fetching of format with metadata update
        raw = P('quick_start/eng.epub', data=True)
        r, data = get('epub', 1)
        self.ae(r.status, http_client.OK)
        etag = r.getheader('ETag')
        self.assertIsNotNone(etag)
        self.ae(r.getheader('Used-Cache'), 'no')
        self.assertTrue(data.startswith(b'PK'))
        self.assertGreaterEqual(len(data), len(raw))
        db.set_field('title', {1: 'changed'})
        r, data = get('epub', 1)
        # Metadata change must invalidate the ETag and the cache
        self.assertNotEqual(r.getheader('ETag'), etag)
        etag = r.getheader('ETag')
        self.ae(r.getheader('Used-Cache'), 'no')
        mi = get_metadata(BytesIO(data), extract_cover=False)
        self.ae(mi.title, 'changed')
        r, data = get('epub', 1)
        self.ae(r.getheader('Used-Cache'), 'yes')

        # Test plugboards
        import calibre.library.save_to_disk as c
        orig, c.DEBUG = c.DEBUG, False
        try:
            db.set_pref(
                'plugboards', {
                    u'epub': {
                        u'content_server': [[u'changed, {title}', u'title']]
                    }
                })
            # this is needed as the cache is not invalidated for plugboard changes
            db.set_field('title', {1: 'again'})
            r, data = get('epub', 1)
            self.assertNotEqual(r.getheader('ETag'), etag)
            etag = r.getheader('ETag')
            self.ae(r.getheader('Used-Cache'), 'no')
            mi = get_metadata(BytesIO(data), extract_cover=False)
            self.ae(mi.title, 'changed, again')
        finally:
            c.DEBUG = orig

        # Test the serving of covers
        def change_cover(count, book_id=2):
            cpath = db.format_abspath(book_id, '__COVER_INTERNAL__')
            db.set_cover({2: I('lt.png', data=True)})
            t = time.time() + 1 + count
            # Ensure mtime changes, needed on OS X where HFS+ has a 1s
            # mtime resolution
            os.utime(cpath, (t, t))

        r, data = get('cover', 1)
        self.ae(r.status, http_client.OK)
        self.ae(data, db.cover(1))
        self.ae(r.getheader('Used-Cache'), 'no')
        self.ae(r.getheader('Content-Type'), 'image/jpeg')
        r, data = get('cover', 1)
        self.ae(r.status, http_client.OK)
        self.ae(data, db.cover(1))
        self.ae(r.getheader('Used-Cache'), 'yes')
        r, data = get('cover', 3)
        self.ae(r.status, http_client.OK)  # Auto generated cover
        r, data = get('thumb', 1)
        self.ae(r.status, http_client.OK)
        self.ae(identify(data), ('jpeg', 60, 60))
        self.ae(r.getheader('Used-Cache'), 'no')
        r, data = get('thumb', 1)
        self.ae(r.status, http_client.OK)
        self.ae(r.getheader('Used-Cache'), 'yes')
        r, data = get('thumb', 1, q='sz=100')
        self.ae(r.status, http_client.OK)
        self.ae(identify(data), ('jpeg', 100, 100))
        self.ae(r.getheader('Used-Cache'), 'no')
        r, data = get('thumb', 1, q='sz=100x100')
        self.ae(r.status, http_client.OK)
        self.ae(r.getheader('Used-Cache'), 'yes')
        change_cover(1, 1)
        r, data = get('thumb', 1, q='sz=100')
        self.ae(r.status, http_client.OK)
        self.ae(identify(data), ('jpeg', 100, 100))
        self.ae(r.getheader('Used-Cache'), 'no')

        # Test file sharing in cache
        r, data = get('cover', 2)
        self.ae(r.status, http_client.OK)
        self.ae(data, db.cover(2))
        self.ae(r.getheader('Used-Cache'), 'no')
        path = from_hex_unicode(r.getheader('Tempfile'))
        # Keep an open handle on the cache file to verify it stays readable
        f, fdata = share_open(path, 'rb'), data
        # Now force an update
        change_cover(1)
        r, data = get('cover', 2)
        self.ae(r.status, http_client.OK)
        self.ae(data, db.cover(2))
        self.ae(r.getheader('Used-Cache'), 'no')
        path = from_hex_unicode(r.getheader('Tempfile'))
        f2, f2data = share_open(path, 'rb'), data
        # Do it again
        change_cover(2)
        r, data = get('cover', 2)
        self.ae(r.status, http_client.OK)
        self.ae(data, db.cover(2))
        self.ae(r.getheader('Used-Cache'), 'no')
        # Handles opened before the updates must still see their original bytes
        self.ae(f.read(), fdata)
        self.ae(f2.read(), f2data)

        # Test serving of metadata as opf
        r, data = get('opf', 1)
        self.ae(r.status, http_client.OK)
        self.ae(r.getheader('Content-Type'), 'application/oebps-package+xml; charset=UTF-8')
        self.assertIsNotNone(r.getheader('Last-Modified'))
        opf = OPF(BytesIO(data), populate_spine=False, try_to_guess_cover=False)
        self.ae(db.field_for('title', 1), opf.title)
        self.ae(db.field_for('authors', 1), tuple(opf.authors))
        conn.request('GET', '/get/opf/1', headers={'Accept-Encoding': 'gzip'})
        r = conn.getresponse()
        self.ae(r.status, http_client.OK), self.ae(r.getheader('Content-Encoding'), 'gzip')
        raw = r.read()
        self.ae(zlib.decompress(raw, 16 + zlib.MAX_WBITS), data)

        # Test serving metadata as json
        r, data = get('json', 1)
        self.ae(r.status, http_client.OK)
        self.ae(db.field_for('title', 1), json.loads(data)['title'])
        conn.request('GET', '/get/json/1', headers={'Accept-Encoding': 'gzip'})
        r = conn.getresponse()
        self.ae(r.status, http_client.OK), self.ae(r.getheader('Content-Encoding'), 'gzip')
        raw = r.read()
        self.ae(zlib.decompress(raw, 16 + zlib.MAX_WBITS), data)
def create_file_copy(ctx, rd, prefix, library_id, book_id, ext, mtime, copy_func, extra_etag_data=''):
    ''' We cannot copy files directly from the library folder to the output
    socket, as this can potentially lock the library for an extended period.
    So instead we copy out the data from the library folder into a temp
    folder. We make sure to only do this copy once, using the previous copy,
    if there have been no changes to the data for the file since the last
    copy. '''
    global rename_counter
    # Avoid too many items in a single directory for performance
    base = os.path.join(rd.tdir, 'fcache', (('%x' % book_id)[-3:]))
    if iswindows:
        base = '\\\\?\\' + os.path.abspath(
            base)  # Ensure fname is not too long for windows' API
    bname = f'{prefix}-{library_id}-{book_id:x}.{ext}'
    if '\\' in bname or '/' in bname:
        # library_id/ext are interpolated into a filename; refuse path escapes
        raise ValueError('File components must not contain path separators')
    fname = os.path.join(base, bname)
    used_cache = 'no'

    def safe_mtime():
        # Returns None when the cache file does not exist (or is unreadable)
        with suppress(OSError):
            return os.path.getmtime(fname)

    # Normalize the caller-supplied mtime to an epoch float for comparison
    mt = mtime if isinstance(mtime, (int, float)) else timestampfromdt(mtime)
    with lock:
        previous_mtime = safe_mtime()
        if previous_mtime is None or previous_mtime < mt:
            if previous_mtime is not None:
                # File exists and may be open, so we cannot change its
                # contents, as that would lead to corrupted downloads in any
                # clients that are currently downloading the file.
                if iswindows:
                    # On windows in order to re-use bname, we have to rename it
                    # before deleting it
                    rename_counter += 1
                    dname = os.path.join(base, '_%x' % rename_counter)
                    atomic_rename(fname, dname)
                    os.remove(dname)
                else:
                    os.remove(fname)
            ans = open_for_write(fname)
            copy_func(ans)
            ans.seek(0)
        else:
            try:
                ans = share_open(fname, 'rb')
                used_cache = 'yes'
            except OSError as err:
                if err.errno != errno.ENOENT:
                    raise
                # Cache file vanished between the mtime check and the open:
                # fall back to recreating it
                ans = open_for_write(fname)
                copy_func(ans)
                ans.seek(0)
    if ctx.testing:
        # Expose cache behavior to the test-suite via response headers
        rd.outheaders['Used-Cache'] = used_cache
        rd.outheaders['Tempfile'] = as_hex_unicode(fname)
    return rd.filesystem_file_with_custom_etag(ans, prefix, library_id, book_id, mt, extra_etag_data)
def apple_touch_icon(ctx, rd):
    # Serve the iOS home-screen icon bundled with the application resources
    return share_open(I('apple-touch-icon.png'), 'rb')
def favicon(ctx, rd):
    # Serve the bundled site icon
    return share_open(I('lt.png'), 'rb')
def test_get(self):  # {{{
    'Test /get'
    # End-to-end test of the /get endpoint: formats (with and without
    # metadata update), covers/thumbnails, the temp-file cache, and
    # opf/json metadata serving including gzip transfer encoding.
    with self.create_server() as server:
        db = server.handler.router.ctx.library_broker.get(None)
        conn = server.connect()

        def get(what, book_id, library_id=None, q=''):
            # Issue GET /get/<what>/<book_id>[/<library_id>][?q]
            q = ('?' + q) if q else q
            conn.request('GET', '/get/%s/%s' % (what, book_id) + (('/' + library_id) if library_id else '') + q)
            r = conn.getresponse()
            return r, r.read()

        # Test various invalid parameters
        def bad(*args):
            r, data = get(*args)
            self.ae(r.status, httplib.NOT_FOUND)
        bad('xxx', 1)
        bad('fmt1', 10)
        bad('fmt1', 1, 'zzzz')
        bad('fmt1', 'xx')

        # Test simple fetching of format without metadata update
        r, data = get('fmt1', 1, db.server_library_id)
        self.ae(data, db.format(1, 'fmt1'))
        self.assertIsNotNone(r.getheader('Content-Disposition'))
        self.ae(r.getheader('Used-Cache'), 'no')
        r, data = get('fmt1', 1)
        self.ae(data, db.format(1, 'fmt1'))
        self.ae(r.getheader('Used-Cache'), 'yes')

        # Test fetching of format with metadata update
        raw = P('quick_start/eng.epub', data=True)
        r, data = get('epub', 1)
        self.ae(r.status, httplib.OK)
        etag = r.getheader('ETag')
        self.assertIsNotNone(etag)
        self.ae(r.getheader('Used-Cache'), 'no')
        self.assertTrue(data.startswith(b'PK'))
        self.assertGreaterEqual(len(data), len(raw))
        db.set_field('title', {1:'changed'})
        r, data = get('epub', 1)
        # Metadata change must invalidate the ETag and the cache
        self.assertNotEqual(r.getheader('ETag'), etag)
        etag = r.getheader('ETag')
        self.ae(r.getheader('Used-Cache'), 'no')
        mi = get_metadata(BytesIO(data), extract_cover=False)
        self.ae(mi.title, 'changed')
        r, data = get('epub', 1)
        self.ae(r.getheader('Used-Cache'), 'yes')

        # Test plugboards
        import calibre.library.save_to_disk as c
        orig, c.DEBUG = c.DEBUG, False
        try:
            db.set_pref('plugboards', {u'epub': {u'content_server': [[u'changed, {title}', u'title']]}})
            # this is needed as the cache is not invalidated for plugboard changes
            db.set_field('title', {1:'again'})
            r, data = get('epub', 1)
            self.assertNotEqual(r.getheader('ETag'), etag)
            etag = r.getheader('ETag')
            self.ae(r.getheader('Used-Cache'), 'no')
            mi = get_metadata(BytesIO(data), extract_cover=False)
            self.ae(mi.title, 'changed, again')
        finally:
            c.DEBUG = orig

        # Test the serving of covers
        def change_cover(count, book_id=2):
            cpath = db.format_abspath(book_id, '__COVER_INTERNAL__')
            db.set_cover({2:I('lt.png', data=True)})
            t = time.time() + 1 + count
            # Ensure mtime changes, needed on OS X where HFS+ has a 1s
            # mtime resolution
            os.utime(cpath, (t, t))

        r, data = get('cover', 1)
        self.ae(r.status, httplib.OK)
        self.ae(data, db.cover(1))
        self.ae(r.getheader('Used-Cache'), 'no')
        self.ae(r.getheader('Content-Type'), 'image/jpeg')
        r, data = get('cover', 1)
        self.ae(r.status, httplib.OK)
        self.ae(data, db.cover(1))
        self.ae(r.getheader('Used-Cache'), 'yes')
        r, data = get('cover', 3)
        self.ae(r.status, httplib.OK)  # Auto generated cover
        r, data = get('thumb', 1)
        self.ae(r.status, httplib.OK)
        self.ae(identify(data), ('jpeg', 60, 60))
        self.ae(r.getheader('Used-Cache'), 'no')
        r, data = get('thumb', 1)
        self.ae(r.status, httplib.OK)
        self.ae(r.getheader('Used-Cache'), 'yes')
        r, data = get('thumb', 1, q='sz=100')
        self.ae(r.status, httplib.OK)
        self.ae(identify(data), ('jpeg', 100, 100))
        self.ae(r.getheader('Used-Cache'), 'no')
        r, data = get('thumb', 1, q='sz=100x100')
        self.ae(r.status, httplib.OK)
        self.ae(r.getheader('Used-Cache'), 'yes')
        change_cover(1, 1)
        r, data = get('thumb', 1, q='sz=100')
        self.ae(r.status, httplib.OK)
        self.ae(identify(data), ('jpeg', 100, 100))
        self.ae(r.getheader('Used-Cache'), 'no')

        # Test file sharing in cache
        r, data = get('cover', 2)
        self.ae(r.status, httplib.OK)
        self.ae(data, db.cover(2))
        self.ae(r.getheader('Used-Cache'), 'no')
        path = binascii.unhexlify(r.getheader('Tempfile')).decode('utf-8')
        # Keep an open handle on the cache file to verify it stays readable
        f, fdata = share_open(path, 'rb'), data
        # Now force an update
        change_cover(1)
        r, data = get('cover', 2)
        self.ae(r.status, httplib.OK)
        self.ae(data, db.cover(2))
        self.ae(r.getheader('Used-Cache'), 'no')
        path = binascii.unhexlify(r.getheader('Tempfile')).decode('utf-8')
        f2, f2data = share_open(path, 'rb'), data
        # Do it again
        change_cover(2)
        r, data = get('cover', 2)
        self.ae(r.status, httplib.OK)
        self.ae(data, db.cover(2))
        self.ae(r.getheader('Used-Cache'), 'no')
        # Handles opened before the updates must still see their original bytes
        self.ae(f.read(), fdata)
        self.ae(f2.read(), f2data)

        # Test serving of metadata as opf
        r, data = get('opf', 1)
        self.ae(r.status, httplib.OK)
        self.ae(r.getheader('Content-Type'), 'application/oebps-package+xml; charset=UTF-8')
        self.assertIsNotNone(r.getheader('Last-Modified'))
        opf = OPF(BytesIO(data), populate_spine=False, try_to_guess_cover=False)
        self.ae(db.field_for('title', 1), opf.title)
        self.ae(db.field_for('authors', 1), tuple(opf.authors))
        conn.request('GET', '/get/opf/1', headers={'Accept-Encoding':'gzip'})
        r = conn.getresponse()
        self.ae(r.status, httplib.OK), self.ae(r.getheader('Content-Encoding'), 'gzip')
        raw = r.read()
        self.ae(zlib.decompress(raw, 16+zlib.MAX_WBITS), data)

        # Test serving metadata as json
        r, data = get('json', 1)
        self.ae(r.status, httplib.OK)
        self.ae(db.field_for('title', 1), json.loads(data)['title'])
        conn.request('GET', '/get/json/1', headers={'Accept-Encoding':'gzip'})
        r = conn.getresponse()
        self.ae(r.status, httplib.OK), self.ae(r.getheader('Content-Encoding'), 'gzip')
        raw = r.read()
        self.ae(zlib.decompress(raw, 16+zlib.MAX_WBITS), data)
def favicon(ctx, rd):
    # Serve the bundled site icon
    return share_open(I("lt.png"), "rb")