def read_ncx(sections, index, codec): index_entries = [] if index != NULL_INDEX: table, cncx = read_index(sections, index, codec) for num, x in enumerate(iteritems(table)): text, tag_map = x entry = default_entry.copy() entry['name'] = text entry['num'] = num for tag in tag_fieldname_map: fieldname, i = tag_fieldname_map[tag] if tag in tag_map: fieldvalue = tag_map[tag][i] if tag == 6: # Appears to be an idx into the KF8 elems table with an # offset fieldvalue = tuple(tag_map[tag]) entry[fieldname] = fieldvalue for which, name in iteritems({3:'text', 5:'kind', 70:'description', 71:'author', 72:'image_caption', 73:'image_attribution'}): if tag == which: entry[name] = cncx.get(fieldvalue, default_entry[name]) index_entries.append(entry) return index_entries
def flatten_spine(self): names = defaultdict(int) styles, pseudo_styles = {}, defaultdict(dict) for item in self.items: html = item.data stylizer = self.stylizers[item] if self.specializer is not None: self.specializer(item, stylizer) body = html.find(XHTML('body')) fsize = self.context.dest.fbase self.flatten_node(body, stylizer, names, styles, pseudo_styles, fsize, item.id) items = sorted(((key, val) for (val, key) in iteritems(styles)), key=lambda x:numeric_sort_key(x[0])) # :hover must come after link and :active must come after :hover psels = sorted(pseudo_styles, key=lambda x : {'hover':1, 'active':2}.get(x, 0)) for psel in psels: styles = pseudo_styles[psel] if not styles: continue x = sorted(((k+':'+psel, v) for v, k in iteritems(styles))) items.extend(x) css = ''.join(".%s {\n%s;\n}\n\n" % (key, val) for key, val in items) href = self.replace_css(css) global_css = self.collect_global_css() for item in self.items: stylizer = self.stylizers[item] self.flatten_head(item, href, global_css[item])
def register_keyboard_shortcuts(gui=None, finalize=False): if gui is None: from calibre.gui2.ui import get_gui gui = get_gui() if gui is None: return for unique_name, action in iteritems(registered_shortcuts): gui.keyboard.unregister_shortcut(unique_name) gui.removeAction(action) registered_shortcuts.clear() for filetype, applications in iteritems(oprefs['entries']): for application in applications: text = entry_to_icon_text(application, only_text=True) t = _('cover image') if filetype.upper() == 'COVER_IMAGE' else filetype.upper() name = _('Open {0} files with {1}').format(t, text) ac = QAction(gui) unique_name = application['uuid'] func = partial(gui.open_with_action_triggerred, filetype, application) ac.triggered.connect(func) gui.keyboard.register_shortcut(unique_name, name, action=ac, group=_('Open With')) gui.addAction(ac) registered_shortcuts[unique_name] = ac if finalize: gui.keyboard.finalize()
def set_sort_names(self, aus_map, db): aus_map = {aid:(a or '').strip() for aid, a in iteritems(aus_map)} aus_map = {aid:a for aid, a in iteritems(aus_map) if a != self.asort_map.get(aid, None)} self.asort_map.update(aus_map) db.executemany('UPDATE authors SET sort=? WHERE id=?', [(v, k) for k, v in iteritems(aus_map)]) return aus_map
def subset(self, character_map, extra_glyphs): from calibre.utils.fonts.sfnt.cff.writer import Subset # Map codes from the cmap table to glyph names, this will be used to # reconstruct character_map for the subset font charset_map = {code:self.cff.charset.safe_lookup(glyph_id) for code, glyph_id in iteritems(character_map)} charset = set(itervalues(charset_map)) charset.discard(None) if not charset and character_map: raise NoGlyphs('This font has no glyphs for the specified characters') charset |= { self.cff.charset.safe_lookup(glyph_id) for glyph_id in extra_glyphs} charset.discard(None) s = Subset(self.cff, charset) # Rebuild character_map with the glyph ids from the subset font character_map.clear() for code, charname in iteritems(charset_map): glyph_id = s.charname_map.get(charname, None) if glyph_id: character_map[code] = glyph_id # Check that raw is parseable CFF(s.raw) self.raw = s.raw
def test_remove_books(self): # {{{ 'Test removal of books' cl = self.cloned_library cache = self.init_cache() af, ae = self.assertFalse, self.assertEqual authors = cache.fields['authors'].table # Delete a single book, with no formats and check cleaning self.assertIn('Unknown', set(itervalues(authors.id_map))) olen = len(authors.id_map) item_id = {v:k for k, v in iteritems(authors.id_map)}['Unknown'] cache.remove_books((3,)) for c in (cache, self.init_cache()): table = c.fields['authors'].table self.assertNotIn(3, c.all_book_ids()) self.assertNotIn('Unknown', set(itervalues(table.id_map))) self.assertNotIn(item_id, table.asort_map) self.assertNotIn(item_id, table.alink_map) ae(len(table.id_map), olen-1) # Check that files are removed fmtpath = cache.format_abspath(1, 'FMT1') bookpath = os.path.dirname(fmtpath) authorpath = os.path.dirname(bookpath) os.mkdir(os.path.join(authorpath, '.DS_Store')) open(os.path.join(authorpath, 'Thumbs.db'), 'wb').close() item_id = {v:k for k, v in iteritems(cache.fields['#series'].table.id_map)}['My Series Two'] cache.remove_books((1,), permanent=True) for x in (fmtpath, bookpath, authorpath): af(os.path.exists(x), 'The file %s exists, when it should not' % x) for c in (cache, self.init_cache()): table = c.fields['authors'].table self.assertNotIn(1, c.all_book_ids()) self.assertNotIn('Author Two', set(itervalues(table.id_map))) self.assertNotIn(6, set(itervalues(c.fields['rating'].table.id_map))) self.assertIn('A Series One', set(itervalues(c.fields['series'].table.id_map))) self.assertNotIn('My Series Two', set(itervalues(c.fields['#series'].table.id_map))) self.assertNotIn(item_id, c.fields['#series'].table.col_book_map) self.assertNotIn(1, c.fields['#series'].table.book_col_map) # Test emptying the db cache.remove_books(cache.all_book_ids(), permanent=True) for f in ('authors', 'series', '#series', 'tags'): table = cache.fields[f].table self.assertFalse(table.id_map) self.assertFalse(table.book_col_map) 
self.assertFalse(table.col_book_map) # Test the delete service from calibre.db.delete_service import delete_service cache = self.init_cache(cl) # Check that files are removed fmtpath = cache.format_abspath(1, 'FMT1') bookpath = os.path.dirname(fmtpath) authorpath = os.path.dirname(bookpath) item_id = {v:k for k, v in iteritems(cache.fields['#series'].table.id_map)}['My Series Two'] cache.remove_books((1,)) delete_service().wait() for x in (fmtpath, bookpath, authorpath): af(os.path.exists(x), 'The file %s exists, when it should not' % x)
def stringify(data, metadata, for_machine): for field, m in iteritems(metadata): if field == 'authors': data[field] = { k: authors_to_string(v) for k, v in iteritems(data[field]) } else: dt = m['datatype'] if dt == 'datetime': data[field] = { k: isoformat(v, as_utc=for_machine) if v else 'None' for k, v in iteritems(data[field]) } elif not for_machine: ism = m['is_multiple'] if ism: data[field] = { k: ism['list_to_ui'].join(v) for k, v in iteritems(data[field]) } if field == 'formats': data[field] = { k: '[' + v + ']' for k, v in iteritems(data[field]) }
def register(): base = os.path.dirname(sys.executable) for program, data in iteritems(default_programs()): data = data.copy() exe = os.path.join(base, program) capabilities_path = cap_path(data) ext_map = {ext.lower():guess_type('file.' + ext.lower())[0] for ext in extensions(program)} ext_map = {ext:mt for ext, mt in iteritems(ext_map) if mt} prog_id_map = {ext:progid_name(data['assoc_name'], ext) for ext in ext_map} with Key(capabilities_path) as key: for k, v in iteritems({'ApplicationDescription':'description', 'ApplicationName':'name'}): key.set(k, data[v]) key.set('ApplicationIcon', '%s,0' % exe) key.set_default_value(r'shell\open\command', '"%s" "%%1"' % exe) with Key('FileAssociations', root=key) as fak, Key('MimeAssociations', root=key) as mak: # previous_associations = set(fak.values()) for ext, prog_id in iteritems(prog_id_map): mt = ext_map[ext] fak.set('.' + ext, prog_id) mak.set(mt, prog_id) for ext, prog_id in iteritems(prog_id_map): create_prog_id(ext, prog_id, ext_map, exe) with Key(r'Software\RegisteredApplications') as key: key.set(data['name'], capabilities_path) from win32com.shell import shell, shellcon shell.SHChangeNotify(shellcon.SHCNE_ASSOCCHANGED, shellcon.SHCNF_DWORD | shellcon.SHCNF_FLUSH, 0, 0)
def next_entry(self, spine_pos, anchor_map, viewport_rect, in_paged_mode, backwards=False, current_entry=None): current_entry = (self.currently_viewed_entry if current_entry is None else current_entry) if current_entry is None: return items = reversed(self.all_items) if backwards else self.all_items found = False if in_paged_mode: start = viewport_rect[0] anchor_map = {k:v[0] for k, v in iteritems(anchor_map)} else: start = viewport_rect[1] anchor_map = {k:v[1] for k, v in iteritems(anchor_map)} for item in items: if found: start_pos = anchor_map.get(item.start_anchor, 0) if backwards and item.is_being_viewed and start_pos >= start: # This item will not cause any scrolling continue if item.starts_at != spine_pos or item.start_anchor: return item if item is current_entry: found = True
def render(self): ans = ['*'*10 + ' Index Header ' + '*'*10] a = ans.append if self.header is not None: for field in INDEX_HEADER_FIELDS: a('%-12s: %r'%(FIELD_NAMES.get(field, field), self.header[field])) ans.extend(['', '']) ans += ['*'*10 + ' Index Record Headers (%d records) ' % len(self.index_headers) + '*'*10] for i, header in enumerate(self.index_headers): ans += ['*'*10 + ' Index Record %d ' % i + '*'*10] for field in INDEX_HEADER_FIELDS: a('%-12s: %r'%(FIELD_NAMES.get(field, field), header[field])) if self.cncx: a('*'*10 + ' CNCX ' + '*'*10) for offset, val in iteritems(self.cncx): a('%10s: %s'%(offset, val)) ans.extend(['', '']) if self.table is not None: a('*'*10 + ' %d Index Entries '%len(self.table) + '*'*10) for k, v in iteritems(self.table): a('%s: %r'%(k, v)) if self.records: ans.extend(['', '', '*'*10 + ' Parsed Entries ' + '*'*10]) for f in self.records: a(repr(f)) return ans + ['']
def test_get_formats(self): # {{{ 'Test reading ebook formats using the format() method' from calibre.library.database2 import LibraryDatabase2 from calibre.db.cache import NoSuchFormat old = LibraryDatabase2(self.library_path) ids = old.all_ids() lf = {i:set(old.formats(i, index_is_id=True).split(',')) if old.formats( i, index_is_id=True) else set() for i in ids} formats = {i:{f:old.format(i, f, index_is_id=True) for f in fmts} for i, fmts in iteritems(lf)} old.conn.close() old = None cache = self.init_cache(self.library_path) for book_id, fmts in iteritems(lf): self.assertEqual(fmts, set(cache.formats(book_id)), 'Set of formats is not the same') for fmt in fmts: old = formats[book_id][fmt] self.assertEqual(old, cache.format(book_id, fmt), 'Old and new format disagree') f = cache.format(book_id, fmt, as_file=True) self.assertEqual(old, f.read(), 'Failed to read format as file') with open(cache.format(book_id, fmt, as_path=True, preserve_filename=True), 'rb') as f: self.assertEqual(old, f.read(), 'Failed to read format as path') with open(cache.format(book_id, fmt, as_path=True), 'rb') as f: self.assertEqual(old, f.read(), 'Failed to read format as path') buf = BytesIO() self.assertRaises(NoSuchFormat, cache.copy_format_to, 99999, 'X', buf, 'copy_format_to() failed to raise an exception for non-existent book') self.assertRaises(NoSuchFormat, cache.copy_format_to, 1, 'X', buf, 'copy_format_to() failed to raise an exception for non-existent format')
def changed_files(list_of_names1, list_of_names2, get_data1, get_data2): list_of_names1, list_of_names2 = frozenset(list_of_names1), frozenset(list_of_names2) changed_names = set() cache = Cache() common_names = list_of_names1.intersection(list_of_names2) for name in common_names: left, right = get_data1(name), get_data2(name) if len(left) == len(right) and left == right: continue cache.set_left(name, left), cache.set_right(name, right) changed_names.add(name) removals = list_of_names1 - common_names adds = set(list_of_names2 - common_names) adata, rdata = {a:get_data2(a) for a in adds}, {r:get_data1(r) for r in removals} ahash = {a:hash(d) for a, d in iteritems(adata)} rhash = {r:hash(d) for r, d in iteritems(rdata)} renamed_names, removed_names, added_names = {}, set(), set() for name, rh in iteritems(rhash): for n, ah in iteritems(ahash): if ah == rh: renamed_names[name] = n adds.discard(n) break else: cache.set_left(name, rdata[name]) removed_names.add(name) for name in adds: cache.set_right(name, adata[name]) added_names.add(name) return cache, changed_names, renamed_names, removed_names, added_names
def set_links(self, link_map, db): link_map = {aid:(l or '').strip() for aid, l in iteritems(link_map)} link_map = {aid:l for aid, l in iteritems(link_map) if l != self.alink_map.get(aid, None)} self.alink_map.update(link_map) db.executemany('UPDATE authors SET link=? WHERE id=?', [(v, k) for k, v in iteritems(link_map)]) return link_map
def __call__(self, **kwargs): positions = {} for name, val in iteritems(kwargs): if name not in self: raise KeyError('Not a valid header field: %r'%name) self[name] = val buf = BytesIO() buf.write(as_bytes(self.HEADER_NAME)) for name, val in iteritems(self): val = self.format_value(name, val) positions[name] = buf.tell() if val is None: raise ValueError('Dynamic field %r not set'%name) if isinstance(val, numbers.Integral): fmt = b'H' if name in self.SHORT_FIELDS else b'I' val = pack(b'>'+fmt, val) buf.write(val) for pos_field, field in iteritems(self.POSITIONS): buf.seek(positions[pos_field]) buf.write(pack(b'>I', positions[field])) ans = buf.getvalue() if self.ALIGN_BLOCK: ans = align_block(ans) return ans
def find_cover_image2(container, strict=False): manifest_id_map = container.manifest_id_map mm = container.mime_map for meta in container.opf_xpath('//opf:meta[@name="cover" and @content]'): item_id = meta.get('content') name = manifest_id_map.get(item_id, None) media_type = mm.get(name, None) if is_raster_image(media_type): return name # First look for a guide item with type == 'cover' guide_type_map = container.guide_type_map for ref_type, name in iteritems(guide_type_map): if ref_type.lower() == 'cover' and is_raster_image(mm.get(name, None)): return name if strict: return # Find the largest image from all possible guide cover items largest_cover = (None, 0) for ref_type, name in iteritems(guide_type_map): if ref_type.lower() in COVER_TYPES and is_raster_image(mm.get(name, None)): path = container.name_path_map.get(name, None) if path: sz = os.path.getsize(path) if sz > largest_cover[1]: largest_cover = (name, sz) if largest_cover[0]: return largest_cover[0]
def get_news_category(self, tag_class, book_ids=None): news_id = None ans = [] for item_id, val in iteritems(self.table.id_map): if val == _('News'): news_id = item_id break if news_id is None: return ans news_books = self.table.col_book_map[news_id] if book_ids is not None: news_books = news_books.intersection(book_ids) if not news_books: return ans for item_id, item_book_ids in iteritems(self.table.col_book_map): item_book_ids = item_book_ids.intersection(news_books) if item_book_ids: name = self.category_formatter(self.table.id_map[item_id]) if name == _('News'): continue c = tag_class(name, id=item_id, sort=name, id_set=item_book_ids, count=len(item_book_ids)) ans.append(c) return ans
def add_resources(self): r = Dictionary() if self.opacities: extgs = Dictionary() for opref, name in iteritems(self.opacities): extgs[name] = opref r['ExtGState'] = extgs if self.fonts: fonts = Dictionary() for ref, name in iteritems(self.fonts): fonts[name] = ref r['Font'] = fonts if self.xobjects: xobjects = Dictionary() for ref, name in iteritems(self.xobjects): xobjects[name] = ref r['XObject'] = xobjects if self.patterns: r['ColorSpace'] = Dictionary({'PCSp':Array( [Name('Pattern'), Name('DeviceRGB')])}) patterns = Dictionary() for ref, name in iteritems(self.patterns): patterns[name] = ref r['Pattern'] = patterns if r: self.page_dict['Resources'] = r
def export(destdir, library_paths=None, dbmap=None, progress1=None, progress2=None, abort=None): from calibre.db.cache import Cache from calibre.db.backend import DB if library_paths is None: library_paths = all_known_libraries() dbmap = dbmap or {} dbmap = {os.path.normcase(os.path.abspath(k)):v for k, v in iteritems(dbmap)} exporter = Exporter(destdir) exporter.metadata['libraries'] = libraries = {} total = len(library_paths) + 1 for i, (lpath, count) in enumerate(iteritems(library_paths)): if abort is not None and abort.is_set(): return if progress1 is not None: progress1(lpath, i, total) key = os.path.normcase(os.path.abspath(lpath)) db, closedb = dbmap.get(lpath), False if db is None: db = Cache(DB(lpath, load_user_formatter_functions=False)) db.init() closedb = True else: db = db.new_api db.export_library(key, exporter, progress=progress2, abort=abort) if closedb: db.close() libraries[key] = count if progress1 is not None: progress1(_('Settings and plugins'), total-1, total) if abort is not None and abort.is_set(): return exporter.export_dir(config_dir, 'config_dir') exporter.commit() if progress1 is not None: progress1(_('Completed'), total, total)
def fm_as_dict(self): return { 'custom_fields': self._tb_custom_fields, 'search_term_map': self._search_term_map, 'custom_label_to_key_map': self.custom_label_to_key_map, 'user_categories': {k:v for k, v in iteritems(self._tb_cats) if v['kind'] == 'user'}, 'search_categories': {k:v for k, v in iteritems(self._tb_cats) if v['kind'] == 'search'}, }
def pseudo_classes(self, filter_css): if filter_css: css = copy.deepcopy(self._pseudo_classes) for psel, cssdict in iteritems(css): for k in filter_css: cssdict.pop(k, None) else: css = self._pseudo_classes return {k:v for k, v in iteritems(css) if v}
def test_remove_items(self): # {{{ ' Test removal of many-(many,one) items ' cache = self.init_cache() tmap = cache.get_id_map('tags') self.assertEqual(cache.remove_items('tags', tmap), {1, 2}) tmap = cache.get_id_map('#tags') t = {v:k for k, v in iteritems(tmap)}['My Tag Two'] self.assertEqual(cache.remove_items('#tags', (t,)), {1, 2}) smap = cache.get_id_map('series') self.assertEqual(cache.remove_items('series', smap), {1, 2}) smap = cache.get_id_map('#series') s = {v:k for k, v in iteritems(smap)}['My Series Two'] self.assertEqual(cache.remove_items('#series', (s,)), {1}) for c in (cache, self.init_cache()): self.assertFalse(c.get_id_map('tags')) self.assertFalse(c.all_field_names('tags')) for bid in c.all_book_ids(): self.assertFalse(c.field_for('tags', bid)) self.assertEqual(len(c.get_id_map('#tags')), 1) self.assertEqual(c.all_field_names('#tags'), {'My Tag One'}) for bid in c.all_book_ids(): self.assertIn(c.field_for('#tags', bid), ((), ('My Tag One',))) for bid in (1, 2): self.assertEqual(c.field_for('series_index', bid), 1.0) self.assertFalse(c.get_id_map('series')) self.assertFalse(c.all_field_names('series')) for bid in c.all_book_ids(): self.assertFalse(c.field_for('series', bid)) self.assertEqual(c.field_for('series_index', 1), 1.0) self.assertEqual(c.all_field_names('#series'), {'My Series One'}) for bid in c.all_book_ids(): self.assertIn(c.field_for('#series', bid), (None, 'My Series One')) # Now test with restriction cache = self.init_cache() cache.set_field('tags', {1:'a,b,c', 2:'b,a', 3:'x,y,z'}) cache.set_field('series', {1:'a', 2:'a', 3:'b'}) cache.set_field('series_index', {1:8, 2:9, 3:3}) tmap, smap = cache.get_id_map('tags'), cache.get_id_map('series') self.assertEqual(cache.remove_items('tags', tmap, restrict_to_book_ids=()), set()) self.assertEqual(cache.remove_items('tags', tmap, restrict_to_book_ids={1}), {1}) self.assertEqual(cache.remove_items('series', smap, restrict_to_book_ids=()), set()) 
self.assertEqual(cache.remove_items('series', smap, restrict_to_book_ids=(1,)), {1}) c2 = self.init_cache() for c in (cache, c2): self.assertEqual(c.field_for('tags', 1), ()) self.assertEqual(c.field_for('tags', 2), ('b', 'a')) self.assertNotIn('c', set(itervalues(c.get_id_map('tags')))) self.assertEqual(c.field_for('series', 1), None) self.assertEqual(c.field_for('series', 2), 'a') self.assertEqual(c.field_for('series_index', 1), 1.0) self.assertEqual(c.field_for('series_index', 2), 9)
def ensure_prefix(root, prefixes, prefix, value=None): if prefixes is None: prefixes = read_prefixes(root) prefixes[prefix] = value or reserved_prefixes[prefix] prefixes = {k:v for k, v in iteritems(prefixes) if reserved_prefixes.get(k) != v} if prefixes: root.set('prefix', ' '.join('%s: %s' % (k, v) for k, v in iteritems(prefixes))) else: root.attrib.pop('prefix', None)
def load_color_themes(prefs): t = default_color_themes.copy() t.update(prefs.color_themes) disabled = frozenset(prefs.disabled_color_themes) ans = [theme_to_colors(v) for k, v in iteritems(t) if k not in disabled] if not ans: # Ignore disabled and return only the builtin color themes ans = [theme_to_colors(v) for k, v in iteritems(default_color_themes)] return ans
def dump(self, bdir): types = defaultdict(list) for r, dat in iteritems(self.record_indices): tbs_type, strings = self.dump_record(r, dat) if tbs_type == 0: continue types[tbs_type] += strings for typ, strings in iteritems(types): with open(os.path.join(bdir, 'tbs_type_%d.txt'%typ), 'wb') as f: f.write('\n'.join(strings))
def check_external_links(container, progress_callback=(lambda num, total:None), check_anchors=True): progress_callback(0, 0) external_links = defaultdict(list) for name, mt in iteritems(container.mime_map): if mt in OEB_DOCS or mt in OEB_STYLES: for href, lnum, col in container.iterlinks(name): purl = urlparse(href) if purl.scheme in ('http', 'https'): external_links[href].append((name, href, lnum, col)) if not external_links: return [] items = Queue() ans = [] tuple(map(items.put, iteritems(external_links))) progress_callback(0, len(external_links)) done = [] downloaded_html_ids = {} def check_links(): br = browser(honor_time=False, verify_ssl_certificates=False) while True: try: full_href, locations = items.get_nowait() except Empty: return href, frag = full_href.partition('#')[::2] try: res = br.open(href, timeout=10) except Exception as e: ans.append((locations, e, full_href)) else: if frag and check_anchors: ct = res.info().get('Content-Type') if ct and ct.split(';')[0].lower() in {'text/html', XHTML_MIME}: ids = downloaded_html_ids.get(href) if ids is None: try: ids = downloaded_html_ids[href] = get_html_ids(res.read()) except Exception: ids = downloaded_html_ids[href] = frozenset() if frag not in ids: ans.append((locations, ValueError('HTML anchor {} not found on the page'.format(frag)), full_href)) res.close() finally: done.append(None) progress_callback(len(done), len(external_links)) workers = [Thread(name="CheckLinks", target=check_links) for i in range(min(10, len(external_links)))] for w in workers: w.daemon = True w.start() for w in workers: w.join() return ans
def read_border(parent, dest, XPath, get, border_edges=border_edges, name='pBdr'): vals = {k % edge:inherit for edge in border_edges for k in border_props} for border in XPath('./w:' + name)(parent): for edge in border_edges: for prop, val in iteritems(read_single_border(border, edge, XPath, get)): if val is not None: vals[prop % edge] = val for key, val in iteritems(vals): setattr(dest, key, val)
def read_id_maps(self, db): ManyToOneTable.read_id_maps(self, db) # Ensure there are no records with rating=0 in the table. These should # be represented as rating:None instead. bad_ids = {item_id for item_id, rating in iteritems(self.id_map) if rating == 0} if bad_ids: self.id_map = {item_id:rating for item_id, rating in iteritems(self.id_map) if rating != 0} db.executemany('DELETE FROM {0} WHERE {1}=?'.format(self.link_table, self.metadata['link_column']), tuple((x,) for x in bad_ids)) db.execute('DELETE FROM {0} WHERE {1}=0'.format( self.metadata['table'], self.metadata['column']))
def auto_fill_manifest(container): manifest_id_map = container.manifest_id_map manifest_name_map = {v:k for k, v in iteritems(manifest_id_map)} for name, mt in iteritems(container.mime_map): if name not in manifest_name_map and not container.ok_to_be_unmanifested(name): mitem = container.generate_item(name, unique_href=False) gname = container.href_to_name(mitem.get('href'), container.opf_name) if gname != name: raise ValueError('This should never happen (gname=%r, name=%r, href=%r)' % (gname, name, mitem.get('href'))) manifest_name_map[name] = mitem.get('id') manifest_id_map[mitem.get('id')] = name
def _apply_prefs(self, prefs): for x in ('title', 'subtitle', 'footer'): attr = '%s_font_family' % x getattr(self, attr).font_family = prefs[attr] attr = '%s_font_size' % x getattr(self, attr).setValue(prefs[attr]) for x in ('title', 'subtitle', 'footer'): x += '_template' getattr(self, x).setText(prefs[x]) for x in ('width', 'height'): x = 'cover_' + x getattr(self, x).setValue(prefs[x]) color_themes = prefs['color_themes'].copy() color_themes.update(default_color_themes) disabled = set(prefs['disabled_color_themes']) self.colors_list.clear() self.colors_map = {} for name in sorted(color_themes, key=sort_key): self.colors_map[name] = li = QListWidgetItem(name, self.colors_list) li.setFlags(li.flags() | Qt.ItemIsUserCheckable) li.setCheckState(Qt.Unchecked if name in disabled else Qt.Checked) li.setData(Qt.UserRole, color_themes[name]) lu = prefs.get('last_used_colors') if not self.for_global_prefs and lu in self.colors_map and self.colors_map[lu].checkState() == Qt.Checked: self.colors_map[lu].setSelected(True) else: for name, li in iteritems(self.colors_map): if li.checkState() == Qt.Checked: li.setSelected(True) break else: next(itervalues(self.colors_map)).setSelected(True) disabled = set(prefs['disabled_styles']) self.styles_list.clear() self.style_map.clear() for name in sorted(all_styles(), key=sort_key): self.style_map[name] = li = QListWidgetItem(name, self.styles_list) li.setFlags(li.flags() | Qt.ItemIsUserCheckable) li.setCheckState(Qt.Unchecked if name in disabled else Qt.Checked) lu = prefs.get('last_used_style') if not self.for_global_prefs and lu in self.style_map and self.style_map[lu].checkState() == Qt.Checked: self.style_map[lu].setSelected(True) else: for name, li in iteritems(self.style_map): if li.checkState() == Qt.Checked: li.setSelected(True) break else: next(itervalues(self.style_map)).setSelected(True)
def release_file(self, path): ' Release the lock on the file pointed to by path. Will also release the lock on any hardlinks to path ' key = None for p, h in iteritems(self.handle_map): if samefile_windows(path, p): key = (p, h) break if key is not None: import win32file win32file.CloseHandle(key[1]) remove = [f for f, h in iteritems(self.handle_map) if h is key[1]] for x in remove: self.handle_map.pop(x)
def get_matches(self, location, query, candidates=None, allow_recursion=True): # If candidates is not None, it must not be modified. Changing its # value will break query optimization in the search parser matches = set() if candidates is None: candidates = self.all_book_ids if not candidates or not query or not query.strip(): return matches if location not in self.all_search_locations: return matches if location == 'vl': vl = self.dbcache._pref('virtual_libraries', {}).get(query) if query else None if not vl: raise ParseException( _('No such Virtual library: {}').format(query)) try: return candidates & self.dbcache.books_in_virtual_library( query) except RuntimeError: raise ParseException( _('Virtual library search is recursive: {}').format(query)) if (len(location) > 2 and location.startswith('@') and location[1:] in self.grouped_search_terms): location = location[1:] # get metadata key associated with the search term. Eliminates # dealing with plurals and other aliases original_location = location location = self.field_metadata.search_term_to_field_key( icu_lower(location.strip())) # grouped search terms if isinstance(location, list): if allow_recursion: if query.lower() == 'false': invert = True query = 'true' else: invert = False for loc in location: c = candidates.copy() m = self.get_matches(loc, query, candidates=c, allow_recursion=False) matches |= m c -= m if len(c) == 0: break if invert: matches = self.all_book_ids - matches return matches raise ParseException( _('Recursive query group detected: {0}').format(query)) # If the user has asked to restrict searching over all field, apply # that restriction if (location == 'all' and self.limit_search_columns and self.limit_search_columns_to): terms = set() for l in self.limit_search_columns_to: l = icu_lower(l.strip()) if l and l != 'all' and l in self.all_search_locations: terms.add(l) if terms: c = candidates.copy() for l in terms: try: m = self.get_matches(l, query, candidates=c, 
allow_recursion=allow_recursion) matches |= m c -= m if len(c) == 0: break except: pass return matches upf = prefs['use_primary_find_in_search'] if location in self.field_metadata: fm = self.field_metadata[location] dt = fm['datatype'] # take care of dates special case if (dt == 'datetime' or (dt == 'composite' and fm['display'].get('composite_sort', '') == 'date')): if location == 'date': location = 'timestamp' return self.date_search( icu_lower(query), partial(self.field_iter, location, candidates)) # take care of numbers special case if (dt in ('rating', 'int', 'float') or (dt == 'composite' and fm['display'].get('composite_sort', '') == 'number')): if location == 'id': is_many = False def fi(default_value=None): for qid in candidates: yield qid, {qid} else: field = self.dbcache.fields[location] fi, is_many = partial(self.field_iter, location, candidates), field.is_many if dt == 'rating' and fm['display'].get('allow_half_stars'): dt = 'half-rating' return self.num_search(icu_lower(query), fi, location, dt, candidates, is_many=is_many) # take care of the 'count' operator for is_multiples if (fm['is_multiple'] and len(query) > 1 and query[0] == '#' and query[1] in '=<>!'): return self.num_search( icu_lower(query[1:]), partial(self.dbcache.fields[location].iter_counts, candidates), location, dt, candidates) # take care of boolean special case if dt == 'bool': return self.bool_search( icu_lower(query), partial(self.field_iter, location, candidates), self.dbcache._pref('bools_are_tristate')) # special case: colon-separated fields such as identifiers. 
isbn # is a special case within the case if fm.get('is_csp', False): field_iter = partial(self.field_iter, location, candidates) if location == 'identifiers' and original_location == 'isbn': return self.keypair_search('=isbn:' + query, field_iter, candidates, upf) return self.keypair_search(query, field_iter, candidates, upf) # check for user categories if len(location) >= 2 and location.startswith('@'): return self.get_user_category_matches(location[1:], icu_lower(query), candidates) # Everything else (and 'all' matches) case_sensitive = prefs['case_sensitive'] if location == 'template': try: template, sep, query = regex.split('#@#:([tdnb]):', query, flags=regex.IGNORECASE) if sep: sep = sep.lower() else: sep = 't' except: if DEBUG: import traceback traceback.print_exc() raise ParseException( _('search template: missing or invalid separator. Valid separators are: {}' ).format('#@#:[tdnb]:')) matchkind, query = _matchkind(query, case_sensitive=case_sensitive) matches = set() error_string = '*@*TEMPLATE_ERROR*@*' template_cache = {} for book_id in candidates: mi = self.dbcache.get_proxy_metadata(book_id) val = mi.formatter.safe_format(template, {}, error_string, mi, column_name='search template', template_cache=template_cache) if val.startswith(error_string): raise ParseException(val[len(error_string):]) if sep == 't': if _match(query, [ val, ], matchkind, use_primary_find_in_search=upf, case_sensitive=case_sensitive): matches.add(book_id) elif sep == 'n' and val: matches.update( self.num_search(icu_lower(query), {val: { book_id, }}.items, '', '', { book_id, }, is_many=False)) elif sep == 'd' and val: matches.update( self.date_search(icu_lower(query), {val: { book_id, }}.items)) elif sep == 'b': matches.update( self.bool_search(icu_lower(query), { 'True' if val else 'False': { book_id, } }.items, False)) return matches matchkind, query = _matchkind(query, case_sensitive=case_sensitive) all_locs = set() text_fields = set() field_metadata = {} for x, fm in 
self.field_metadata.iter_items(): if x.startswith('@'): continue if fm['search_terms'] and x not in {'series_sort', 'id'}: if x not in self.virtual_fields and x != 'uuid': # We dont search virtual fields because if we do, search # caching will not be used all_locs.add(x) field_metadata[x] = fm if fm['datatype'] in { 'composite', 'text', 'comments', 'series', 'enumeration' }: text_fields.add(x) locations = all_locs if location == 'all' else {location} current_candidates = set(candidates) try: rating_query = int(float(query)) * 2 except: rating_query = None try: int_query = int(float(query)) except: int_query = None try: float_query = float(query) except: float_query = None for location in locations: current_candidates -= matches q = query if location == 'languages': q = canonicalize_lang(query) if q is None: lm = lang_map() rm = {v.lower(): k for k, v in iteritems(lm)} q = rm.get(query, query) if matchkind == CONTAINS_MATCH and q.lower() in {'true', 'false'}: found = set() for val, book_ids in self.field_iter(location, current_candidates): if val and (not hasattr(val, 'strip') or val.strip()): found |= book_ids matches |= (found if q.lower() == 'true' else (current_candidates - found)) continue dt = field_metadata.get(location, {}).get('datatype', None) if dt == 'rating': if rating_query is not None: for val, book_ids in self.field_iter( location, current_candidates): if val == rating_query: matches |= book_ids continue if dt == 'float': if float_query is not None: for val, book_ids in self.field_iter( location, current_candidates): if val == float_query: matches |= book_ids continue if dt == 'int': if int_query is not None: for val, book_ids in self.field_iter( location, current_candidates): if val == int_query: matches |= book_ids continue if location in text_fields: for val, book_ids in self.field_iter(location, current_candidates): if val is not None: if isinstance(val, string_or_bytes): val = (val, ) if _match(q, val, matchkind, use_primary_find_in_search=upf, 
case_sensitive=case_sensitive): matches |= book_ids if location == 'series_sort': book_lang_map = self.dbcache.fields['languages'].book_value_map for val, book_ids in self.dbcache.fields[ 'series'].iter_searchable_values_for_sort( current_candidates, book_lang_map): if val is not None: if _match(q, (val, ), matchkind, use_primary_find_in_search=upf, case_sensitive=case_sensitive): matches |= book_ids return matches
def __iter__(self):
    # Yield the (key, value) pairs of the underlying item map.
    for pair in iteritems(self.item_map):
        yield pair
def shortcuts_changed(self, smap):
    """Rebuild the reverse shortcut map and refresh widget tooltips."""
    # Invert {shortcut_name: action} into {action: [shortcut_names]}.
    rmap = defaultdict(list)
    for name, action in iteritems(smap):
        rmap[action].append(name)
    for widget in (self.actions_toolbar, self.highlights_widget):
        widget.set_tooltips(rmap)
def resolve_links(self):
    """Convert Word hyperlinks into HTML <a> elements.

    Three passes: real w:hyperlink elements, HYPERLINK fields, and
    links wrapped around images.
    """
    self.resolved_link_map = {}
    for hyperlink, spans in iteritems(self.link_map):
        relationships_by_id = self.link_source_map[hyperlink]
        # If the link covers multiple runs, wrap them in a single element.
        span = spans[0]
        if len(spans) > 1:
            span = self.wrap_elems(spans, SPAN())
    
        span.tag = 'a'
        self.resolved_link_map[hyperlink] = span
        tgt = self.namespace.get(hyperlink, 'w:tgtFrame')
        if tgt:
            span.set('target', tgt)
        tt = self.namespace.get(hyperlink, 'w:tooltip')
        if tt:
            span.set('title', tt)
        rid = self.namespace.get(hyperlink, 'r:id')
        if rid and rid in relationships_by_id:
            # External target, resolved through the relationships table.
            span.set('href', relationships_by_id[rid])
            continue
        anchor = self.namespace.get(hyperlink, 'w:anchor')
        if anchor and anchor in self.anchor_map:
            # Internal bookmark target.
            span.set('href', '#' + self.anchor_map[anchor])
            continue
        self.log.warn(
            'Hyperlink with unknown target (rid=%s, anchor=%s), ignoring' %
            (rid, anchor))
        # hrefs that point nowhere give epubcheck a hernia. The element
        # should be styled explicitly by Word anyway.
        # span.set('href', '#')

    # Second pass: links created via HYPERLINK fields. Map the field's
    # runs back to their generated HTML spans first.
    rmap = {v: k for k, v in iteritems(self.object_map)}
    for hyperlink, runs in self.fields.hyperlink_fields:
        spans = [rmap[r] for r in runs if r in rmap]
        if not spans:
            continue
        span = spans[0]
        if len(spans) > 1:
            span = self.wrap_elems(spans, SPAN())
        span.tag = 'a'
        tgt = hyperlink.get('target', None)
        if tgt:
            span.set('target', tgt)
        tt = hyperlink.get('title', None)
        if tt:
            span.set('title', tt)
        url = hyperlink.get('url', None)
        if url is None:
            anchor = hyperlink.get('anchor', None)
            if anchor in self.anchor_map:
                span.set('href', '#' + self.anchor_map[anchor])
                continue
            self.log.warn('Hyperlink field with unknown anchor: %s' % anchor)
        else:
            if url in self.anchor_map:
                # The "url" is actually an internal bookmark name.
                span.set('href', '#' + self.anchor_map[url])
                continue
            span.set('href', url)

    # Third pass: hyperlinks attached to images; wrap the <img> in an <a>.
    for img, link, relationships_by_id in self.images.links:
        parent = img.getparent()
        idx = parent.index(img)
        a = A(img)
        a.tail, img.tail = img.tail, None
        parent.insert(idx, a)
        tgt = link.get('target', None)
        if tgt:
            a.set('target', tgt)
        tt = link.get('title', None)
        if tt:
            a.set('title', tt)
        rid = link['id']
        if rid in relationships_by_id:
            dest = relationships_by_id[rid]
            if dest.startswith('#'):
                # Internal destination; remap through the anchor map if known.
                if dest[1:] in self.anchor_map:
                    a.set('href', '#' + self.anchor_map[dest[1:]])
            else:
                a.set('href', dest)
def compile_main_translations(self):
    """Compile the main UI .po files and the ISO 639 language-name files.

    Also serializes per-locale lc_data as msgpack next to each compiled
    .mo file, and writes translation-coverage statistics to self.stats.
    """
    l = {}
    lc_dataf = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'lc_data.py')
    # Execute lc_data.py to obtain its locale data table (l['data']).
    exec(compile(open(lc_dataf, 'rb').read(), lc_dataf, 'exec'), l, l)
    lcdata = {k: {k1: v1 for k1, v1 in v} for k, v in l['data']}
    self.iso639_errors = []
    self.info('Compiling main UI translation files...')
    # fmap: .po path -> (locale, destination .mo path)
    fmap = {f: self.mo_file(f) for f in self.po_files()}
    files = [(f, fmap[f][1]) for f in self.po_files()]

    def action_per_file(f):
        # After each .po is compiled, write the matching lc_data entry
        # (if any) as msgpack alongside its .mo file.
        locale, dest = fmap[f]
        ln = normalize_locale(locale).partition('.')[0]
        if ln in lcdata:
            ld = lcdata[ln]
            lcdest = self.j(self.d(dest), 'lcdata.calibre_msgpack')
            from calibre.utils.serialize import msgpack_dumps
            with open(lcdest, 'wb') as lcf:
                lcf.write(msgpack_dumps(ld))

    stats = {}

    def handle_stats(f, nums):
        # nums is (translated,) or (translated, untranslated).
        # NOTE(review): divides by total — presumably never zero for a
        # valid .po file; confirm against compile_group's contract.
        trans = nums[0]
        total = trans if len(nums) == 1 else (trans + nums[1])
        locale = fmap[f][0]
        stats[locale] = min(1.0, float(trans) / total)

    self.compile_group(files, handle_stats=handle_stats, action_per_file=action_per_file)

    self.info('Compiling ISO639 files...')
    files = []
    # Locales for which missing ISO 639 translations are expected (no warning).
    skip_iso = {
        'en_GB', 'en_CA', 'en_AU', 'si', 'ur', 'sc', 'ltg', 'nds', 'te',
        'yi', 'fo', 'sq', 'ast', 'ml', 'ku', 'fr_CA', 'him', 'jv', 'ka',
        'fur', 'ber', 'my', 'fil', 'hy', 'ug'
    }
    for f, (locale, dest) in iteritems(fmap):
        # Some locales reuse another locale's ISO 639 translations.
        iscpo = {'bn': 'bn_IN', 'zh_HK': 'zh_CN'}.get(locale, locale)
        iso639 = self.j(self.TRANSLATIONS, 'iso_639', '%s.po' % iscpo)
        if os.path.exists(iso639):
            files.append((iso639, self.j(self.d(dest), 'iso639.mo')))
        elif locale not in skip_iso:
            self.warn('No ISO 639 translations for locale:', locale)
    # self.auto_fix_iso639_files(files)
    self.compile_group(files, file_ok=self.check_iso639)

    if self.iso639_errors:
        for err in self.iso639_errors:
            print(err)
        raise SystemExit(1)

    # Persist the coverage statistics, creating the parent dir if needed.
    dest = self.stats
    base = self.d(dest)
    try:
        os.mkdir(base)
    except EnvironmentError as err:
        if err.errno != errno.EEXIST:  # only "already exists" is benign
            raise
    from calibre.utils.serialize import msgpack_dumps
    with open(dest, 'wb') as f:
        f.write(msgpack_dumps(stats))
def virtualize_resources(self):
    """Rewrite intra-book URLs into virtualized resource references.

    Populates self.book_render_data['link_to_map'] with the reverse
    link map and marks every modified file dirty.
    """
    changed = set()
    link_uid = self.book_render_data['link_uid']
    resource_template = link_uid + '|{}|'
    xlink_xpath = XPath('//*[@xl:href]')
    link_xpath = XPath('//h:a[@href]')
    res_link_xpath = XPath('//h:link[@href]')

    def link_replacer(base, url):
        # Rewrite a single URL found in file `base`; returns the new URL.
        if url.startswith('#'):
            # Same-file fragment link.
            frag = urlunquote(url[1:])
            if not frag:
                return url
            changed.add(base)
            return resource_template.format(encode_url(base, frag))
        purl = urlparse(url)
        # Leave external, query-bearing, non-file-scheme and absolute
        # URLs untouched.
        if purl.netloc or purl.query:
            return url
        if purl.scheme and purl.scheme != 'file':
            return url
        if not purl.path or purl.path.startswith('/'):
            return url
        url, frag = purl.path, purl.fragment
        name = self.href_to_name(url, base)
        if name:
            if self.has_name_and_is_not_empty(name):
                frag = urlunquote(frag)
                url = resource_template.format(encode_url(name, frag))
            else:
                # Target is in the manifest but has no data.
                if isinstance(name, unicode_type):
                    name = name.encode('utf-8')
                url = 'missing:' + force_unicode(quote(name), 'utf-8')
            changed.add(base)
        return url

    ltm = self.book_render_data['link_to_map']
    for name, mt in iteritems(self.mime_map):
        mt = mt.lower()
        if mt in OEB_STYLES:
            replaceUrls(self.parsed(name), partial(link_replacer, name))
            self.virtualized_names.add(name)
        elif mt in OEB_DOCS:
            self.virtualized_names.add(name)
            root = self.parsed(name)
            for link in res_link_xpath(root):
                ltype = (link.get('type') or 'text/css').lower()
                rel = (link.get('rel') or 'stylesheet').lower()
                if ltype != 'text/css' or rel != 'stylesheet':
                    # This link will not be loaded by the browser anyway
                    # and will cause the resource load check to hang
                    link.attrib.clear()
                    changed.add(name)
            rewrite_links(root, partial(link_replacer, name))
            for a in link_xpath(root):
                href = a.get('href')
                if href.startswith(link_uid):
                    # Internal link: neutralize href and stash the target
                    # in a data attribute for the JS frontend.
                    a.set('href', 'javascript:void(0)')
                    parts = decode_url(href.split('|')[1])
                    lname, lfrag = parts[0], parts[1]
                    ltm.setdefault(lname, {}).setdefault(lfrag or '', set()).add(name)
                    a.set('data-' + link_uid, json.dumps({'name': lname, 'frag': lfrag}, ensure_ascii=False))
                else:
                    # External link: open in a new tab, safely.
                    a.set('target', '_blank')
                    a.set('rel', 'noopener noreferrer')
                changed.add(name)
        elif mt == 'image/svg+xml':
            self.virtualized_names.add(name)
            changed.add(name)
            xlink = XLINK('href')
            for elem in xlink_xpath(self.parsed(name)):
                elem.set(xlink, link_replacer(name, elem.get(xlink)))

    for name, amap in iteritems(ltm):
        for k, v in tuple(iteritems(amap)):
            amap[k] = tuple(v)  # needed for JSON serialization

    tuple(map(self.dirty, changed))
<p>Remove soft hyphens from all text in the book.</p> '''), } def hfix(name, raw): if name == 'about': return raw.format('') raw = raw.replace('\n\n', '__XX__') raw = raw.replace('\n', ' ') raw = raw.replace('__XX__', '\n') raw = raw.replace('<', '<').replace('>', '>') return raw CLI_HELP = {x: hfix(x, re.sub('<.*?>', '', y)) for x, y in iteritems(HELP)} # }}} def update_metadata(ebook, new_opf): from calibre.ebooks.metadata.opf import get_metadata, set_metadata with ebook.open(ebook.opf_name, 'r+b') as stream, open(new_opf, 'rb') as ns: mi = get_metadata(ns)[0] mi.cover, mi.cover_data = None, (None, None) opfbytes = set_metadata(stream, mi, apply_null=True, update_timestamp=True)[0] stream.seek(0) stream.truncate()
def __call__(self, query, field_iter):
    """Return the set of book ids whose date field matches `query`.

    `field_iter` yields (value, book_ids) pairs. Supports the special
    queries 'true'/'false', relational operators, the localized
    today/yesterday/thismonth keywords, "N daysago", and literal dates.
    """
    matches = set()
    if len(query) < 2:
        return matches

    if query == 'false':
        # 'false' matches books with no (or undefined) date.
        for v, book_ids in field_iter():
            if isinstance(v, (bytes, unicode_type)):
                if isinstance(v, bytes):
                    v = v.decode(preferred_encoding, 'replace')
                v = parse_date(v)
            if v is None or v <= UNDEFINED_DATE:
                matches |= book_ids
        return matches

    if query == 'true':
        # 'true' matches books that do have a defined date.
        for v, book_ids in field_iter():
            if isinstance(v, (bytes, unicode_type)):
                if isinstance(v, bytes):
                    v = v.decode(preferred_encoding, 'replace')
                v = parse_date(v)
            if v is not None and v > UNDEFINED_DATE:
                matches |= book_ids
        return matches

    # Strip a leading relational operator; default to equality.
    for k, relop in iteritems(self.operators):
        if query.startswith(k):
            query = query[len(k):]
            break
    else:
        relop = self.operators['=']

    # field_count controls comparison granularity:
    # 3 = year/month/day, 2 = year/month, 1 = year only.
    if query in self.local_today:
        qd = now()
        field_count = 3
    elif query in self.local_yesterday:
        qd = now() - timedelta(1)
        field_count = 3
    elif query in self.local_thismonth:
        qd = now()
        field_count = 2
    else:
        m = self.daysago_pat.search(query)
        if m is not None:
            # "N daysago": strip the suffix and go back N days.
            num = query[:-len(m.group(1))]
            try:
                qd = now() - timedelta(int(num))
            except:
                raise ParseException(
                    _('Number conversion error: {0}').format(num))
            field_count = 3
        else:
            try:
                qd = parse_date(query, as_utc=False)
            except:
                raise ParseException(
                    _('Date conversion error: {0}').format(query))
            # Compare only as many fields as the user actually typed
            # (separator count + 1).
            if '-' in query:
                field_count = query.count('-') + 1
            else:
                field_count = query.count('/') + 1

    for v, book_ids in field_iter():
        if isinstance(v, string_or_bytes):
            v = parse_date(v)
        if v is not None and relop(dt_as_local(v), qd, field_count):
            matches |= book_ids
    return matches
def add_resources(raw, rmap):
    """Copy each resource in rmap into the CWD and substitute its
    placeholder in `raw` with the bare filename."""
    for placeholder, path in iteritems(rmap):
        shutil.copy2(path, '.')
        raw = raw.replace(placeholder, os.path.basename(path))
    return raw
def __call__(self):
    """Convert the parsed DOCX document into HTML.

    Drives the whole pipeline: paragraph conversion, footnotes, link
    resolution, tables, numbering, frames and CSS class generation.
    Returns the result of self.write(doc).
    """
    doc = self.docx.document
    relationships_by_id, relationships_by_type = self.docx.document_relationships
    self.fields(doc, self.log)
    self.read_styles(relationships_by_type)
    self.images(relationships_by_id)
    self.layers = OrderedDict()
    self.framed = [[]]
    self.frame_map = {}
    self.framed_map = {}
    self.anchor_map = {}
    self.link_map = defaultdict(list)
    self.link_source_map = {}
    self.toc_anchor = None
    self.block_runs = []
    paras = []

    self.log.debug('Converting Word markup to HTML')
    self.read_page_properties(doc)
    self.resolve_alternate_content(doc)
    self.current_rels = relationships_by_id
    for wp, page_properties in iteritems(self.page_map):
        self.current_page = page_properties
        if wp.tag.endswith('}p'):
            p = self.convert_p(wp)
            self.body.append(p)
            paras.append(wp)

    self.read_block_anchors(doc)
    self.styles.apply_contextual_spacing(paras)
    self.mark_block_runs(paras)
    # Apply page breaks at the start of every section, except the first
    # section (since that will be the start of the file)
    self.styles.apply_section_page_breaks(self.section_starts[1:])

    notes_header = None
    orig_rid_map = self.images.rid_map
    if self.footnotes.has_notes:
        # Render footnotes/endnotes as definition lists after the body.
        self.body.append(H1(self.notes_text))
        notes_header = self.body[-1]
        notes_header.set('class', 'notes-header')
        for anchor, text, note in self.footnotes:
            dl = DL(id=anchor)
            dl.set('class', 'footnote')
            self.body.append(dl)
            dl.append(DT('[', A('←' + text, href='#back_%s' % anchor, title=text)))
            dl[-1][0].tail = ']'
            dl.append(DD())
            paras = []
            # Notes have their own relationship (image id) namespace.
            self.images.rid_map = self.current_rels = note.rels[0]
            for wp in note:
                if wp.tag.endswith('}tbl'):
                    self.tables.register(wp, self.styles)
                    self.page_map[wp] = self.current_page
                else:
                    p = self.convert_p(wp)
                    dl[-1].append(p)
                    paras.append(wp)
            self.styles.apply_contextual_spacing(paras)
            self.mark_block_runs(paras)

    for p, wp in iteritems(self.object_map):
        if len(p) > 0 and not p.text and len(p[0]) > 0 and not p[0].text and p[0][0].get('class', None) == 'tab':
            # Paragraph uses tabs for indentation, convert to text-indent
            parent = p[0]
            tabs = []
            for child in parent:
                if child.get('class', None) == 'tab':
                    tabs.append(child)
                    if child.tail:
                        break
                else:
                    break
            indent = len(tabs) * self.settings.default_tab_stop
            style = self.styles.resolve(wp)
            if style.text_indent is inherit or (hasattr(style.text_indent, 'endswith') and style.text_indent.endswith('pt')):
                if style.text_indent is not inherit:
                    indent = float(style.text_indent[:-2]) + indent
                style.text_indent = '%.3gpt' % indent
                parent.text = tabs[-1].tail or ''
                # BUG FIX: map() is lazy on Python 3, so the previous bare
                # map(parent.remove, tabs) never removed anything. Remove
                # the tab elements explicitly.
                for tab in tabs:
                    parent.remove(tab)

    self.images.rid_map = orig_rid_map  # restore the main document's rid map
    self.resolve_links()
    self.styles.cascade(self.layers)
    self.tables.apply_markup(self.object_map, self.page_map)

    numbered = []
    for html_obj, obj in iteritems(self.object_map):
        raw = obj.get('calibre_num_id', None)
        if raw is not None:
            # calibre_num_id is "lvl:num_id".
            lvl, num_id = raw.partition(':')[0::2]
            try:
                lvl = int(lvl)
            except (TypeError, ValueError):
                lvl = 0
            numbered.append((html_obj, num_id, lvl))
    self.numbering.apply_markup(numbered, self.body, self.styles, self.object_map, self.images)
    self.apply_frames()

    if len(self.body) > 0:
        # Pretty-print: newline + tab between top-level body children.
        self.body.text = '\n\t'
        for child in self.body:
            child.tail = '\n\t'
        self.body[-1].tail = '\n'

    self.log.debug('Converting styles to CSS')
    self.styles.generate_classes()
    for html_obj, obj in iteritems(self.object_map):
        style = self.styles.resolve(obj)
        if style is not None:
            css = style.css
            if css:
                cls = self.styles.class_name(css)
                if cls:
                    html_obj.set('class', cls)
    for html_obj, css in iteritems(self.framed_map):
        cls = self.styles.class_name(css)
        if cls:
            html_obj.set('class', cls)

    if notes_header is not None:
        # Match the heading level used elsewhere in the document.
        for h in self.namespace.children(self.body, 'h1', 'h2', 'h3'):
            notes_header.tag = h.tag
            cls = h.get('class', None)
            if cls and cls != 'notes-header':
                notes_header.set('class', '%s notes-header' % cls)
            break

    self.fields.polish_markup(self.object_map)

    self.log.debug('Cleaning up redundant markup generated by Word')
    self.cover_image = cleanup_markup(self.log, self.html, self.styles, self.dest_dir, self.detect_cover, self.namespace.XPath)

    return self.write(doc)
class CreateCustomColumn(QDialog):
    """Dialog to create a new custom column or edit an existing one."""

    # Note: in this class, we are treating is_multiple as the boolean that
    # custom_columns expects to find in its structure. It does not use the dict

    # Table of supported column datatypes; the dict key is the index of
    # the entry in the column-type combo box.
    column_types = dict(enumerate((
        {'datatype': 'text',
         'text': _('Text, column shown in the Tag browser'),
         'is_multiple': False},
        {'datatype': '*text',
         'text': _('Comma separated text, like tags, shown in the Tag browser'),
         'is_multiple': True},
        {'datatype': 'comments',
         'text': _('Long text, like comments, not shown in the Tag browser'),
         'is_multiple': False},
        {'datatype': 'series',
         'text': _('Text column for keeping series-like information'),
         'is_multiple': False},
        {'datatype': 'enumeration',
         'text': _('Text, but with a fixed set of permitted values'),
         'is_multiple': False},
        {'datatype': 'datetime',
         'text': _('Date'),
         'is_multiple': False},
        {'datatype': 'float',
         'text': _('Floating point numbers'),
         'is_multiple': False},
        {'datatype': 'int',
         'text': _('Integers'),
         'is_multiple': False},
        {'datatype': 'rating',
         'text': _('Ratings, shown with stars'),
         'is_multiple': False},
        {'datatype': 'bool',
         'text': _('Yes/No'),
         'is_multiple': False},
        {'datatype': 'composite',
         'text': _('Column built from other columns'),
         'is_multiple': False},
        {'datatype': '*composite',
         'text': _('Column built from other columns, behaves like tags'),
         'is_multiple': True},
    )))
    # Reverse lookup: datatype string -> combo box index.
    column_types_map = {k['datatype']: idx for idx, k in iteritems(column_types)}

    def __init__(self, parent, current_row, current_key, standard_colheads, standard_colnames):
        # current_row/current_key identify an existing column when
        # editing; current_row is None when creating a new column.
        QDialog.__init__(self, parent)
        self.setup_ui()
        self.setWindowTitle(_('Create a custom column'))
        self.heading_label.setText('<b>' + _('Create a custom column'))
        # Remove help icon on title bar
        icon = self.windowIcon()
        self.setWindowFlags(self.windowFlags() & (~Qt.WindowContextHelpButtonHint))
        self.setWindowIcon(icon)
        self.simple_error = partial(error_dialog, self, show=True, show_copy_button=False)
        for sort_by in [_('Text'), _('Number'), _('Date'), _('Yes/No')]:
            self.composite_sort_by.addItem(sort_by)
        self.parent = parent
        self.parent.cc_column_key = None
        self.editing_col = current_row is not None
        self.standard_colheads = standard_colheads
        self.standard_colnames = standard_colnames
        self.column_type_box.setMaxVisibleItems(len(self.column_types))
        for t in self.column_types:
            self.column_type_box.addItem(self.column_types[t]['text'])
        self.column_type_box.currentIndexChanged.connect(self.datatype_changed)
        all_colors = [unicode_type(s) for s in list(QColor.colorNames())]
        self.enum_colors_label.setToolTip('<p>' + ', '.join(all_colors) + '</p>')
        if not self.editing_col:
            # Creating a new column: show the dialog and return.
            self.datatype_changed()
            self.exec_()
            return
        # Editing an existing column: populate widgets from its stored
        # definition.
        self.setWindowTitle(_('Edit custom column'))
        self.heading_label.setText('<b>' + _('Edit custom column'))
        self.shortcuts.setVisible(False)
        idx = current_row
        if idx < 0:
            self.simple_error(_('No column selected'),
                              _('No column has been selected'))
            return
        col = current_key
        if col not in parent.custcols:
            self.simple_error('', _('Selected column is not a user-defined column'))
            return
        c = parent.custcols[col]
        self.column_name_box.setText(c['label'])
        self.column_heading_box.setText(c['name'])
        self.column_heading_box.setFocus()
        ct = c['datatype']
        if c['is_multiple']:
            ct = '*' + ct
        self.orig_column_number = c['colnum']
        self.orig_column_name = col
        column_numbers = dict(map(lambda x: (self.column_types[x]['datatype'], x), self.column_types))
        self.column_type_box.setCurrentIndex(column_numbers[ct])
        self.column_type_box.setEnabled(False)
        if ct == 'datetime':
            if c['display'].get('date_format', None):
                self.format_box.setText(c['display'].get('date_format', ''))
        elif ct in ['composite', '*composite']:
            self.composite_box.setText(c['display'].get('composite_template', ''))
            sb = c['display'].get('composite_sort', 'text')
            vals = ['text', 'number', 'date', 'bool']
            if sb in vals:
                sb = vals.index(sb)
            else:
                sb = 0
            self.composite_sort_by.setCurrentIndex(sb)
            self.composite_make_category.setChecked(c['display'].get('make_category', False))
            self.composite_contains_html.setChecked(c['display'].get('contains_html', False))
        elif ct == 'enumeration':
            self.enum_box.setText(','.join(c['display'].get('enum_values', [])))
            self.enum_colors.setText(','.join(c['display'].get('enum_colors', [])))
        elif ct in ['int', 'float']:
            if c['display'].get('number_format', None):
                self.format_box.setText(c['display'].get('number_format', ''))
        elif ct == 'comments':
            idx = max(0, self.comments_heading_position.findData(c['display'].get('heading_position', 'hide')))
            self.comments_heading_position.setCurrentIndex(idx)
            idx = max(0, self.comments_type.findData(c['display'].get('interpret_as', 'html')))
            self.comments_type.setCurrentIndex(idx)
        elif ct == 'rating':
            self.allow_half_stars.setChecked(bool(c['display'].get('allow_half_stars', False)))

        # Default values
        dv = c['display'].get('default_value', None)
        if dv is not None:
            if ct == 'bool':
                self.default_value.setText(_('Yes') if dv else _('No'))
            elif ct == 'datetime':
                self.default_value.setText(_('Now') if dv == 'now' else dv)
            elif ct == 'rating':
                # Ratings are stored doubled (half-star granularity).
                if self.allow_half_stars.isChecked():
                    self.default_value.setText(unicode_type(dv / 2))
                else:
                    self.default_value.setText(unicode_type(dv // 2))
            elif ct in ('int', 'float'):
                self.default_value.setText(unicode_type(dv))
            elif ct not in ('composite', '*composite'):
                self.default_value.setText(dv)

        self.datatype_changed()
        if ct in ['text', 'composite', 'enumeration']:
            self.use_decorations.setChecked(c['display'].get('use_decorations', False))
        elif ct == '*text':
            self.is_names.setChecked(c['display'].get('is_names', False))
        self.description_box.setText(c['display'].get('description', ''))

        self.exec_()

    def shortcut_activated(self, url):  # {{{
        # Handle the "Quick create" links: pre-fill the dialog for a
        # commonly used column type.
        which = unicode_type(url).split(':')[-1]
        self.column_type_box.setCurrentIndex({
            'yesno': self.column_types_map['bool'],
            'tags': self.column_types_map['*text'],
            'series': self.column_types_map['series'],
            'rating': self.column_types_map['rating'],
            'people': self.column_types_map['*text'],
            'text': self.column_types_map['comments'],
        }.get(which, self.column_types_map['composite']))
        self.column_name_box.setText(which)
        self.column_heading_box.setText({
            'isbn': 'ISBN',
            'formats': _('Formats'),
            'yesno': _('Yes/No'),
            'tags': _('My Tags'),
            'series': _('My Series'),
            'rating': _('My Rating'),
            'people': _('People'),
            'text': _('My Title'),
        }[which])
        self.is_names.setChecked(which == 'people')
        if self.composite_box.isVisible():
            self.composite_box.setText({
                'isbn': '{identifiers:select(isbn)}',
                'formats': "{:'re(approximate_formats(), ',', ', ')'}",
            }[which])
            self.composite_sort_by.setCurrentIndex(0)
        if which == 'text':
            self.comments_heading_position.setCurrentIndex(self.comments_heading_position.findData('side'))
            self.comments_type.setCurrentIndex(self.comments_type.findData('short-text'))
    # }}}

    def setup_ui(self):  # {{{
        # Build all widgets; visibility of type-specific rows is managed
        # later by datatype_changed().
        self.setWindowModality(Qt.ApplicationModal)
        self.setWindowIcon(QIcon(I('column.png')))
        self.vl = l = QVBoxLayout(self)
        self.heading_label = la = QLabel('')
        l.addWidget(la)
        self.shortcuts = s = QLabel('')
        s.setWordWrap(True)
        s.linkActivated.connect(self.shortcut_activated)
        text = '<p>' + _('Quick create:')
        for col, name in [('isbn', _('ISBN')), ('formats', _('Formats')),
                          ('yesno', _('Yes/No')), ('tags', _('Tags')),
                          ('series', ngettext('Series', 'Series', 1)), ('rating', _('Rating')),
                          ('people', _("Names")), ('text', _('Short text'))]:
            text += ' <a href="col:%s">%s</a>,' % (col, name)
        text = text[:-1]
        s.setText(text)
        l.addWidget(s)
        self.g = g = QGridLayout()
        l.addLayout(g)
        l.addStretch(10)
        self.button_box = bb = QDialogButtonBox(QDialogButtonBox.Ok | QDialogButtonBox.Cancel, self)
        bb.accepted.connect(self.accept), bb.rejected.connect(self.reject)
        l.addWidget(bb)

        def add_row(text, widget):
            # Helper: add a labelled row to the grid; when text is None
            # the widget/layout spans the full row.
            if text is None:
                f = g.addWidget if isinstance(widget, QWidget) else g.addLayout
                f(widget, g.rowCount(), 0, 1, -1)
                return

            row = g.rowCount()
            la = QLabel(text)
            g.addWidget(la, row, 0, 1, 1)
            if isinstance(widget, QWidget):
                la.setBuddy(widget)
                g.addWidget(widget, row, 1,
                            1, 1)
            else:
                # A layout was passed; buddy the label with its first widget.
                widget.setContentsMargins(0, 0, 0, 0)
                g.addLayout(widget, row, 1, 1, 1)
                for i in range(widget.count()):
                    w = widget.itemAt(i).widget()
                    if isinstance(w, QWidget):
                        la.setBuddy(w)
                        break
            return la

        # Lookup name
        self.column_name_box = cnb = QLineEdit(self)
        cnb.setToolTip(_("Used for searching the column. Must contain only digits and lower case letters."))
        add_row(_("&Lookup name"), cnb)

        # Heading
        self.column_heading_box = chb = QLineEdit(self)
        chb.setToolTip(_("Column heading in the library view and category name in the Tag browser"))
        add_row(_("Column &heading"), chb)

        # Column Type
        h = QHBoxLayout()
        self.column_type_box = ctb = QComboBox(self)
        ctb.setMinimumWidth(70)
        ctb.setToolTip(_("What kind of information will be kept in the column."))
        h.addWidget(ctb)
        self.use_decorations = ud = QCheckBox(_("Show &checkmarks"), self)
        ud.setToolTip(_("Show check marks in the GUI. Values of 'yes', 'checked', and 'true'\n"
                        "will show a green check. Values of 'no', 'unchecked', and 'false' will show a red X.\n"
                        "Everything else will show nothing."))
        h.addWidget(ud)
        self.is_names = ins = QCheckBox(_("Contains names"), self)
        ins.setToolTip(_("Check this box if this column contains names, like the authors column."))
        h.addWidget(ins)
        add_row(_("&Column type"), h)

        # Description
        self.description_box = d = QLineEdit(self)
        d.setToolTip(_("Optional text describing what this column is for"))
        add_row(_("D&escription"), d)

        # Date/number formatting
        h = QHBoxLayout()
        self.format_box = fb = QLineEdit(self)
        h.addWidget(fb)
        self.format_default_label = la = QLabel('')
        la.setOpenExternalLinks(True), la.setWordWrap(True)
        h.addWidget(la)
        self.format_label = add_row('', h)

        # Template
        self.composite_box = cb = QLineEdit(self)
        self.composite_default_label = cdl = QLabel(_("Default: (nothing)"))
        cb.setToolTip(_("Field template. Uses the same syntax as save templates."))
        cdl.setToolTip(_("Similar to save templates. For example, %s") % "{title} {isbn}")
        h = QHBoxLayout()
        h.addWidget(cb), h.addWidget(cdl)
        self.composite_label = add_row(_("&Template"), h)

        # Comments properties
        self.comments_heading_position = ct = QComboBox(self)
        for k, text in (('hide', _('No heading')), ('above', _('Show heading above the text')),
                        ('side', _('Show heading to the side of the text'))):
            ct.addItem(text, k)
        ct.setToolTip(_('Choose whether or not the column heading is shown in the Book\n'
                        'details panel and, if shown, where'))
        self.comments_heading_position_label = add_row(_('Column heading'), ct)
        self.comments_type = ct = QComboBox(self)
        for k, text in (('html', 'HTML'), ('short-text', _('Short text, like a title')),
                        ('long-text', _('Plain text')),
                        ('markdown', _('Plain text formatted using markdown'))):
            ct.addItem(text, k)
        ct.setToolTip(_('Choose how the data in this column is interpreted.\n'
                        'This controls how the data is displayed in the Book details panel\n'
                        'and how it is edited.'))
        self.comments_type_label = add_row(_('Interpret this column as:') + ' ', ct)

        # Values for enum type
        l = QGridLayout()
        self.enum_box = eb = QLineEdit(self)
        eb.setToolTip(_("A comma-separated list of permitted values. The empty value is always\n"
                        "included, and is the default. For example, the list 'one,two,three' has\n"
                        "four values, the first of them being the empty value."))
        self.enum_default_label = la = QLabel(_("Values"))
        la.setBuddy(eb)
        l.addWidget(eb), l.addWidget(la, 0, 1)
        self.enum_colors = ec = QLineEdit(self)
        ec.setToolTip(_("A list of color names to use when displaying an item. The\n"
                        "list must be empty or contain a color for each value."))
        self.enum_colors_label = la = QLabel(_('Colors'))
        la.setBuddy(ec)
        l.addWidget(ec), l.addWidget(la, 1, 1)
        self.enum_label = add_row(_('&Values'), l)

        # Rating allow half stars
        self.allow_half_stars = ahs = QCheckBox(_('Allow half stars'))
        ahs.setToolTip(_('Allow half star ratings, for example: ') + '★★★⯨')
        add_row(None, ahs)

        # Composite display properties
        l = QHBoxLayout()
        self.composite_sort_by_label = la = QLabel(_("&Sort/search column by"))
        self.composite_sort_by = csb = QComboBox(self)
        la.setBuddy(csb), csb.setToolTip(_("How this column should handled in the GUI when sorting and searching"))
        l.addWidget(la), l.addWidget(csb)
        self.composite_make_category = cmc = QCheckBox(_("Show in Tag browser"))
        cmc.setToolTip(_("If checked, this column will appear in the Tag browser as a category"))
        l.addWidget(cmc)
        self.composite_contains_html = cch = QCheckBox(_("Show as HTML in Book details"))
        cch.setToolTip('<p>' + _(
            'If checked, this column will be displayed as HTML in '
            'Book details and the Content server. This can be used to '
            'construct links with the template language. For example, '
            'the template '
            '<pre><big><b>{title}</b></big>'
            '{series:| [|}{series_index:| [|]]}</pre>'
            'will create a field displaying the title in bold large '
            'characters, along with the series, for example <br>"<big><b>'
            'An Oblique Approach</b></big> [Belisarius [1]]". The template '
            '<pre><a href="https://www.beam-ebooks.de/ebook/{identifiers'
            ':select(beam)}">Beam book</a></pre> '
            'will generate a link to the book on the Beam e-books site.') + '</p>')
        l.addWidget(cch)
        add_row(None, l)

        # Default value
        self.default_value = dv = QLineEdit(self)
        dv.setToolTip('<p>' + _(
            'Default value when a new book is added to the '
            'library. For Date columns enter the word "Now", or the date as '
            'yyyy-mm-dd. For Yes/No columns enter "Yes" or "No". For Text with '
            'a fixed set of values enter one of the permitted values. For '
            'Rating columns enter a number between 0 and 5.') + '</p>')
        self.default_value_label = add_row(_('Default value'), dv)

        self.resize(self.sizeHint())
    # }}}

    def datatype_changed(self, *args):
        # Show/hide the type-specific widget rows whenever the selected
        # column type changes.
        try:
            col_type = self.column_types[self.column_type_box.currentIndex()]['datatype']
        except:
            col_type = None
        needs_format = col_type in ('datetime', 'int', 'float')
        for x in ('box', 'default_label', 'label'):
            getattr(self, 'format_' + x).setVisible(needs_format)
        if needs_format:
            if col_type == 'datetime':
                l, dl = _('&Format for dates'), _('Default: dd MMM yyyy.')
                self.format_box.setToolTip(_(
                    '<p>Date format.</p>'
                    '<p>The formatting codes are:'
                    '<ul>'
                    '<li>d : the day as number without a leading zero (1 to 31)</li>'
                    '<li>dd : the day as number with a leading zero (01 to 31)</li>'
                    '<li>ddd : the abbreviated localized day name (e.g. "Mon" to "Sun").</li>'
                    '<li>dddd : the long localized day name (e.g. "Monday" to "Sunday").</li>'
                    '<li>M : the <b>month</b> as number without a leading zero (1 to 12).</li>'
                    '<li>MM : the <b>month</b> as number with a leading zero (01 to 12)</li>'
                    '<li>MMM : the abbreviated localized <b>month</b> name (e.g. "Jan" to "Dec").</li>'
                    '<li>MMMM : the long localized <b>month</b> name (e.g. "January" to "December").</li>'
                    '<li>yy : the year as two digit number (00 to 99).</li>'
                    '<li>yyyy : the year as four digit number.</li>'
                    '<li>h : the hours without a leading 0 (0 to 11 or 0 to 23, depending on am/pm)</li>'
                    '<li>hh : the hours with a leading 0 (00 to 11 or 00 to 23, depending on am/pm)</li>'
                    '<li>m : the <b>minutes</b> without a leading 0 (0 to 59)</li>'
                    '<li>mm : the <b>minutes</b> with a leading 0 (00 to 59)</li>'
                    '<li>s : the seconds without a leading 0 (0 to 59)</li>'
                    '<li>ss : the seconds with a leading 0 (00 to 59)</li>'
                    '<li>ap : use a 12-hour clock instead of a 24-hour clock, with "ap" replaced by the localized string for am or pm</li>'
                    '<li>AP : use a 12-hour clock instead of a 24-hour clock, with "AP" replaced by the localized string for AM or PM</li>'
                    '<li>iso : the date with time and timezone. Must be the only format present</li>'
                    '</ul></p>'
                    "<p>For example:\n"
                    "<ul>\n"
                    "<li>ddd, d MMM yyyy gives Mon, 5 Jan 2010<li>\n"
                    "<li>dd MMMM yy gives 05 January 10</li>\n"
                    "</ul> "))
            else:
                l, dl = _('&Format for numbers'), ('<p>' + _(
                    'Default: Not formatted. For format language details see'
                    ' <a href="https://docs.python.org/library/string.html#format-string-syntax">the Python documentation</a>'))
                if col_type == 'int':
                    self.format_box.setToolTip('<p>' + _(
                        'Examples: The format <code>{0:0>4d}</code> '
                        'gives a 4-digit number with leading zeros. The format '
                        '<code>{0:d} days</code> prints the number then the word "days"') + '</p>')
                else:
                    self.format_box.setToolTip('<p>' + _(
                        'Examples: The format <code>{0:.1f}</code> gives a floating '
                        'point number with 1 digit after the decimal point. The format '
                        '<code>Price: $ {0:,.2f}</code> prints '
                        '"Price $ " then displays the number with 2 digits '
                        'after the decimal point and thousands separated by commas.'
                    ) + '</p>')
            self.format_label.setText(l), self.format_default_label.setText(dl)
        for x in ('box', 'default_label', 'label', 'sort_by', 'sort_by_label',
                  'make_category', 'contains_html'):
            getattr(self, 'composite_' + x).setVisible(col_type in ['composite', '*composite'])
        for x in ('box', 'default_label', 'label', 'colors', 'colors_label'):
            getattr(self, 'enum_' + x).setVisible(col_type == 'enumeration')
        for x in ('value_label', 'value'):
            getattr(self, 'default_' + x).setVisible(col_type not in ['composite', '*composite'])
        self.use_decorations.setVisible(col_type in ['text', 'composite', 'enumeration'])
        self.is_names.setVisible(col_type == '*text')
        is_comments = col_type == 'comments'
        self.comments_heading_position.setVisible(is_comments)
        self.comments_heading_position_label.setVisible(is_comments)
        self.comments_type.setVisible(is_comments)
        self.comments_type_label.setVisible(is_comments)
        self.allow_half_stars.setVisible(col_type == 'rating')

    def accept(self):
        # Validate everything the user entered and, if valid, write the
        # column definition into parent.custcols before closing.
        col = unicode_type(self.column_name_box.text()).strip()
        if not col:
            return self.simple_error('', _('No lookup name was provided'))
        if col.startswith('#'):
            col = col[1:]
        if re.match(r'^\w*$', col) is None or not col[0].isalpha() or col.lower() != col:
            return self.simple_error('', _('The lookup name must contain only '
                                           'lower case letters, digits and underscores, and start with a letter'))
        if col.endswith('_index'):
            return self.simple_error('', _('Lookup names cannot end with _index, '
                                           'because these names are reserved for the index of a series column.'))
        col_heading = unicode_type(self.column_heading_box.text()).strip()
        coldef = self.column_types[self.column_type_box.currentIndex()]
        col_type = coldef['datatype']
        if col_type[0] == '*':
            # A leading '*' marks a multiple-valued variant of the type.
            col_type = col_type[1:]
            is_multiple = True
        else:
            is_multiple = False
        if not col_heading:
            return self.simple_error('', _('No column heading was provided'))
        db = self.parent.gui.library_view.model().db
        key = db.field_metadata.custom_field_prefix + col
        # Reject duplicate lookup names (unless we are editing that column).
        bad_col = False
        if key in self.parent.custcols:
            if not self.editing_col or \
                    self.parent.custcols[key]['colnum'] != self.orig_column_number:
                bad_col = True
        if bad_col:
            return self.simple_error('', _('The lookup name %s is already used') % col)
        # Reject duplicate headings, including clashes with built-in columns.
        bad_head = False
        for t in self.parent.custcols:
            if self.parent.custcols[t]['name'] == col_heading:
                if not self.editing_col or \
                        self.parent.custcols[t]['colnum'] != self.orig_column_number:
                    bad_head = True
        for t in self.standard_colheads:
            if self.standard_colheads[t] == col_heading:
                bad_head = True
        if bad_head:
            return self.simple_error('', _('The heading %s is already used') % col_heading)

        display_dict = {}
        default_val = (unicode_type(self.default_value.text()).strip()
                       if col_type != 'composite' else None)

        if col_type == 'datetime':
            if unicode_type(self.format_box.text()).strip():
                display_dict = {'date_format': unicode_type(self.format_box.text()).strip()}
            else:
                display_dict = {'date_format': None}
            if default_val:
                if default_val == _('Now'):
                    display_dict['default_value'] = 'now'
                else:
                    try:
                        tv = parse_date(default_val)
                    except:
                        tv = UNDEFINED_DATE
                    if tv == UNDEFINED_DATE:
                        return self.simple_error(_('Invalid default value'),
                                                 _('The default value must be "Now" or a date'))
                    display_dict['default_value'] = default_val
        elif col_type == 'composite':
            if not unicode_type(self.composite_box.text()).strip():
                return self.simple_error('', _('You must enter a template for '
                                               'composite columns'))
            display_dict = {'composite_template': unicode_type(self.composite_box.text()).strip(),
                            'composite_sort': ['text', 'number', 'date', 'bool'][self.composite_sort_by.currentIndex()],
                            'make_category': self.composite_make_category.isChecked(),
                            'contains_html': self.composite_contains_html.isChecked(),
                            }
        elif col_type == 'enumeration':
            if not unicode_type(self.enum_box.text()).strip():
                return self.simple_error('', _('You must enter at least one '
                                               'value for enumeration columns'))
            l = [v.strip() for v in unicode_type(self.enum_box.text()).split(',') if v.strip()]
            # Values must be unique case-insensitively.
            l_lower = [v.lower() for v in l]
            for i, v in enumerate(l_lower):
                if v in l_lower[i + 1:]:
                    return self.simple_error('', _('The value "{0}" is in the '
                                                   'list more than once, perhaps with different case').format(l[i]))
            c = unicode_type(self.enum_colors.text())
            if c:
                c = [v.strip() for v in unicode_type(self.enum_colors.text()).split(',')]
            else:
                c = []
            if len(c) != 0 and len(c) != len(l):
                return self.simple_error('', _('The colors box must be empty or '
                                               'contain the same number of items as the value box'))
            # Colors must be Qt color names or #rgb/#rrggbb(aa) hex.
            for tc in c:
                if tc not in QColor.colorNames() and not re.match("#(?:[0-9a-f]{3}){1,4}", tc, re.I):
                    return self.simple_error('', _('The color {0} is unknown').format(tc))
            display_dict = {'enum_values': l, 'enum_colors': c}
            if default_val:
                if default_val not in l:
                    return self.simple_error(_('Invalid default value'),
                                             _('The default value must be one of the permitted values'))
                display_dict['default_value'] = default_val
        elif col_type == 'text' and is_multiple:
            display_dict = {'is_names': self.is_names.isChecked()}
        elif col_type in ['int', 'float']:
            if unicode_type(self.format_box.text()).strip():
                display_dict = {'number_format': unicode_type(self.format_box.text()).strip()}
            else:
                display_dict = {'number_format': None}
            if default_val:
                try:
                    if col_type == 'int':
                        msg = _('The default value must be an integer')
                        tv = int(default_val)
                        display_dict['default_value'] = tv
                    else:
                        msg = _('The default value must be a real number')
                        tv = float(default_val)
                        display_dict['default_value'] = tv
                except:
                    return self.simple_error(_('Invalid default value'), msg)
        elif col_type == 'comments':
            display_dict['heading_position'] = unicode_type(self.comments_heading_position.currentData())
            display_dict['interpret_as'] = unicode_type(self.comments_type.currentData())
        elif col_type == 'rating':
            half_stars = bool(self.allow_half_stars.isChecked())
            display_dict['allow_half_stars'] = half_stars
            if default_val:
                # Ratings are stored doubled (half-star granularity, 0..10).
                try:
                    tv = int((float(default_val) if half_stars else int(default_val)) * 2)
                except:
                    tv = -1
                if tv < 0 or tv > 10:
                    if half_stars:
                        return self.simple_error(_('Invalid default value'),
                                                 _('The default value must be a real number between 0 and 5.0'))
                    else:
                        return self.simple_error(_('Invalid default value'),
                                                 _('The default value must be an integer between 0 and 5'))
                display_dict['default_value'] = tv
        elif col_type == 'bool':
            if default_val:
                tv = {_('Yes'): True, _('No'): False}.get(default_val, None)
                if tv is None:
                    return self.simple_error(_('Invalid default value'),
                                             _('The default value must be "Yes" or "No"'))
                display_dict['default_value'] = tv

        if col_type in ['text', 'composite', 'enumeration'] and not is_multiple:
            display_dict['use_decorations'] = self.use_decorations.checkState()

        if default_val and 'default_value' not in display_dict:
            display_dict['default_value'] = default_val

        display_dict['description'] = self.description_box.text().strip()

        if not self.editing_col:
            self.parent.custcols[key] = {
                'label': col,
                'name': col_heading,
                'datatype': col_type,
                'display': display_dict,
                'normalized': None,
                'colnum': None,
                'is_multiple': is_multiple,
            }
            self.parent.cc_column_key = key
        else:
            # Editing: update the existing definition in place and flag
            # that a restart is required for the change to take effect.
            self.parent.custcols[self.orig_column_name]['label'] = col
            self.parent.custcols[self.orig_column_name]['name'] = col_heading
            self.parent.custcols[self.orig_column_name]['display'].update(display_dict)
            self.parent.custcols[self.orig_column_name]['*edited'] = True
            self.parent.custcols[self.orig_column_name]['*must_restart'] = True
            self.parent.cc_column_key = key
        QDialog.accept(self)

    # NOTE(review): the next method is truncated at this chunk boundary.
    def
reject(self): QDialog.reject(self)
def vals(self): raw = unicode_type(self.lineEdit().text()) for k, v in iteritems(self.comma_map): raw = raw.replace(k, v) parts = [x.strip() for x in raw.split(',')] return [self.comma_rmap.get(x, x) for x in parts]
def build_exth(metadata, prefer_author_sort=False, is_periodical=False,
               share_not_sync=True, cover_offset=None, thumbnail_offset=None,
               start_offset=None, mobi_doctype=2, num_of_resources=None,
               kf8_unknown_count=0, be_kindlegen2=False, kf8_header_index=None,
               page_progression_direction=None, primary_writing_mode=None):
    """Serialize *metadata* (an OPF metadata object) into a MOBI EXTH
    header block and return it as bytes.

    Each EXTH record is ``pack('>II', code, length)`` followed by the
    payload; the whole block is the ``EXTH`` magic, a header giving total
    length and record count, the records, and NUL padding to a 4-byte
    boundary (always at least one pad byte).

    :raises ValueError: if metadata contains neither a date nor a timestamp.
    """
    exth = BytesIO()
    nrecs = 0
    for term in metadata:
        if term not in EXTH_CODES:
            continue
        code = EXTH_CODES[term]
        items = metadata[term]
        if term == 'creator':
            if prefer_author_sort:
                creators = [authors_to_sort_string([unicode_type(c)]) for c in items]
            else:
                creators = [unicode_type(c) for c in items]
            items = creators
        elif term == 'rights':
            try:
                rights = utf8_text(unicode_type(metadata.rights[0]))
            except Exception:
                # best effort: missing/unusable rights degrade to a placeholder
                rights = b'Unknown'
            exth.write(pack(b'>II', EXTH_CODES['rights'], len(rights) + 8))
            exth.write(rights)
            nrecs += 1
            continue
        for item in items:
            data = unicode_type(item)
            if term != 'description':
                data = COLLAPSE_RE.sub(' ', data)
            if term == 'identifier':
                # only ISBN identifiers are written to EXTH
                if data.lower().startswith('urn:isbn:'):
                    data = data[9:]
                elif item.scheme.lower() == 'isbn':
                    pass
                else:
                    continue
            if term == 'language':
                d2 = lang_as_iso639_1(data)
                if d2:
                    data = d2
            data = utf8_text(data)
            exth.write(pack(b'>II', code, len(data) + 8))
            exth.write(data)
            nrecs += 1

    # Write UUID as ASIN
    uuid = None
    from calibre.ebooks.oeb.base import OPF
    for x in metadata['identifier']:
        # FIX: the scheme attribute may be absent; guard against None so a
        # scheme-less identifier no longer raises AttributeError
        if ((x.get(OPF('scheme'), '') or '').lower() == 'uuid' or
                unicode_type(x).startswith('urn:uuid:')):
            uuid = unicode_type(x).split(':')[-1]
            break
    if uuid is None:
        from uuid import uuid4
        uuid = str(uuid4())
    if isinstance(uuid, unicode_type):
        uuid = uuid.encode('utf-8')
    if not share_not_sync:
        exth.write(pack(b'>II', 113, len(uuid) + 8))
        exth.write(uuid)
        nrecs += 1

    # Write UUID as SOURCE
    c_uuid = b'calibre:%s' % uuid
    exth.write(pack(b'>II', 112, len(c_uuid) + 8))
    exth.write(c_uuid)
    nrecs += 1

    # Write cdetype (record 501)
    if not is_periodical:
        if not share_not_sync:
            exth.write(pack(b'>II', 501, 12))
            exth.write(b'EBOK')
            nrecs += 1
    else:
        ids = {0x101: b'NWPR', 0x103: b'MAGZ'}.get(mobi_doctype, None)
        if ids:
            exth.write(pack(b'>II', 501, 12))
            exth.write(ids)
            nrecs += 1

    # Add a publication date entry
    # FIX: initialize datestr so that missing date AND timestamp raises the
    # intended ValueError instead of a NameError on the guard below
    datestr = None
    if metadata['date']:
        datestr = str(metadata['date'][0])
    elif metadata['timestamp']:
        datestr = str(metadata['timestamp'][0])
    if datestr is None:
        raise ValueError("missing date or timestamp")
    datestr = datestr.encode('utf-8')
    exth.write(pack(b'>II', EXTH_CODES['pubdate'], len(datestr) + 8))
    exth.write(datestr)
    nrecs += 1
    if is_periodical:
        exth.write(pack(b'>II', EXTH_CODES['lastupdatetime'], len(datestr) + 8))
        exth.write(datestr)
        nrecs += 1

    # Creator software records (204-207)
    if be_kindlegen2:
        mv = 200 if iswindows else 202 if isosx else 201
        vals = {204: mv, 205: 2, 206: 9, 207: 0}
    elif is_periodical:
        # Pretend to be amazon's super secret periodical generator
        vals = {204: 201, 205: 2, 206: 0, 207: 101}
    else:
        # Pretend to be kindlegen 1.2
        vals = {204: 201, 205: 1, 206: 2, 207: 33307}
    for code, val in iteritems(vals):
        exth.write(pack(b'>III', code, 12, val))
        nrecs += 1
    if be_kindlegen2:
        revnum = b'0730-890adc2'
        exth.write(pack(b'>II', 535, 8 + len(revnum)) + revnum)
        nrecs += 1

    if cover_offset is not None:
        exth.write(pack(b'>III', EXTH_CODES['coveroffset'], 12, cover_offset))
        exth.write(pack(b'>III', EXTH_CODES['hasfakecover'], 12, 0))
        nrecs += 2
    if thumbnail_offset is not None:
        exth.write(pack(b'>III', EXTH_CODES['thumboffset'], 12, thumbnail_offset))
        thumbnail_uri_str = ('kindle:embed:%s' % (to_base(
            thumbnail_offset, base=32, min_num_digits=4))).encode('utf-8')
        exth.write(pack(b'>II', EXTH_CODES['kf8_thumbnail_uri'], len(thumbnail_uri_str) + 8))
        exth.write(thumbnail_uri_str)
        nrecs += 2

    if start_offset is not None:
        # start_offset may be a single int or a sequence of them
        try:
            len(start_offset)
        except TypeError:
            start_offset = [start_offset]
        for so in start_offset:
            if so is not None:
                exth.write(pack(b'>III', EXTH_CODES['startreading'], 12, so))
                nrecs += 1

    if kf8_header_index is not None:
        exth.write(pack(b'>III', EXTH_CODES['kf8_header_index'], 12, kf8_header_index))
        nrecs += 1
    if num_of_resources is not None:
        exth.write(pack(b'>III', EXTH_CODES['num_of_resources'], 12, num_of_resources))
        nrecs += 1
    if kf8_unknown_count is not None:
        exth.write(pack(b'>III', EXTH_CODES['kf8_unknown_count'], 12, kf8_unknown_count))
        nrecs += 1
    if primary_writing_mode:
        pwm = primary_writing_mode.encode('utf-8')
        exth.write(pack(b'>II', EXTH_CODES['primary_writing_mode'], len(pwm) + 8))
        exth.write(pwm)
        nrecs += 1
    if page_progression_direction in {'rtl', 'ltr', 'default'}:
        ppd = page_progression_direction.encode('ascii')
        exth.write(pack(b'>II', EXTH_CODES['page_progression_direction'], len(ppd) + 8))
        exth.write(ppd)
        nrecs += 1

    exth = exth.getvalue()
    trail = len(exth) % 4
    pad = b'\0' * (4 - trail)  # Always pad w/ at least 1 byte
    exth = [b'EXTH', pack(b'>II', len(exth) + 12, nrecs), exth, pad]
    return b''.join(exth)
def set_button_texts(self): for but, text in iteritems(self.BUTTON_TEXTS): self.setButtonText(getattr(self, but + 'Button'), _(text))
    def test_legacy_setters(self):  # {{{
        'Test methods that are directly equivalent in the old and new interface'
        from calibre.ebooks.metadata.book.base import Metadata
        from calibre.utils.date import now
        n = now()
        ndb = self.init_legacy(self.cloned_library)
        amap = ndb.new_api.get_id_map('authors')
        sorts = [(aid, 's%d' % aid) for aid in amap]
        db = self.init_old(self.cloned_library)
        # run_funcs calls each named method on both db (old) and ndb (new)
        # and asserts equal results; entries that are bare callables like
        # (db.clean,) are invoked directly. Prefix characters on the method
        # name select how results are compared -- see run_funcs (e.g. '@'
        # presumably compares as a set, '+' applies the extra callable).
        run_funcs(self, db, ndb, (
            ('+format_metadata', 1, 'FMT1', itemgetter('size')),
            ('+format_metadata', 1, 'FMT2', itemgetter('size')),
            ('+format_metadata', 2, 'FMT1', itemgetter('size')),
            ('get_tags', 0), ('get_tags', 1), ('get_tags', 2),
            ('is_tag_used', 'News'), ('is_tag_used', 'xchkjgfh'),
            ('bulk_modify_tags', (1,), ['t1'], ['News']),
            ('bulk_modify_tags', (2,), ['t1'], ['Tag One', 'Tag Two']),
            ('bulk_modify_tags', (3,), ['t1', 't2', 't3']),
            (db.clean,),
            ('@all_tags',),
            ('@tags', 0), ('@tags', 1), ('@tags', 2),
            ('unapply_tags', 1, ['t1']),
            ('unapply_tags', 2, ['xxxx']),
            ('unapply_tags', 3, ['t2', 't3']),
            (db.clean,),
            ('@all_tags',),
            ('@tags', 0), ('@tags', 1), ('@tags', 2),
            ('update_last_modified', (1,), True, n),
            ('update_last_modified', (3,), True, n),
            ('metadata_last_modified', 1, True),
            ('metadata_last_modified', 3, True),
            ('set_sort_field_for_author', sorts[0][0], sorts[0][1]),
            ('set_sort_field_for_author', sorts[1][0], sorts[1][1]),
            ('set_sort_field_for_author', sorts[2][0], sorts[2][1]),
            ('set_link_field_for_author', sorts[0][0], sorts[0][1]),
            ('set_link_field_for_author', sorts[1][0], sorts[1][1]),
            ('set_link_field_for_author', sorts[2][0], sorts[2][1]),
            (db.refresh,),
            ('author_sort', 0), ('author_sort', 1), ('author_sort', 2),
        ))
        # author sort/link maps must agree between the two implementations
        omi = [db.get_metadata(x) for x in (0, 1, 2)]
        nmi = [ndb.get_metadata(x) for x in (0, 1, 2)]
        self.assertEqual([x.author_sort_map for x in omi],
                         [x.author_sort_map for x in nmi])
        self.assertEqual([x.author_link_map for x in omi],
                         [x.author_link_map for x in nmi])
        db.close()

        # Fresh copies of the library: exercise the individual field setters
        ndb = self.init_legacy(self.cloned_library)
        db = self.init_old(self.cloned_library)
        run_funcs(self, db, ndb, (
            ('set_authors', 1, ('author one',),),
            ('set_authors', 2, ('author two',), True, True, True),
            ('set_author_sort', 3, 'new_aus'),
            ('set_comment', 1, ''),
            ('set_comment', 2, None),
            ('set_comment', 3, '<p>a comment</p>'),
            ('set_has_cover', 1, True),
            ('set_has_cover', 2, True),
            ('set_has_cover', 3, 1),
            ('set_identifiers', 2, {'test': '', 'a': 'b'}),
            ('set_identifiers', 3, {'id': '1', 'isbn': '9783161484100'}),
            ('set_identifiers', 1, {}),
            ('set_languages', 1, ('en',)),
            ('set_languages', 2, ()),
            ('set_languages', 3, ('deu', 'spa', 'fra')),
            ('set_pubdate', 1, None),
            ('set_pubdate', 2, '2011-1-7'),
            ('set_series', 1, 'a series one'),
            ('set_series', 2, 'another series [7]'),
            ('set_series', 3, 'a third series'),
            ('set_publisher', 1, 'publisher two'),
            ('set_publisher', 2, None),
            ('set_publisher', 3, 'a third puB'),
            ('set_rating', 1, 2.3),
            ('set_rating', 2, 0),
            ('set_rating', 3, 8),
            ('set_timestamp', 1, None),
            ('set_timestamp', 2, '2011-1-7'),
            ('set_uuid', 1, None),
            ('set_uuid', 2, 'a test uuid'),
            ('set_title', 1, 'title two'),
            ('set_title', 2, None),
            ('set_title', 3, 'The Test Title'),
            ('set_tags', 1, ['a1', 'a2'], True),
            ('set_tags', 2, ['b1', 'tag one'], False, False, False, True),
            ('set_tags', 3, ['A1']),
            (db.refresh,),
            ('title', 0), ('title', 1), ('title', 2),
            ('title_sort', 0), ('title_sort', 1), ('title_sort', 2),
            ('authors', 0), ('authors', 1), ('authors', 2),
            ('author_sort', 0), ('author_sort', 1), ('author_sort', 2),
            ('has_cover', 3), ('has_cover', 1), ('has_cover', 2),
            ('get_identifiers', 0), ('get_identifiers', 1), ('get_identifiers', 2),
            ('pubdate', 0), ('pubdate', 1), ('pubdate', 2),
            ('timestamp', 0), ('timestamp', 1), ('timestamp', 2),
            ('publisher', 0), ('publisher', 1), ('publisher', 2),
            ('rating', 0), ('+rating', 1, lambda x: x or 0), ('rating', 2),
            ('series', 0), ('series', 1), ('series', 2),
            ('series_index', 0), ('series_index', 1), ('series_index', 2),
            ('uuid', 0), ('uuid', 1), ('uuid', 2),
            ('isbn', 0), ('isbn', 1), ('isbn', 2),
            ('@tags', 0), ('@tags', 1), ('@tags', 2),
            ('@all_tags',),
            ('@get_all_identifier_types',),
            ('set_title_sort', 1, 'Title Two'),
            ('set_title_sort', 2, None),
            ('set_title_sort', 3, 'The Test Title_sort'),
            ('set_series_index', 1, 2.3),
            ('set_series_index', 2, 0),
            ('set_series_index', 3, 8),
            ('set_identifier', 1, 'moose', 'val'),
            ('set_identifier', 2, 'test', ''),
            ('set_identifier', 3, '', ''),
            (db.refresh,),
            ('series_index', 0), ('series_index', 1), ('series_index', 2),
            ('title_sort', 0), ('title_sort', 1), ('title_sort', 2),
            ('get_identifiers', 0), ('get_identifiers', 1), ('get_identifiers', 2),
            ('@get_all_identifier_types',),
            ('set_metadata', 1, Metadata('title', ('a1',)), False, False, False, True, True),
            ('set_metadata', 3, Metadata('title', ('a1',))),
            (db.refresh,),
            ('title', 0), ('title', 1), ('title', 2),
            ('title_sort', 0), ('title_sort', 1), ('title_sort', 2),
            ('authors', 0), ('authors', 1), ('authors', 2),
            ('author_sort', 0), ('author_sort', 1), ('author_sort', 2),
            ('@tags', 0), ('@tags', 1), ('@tags', 2),
            ('@all_tags',),
            ('@get_all_identifier_types',),
        ))
        db.close()

        # Fresh copies: exercise the generic set() entry point and tag deletion
        ndb = self.init_legacy(self.cloned_library)
        db = self.init_old(self.cloned_library)
        run_funcs(self, db, ndb, (
            ('set', 0, 'title', 'newtitle'),
            ('set', 0, 'tags', 't1,t2,tag one', True),
            ('set', 0, 'authors', 'author one & Author Two', True),
            ('set', 0, 'rating', 3.2),
            ('set', 0, 'publisher', 'publisher one', False),
            (db.refresh,),
            ('title', 0),
            ('rating', 0),
            ('#tags', 0), ('#tags', 1), ('#tags', 2),
            ('authors', 0), ('authors', 1), ('authors', 2),
            ('publisher', 0), ('publisher', 1), ('publisher', 2),
            ('delete_tag', 'T1'), ('delete_tag', 'T2'),
            ('delete_tag', 'Tag one'), ('delete_tag', 'News'),
            (db.clean,), (db.refresh,),
            ('@all_tags',),
            ('#tags', 0), ('#tags', 1), ('#tags', 2),
        ))
        db.close()

        # Fresh copies: removing all tags at once
        ndb = self.init_legacy(self.cloned_library)
        db = self.init_old(self.cloned_library)
        run_funcs(self, db, ndb, (
            ('remove_all_tags', (1, 2, 3)),
            (db.clean,),
            ('@all_tags',),
            ('@tags', 0), ('@tags', 1), ('@tags', 2),
        ))
        db.close()

        # Fresh copies: renaming items by id (ids looked up via the new API)
        ndb = self.init_legacy(self.cloned_library)
        db = self.init_old(self.cloned_library)
        a = {v: k for k, v in iteritems(ndb.new_api.get_id_map('authors'))}['Author One']
        t = {v: k for k, v in iteritems(ndb.new_api.get_id_map('tags'))}['Tag One']
        s = {v: k for k, v in iteritems(ndb.new_api.get_id_map('series'))}['A Series One']
        p = {v: k for k, v in iteritems(ndb.new_api.get_id_map('publisher'))}['Publisher One']
        run_funcs(self, db, ndb, (
            ('rename_author', a, 'Author Two'),
            ('rename_tag', t, 'News'),
            ('rename_series', s, 'ss'),
            ('rename_publisher', p, 'publisher one'),
            (db.clean,), (db.refresh,),
            ('@all_tags',),
            ('tags', 0), ('tags', 1), ('tags', 2),
            ('series', 0), ('series', 1), ('series', 2),
            ('publisher', 0), ('publisher', 1), ('publisher', 2),
            ('series_index', 0), ('series_index', 1), ('series_index', 2),
            ('authors', 0), ('authors', 1), ('authors', 2),
            ('author_sort', 0), ('author_sort', 1), ('author_sort', 2),
        ))
        db.close()
    def test_legacy_custom(self):  # {{{
        'Test the legacy API for custom columns'
        ndb = self.init_legacy(self.cloned_library)
        db = self.init_old(self.cloned_library)
        # Test getting
        # run_funcs calls each method on both old and new db and asserts
        # equal results; '$' prefixed names presumably get special result
        # normalization -- see run_funcs.
        run_funcs(self, db, ndb, (
            ('all_custom', 'series'), ('all_custom', 'tags'),
            ('all_custom', 'rating'), ('all_custom', 'authors'),
            ('all_custom', None, 7),
            ('get_next_cc_series_num_for', 'My Series One', 'series'),
            ('get_next_cc_series_num_for', 'My Series Two', 'series'),
            ('is_item_used_in_multiple', 'My Tag One', 'tags'),
            ('is_item_used_in_multiple', 'My Series One', 'series'),
            ('$get_custom_items_with_ids', 'series'),
            ('$get_custom_items_with_ids', 'tags'),
            ('$get_custom_items_with_ids', 'float'),
            ('$get_custom_items_with_ids', 'rating'),
            ('$get_custom_items_with_ids', 'authors'),
            ('$get_custom_items_with_ids', None, 7),
        ))
        for label in ('tags', 'series', 'authors', 'comments', 'rating', 'date',
                      'yesno', 'isbn', 'enum', 'formats', 'float', 'comp_tags'):
            for func in ('get_custom', 'get_custom_extra', 'get_custom_and_extra'):
                run_funcs(self, db, ndb, [(func, idx, label) for idx in range(3)])

        # Test renaming/deleting (ids looked up via the new API's id maps)
        t = {v: k for k, v in iteritems(ndb.new_api.get_id_map('#tags'))}['My Tag One']
        t2 = {v: k for k, v in iteritems(ndb.new_api.get_id_map('#tags'))}['My Tag Two']
        a = {v: k for k, v in iteritems(ndb.new_api.get_id_map('#authors'))}['My Author Two']
        a2 = {v: k for k, v in iteritems(ndb.new_api.get_id_map('#authors'))}['Custom One']
        s = {v: k for k, v in iteritems(ndb.new_api.get_id_map('#series'))}['My Series One']
        run_funcs(self, db, ndb, (
            ('delete_custom_item_using_id', t, 'tags'),
            ('delete_custom_item_using_id', a, 'authors'),
            ('rename_custom_item', t2, 't2', 'tags'),
            ('rename_custom_item', a2, 'custom one', 'authors'),
            ('rename_custom_item', s, 'My Series Two', 'series'),
            ('delete_item_from_multiple', 'custom two', 'authors'),
            (db.clean,), (db.refresh,),
            ('all_custom', 'series'), ('all_custom', 'tags'), ('all_custom', 'authors'),
        ))
        for label in ('tags', 'authors', 'series'):
            run_funcs(self, db, ndb, [('get_custom_and_extra', idx, label) for idx in range(3)])
        db.close()

        # Test setting
        ndb = self.init_legacy(self.cloned_library)
        db = self.init_old(self.cloned_library)
        run_funcs(self, db, ndb, (
            ('-set_custom', 1, 't1 & t2', 'authors'),
            ('-set_custom', 1, 't3 & t4', 'authors', None, True),
            ('-set_custom', 3, 'test one & test Two', 'authors'),
            ('-set_custom', 1, 'ijfkghkjdf', 'enum'),
            ('-set_custom', 3, 'One', 'enum'),
            ('-set_custom', 3, 'xxx', 'formats'),
            ('-set_custom', 1, 'my tag two', 'tags', None, False, False, None, True, True),
            (db.clean,), (db.refresh,),
            ('all_custom', 'series'), ('all_custom', 'tags'), ('all_custom', 'authors'),
        ))
        for label in ('tags', 'series', 'authors', 'comments', 'rating', 'date',
                      'yesno', 'isbn', 'enum', 'formats', 'float', 'comp_tags'):
            for func in ('get_custom', 'get_custom_extra', 'get_custom_and_extra'):
                run_funcs(self, db, ndb, [(func, idx, label) for idx in range(3)])
        db.close()

        # Test setting bulk
        ndb = self.init_legacy(self.cloned_library)
        db = self.init_old(self.cloned_library)
        run_funcs(self, db, ndb, (
            ('set_custom_bulk', (1, 2, 3), 't1 & t2', 'authors'),
            ('set_custom_bulk', (1, 2, 3), 'a series', 'series', None, False, False, (9, 10, 11)),
            ('set_custom_bulk', (1, 2, 3), 't1', 'tags', None, True),
            (db.clean,), (db.refresh,),
            ('all_custom', 'series'), ('all_custom', 'tags'), ('all_custom', 'authors'),
        ))
        for label in ('tags', 'series', 'authors', 'comments', 'rating', 'date',
                      'yesno', 'isbn', 'enum', 'formats', 'float', 'comp_tags'):
            for func in ('get_custom', 'get_custom_extra', 'get_custom_and_extra'):
                run_funcs(self, db, ndb, [(func, idx, label) for idx in range(3)])
        db.close()

        # Test bulk multiple
        ndb = self.init_legacy(self.cloned_library)
        db = self.init_old(self.cloned_library)
        run_funcs(self, db, ndb, (
            ('set_custom_bulk_multiple', (1, 2, 3), ['t1'], ['My Tag One'], 'tags'),
            (db.clean,), (db.refresh,),
            ('all_custom', 'tags'),
            ('get_custom', 0, 'tags'), ('get_custom', 1, 'tags'), ('get_custom', 2, 'tags'),
        ))
        db.close()

        # Creating/editing/deleting custom column metadata must behave the
        # same through the legacy wrapper and the old implementation
        o = self.cloned_library
        n = self.cloned_library
        ndb, db = self.init_legacy(n), self.init_old(o)
        ndb.create_custom_column('created', 'Created', 'text', True, True, {'moose': 'cat'})
        db.create_custom_column('created', 'Created', 'text', True, True, {'moose': 'cat'})
        db.close()
        ndb, db = self.init_legacy(n), self.init_old(o)
        self.assertEqual(db.custom_column_label_map['created'],
                         ndb.backend.custom_field_metadata('created'))
        num = db.custom_column_label_map['created']['num']
        ndb.set_custom_column_metadata(num, is_editable=False, name='Crikey', display={})
        db.set_custom_column_metadata(num, is_editable=False, name='Crikey', display={})
        db.close()
        ndb, db = self.init_legacy(n), self.init_old(o)
        self.assertEqual(db.custom_column_label_map['created'],
                         ndb.backend.custom_field_metadata('created'))
        db.close()
        ndb = self.init_legacy(n)
        ndb.delete_custom_column('created')
        ndb = self.init_legacy(n)
        self.assertRaises(KeyError, ndb.custom_field_name, num=num)

        # Test setting custom series
        ndb = self.init_legacy(self.cloned_library)
        ndb.set_custom(1, 'TS [9]', label='series')
        self.assertEqual(ndb.new_api.field_for('#series', 1), 'TS')
        self.assertEqual(ndb.new_api.field_for('#series_index', 1), 9)
    def __call__(self, query, field_iter, location, datatype, candidates, is_many=False):
        """Evaluate a numeric search *query* against the values yielded by
        *field_iter* (an iterable of (value, book_ids) pairs) and return the
        set of matching book ids.

        The query may be 'true'/'false' (presence tests), or an optional
        relational-operator prefix (from self.operators) followed by a
        number with an optional k/m/g magnitude suffix. *candidates* is the
        full candidate book id set, used only for the is_many 'false' case.
        """
        matches = set()
        if not query:
            return matches

        q = ''
        # cast converts raw stored values to numbers; adjust post-processes
        # them (used for ratings). Both default to identity.
        cast = adjust = lambda x: x
        dt = datatype

        if is_many and query in {'true', 'false'}:
            # For multi-valued fields, presence queries collect all books
            # that have any (for rating: positive) value.
            valcheck = lambda x: True
            if datatype == 'rating':
                valcheck = lambda x: x is not None and x > 0
            found = set()
            for val, book_ids in field_iter():
                if valcheck(val):
                    found |= book_ids
            return found if query == 'true' else candidates - found

        if query == 'false':
            if location == 'cover':
                # Cover is stored as a boolean-ish value, not None
                relop = lambda x, y: not bool(x)
            else:
                relop = lambda x, y: x is None
        elif query == 'true':
            if location == 'cover':
                relop = lambda x, y: bool(x)
            else:
                relop = lambda x, y: x is not None
        else:
            # Strip a leading operator token; default to equality
            for k, relop in iteritems(self.operators):
                if query.startswith(k):
                    query = query[len(k):]
                    break
            else:
                relop = self.operators['=']

            if dt == 'rating':
                # Ratings are stored doubled (half-star units); compare in stars
                cast = lambda x: 0 if x is None else int(x)
                adjust = lambda x: x // 2
            else:
                # Datatype is empty if the source is a template. Assume float
                cast = float if dt in ('float', 'composite', 'half-rating', '') else int

            # Optional k/m/g suffix scales the query value (sizes)
            mult = 1.0
            if len(query) > 1:
                mult = query[-1].lower()
                mult = {'k': 1024., 'm': 1024.**2, 'g': 1024.**3}.get(mult, 1.0)
                if mult != 1.0:
                    query = query[:-1]
            else:
                mult = 1.0
            try:
                q = cast(query) * mult
            except Exception:
                raise ParseException(_('Non-numeric value in query: {0}').format(query))
            if dt == 'half-rating':
                # Convert the star query back into doubled storage units
                q = int(round(q * 2))
                cast = int

        qfalse = query == 'false'
        for val, book_ids in field_iter():
            if val is None:
                # None values match only the 'false' query
                if qfalse:
                    matches |= book_ids
                continue
            try:
                v = cast(val)
            except Exception:
                v = None
            # NOTE: adjust is applied only to truthy values, so a zero
            # value bypasses it
            if v:
                v = adjust(v)
            if relop(v, q):
                matches |= book_ids
        return matches
    def test_legacy_direct(self):  # {{{
        'Test read-only methods that are directly equivalent in the old and new interface'
        from calibre.ebooks.metadata.book.base import Metadata
        from datetime import timedelta
        ndb = self.init_legacy(self.cloned_library)
        db = self.init_old()
        newstag = ndb.new_api.get_item_id('tags', 'news')

        self.assertEqual(dict(db.prefs), dict(ndb.prefs))

        # Map of method name -> list of argument tuples. Each method is
        # called on both the old (db) and new (ndb) implementation with each
        # argument tuple and the results compared. A '!' prefix normalizes
        # results via dict(), '@' via frozenset() before comparing.
        for meth, args in iteritems({
            'find_identical_books': [(Metadata('title one', ['author one']),),
                                     (Metadata('unknown'),), (Metadata('xxxx'),)],
            'get_books_for_category': [('tags', newstag), ('#formats', 'FMT1')],
            'get_next_series_num_for': [('A Series One',)],
            'get_id_from_uuid': [('ddddd',), (db.uuid(1, True),)],
            'cover': [(0,), (1,), (2,)],
            'get_author_id': [('author one',), ('unknown',), ('xxxxx',)],
            'series_id': [(0,), (1,), (2,)],
            'publisher_id': [(0,), (1,), (2,)],
            '@tags_older_than': [
                ('News', None), ('Tag One', None), ('xxxx', None),
                ('Tag One', None, 'News'), ('News', None, 'xxxx'),
                ('News', None, None, ['xxxxxxx']),
                ('News', None, 'Tag One', ['Author Two', 'Author One']),
                ('News', timedelta(0), None, None), ('News', timedelta(100000)),
            ],
            'format': [(1, 'FMT1', True), (2, 'FMT1', True), (0, 'xxxxxx')],
            'has_format': [(1, 'FMT1', True), (2, 'FMT1', True), (0, 'xxxxxx')],
            'sizeof_format': [(1, 'FMT1', True), (2, 'FMT1', True), (0, 'xxxxxx')],
            '@format_files': [(0,), (1,), (2,)],
            'formats': [(0,), (1,), (2,)],
            'max_size': [(0,), (1,), (2,)],
            'format_hash': [(1, 'FMT1'), (1, 'FMT2'), (2, 'FMT1')],
            'author_sort_from_authors': [(['Author One', 'Author Two', 'Unknown'],)],
            'has_book': [(Metadata('title one'),), (Metadata('xxxx1111'),)],
            'has_id': [(1,), (2,), (3,), (9999,)],
            'id': [(1,), (2,), (0,),],
            'index': [(1,), (2,), (3,),],
            'row': [(1,), (2,), (3,),],
            'is_empty': [()],
            'count': [()],
            'all_author_names': [()],
            'all_tag_names': [()],
            'all_series_names': [()],
            'all_publisher_names': [()],
            '!all_authors': [()],
            '!all_tags2': [()],
            '@all_tags': [()],
            '@get_all_identifier_types': [()],
            '!all_publishers': [()],
            '!all_titles': [()],
            '!all_series': [()],
            'standard_field_keys': [()],
            'all_field_keys': [()],
            'searchable_fields': [()],
            'search_term_to_field_key': [('author',), ('tag',)],
            'metadata_for_field': [('title',), ('tags',)],
            'sortable_field_keys': [()],
            'custom_field_keys': [(True,), (False,)],
            '!get_usage_count_by_id': [('authors',), ('tags',), ('series',),
                                       ('publisher',), ('#tags',), ('languages',)],
            'get_field': [(1, 'title'), (2, 'tags'), (0, 'rating'),
                          (1, 'authors'), (2, 'series'), (1, '#tags')],
            'all_formats': [()],
            'get_authors_with_ids': [()],
            '!get_tags_with_ids': [()],
            '!get_series_with_ids': [()],
            '!get_publishers_with_ids': [()],
            '!get_ratings_with_ids': [()],
            '!get_languages_with_ids': [()],
            'tag_name': [(3,)],
            'author_name': [(3,)],
            'series_name': [(3,)],
            'authors_sort_strings': [(0,), (1,), (2,)],
            'author_sort_from_book': [(0,), (1,), (2,)],
            'authors_with_sort_strings': [(0,), (1,), (2,)],
            'book_on_device_string': [(1,), (2,), (3,)],
            'books_in_series_of': [(0,), (1,), (2,)],
            'books_with_same_title': [(Metadata(db.title(0)),),
                                      (Metadata(db.title(1)),), (Metadata('1234'),)],
        }):
            fmt = lambda x: x
            if meth[0] in {'!', '@'}:
                fmt = {'!': dict, '@': frozenset}[meth[0]]
                meth = meth[1:]
            elif meth == 'get_authors_with_ids':
                # normalize (id, ...) rows into an id -> tuple mapping
                fmt = lambda val: {x[0]: tuple(x[1:]) for x in val}
            for a in args:
                self.assertEqual(fmt(getattr(db, meth)(*a)), fmt(getattr(ndb, meth)(*a)),
                                 'The method: %s() returned different results for argument %s' % (meth, a))

        # get_top_level_move_items is broken in the old db on case-insensitive
        # file systems, so remove the offending entry before comparing
        def f(x, y):
            x.discard('metadata_db_prefs_backup.json')
            return x, y
        self.assertEqual(f(*db.get_top_level_move_items()),
                         f(*ndb.get_top_level_move_items()))
        d1, d2 = BytesIO(), BytesIO()
        db.copy_cover_to(1, d1, True)
        ndb.copy_cover_to(1, d2, True)
        self.assertTrue(d1.getvalue() == d2.getvalue())
        d1, d2 = BytesIO(), BytesIO()
        db.copy_format_to(1, 'FMT1', d1, True)
        ndb.copy_format_to(1, 'FMT1', d2, True)
        self.assertTrue(d1.getvalue() == d2.getvalue())
        # get_data_as_dict: normalize bytes keys and list values before comparing
        old = db.get_data_as_dict(prefix='test-prefix')
        new = ndb.get_data_as_dict(prefix='test-prefix')
        for o, n in zip(old, new):
            o = {str(k) if isinstance(k, bytes) else k: set(v) if isinstance(v, list) else v
                 for k, v in iteritems(o)}
            n = {k: set(v) if isinstance(v, list) else v for k, v in iteritems(n)}
            self.assertEqual(o, n)

        ndb.search('title:Unknown')
        db.search('title:Unknown')
        self.assertEqual(db.row(3), ndb.row(3))
        self.assertRaises(ValueError, ndb.row, 2)
        self.assertRaises(ValueError, db.row, 2)
        db.close()
def current_val(self): ans = unicode_type(self.value_box.text()).strip() if self.current_col == 'languages': rmap = {lower(v):k for k, v in iteritems(lang_map())} ans = rmap.get(lower(ans), ans) return ans
    def job_done(self, ok, result):
        """Completion callback for a request-handling job.

        If the job failed (*ok* is False), *result* is an (etype, exc, tb)
        triple: an HTTPSimpleResponse is turned into a simple error response
        with any Location/WWW-Authenticate headers it carries, everything
        else is re-raised. On success, *result* is (data, output): the
        response headers are finalized, serialized into an HTTP/1.1 status
        line + header block, and handed to response_ready() together with
        the body producer.
        """
        if not ok:
            etype, e, tb = result
            if isinstance(e, HTTPSimpleResponse):
                eh = {}
                if e.location:
                    eh['Location'] = e.location
                if e.authenticate:
                    eh['WWW-Authenticate'] = e.authenticate
                if e.log:
                    self.log.warn(e.log)
                return self.simple_response(e.http_code, msg=error_message(e) or '',
                        close_after_response=e.close_connection, extra_headers=eh)
            reraise(etype, e, tb)

        data, output = result
        output = self.finalize_output(output, data, self.method is HTTP1)
        if output is None:
            # finalize_output has already dealt with the response
            return
        outheaders = data.outheaders

        outheaders.set('Date', http_date(), replace_all=True)
        outheaders.set('Server', 'calibre %s' % __version__, replace_all=True)
        keep_alive = not self.close_after_response and self.opts.timeout > 0
        if keep_alive:
            outheaders.set('Keep-Alive', 'timeout=%d' % int(self.opts.timeout))
        if 'Connection' not in outheaders:
            # HTTP/1.1 defaults to persistent connections, so only 'close'
            # needs to be explicit; pre-1.1 requires an explicit Keep-Alive
            if self.response_protocol is HTTP11:
                if self.close_after_response:
                    outheaders.set('Connection', 'close')
            else:
                if not self.close_after_response:
                    outheaders.set('Connection', 'Keep-Alive')

        ct = outheaders.get('Content-Type', '')
        if ct.startswith('text/') and 'charset=' not in ct:
            outheaders.set('Content-Type', ct + '; charset=UTF-8', replace_all=True)

        # Serialize status line, headers and cookies into CRLF-separated
        # ASCII lines; the trailing '' yields the blank line ending the block
        buf = [HTTP11 + (' %d ' % data.status_code) + http_client.responses[data.status_code]]
        for header, value in sorted(iteritems(outheaders), key=itemgetter(0)):
            buf.append('%s: %s' % (header, value))
        for morsel in itervalues(data.outcookie):
            morsel['version'] = '1'
            x = morsel.output()
            if isinstance(x, bytes):
                x = x.decode('ascii')
            buf.append(x)
        buf.append('')
        response_data = ReadOnlyFileBuffer(b''.join((x + '\r\n').encode('ascii') for x in buf))
        if self.access_log is not None:
            sz = outheaders.get('Content-Length')
            if sz is not None:
                # total response size = body length + serialized header size
                sz = int(sz) + response_data.sz
            self.log_access(status_code=data.status_code, response_size=sz,
                    username=data.username)
        self.response_ready(response_data, output=output)
def get_categories(dbcache, sort='name', book_ids=None, first_letter_sort=False):
    """Return a dict mapping category names to sorted lists of Tag objects
    for the given database cache, optionally restricted to *book_ids*.

    Includes the standard field categories, user categories (prefixed with
    '@'), grouped-search-term pseudo categories, and a 'search' category
    built from the saved searches. *sort* must be one of CATEGORY_SORTS.
    """
    if sort not in CATEGORY_SORTS:
        raise ValueError('sort ' + sort + ' not a valid value')

    fm = dbcache.field_metadata
    book_rating_map = dbcache.fields['rating'].book_value_map
    lang_map = dbcache.fields['languages'].book_value_map

    categories = {}
    book_ids = frozenset(book_ids) if book_ids else book_ids
    pm_cache = {}

    def get_metadata(book_id):
        # memoized proxy-metadata lookup, used for composite categories
        ans = pm_cache.get(book_id)
        if ans is None:
            ans = pm_cache[book_id] = dbcache._get_proxy_metadata(book_id)
        return ans

    bids = None
    first_letter_sort = bool(first_letter_sort)

    # --- build the standard field categories ---
    for category, is_multiple, is_composite in find_categories(fm):
        tag_class = create_tag_class(category, fm)
        sort_on, reverse = sort, False
        if is_composite:
            if bids is None:
                bids = dbcache._all_book_ids() if book_ids is None else book_ids
            cats = dbcache.fields[category].get_composite_categories(
                tag_class, book_rating_map, bids, is_multiple, get_metadata)
        elif category == 'news':
            cats = dbcache.fields['tags'].get_news_category(tag_class, book_ids)
        else:
            cat = fm[category]
            brm = book_rating_map
            dt = cat['datatype']
            if dt == 'rating':
                if category != 'rating':
                    brm = dbcache.fields[category].book_value_map
                # rating categories sorted "by name" actually sort by
                # rating, highest first
                if sort_on == 'name':
                    sort_on, reverse = 'rating', True
            cats = dbcache.fields[category].get_categories(
                tag_class, brm, lang_map, book_ids)
            # "people" columns (is_names) sort like author names
            if (category != 'authors' and dt == 'text' and cat['is_multiple'] and
                    cat['display'].get('is_names', False)):
                for item in cats:
                    item.sort = author_to_author_sort(item.sort)
        cats.sort(key=category_sort_keys[first_letter_sort][sort_on], reverse=reverse)
        categories[category] = cats

    # Needed for legacy databases that have multiple ratings that
    # map to n stars
    for r in categories['rating']:
        for x in tuple(categories['rating']):
            if r.name == x.name and r.id != x.id:
                r.id_set |= x.id_set
                r.count = len(r.id_set)
                categories['rating'].remove(x)
                break

    # User categories
    user_categories = clean_user_categories(dbcache).copy()

    # First add any grouped search terms to the user categories
    muc = dbcache.pref('grouped_search_make_user_categories', [])
    gst = dbcache.pref('grouped_search_terms', {})
    for c in gst:
        if c not in muc:
            continue
        user_categories[c] = []
        for sc in gst[c]:
            for t in categories.get(sc, ()):
                user_categories[c].append([t.name, sc, 0])

    if user_categories:
        # We want to use same node in the user category as in the source
        # category. To do that, we need to find the original Tag node. There is
        # a time/space tradeoff here. By converting the tags into a map, we can
        # do the verification in the category loop much faster, at the cost of
        # temporarily duplicating the categories lists.
        taglist = {}
        for c, items in iteritems(categories):
            taglist[c] = dict(map(lambda t: (icu_lower(t.name), t), items))

        # Add the category values to the user categories
        for user_cat in sorted(user_categories, key=sort_key):
            items = []
            names_seen = {}
            user_cat_is_gst = user_cat in gst
            for name, label, ign in user_categories[user_cat]:
                n = icu_lower(name)
                if label in taglist and n in taglist[label]:
                    if user_cat_is_gst:
                        # for gst items, make copy and consolidate the tags by name.
                        if n in names_seen:
                            # We must combine this node into a previous one with
                            # the same name ignoring case. As part of the process,
                            # remember the source categories and correct the
                            # average rating
                            t = names_seen[n]
                            other_tag = taglist[label][n]
                            t.id_set |= other_tag.id_set
                            t.count = len(t.id_set)
                            t.original_categories.add(other_tag.category)

                            total_rating = 0
                            count = 0
                            for id_ in t.id_set:
                                rating = book_rating_map.get(id_, 0)
                                if rating:
                                    total_rating += rating / 2
                                    count += 1
                            if total_rating and count:
                                t.avg_rating = total_rating / count
                        else:
                            # Must deepcopy so we don't share the id_set between nodes
                            t = copy.deepcopy(taglist[label][n])
                            t.original_categories = {t.category}
                            names_seen[n] = t
                        items.append(t)
                    else:
                        items.append(taglist[label][n])
                # else: do nothing, to not include nodes w zero counts
            cat_name = '@' + user_cat  # add the '@' to avoid name collision
            items.sort(key=category_sort_keys[False][sort])
            categories[cat_name] = items

    # ### Finally, the saved searches category ####
    items = []
    queries = dbcache._search_api.saved_searches.queries
    for srch in sorted(queries, key=sort_key):
        items.append(Tag(srch, sort=srch, search_expression=queries[srch],
                         category='search', is_editable=False))
    if len(items):
        categories['search'] = items

    return categories
def editor_name(editor):
    # Reverse lookup into the module-level editors map: return the name
    # under which *editor* is registered, or None if it is not registered.
    for name, candidate in iteritems(editors):
        if candidate is editor:
            return name
def __init__(self, path_to_ebook, tdir, log=None, book_hash=None):
    '''
    Extract the ebook at path_to_ebook into tdir and build the render
    manifest (calibre-book-manifest.json) describing its spine, TOC,
    landmarks and per-file metadata for the viewer.

    :param path_to_ebook: path to the source ebook file
    :param tdir: directory the book is extracted into (becomes the container root)
    :param log: optional log object; default_log is used when None
    :param book_hash: opaque identifier stored verbatim in the manifest
    '''
    log = log or default_log
    book_fmt, opfpath, input_fmt = extract_book(path_to_ebook, tdir, log=log)
    ContainerBase.__init__(self, tdir, opfpath, log)
    # We do not add zero byte sized files as the IndexedDB API in the
    # browser has no good way to distinguish between zero byte files and
    # load failures.
    excluded_names = {
        name for name, mt in iteritems(self.mime_map) if
        name == self.opf_name or mt == guess_type('a.ncx') or
        name.startswith('META-INF/') or name == 'mimetype' or
        not self.has_name_and_is_not_empty(name)
    }
    raster_cover_name, titlepage_name = self.create_cover_page(
        input_fmt.lower())
    toc = get_toc(self).to_dict(count())
    spine = [name for name, is_linear in self.spine_names]
    spineq = frozenset(spine)
    # Only landmarks whose destination is actually in the spine are kept
    landmarks = [l for l in get_landmarks(self) if l['dest'] in spineq]
    self.book_render_data = data = {
        'version': RENDER_VERSION,
        'toc': toc,
        'spine': spine,
        'link_uid': uuid4(),
        'book_hash': book_hash,
        'is_comic': input_fmt.lower() in {'cbc', 'cbz', 'cbr', 'cb7'},
        'raster_cover_name': raster_cover_name,
        'title_page_name': titlepage_name,
        'has_maths': False,
        'total_length': 0,
        'spine_length': 0,
        'toc_anchor_map': toc_anchor_map(toc),
        'landmarks': landmarks,
        'link_to_map': {},
    }
    # Mark the spine as dirty since we have to ensure it is normalized
    for name in data['spine']:
        self.parsed(name), self.dirty(name)
    self.transform_css()
    self.virtualized_names = set()
    self.virtualize_resources()

    def manifest_data(name):
        # Build the per-file manifest entry. For HTML files this also
        # accumulates total_length/spine_length and may set the global
        # has_maths flag as a side effect on book_render_data.
        mt = (self.mime_map.get(name) or 'application/octet-stream').lower()
        ans = {
            'size': os.path.getsize(self.name_path_map[name]),
            'is_virtualized': name in self.virtualized_names,
            'mimetype': mt,
            'is_html': mt in OEB_DOCS,
        }
        if ans['is_html']:
            root = self.parsed(name)
            ans['length'] = l = get_length(root)
            self.book_render_data['total_length'] += l
            if name in data['spine']:
                self.book_render_data['spine_length'] += l
            ans['has_maths'] = hm = check_for_maths(root)
            if hm:
                self.book_render_data['has_maths'] = True
            ans['anchor_map'] = anchor_map(root)
        return ans

    data['files'] = {
        name: manifest_data(name)
        for name in set(self.name_path_map) - excluded_names
    }
    self.commit()
    # The excluded files are physically removed only after commit
    for name in excluded_names:
        os.remove(self.name_path_map[name])
    with lopen(os.path.join(self.root, 'calibre-book-manifest.json'), 'wb') as f:
        f.write(
            json.dumps(self.book_render_data, ensure_ascii=False).encode('utf-8'))
def create_from_query_string(qs):
    # Parse a raw URL query string into a MultiDict, normalizing every key
    # and value to text. dict.__setitem__ is invoked directly so the full
    # list of values is stored as-is, bypassing MultiDict's own setter.
    md = MultiDict()
    parsed = parse_qs(as_unicode(qs), keep_blank_values=True)
    for key, vals in iteritems(parsed):
        dict.__setitem__(md, as_unicode(key), [as_unicode(v) for v in vals])
    return md
def __repr__(self):
    # Abbreviated repr of the mapping; reprlib truncates long keys/values
    # so huge entries do not flood logs.
    pairs = ('%s: %s' % (reprlib.repr(k), reprlib.repr(v))
             for k, v in iteritems(self))
    return '{' + ', '.join(pairs) + '}'
def convert_p(self, p):
    '''
    Convert a single DOCX paragraph element *p* into its HTML equivalent,
    handling runs, bookmarks (anchors), hyperlinks, TOC field markers,
    heading styles, bidi direction and run borders. Returns the new
    HTML element (registered in self.object_map).
    '''
    dest = P()
    self.object_map[dest] = p
    style = self.styles.resolve_paragraph(p)
    self.layers[p] = []
    self.frame_map[p] = style.frame
    self.add_frame(dest, style.frame)
    # Anchor/hyperlink state is threaded through the run loop below:
    # a pending anchor is attached to the next run (or the paragraph).
    current_anchor = None
    current_hyperlink = None
    hl_xpath = self.namespace.XPath('ancestor::w:hyperlink[1]')

    def p_parent(x):
        # Ensure that nested <w:p> tags are handled. These can occur if a
        # textbox is present inside a paragraph.
        while True:
            x = x.getparent()
            try:
                if x.tag.endswith('}p'):
                    return x
            except AttributeError:
                break

    for x in self.namespace.descendants(p, 'w:r', 'w:bookmarkStart', 'w:hyperlink', 'w:instrText'):
        # Skip descendants that belong to a nested paragraph (textbox)
        if p_parent(x) is not p:
            continue
        if x.tag.endswith('}r'):
            span = self.convert_run(x)
            if current_anchor is not None:
                # Attach the pending anchor to the paragraph itself if this
                # is its first child, otherwise to the run's span
                (dest if len(dest) == 0 else span).set(
                    'id', current_anchor)
                current_anchor = None
            if current_hyperlink is not None:
                try:
                    hl = hl_xpath(x)[0]
                    self.link_map[hl].append(span)
                    self.link_source_map[hl] = self.current_rels
                    x.set('is-link', '1')
                except IndexError:
                    # Run is no longer inside the hyperlink element
                    current_hyperlink = None
            dest.append(span)
            self.layers[p].append(x)
        elif x.tag.endswith('}bookmarkStart'):
            anchor = self.namespace.get(x, 'w:name')
            if anchor and anchor not in self.anchor_map and anchor != '_GoBack':
                # _GoBack is a special bookmark inserted by Word 2010 for
                # the return to previous edit feature, we ignore it
                old_anchor = current_anchor
                self.anchor_map[anchor] = current_anchor = generate_anchor(
                    anchor, frozenset(itervalues(self.anchor_map)))
                if old_anchor is not None:
                    # The previous anchor was not applied to any element
                    for a, t in tuple(iteritems(self.anchor_map)):
                        if t == old_anchor:
                            self.anchor_map[a] = current_anchor
        elif x.tag.endswith('}hyperlink'):
            current_hyperlink = x
        elif x.tag.endswith('}instrText') and x.text and x.text.strip(
        ).startswith('TOC '):
            # A TOC field instruction: synthesize an anchor for the TOC
            old_anchor = current_anchor
            anchor = str(uuid.uuid4())
            self.anchor_map[anchor] = current_anchor = generate_anchor(
                'toc', frozenset(itervalues(self.anchor_map)))
            self.toc_anchor = current_anchor
            if old_anchor is not None:
                # The previous anchor was not applied to any element
                for a, t in tuple(iteritems(self.anchor_map)):
                    if t == old_anchor:
                        self.anchor_map[a] = current_anchor
    if current_anchor is not None:
        # This paragraph had no <w:r> descendants
        dest.set('id', current_anchor)
        current_anchor = None

    # Map "heading N" paragraph styles to <h1>..<h6>
    m = re.match(r'heading\s+(\d+)$', style.style_name or '', re.IGNORECASE)
    if m is not None:
        n = min(6, max(1, int(m.group(1))))
        dest.tag = 'h%d' % n

    if style.bidi is True:
        dest.set('dir', 'rtl')

    # Group consecutive runs that share identical border styling so they
    # can be wrapped in a single bordered <span>
    border_runs = []
    common_borders = []
    for span in dest:
        run = self.object_map[span]
        style = self.styles.resolve_run(run)
        if not border_runs or border_runs[-1][1].same_border(style):
            border_runs.append((span, style))
        elif border_runs:
            if len(border_runs) > 1:
                common_borders.append(border_runs)
            border_runs = []

    for border_run in common_borders:
        spans = []
        bs = {}
        for span, style in border_run:
            style.get_border_css(bs)
            style.clear_border_css()
            spans.append(span)
        if bs:
            cls = self.styles.register(bs, 'text_border')
            wrapper = self.wrap_elems(spans, SPAN())
            wrapper.set('class', cls)

    if not dest.text and len(dest) == 0 and not style.has_visible_border():
        # Empty paragraph add a non-breaking space so that it is rendered
        # by WebKit
        dest.text = NBSP

    # If the last element in a block is a <br> the <br> is not rendered in
    # HTML, unless it is followed by a trailing space. Word, on the other
    # hand inserts a blank line for trailing <br>s.
    if len(dest) > 0 and not dest[-1].tail:
        if dest[-1].tag == 'br':
            dest[-1].tail = NBSP
        elif len(dest[-1]) > 0 and dest[-1][
                -1].tag == 'br' and not dest[-1][-1].tail:
            dest[-1][-1].tail = NBSP
    return dest
def update_from_listdict(self, ld):
    # Merge a mapping of key -> list of values into this container,
    # assigning each value individually so that this class's own
    # __setitem__ semantics apply to every value.
    for key, values in iteritems(ld):
        for value in values:
            self[key] = value
def dynamic_fields(self):
    # Names of all entries whose value is still unset (None).
    return tuple(key for key, value in iteritems(self) if value is None)
def plugin_for_index(self, index):
    # *index* may be a model index object (has a row() method) or a bare
    # row number; normalize to the row number first.
    row = index.row() if hasattr(index, 'row') else index
    for plugin, rows in iteritems(self.plugin_map):
        if row in rows:
            return plugin
def __init__(self, kw):
    # Simple attribute bag: every key/value pair in *kw* becomes an
    # attribute on this instance.
    for attr, value in iteritems(kw):
        setattr(self, attr, value)