def filesystem_cache(self):
    # Lazily build and cache a FilesystemCache describing every object on
    # every storage area (main memory + up to two cards) of the MTP device.
    if self._filesystem_cache is None:
        debug('Loading filesystem metadata...')
        st = time.time()
        from calibre.devices.mtp.filesystem_cache import FilesystemCache
        ts = self.total_space()
        all_storage = []
        items = []
        # Pair each storage id with its capacity; a slot whose id is None is
        # not present on this device.
        for storage_id, capacity in zip([self._main_id, self._carda_id,
                                         self._cardb_id], ts):
            if storage_id is None:
                continue
            # Look up the human readable name reported by the device
            name = _('Unknown')
            for s in self.dev.data['storage']:
                if s['id'] == storage_id:
                    name = s['name']
                    break
            storage = {'id':storage_id, 'size':capacity, 'name':name,
                       'is_folder':True, 'can_delete':False, 'is_system':True}
            # NOTE(review): presumably consumed by _filesystem_callback to
            # report which storage is being scanned — confirm
            self._currently_getting_sid = unicode_type(storage_id)
            id_map = self.dev.get_filesystem(storage_id, partial(
                self._filesystem_callback, {}))
            # Tag every filesystem object with the storage it came from
            for x in itervalues(id_map):
                x['storage_id'] = storage_id
            all_storage.append(storage)
            items.append(itervalues(id_map))
        self._filesystem_cache = FilesystemCache(all_storage, chain(*items))
        debug('Filesystem metadata loaded in %g seconds (%d objects)'%(
            time.time()-st, len(self._filesystem_cache)))
    return self._filesystem_cache
def collect_font_stats(self, container, do_embed=False):
    # Walk every spine item of the book, collecting @font-face rules and
    # per-file font usage/spec data into the various instance maps. Finally
    # normalize the collected character codes into single-character strings.
    self.font_stats = {}       # font src -> set of characters used
    self.font_usage_map = {}   # spine name -> usage info per font
    self.font_spec_map = {}    # spine name -> set of font families specified
    self.font_rule_map = {}    # spine name -> list of @font-face rules
    self.all_font_rules = {}   # font src -> @font-face rule
    processed_sheets = {}
    for name, is_linear in container.spine_names:
        self.font_rule_map[name] = font_face_rules = []
        # sheet_callback fills font_face_rules as stylesheets are resolved;
        # processed_sheets prevents re-parsing shared stylesheets.
        resolve_property, resolve_pseudo_property, select = resolve_styles(container, name, sheet_callback=partial(
            self.collect_font_face_rules, container, processed_sheets, name))
        for rule in font_face_rules:
            self.all_font_rules[rule['src']] = rule
            if rule['src'] not in self.font_stats:
                self.font_stats[rule['src']] = set()
        self.font_usage_map[name] = {}
        self.font_spec_map[name] = set()
        self.get_font_usage(container, name, resolve_property,
                            resolve_pseudo_property, font_face_rules, do_embed)
    # Convert collected codepoints to 1-character strings
    self.font_stats = {k:{safe_chr(x) for x in v} for k, v in iteritems(self.font_stats)}
    for fum in itervalues(self.font_usage_map):
        for v in itervalues(fum):
            v['text'] = {safe_chr(x) for x in v['text']}
def do_coffee_compile(self, opts, timestamp=False, ignore_errors=False):
    '''
    Compile all .coffee files under COFFEE_DIRS into the single
    compiled_coffeescript.zip resource. Sources whose SHA1 matches the hash
    recorded in the zip's comment are reused instead of recompiled.

    :param timestamp: prefix each progress line with the current time
    :param ignore_errors: on compile failure, embed a failure marker instead
        of aborting with SystemExit(1)
    '''
    from calibre.utils.serve_coffee import compile_coffeescript
    src_files = {}  # arcname -> (source path, sha1 of source)
    for src in self.COFFEE_DIRS:
        for f in glob.glob(self.j(self.SRC, __appname__, src, '*.coffee')):
            bn = os.path.basename(f).rpartition('.')[0]
            arcname = src.replace('/', '.') + '.' + bn + '.js'
            try:
                with open(f, 'rb') as fs:
                    src_files[arcname] = (f, hashlib.sha1(fs.read()).hexdigest())
            except EnvironmentError:
                # The file may be mid-replacement by an editor; retry once
                time.sleep(0.1)
                with open(f, 'rb') as fs:
                    src_files[arcname] = (f, hashlib.sha1(fs.read()).hexdigest())

    existing = {}
    dest = self.j(self.RESOURCES, 'compiled_coffeescript.zip')
    if os.path.exists(dest):
        with zipfile.ZipFile(dest, 'r') as zf:
            # Source hashes from the last run are stored in the zip comment
            existing_hashes = {}
            raw = zf.comment
            if raw:
                existing_hashes = json.loads(raw)
            for info in zf.infolist():
                if info.filename in existing_hashes and src_files.get(
                        info.filename, (None, None))[1] == existing_hashes[info.filename]:
                    # Source unchanged: reuse the previously compiled JS
                    existing[info.filename] = (zf.read(info), info, existing_hashes[info.filename])

    todo = set(src_files) - set(existing)
    updated = {}
    for arcname in todo:
        name = arcname.rpartition('.')[0]
        print('\t%sCompiling %s' % (time.strftime('[%H:%M:%S] ') if timestamp else '', name))
        src, sig = src_files[arcname]
        # FIX: use a context manager so the source file handle is closed;
        # the old code leaked the handle returned by open()
        with open(src, 'rb') as sf:
            coffee_source = sf.read()
        js, errors = compile_coffeescript(coffee_source, filename=src)
        if errors:
            print('\n\tCompilation of %s failed' % name)
            for line in errors:
                print(line, file=sys.stderr)
            if ignore_errors:
                js = u'# Compilation from coffeescript failed'
            else:
                raise SystemExit(1)
        else:
            if opts.show_js:
                self.show_js(js)
                print('#'*80)
                print('#'*80)
        zi = zipfile.ZipInfo()
        zi.filename = arcname
        zi.date_time = time.localtime()[:6]
        updated[arcname] = (js.encode('utf-8'), zi, sig)

    if updated:
        hashes = {}
        with zipfile.ZipFile(dest, 'w', zipfile.ZIP_STORED) as zf:
            for raw, zi, sig in sorted(chain(itervalues(updated), itervalues(existing)),
                                       key=lambda x: x[1].filename):
                zf.writestr(zi, raw)
                hashes[zi.filename] = sig
            # FIX: ZipFile.comment must be bytes on python 3; json.dumps
            # returns text
            zf.comment = json.dumps(hashes).encode('utf-8')
def finalize(self, all_blocks):
    '''
    Build the final set of numbering definitions for all blocks that are
    list items: group lists by their outermost container, de-duplicate
    equal definitions and assign each a num_id.
    '''
    lists = {}  # top-most container tag -> NumberingDefinition
    for block in all_blocks:
        if block.list_tag is None:
            continue
        list_tag, tag_style = block.list_tag
        list_type = (tag_style['list-style-type'] or '').lower()
        if list_type not in LIST_STYLES:
            continue
        container_tags = find_list_containers(list_tag, tag_style)
        if not container_tags:
            continue
        # All lists sharing the same outermost container use one definition
        top_most = container_tags[-1]
        if top_most not in lists:
            lists[top_most] = NumberingDefinition(top_most, tag_style._stylizer, self.namespace)
        l = lists[top_most]
        ilvl = len(container_tags) - 1  # nesting depth -> numbering level
        l.level_map[ilvl].append((container_tags[0], list_tag, block, list_type, tag_style))

    # FIX: plain loop instead of a list comprehension used only for its
    # side effects
    for nd in itervalues(lists):
        nd.finalize()

    # De-duplicate equal definitions, keeping the first seen as canonical;
    # every (possibly duplicate) definition still links its own blocks to
    # the canonical one.
    definitions = {}
    for defn in itervalues(lists):
        try:
            defn = definitions[defn]
        except KeyError:
            definitions[defn] = defn
            defn.num_id = len(definitions) - 1
        defn.link_blocks()
    self.definitions = sorted(itervalues(definitions), key=attrgetter('num_id'))
def finalize(self):
    # Decide which single gesture (if any) the accumulated touch points
    # represent and emit the matching signal. Order matters: Tap before
    # Swipe before Pinch; the *AndHold gestures only fire after a hold
    # has actually started.
    if Tap in self.possible_gestures:
        tp = next(itervalues(self.touch_points))
        if tp.total_movement <= TAP_THRESHOLD:
            self.tapped.emit(tp)
            return
    if Swipe in self.possible_gestures:
        tp = next(itervalues(self.touch_points))
        st = tp.swipe_type
        if st is not None:
            self.swiped.emit(st)
            return
    if Pinch in self.possible_gestures:
        points = tuple(itervalues(self.touch_points))
        # A pinch needs exactly two touch points
        if len(points) == 2:
            pinch_dir = get_pinch(*points)
            if pinch_dir is not None:
                self.pinched.emit(pinch_dir)
    if not self.hold_started:
        return
    if TapAndHold in self.possible_gestures:
        tp = next(itervalues(self.touch_points))
        self.tap_hold_finished.emit(tp)
        return
    if SwipeAndHold in self.possible_gestures:
        # hold_data is (start time, swipe type) for swipe-and-hold
        self.swipe_hold_finished.emit(self.hold_data[1])
        return
def generate_classes(self):
    # Register the CSS of every cached paragraph style as a 'block' class
    # and of every cached run style as a 'text' class; styles with empty
    # CSS need no class at all.
    for cache, register_as in ((self.para_cache, 'block'),
                               (self.run_cache, 'text')):
        for style in cache.values():
            css = style.css
            if css:
                self.register(css, register_as)
def embed_all_fonts(container, stats, report):
    # Embed every font that is used but not already embedded in the book,
    # write the needed @font-face rules into a generated fonts.css and link
    # that stylesheet into every modified spine file. Returns True if any
    # font was embedded.
    all_font_rules = tuple(itervalues(stats.all_font_rules))
    warned = set()
    rules, nrules = [], {}  # rules: new @font-face rules; nrules: font key -> rule
    modified = set()        # spine names that need the fonts.css link
    for path in container.spine_items:
        name = container.abspath_to_name(path)
        fu = stats.font_usage_map.get(name, None)
        fs = stats.font_spec_map.get(name, None)
        fr = stats.font_rule_map.get(name, None)
        if None in (fs, fu, fr):
            continue
        fs = {icu_lower(x) for x in fs}
        for font in itervalues(fu):
            # Only consider fonts actually specified in this file
            if icu_lower(font['font-family']) not in fs:
                continue
            rule = matching_rule(font, fr)
            if rule is None:
                # This font was not already embedded in this HTML file, before
                # processing started
                key = font_key(font)
                rule = nrules.get(key)
                if rule is None:
                    rule = embed_font(container, font, all_font_rules, report, warned)
                    if rule is not None:
                        rules.append(rule)
                        nrules[key] = rule
                        modified.add(name)
                        stats.font_stats[rule['name']] = font['text']
                else:
                    # This font was previously embedded by this code, update its stats
                    stats.font_stats[rule['name']] |= font['text']
                    modified.add(name)
    if not rules:
        report(_('No embeddable fonts found'))
        return False
    # Write out CSS: keep only src and properties that differ from the
    # defaults (a weight of '400' is the default and is dropped)
    rules = [';\n\t'.join('%s: %s' % (
        k, '"%s"' % v if k == 'font-family' else v) for k, v in iteritems(rulel) if (k in props and props[k] != v and v != '400') or k == 'src')
        for rulel in rules]
    css = '\n\n'.join(['@font-face {\n\t%s\n}' % r for r in rules])
    item = container.generate_item('fonts.css', id_prefix='font_embed')
    name = container.href_to_name(item.get('href'), container.opf_name)
    with container.open(name, 'wb') as out:
        out.write(css.encode('utf-8'))
    # Add link to CSS in all files that need it
    for spine_name in modified:
        root = container.parsed(spine_name)
        head = root.xpath('//*[local-name()="head"][1]')[0]
        href = container.name_to_href(name, spine_name)
        etree.SubElement(head, XHTML('link'), rel='stylesheet', type='text/css', href=href).tail = '\n'
        container.dirty(spine_name)
    return True
def __call__(self, oeb, log, opts):
    # Subset all embedded fonts in the book to just the glyphs actually
    # used, removing fonts that are unused or have no used glyphs, and
    # report the total size reduction.
    self.oeb, self.log, self.opts = oeb, log, opts
    self.find_embedded_fonts()
    if not self.embedded_fonts:
        self.log.debug('No embedded fonts found')
        return
    self.find_style_rules()
    self.find_font_usage()

    # totals = [new size, old size] across all fonts
    totals = [0, 0]

    def remove(font):
        # Drop the font from the manifest and delete its @font-face rule
        totals[1] += len(font['item'].data)
        self.oeb.manifest.remove(font['item'])
        font['rule'].parentStyleSheet.deleteRule(font['rule'])

    # Merge duplicate references to the same font file, unioning their
    # used-character sets
    fonts = {}
    for font in self.embedded_fonts:
        item, chars = font['item'], font['chars']
        if item.href in fonts:
            fonts[item.href]['chars'] |= chars
        else:
            fonts[item.href] = font

    for font in itervalues(fonts):
        if not font['chars']:
            self.log('The font %s is unused. Removing it.'%font['src'])
            remove(font)
            continue
        try:
            raw, old_stats, new_stats = subset(font['item'].data, font['chars'])
        except NoGlyphs:
            self.log('The font %s has no used glyphs. Removing it.'%font['src'])
            remove(font)
            continue
        except UnsupportedFont as e:
            self.log.warn('The font %s is unsupported for subsetting. %s'%(
                font['src'], e))
            # Count the unchanged font in both totals so percentages stay honest
            sz = len(font['item'].data)
            totals[0] += sz
            totals[1] += sz
        else:
            font['item'].data = raw
            nlen = sum(itervalues(new_stats))
            olen = sum(itervalues(old_stats))
            # NOTE(review): nlen/olen relies on true division being in
            # effect (py3 or __future__ import) — confirm for py2 builds
            self.log('Decreased the font %s to %.1f%% of its original size'%
                     (font['src'], nlen/olen *100))
            totals[0] += nlen
            totals[1] += olen
        font['item'].unload_data_from_memory()

    if totals[0]:
        self.log('Reduced total font size to %.1f%% of original'%
                 (totals[0]/totals[1] * 100))
def create_manifest(html):
    # Build an HTML5 cache manifest for the browse UI. The manifest comment
    # embeds a hash of the page content plus every referenced icon, so the
    # manifest (and therefore the browser cache) changes whenever any of
    # them does.
    import hashlib
    from calibre.library.field_metadata import category_icon_map
    digest = hashlib.sha256(html)
    for icon_file in itervalues(category_icon_map):
        digest.update(I(icon_file, data=True))
    digest.update(I('lt.png', data=True))
    cached = {'icon/' + icon_file for icon_file in itervalues(category_icon_map)}
    cached.add('favicon.png')
    listing = '\n'.join(sorted(cached))
    return 'CACHE MANIFEST\n# {}\n{}\n\nNETWORK:\n*'.format(
        digest.hexdigest(), listing).encode('utf-8')
def _apply_prefs(self, prefs):
    # Populate all cover-generator preference widgets from the prefs dict:
    # fonts, templates, dimensions, then the color theme and style lists
    # (with check state for enabled/disabled and selection of the last used
    # or first checked entry).
    for x in ('title', 'subtitle', 'footer'):
        attr = '%s_font_family' % x
        getattr(self, attr).font_family = prefs[attr]
        attr = '%s_font_size' % x
        getattr(self, attr).setValue(prefs[attr])
    for x in ('title', 'subtitle', 'footer'):
        x += '_template'
        getattr(self, x).setText(prefs[x])
    for x in ('width', 'height'):
        x = 'cover_' + x
        getattr(self, x).setValue(prefs[x])

    # User themes plus builtins; builtins win on name clashes
    color_themes = prefs['color_themes'].copy()
    color_themes.update(default_color_themes)
    disabled = set(prefs['disabled_color_themes'])
    self.colors_list.clear()
    self.colors_map = {}
    for name in sorted(color_themes, key=sort_key):
        self.colors_map[name] = li = QListWidgetItem(name, self.colors_list)
        li.setFlags(li.flags() | Qt.ItemIsUserCheckable)
        li.setCheckState(Qt.Unchecked if name in disabled else Qt.Checked)
        li.setData(Qt.UserRole, color_themes[name])
    # Select the last used theme if still available and checked, else the
    # first checked theme, else just the first theme
    lu = prefs.get('last_used_colors')
    if not self.for_global_prefs and lu in self.colors_map and self.colors_map[lu].checkState() == Qt.Checked:
        self.colors_map[lu].setSelected(True)
    else:
        for name, li in iteritems(self.colors_map):
            if li.checkState() == Qt.Checked:
                li.setSelected(True)
                break
        else:
            next(itervalues(self.colors_map)).setSelected(True)

    # Same treatment for styles
    disabled = set(prefs['disabled_styles'])
    self.styles_list.clear()
    self.style_map.clear()
    for name in sorted(all_styles(), key=sort_key):
        self.style_map[name] = li = QListWidgetItem(name, self.styles_list)
        li.setFlags(li.flags() | Qt.ItemIsUserCheckable)
        li.setCheckState(Qt.Unchecked if name in disabled else Qt.Checked)
    lu = prefs.get('last_used_style')
    if not self.for_global_prefs and lu in self.style_map and self.style_map[lu].checkState() == Qt.Checked:
        self.style_map[lu].setSelected(True)
    else:
        for name, li in iteritems(self.style_map):
            if li.checkState() == Qt.Checked:
                li.setSelected(True)
                break
        else:
            next(itervalues(self.style_map)).setSelected(True)
def all():
    '''
    Smoke test: try subsetting every font installed on the system to the
    glyphs for a, b, c; print per-font results and summary statistics
    (failures, unsupported fonts, warnings, average size reduction).
    '''
    from calibre.utils.fonts.scanner import font_scanner
    failed = []
    unsupported = []
    warnings = {}
    total = 0
    averages = []  # percentage of original size, per successfully subset font
    for family in font_scanner.find_font_families():
        for font in font_scanner.fonts_for_family(family):
            raw = font_scanner.get_font_data(font)
            print('Subsetting', font['full_name'], end='\t')
            total += 1
            try:
                w = []
                sf, old_stats, new_stats = subset(raw, {'a', 'b', 'c'}, (), w)
                if w:
                    warnings[font['full_name'] + ' (%s)'%font['path']] = w
            except NoGlyphs:
                print('No glyphs!')
                continue
            except UnsupportedFont as e:
                unsupported.append((font['full_name'], font['path'], unicode_type(e)))
                print('Unsupported!')
                continue
            except Exception as e:
                print('Failed!')
                failed.append((font['full_name'], font['path'], unicode_type(e)))
            else:
                averages.append(sum(itervalues(new_stats))/sum(itervalues(old_stats)) * 100)
                print('Reduced to:', '%.1f'%averages[-1], '%')
    if unsupported:
        print('\n\nUnsupported:')
        for name, path, err in unsupported:
            print(name, path, err)
        print()
    if warnings:
        print('\n\nWarnings:')
        for name, w in iteritems(warnings):
            if w:
                print(name)
                print('', '\n\t'.join(w), sep='\t')
    if failed:
        print('\n\nFailures:')
        for name, path, err in failed:
            print(name, path, err)
        print()
    # FIX: guard against ZeroDivisionError when not a single font could be
    # subset successfully
    if averages:
        print('Average reduction to: %.1f%%'%(sum(averages)/len(averages)))
    print('Total:', total, 'Unsupported:', len(unsupported), 'Failed:',
          len(failed), 'Warnings:', len(warnings))
def send_multiple_by_mail(self, recipients, delete_from_library):
    # Email the selected books to each recipient: send formats that already
    # exist, queue auto-conversion (after confirmation) for books that can
    # be converted to a format the recipient accepts, and report books/
    # recipients that cannot be handled at all.
    ids = set(self.library_view.model().id(r) for r in self.library_view.selectionModel().selectedRows())
    if not ids:
        return
    db = self.current_db
    # book id -> set of upper-case formats available in the library
    db_fmt_map = {book_id:set((db.formats(book_id, index_is_id=True) or '').upper().split(',')) for book_id in ids}
    ofmts = {x.upper() for x in available_output_formats()}
    ifmts = {x.upper() for x in available_input_formats()}
    bad_recipients = {}  # to -> (book ids, True if recipient has no convertible format)
    auto_convert_map = defaultdict(list)  # output fmt -> [(to, subject, ids)]

    for to, fmts, subject in recipients:
        rfmts = set(fmts)
        # Books already available in a format this recipient accepts
        ok_ids = {book_id for book_id, bfmts in iteritems(db_fmt_map) if bfmts.intersection(rfmts)}
        convert_ids = ids - ok_ids
        self.send_by_mail(to, fmts, delete_from_library, subject=subject,
                          send_ids=ok_ids, do_auto_convert=False)
        if not rfmts.intersection(ofmts):
            # None of the recipient's formats can be produced by conversion
            bad_recipients[to] = (convert_ids, True)
            continue
        # First recipient format we can convert to
        outfmt = tuple(f for f in fmts if f in ofmts)[0]
        ok_ids = {book_id for book_id in convert_ids if db_fmt_map[book_id].intersection(ifmts)}
        bad_ids = convert_ids - ok_ids
        if bad_ids:
            bad_recipients[to] = (bad_ids, False)
        if ok_ids:
            auto_convert_map[outfmt].append((to, subject, ok_ids))

    if auto_convert_map:
        titles = {book_id for x in itervalues(auto_convert_map) for data in x for book_id in data[2]}
        titles = {db.title(book_id, index_is_id=True) for book_id in titles}
        if self.auto_convert_question(
                _('Auto convert the following books before sending via email?'), list(titles)):
            for ofmt, data in iteritems(auto_convert_map):
                ids = {bid for x in data for bid in x[2]}
                data = [(to, subject) for to, subject, x in data]
                self.iactions['Convert Books'].auto_convert_multiple_mail(ids, data, ofmt, delete_from_library)

    if bad_recipients:
        det_msg = []
        titles = {book_id for x in itervalues(bad_recipients) for book_id in x[0]}
        titles = {book_id:db.title(book_id, index_is_id=True) for book_id in titles}
        for to, (ids, nooutput) in iteritems(bad_recipients):
            msg = _('This recipient has no valid formats defined') if nooutput else \
                _('These books have no suitable input formats for conversion')
            det_msg.append('%s - %s' % (to, msg))
            det_msg.extend('\t' + titles[bid] for bid in ids)
            det_msg.append('\n')
        warning_dialog(self, _('Could not send'),
                       _('Could not send books to some recipients. Click "Show details" for more information'),
                       det_msg='\n'.join(det_msg), show=True)
def subset(self, character_map, extra_glyphs): from calibre.utils.fonts.sfnt.cff.writer import Subset # Map codes from the cmap table to glyph names, this will be used to # reconstruct character_map for the subset font charset_map = {code:self.cff.charset.safe_lookup(glyph_id) for code, glyph_id in iteritems(character_map)} charset = set(itervalues(charset_map)) charset.discard(None) if not charset and character_map: raise NoGlyphs('This font has no glyphs for the specified characters') charset |= { self.cff.charset.safe_lookup(glyph_id) for glyph_id in extra_glyphs} charset.discard(None) s = Subset(self.cff, charset) # Rebuild character_map with the glyph ids from the subset font character_map.clear() for code, charname in iteritems(charset_map): glyph_id = s.charname_map.get(charname, None) if glyph_id: character_map[code] = glyph_id # Check that raw is parseable CFF(s.raw) self.raw = s.raw
def load_hyphenator_dicts(hp_cache, lang, default_lang='en'):
    # Return (js, lang): the concatenated hyphenation pattern scripts from
    # the bundled patterns.zip and the normalized language name that was
    # actually selected (falling back to default_lang when no patterns
    # exist for the requested language).
    from calibre.utils.localization import lang_as_iso639_1
    import zipfile

    if not lang:
        lang = default_lang or 'en'

    def normalise(code):
        # Map an arbitrary language code onto the naming scheme used by the
        # pattern files
        code = lang_as_iso639_1(code.lower()) or 'en'
        code = {'en': 'en-us', 'nb': 'nb-no', 'el': 'el-monoton'}.get(code, code)
        return code.lower().replace('_', '-')

    if not hp_cache:
        # Populate the cache once with every bundled pattern file
        with zipfile.ZipFile(P('viewer/hyphenate/patterns.zip',
                               allow_user_override=False), 'r') as archive:
            for entry in archive.namelist():
                hp_cache[entry.partition('.')[0]] = archive.read(entry).decode('utf-8')

    if normalise(lang) not in hp_cache:
        lang = normalise(default_lang)
    lang = normalise(lang)

    js = '\n\n'.join(itervalues(hp_cache))
    return js, lang
def __init__(self, all_storage, entries):
    # Build the in-memory filesystem tree: one FileOrFolder per storage
    # root plus one per entry, then attach every item to its parent's
    # files/folders list (falling back to a storage root when the declared
    # parent is missing).
    self.entries = []
    self.id_map = {}
    # NOTE(review): set early because FileOrFolder construction below may
    # consult it; it is recomputed from the sorted entries afterwards
    self.all_storage_ids = tuple(x['id'] for x in all_storage)

    for storage in all_storage:
        storage['storage_id'] = storage['id']
        e = FileOrFolder(storage, self)
        self.entries.append(e)

    self.entries.sort(key=attrgetter('object_id'))
    all_storage_ids = [x.storage_id for x in self.entries]
    self.all_storage_ids = tuple(all_storage_ids)

    for entry in entries:
        # Registers itself in self.id_map as a side effect
        FileOrFolder(entry, self)

    for item in itervalues(self.id_map):
        try:
            p = item.parent
        except KeyError:
            # Parent does not exist, set the parent to be the storage
            # object
            sid = item.storage_id
            if sid not in all_storage_ids:
                sid = all_storage_ids[0]
            item.parent_id = sid
            p = item.parent

        if p is not None:
            t = p.folders if item.is_folder else p.files
            t.append(item)
def read_available_plugins(raise_error=False):
    '''
    Download and parse the plugin index, returning a sorted list of
    DisplayPlugin objects (or None if the index could not be fetched and
    raise_error is False).
    '''
    import json
    import bz2
    display_plugins = []
    try:
        raw = get_https_resource_securely(INDEX_URL)
        if not raw:
            return
        raw = json.loads(bz2.decompress(raw))
    # FIX: was a bare ``except:`` which also swallowed SystemExit and
    # KeyboardInterrupt
    except Exception:
        if raise_error:
            raise
        traceback.print_exc()
        return
    for plugin in itervalues(raw):
        try:
            display_plugin = DisplayPlugin(plugin)
            get_installed_plugin_status(display_plugin)
            display_plugins.append(display_plugin)
        # FIX: was a bare ``except:`` — individual bad entries are still
        # skipped (best effort), but Ctrl-C now propagates
        except Exception:
            if DEBUG:
                prints('======= Plugin Parse Error =======')
                traceback.print_exc()
                import pprint
                pprint.pprint(plugin)
    display_plugins = sorted(display_plugins, key=lambda k: k.name)
    return display_plugins
def check_for_holds(self):
    # Promote the current touch to a tap-and-hold or swipe-and-hold gesture
    # once the finger has been stationary for HOLD_THRESHOLD seconds, and
    # emit progress updates while an already-started hold continues.
    if not {SwipeAndHold, TapAndHold} & self.possible_gestures:
        return
    now = time.time()
    tp = next(itervalues(self.touch_points))
    if now - tp.time_of_last_move < HOLD_THRESHOLD:
        # Finger still moving (or not held long enough yet)
        return
    if self.hold_started:
        # Hold already in progress: emit update signals
        if TapAndHold in self.possible_gestures:
            self.tap_hold_updated.emit(tp)
        if SwipeAndHold in self.possible_gestures:
            self.swipe_hold_updated.emit(self.hold_data[1])
    else:
        self.possible_gestures &= {TapAndHold, SwipeAndHold}
        if tp.total_movement > TAP_THRESHOLD:
            # Moved too far for a tap: this is a swipe-and-hold if the
            # movement matches a recognizable swipe direction
            st = tp.swipe_type
            if st is None:
                self.possible_gestures.clear()
            else:
                self.hold_started = True
                self.possible_gestures = {SwipeAndHold}
                self.hold_data = (now, st)
                self.swipe_hold_started.emit(st)
        else:
            self.possible_gestures = {TapAndHold}
            self.hold_started = True
            self.hold_data = now
            self.tap_hold_started.emit(tp)
def compile_recipe(src):
    '''
    Compile the code in src and return a recipe object, if found.

    :param src: Python source code as bytestring or unicode object
    :return: Recipe class or None, if no such class was found in src
    '''
    if not isinstance(src, unicode_type):
        # Respect an explicit PEP 263 coding declaration in the first bytes
        match = re.search(br'coding[:=]\s*([-\w.]+)', src[:200])
        enc = match.group(1).decode('utf-8') if match else 'utf-8'
        src = src.decode(enc)
    # Python complains if there is a coding declaration in a unicode string
    src = re.sub(r'^#.*coding\s*[:=]\s*([-\w.]+)', '#', src.lstrip('\ufeff'), flags=re.MULTILINE)
    # Translate newlines to \n
    src = io.StringIO(src, newline=None).getvalue()

    # Names made available to recipe code for backwards compatibility with
    # recipes written for python 2
    namespace = {
        'BasicNewsRecipe':BasicNewsRecipe,
        'AutomaticNewsRecipe':AutomaticNewsRecipe,
        'time':time, 're':re,
        'BeautifulSoup':BeautifulSoup,
        'unicode': unicode_type,
        'unichr': codepoint_to_chr,
        'xrange': range,
    }
    # NOTE: recipe sources are trusted code supplied by the user
    exec(src, namespace)

    # Return the first recipe subclass defined by the source itself
    for x in itervalues(namespace):
        if (isinstance(x, type) and issubclass(x, BasicNewsRecipe) and x not in basic_recipes):
            return x

    return None
def set_marked_ids(self, id_dict):
    '''
    ids in id_dict are "marked". They can be searched for by using
    the search term ``marked:true``. Pass in an empty dictionary or
    set to clear marked ids.

    :param id_dict: Either a dictionary mapping ids to values or a set
        of ids. In the latter case, the value is set to 'true' for all ids.
        If a mapping is provided, then the search can be used to search
        for particular values: ``marked:value``
    '''
    previously_marked = set(self.marked_ids)
    if hasattr(id_dict, 'items'):
        # A mapping: normalize every value to text so searches compare strings
        self.marked_ids = {k: unicode_type(v) for k, v in iteritems(id_dict)}
    else:
        # A bare set/list of ids: mark everything as 'true'
        self.marked_ids = dict.fromkeys(id_dict, u'true')
    currently_marked = set(self.marked_ids)
    # This invalidates all searches in the cache even though the cache may
    # be shared by multiple views. This is not ideal, but...
    self.cache.clear_search_caches(previously_marked | currently_marked)
    if previously_marked != currently_marked:
        for funcref in itervalues(self.marked_listeners):
            listener = funcref()
            if listener is not None:
                listener(previously_marked, currently_marked)
def save_state(self):
    # Persist UI state: the search panel's visibility and state, then the
    # state of every open editor.
    tprefs.set('search-panel-visible', self.search_panel.isVisible())
    self.search_panel.save_state()
    for editor in itervalues(editors):
        editor.save_state()
    if self.current_editor is not None:
        # Ensure the current editor saves its state last
        self.current_editor.save_state()
def ensure_namespace_prefixes(node, nsmap):
    # Merge nsmap into the node's namespace map, dropping any existing
    # prefix that points at a URI nsmap already covers. The node is cloned
    # only when this actually changes its mapping; otherwise it is returned
    # unchanged.
    wanted_uris = frozenset(nsmap.values())
    merged = {prefix: uri for prefix, uri in node.nsmap.items() if uri not in wanted_uris}
    merged.update(nsmap)
    if merged != dict(node.nsmap):
        node = clone_element(node, nsmap=merged, in_context=False)
    return node
def update_result(self, plugin_name, width, height, data):
    # Insert or update a downloaded cover in the model. Multi-cover plugins
    # are encoded as 'name{...}' and get a new row appended to their group;
    # single-cover plugins update their existing row in place.
    if plugin_name.endswith('}'):
        # multi cover plugin
        plugin_name = plugin_name.partition('{')[0]
        plugin = [plugin for plugin in self.plugin_map if plugin.name == plugin_name]
        if not plugin:
            return
        plugin = plugin[0]
        last_row = max(self.plugin_map[plugin])
        pmap = self.load_pixmap(data)
        if pmap.isNull():
            return
        self.beginInsertRows(QModelIndex(), last_row, last_row)
        # Shift every row at or after the insertion point down by one
        for rows in itervalues(self.plugin_map):
            for i in range(len(rows)):
                if rows[i] >= last_row:
                    rows[i] += 1
        self.plugin_map[plugin].insert(-1, last_row)
        self.covers.insert(last_row, self.get_item(plugin_name, pmap, waiting=False))
        self.endInsertRows()
    else:
        # single cover plugin
        idx = None
        for plugin, rows in iteritems(self.plugin_map):
            if plugin.name == plugin_name:
                idx = rows[0]
                break
        if idx is None:
            return
        pmap = self.load_pixmap(data)
        if pmap.isNull():
            return
        self.covers[idx] = self.get_item(plugin_name, pmap, waiting=False)
        self.dataChanged.emit(self.index(idx), self.index(idx))
def test_non_bmp(self):
    # A query containing a non-BMP character (U+1F431) must still match
    # positionally; on narrow unicode builds the astral character occupies
    # two code units, shifting the final position by one.
    query = '_\U0001f431-'
    matcher = Matcher([query], scorer=CScorer)
    positions = next(itervalues(matcher(query)))
    last = 2 if sys.maxunicode >= 0x10ffff else 3
    self.assertEqual(positions, (0, 1, last))
def save_library_format_by_ids(self, book_ids, fmt, single_dir=True):
    # Save the given format of the given book id(s) to disk; a single
    # integer id is accepted as well as a collection of ids.
    if isinstance(book_ids, numbers.Integral):
        book_ids = [book_ids]
    view = self.gui.library_view
    model_rows = [view.model().index(row, 0)
                  for row in view.ids_to_rows(book_ids).values()]
    self.save_to_disk(True, single_dir=single_dir, single_format=fmt,
                      rows=model_rows, write_opf=False, save_cover=False)
def upgrade_version_10(self):
    'Add restricted Tag Browser views'
    def create_tag_browser_view(table_name, column_name, view_column_name):
        # Create two SQL views per category table: the plain count view and
        # a filtered variant restricted by the books_list_filter() function
        script = ('''
            DROP VIEW IF EXISTS tag_browser_{tn};
            CREATE VIEW tag_browser_{tn} AS SELECT
                id,
                {vcn},
                (SELECT COUNT(id) FROM books_{tn}_link WHERE {cn}={tn}.id) count
            FROM {tn};
            DROP VIEW IF EXISTS tag_browser_filtered_{tn};
            CREATE VIEW tag_browser_filtered_{tn} AS SELECT
                id,
                {vcn},
                (SELECT COUNT(books_{tn}_link.id) FROM books_{tn}_link WHERE
                    {cn}={tn}.id AND books_list_filter(book)) count
            FROM {tn};
            '''.format(tn=table_name, cn=column_name, vcn=view_column_name))
        self.conn.executescript(script)

    # Only built-in categories backed by a link table get these views
    for field in itervalues(self.field_metadata):
        if field['is_category'] and not field['is_custom'] and 'link_column' in field:
            table = self.conn.get(
                'SELECT name FROM sqlite_master WHERE type="table" AND name=?',
                ('books_%s_link'%field['table'],), all=False)
            if table is not None:
                create_tag_browser_view(field['table'], field['link_column'], field['column'])
def all_substitutions(self, glyph_ids):
    # Return the union of all substitute glyphs reachable from glyph_ids
    # through this subtable's coverage table.
    index_map = self.coverage.coverage_indices(glyph_ids)
    result = set()
    for coverage_index in index_map.values():
        result.update(self.coverage_to_subs_map[coverage_index])
    return result
def update(self, ev, boundary='update'):
    # Feed a Qt touch event into the gesture recognizer. boundary is
    # 'start', 'update' or 'end' and drives the gesture lifecycle.
    if boundary == 'start':
        self.start()

    # Track every touch point, creating or updating as needed
    for tp in ev.touchPoints():
        tpid = tp.id()
        if tpid not in self.touch_points:
            self.touch_points[tpid] = TouchPoint(tp)
        else:
            self.touch_points[tpid].update(tp)

    # More than two fingers: no supported gesture; exactly two: only a
    # pinch is still possible
    if len(self.touch_points) > 2:
        self.possible_gestures.clear()
    elif len(self.touch_points) > 1:
        self.possible_gestures &= {Pinch}

    if boundary == 'end':
        self.check_for_holds()
        self.finalize()
        self.clear()
    else:
        self.check_for_holds()
        if {Swipe, SwipeAndHold} & self.possible_gestures:
            # Emit live swipe progress for UI feedback
            tp = next(itervalues(self.touch_points))
            self.swiping.emit(*tp.swipe_live)
def safe_replace(self, name, datastream, extra_replacements=None, add_missing=False):
    '''
    Rewrite this zip in place, replacing the data of ``name`` (and of every
    name in ``extra_replacements``) with the supplied data, which may be
    bytes or a file-like object. The archive is rebuilt in a spooled
    temporary file and only then copied over the original stream.

    :param extra_replacements: optional mapping of name -> data
        (FIX: previously a mutable default argument ``{}``)
    :param add_missing: if True, replacement names not already present in
        the zip are appended to it
    '''
    from calibre.utils.zipfile import ZipFile, ZipInfo
    replacements = {name: datastream}
    if extra_replacements:
        replacements.update(extra_replacements)
    names = frozenset(replacements)
    found = set()

    def rbytes(name):
        # Accept either raw bytes or a file-like object
        r = replacements[name]
        if not isinstance(r, bytes):
            r = r.read()
        return r

    with SpooledTemporaryFile(max_size=100*1024*1024) as temp:
        ztemp = ZipFile(temp, 'w')
        for offset, header in itervalues(self.file_info):
            if header.filename in names:
                # Preserve the original compression method for replacements
                zi = ZipInfo(header.filename)
                zi.compress_type = header.compression_method
                ztemp.writestr(zi, rbytes(header.filename))
                found.add(header.filename)
            else:
                ztemp.writestr(header.filename, self.read(header.filename, spool_size=0))
        if add_missing:
            for name in names - found:
                ztemp.writestr(name, rbytes(name))
        ztemp.close()
        # Copy the rebuilt archive back over the original stream
        zipstream = self.stream
        temp.seek(0)
        zipstream.seek(0)
        zipstream.truncate()
        shutil.copyfileobj(temp, zipstream)
        zipstream.flush()
def get_all_ip_addresses():
    # Return all addresses that have a broadcast address and are not the
    # IPv4 loopback, sorted for stable output.
    addresses = [
        addr['addr']
        for iface in itervalues(get_all_ips())
        for addr in iface
        if 'broadcast' in addr and addr['addr'] != '127.0.0.1'
    ]
    addresses.sort(key=ipaddr_sort_key)
    return addresses
def wait_for_shutdown(self, wait_till):
    # Join every job thread and then the event loop, giving each at most
    # the time remaining until the monotonic deadline wait_till.
    for job in self.jobs.values():
        remaining = wait_till - monotonic()
        if remaining > 0:
            job.join(remaining)
    if self.event_loop is not None:
        remaining = wait_till - monotonic()
        if remaining > 0:
            self.event_loop.join(remaining)
def book_to_json(ctx, rd, db, book_id, get_category_urls=True,
                 device_compatible=False, device_for_template=None):
    # Serialize a book's metadata to a JSON-compatible dict for the content
    # server AJAX API. Returns (data, last_modified). In device_compatible
    # mode the payload is tailored for device clients (series sort and
    # upload filename instead of format/category URLs).
    mi = db.get_metadata(book_id, get_cover=False)
    codec = JsonCodec(db.field_metadata)
    if not device_compatible:
        try:
            # Stored ratings are 0-10; the API exposes 0-5
            mi.rating = mi.rating / 2.
        except Exception:
            mi.rating = 0.0
    data = codec.encode_book_metadata(mi)
    # Internal fields that must not leak into the API response
    for x in ('publication_type', 'size', 'db_id', 'lpath', 'mime', 'rights',
              'book_producer'):
        data.pop(x, None)

    get = partial(ctx.url_for, get_content, book_id=book_id,
                  library_id=db.server_library_id)
    data['cover'] = get(what='cover')
    data['thumbnail'] = get(what='thumb')

    if not device_compatible:
        # Normalize format metadata keys to lowercase and mtimes to ISO UTC
        mi.format_metadata = {k.lower(): dict(v) for k, v in
                              iteritems(mi.format_metadata)}
        for v in itervalues(mi.format_metadata):
            mtime = v.get('mtime', None)
            if mtime is not None:
                v['mtime'] = isoformat(mtime, as_utc=True)
        data['format_metadata'] = mi.format_metadata
        fmts = set(x.lower() for x in mi.format_metadata)
        pf = prefs['output_format'].lower()
        other_fmts = list(fmts)
        # Preferred output format if available, else any format at all
        try:
            fmt = pf if pf in fmts else other_fmts[0]
        except:
            fmt = None
        if fmts and fmt:
            other_fmts = [x for x in fmts if x != fmt]
        data['formats'] = sorted(fmts)
        if fmt:
            data['main_format'] = {fmt: get(what=fmt)}
        else:
            data['main_format'] = None
        data['other_formats'] = {fmt: get(what=fmt) for fmt in other_fmts}

        if get_category_urls:
            # For every category-type field, map each of the book's values
            # to the URL listing all books with that value
            category_urls = data['category_urls'] = {}
            all_cats = ctx.get_categories(rd, db)
            for key in mi.all_field_keys():
                fm = mi.metadata_for_field(key)
                if (fm and fm['is_category'] and not fm['is_csp'] and
                        key != 'formats' and fm['datatype'] != 'rating'):
                    categories = mi.get(key) or []
                    if isinstance(categories, string_or_bytes):
                        categories = [categories]
                    category_urls[key] = dbtags = {}
                    for category in categories:
                        for tag in all_cats.get(key, ()):
                            if tag.original_name == category:
                                dbtags[category] = ctx.url_for(
                                    books_in,
                                    encoded_category=encode_name(
                                        tag.category if tag.category else key),
                                    encoded_item=encode_name(
                                        tag.original_name if tag.id is None else
                                        unicode_type(tag.id)),
                                    library_id=db.server_library_id)
                                break
    else:
        # Device-compatible payload: precomputed series sort order
        series = data.get('series', None) or ''
        if series:
            tsorder = tweaks['save_template_title_series_sorting']
            series = title_sort(series, order=tsorder)
        data['_series_sort_'] = series
        if device_for_template:
            # Compute the upload path the named device plugin would use
            import posixpath
            from calibre.devices.utils import create_upload_path
            from calibre.utils.filenames import ascii_filename as sanitize
            from calibre.customize.ui import device_plugins
            for device_class in device_plugins():
                if device_class.__class__.__name__ == device_for_template:
                    template = device_class.save_template()
                    data['_filename_'] = create_upload_path(
                        mi, unicode_type(book_id), template, sanitize,
                        path_type=posixpath)
                    break

    return data, mi.last_modified
def __iter__(self):
    # Yield every entry stored in the id map, in insertion order.
    for entry in itervalues(self.id_map):
        yield entry
def encode_strands_as_sequences(strands, tbs_type=8):
    '''
    Encode the list of strands for a single text record into a list of
    sequences, ready to be converted into TBS bytes.
    '''
    ans = []
    last_index = None
    max_length_offset = 0
    first_entry = None
    # First pass: find the very first entry and the largest length offset
    for strand in strands:
        for entries in itervalues(strand):
            for entry in entries:
                if first_entry is None:
                    first_entry = entry
                if entry.length_offset > max_length_offset:
                    max_length_offset = entry.length_offset

    for strand in strands:
        strand_seqs = []
        for depth, entries in iteritems(strand):
            # extra maps TBS flag bits to their payload values
            extra = {}
            if entries[-1].action == 'spans':
                extra[0b1] = 0
            elif False and (
                    entries[-1].length_offset < entries[-1].text_record_length and
                    entries[-1].action == 'completes' and
                    entries[-1].length_offset != max_length_offset):
                # I can't figure out exactly when kindlegen decides to insert
                # this, so disable it for now.
                extra[0b1] = entries[-1].length_offset

            if entries[0] is first_entry:
                # Only the very first sequence carries the TBS type
                extra[0b10] = tbs_type

            if len(entries) > 1:
                extra[0b100] = len(entries)

            # Index relative to the parent entry
            index = entries[0].index - (entries[0].parent or 0)

            if ans and not strand_seqs:
                # We are in the second or later strands, so we need to use a
                # special flag and index value. The index value is the entry
                # index - the index of the last entry in the previous strand.
                index = last_index - entries[0].index
                if index < 0:
                    if tbs_type == 5:
                        index = -index
                    else:
                        raise NegativeStrandIndex()
                else:
                    extra[0b1000] = True
            last_index = entries[-1].index
            strand_seqs.append((index, extra))

        # Handle the case of consecutive action == 'spans' entries. In this
        # case, the 0b1 = 0 flag should be present only in the last consecutive
        # spans entry.
        for i, seq in enumerate(strand_seqs):
            if i + 1 < len(strand_seqs):
                if 0b1 in seq[1] and 0b1 in strand_seqs[i + 1][1]:
                    del seq[1][0b1]
        ans.extend(strand_seqs)

    return ans
def show_context_menu(self, point):
    '''
    Build and show the file-list context menu for the item at *point*.

    The menu contents depend on the current item (export/replace/rename/
    mark-as-cover actions), the number of selected items (bulk actions),
    and the clipboard (paste from another editor instance).
    '''
    item = self.itemAt(point)
    # No menu on empty space or on the top-level category headers.
    if item is None or item in tuple(itervalues(self.categories)):
        return
    m = QMenu(self)
    sel = self.selectedItems()
    num = len(sel)
    container = current_container()
    ci = self.currentItem()
    if ci is not None:
        # Single-item actions operate on the current item.
        cn = unicode_type(ci.data(0, NAME_ROLE) or '')
        mt = unicode_type(ci.data(0, MIME_ROLE) or '')
        cat = unicode_type(ci.data(0, CATEGORY_ROLE) or '')
        n = elided_text(cn.rpartition('/')[-1])
        m.addAction(QIcon(I('save.png')), _('Export %s') % n, partial(self.export, cn))
        if cn not in container.names_that_must_not_be_changed and cn not in container.names_that_must_not_be_removed and mt not in OEB_FONTS:
            m.addAction(_('Replace %s with file...') % n, partial(self.replace, cn))
        if num > 1:
            m.addAction(QIcon(I('save.png')), _('Export all %d selected files') % num, self.export_selected)
        m.addSeparator()
        m.addAction(QIcon(I('modified.png')), _('&Rename %s') % n, self.edit_current_item)
        if is_raster_image(mt):
            m.addAction(QIcon(I('default_cover.png')), _('Mark %s as cover image') % n, partial(self.mark_as_cover, cn))
        elif current_container().SUPPORTS_TITLEPAGES and mt in OEB_DOCS and cat == 'text':
            # EPUB-style titlepage: only for text documents in formats that support it.
            m.addAction(QIcon(I('default_cover.png')), _('Mark %s as cover page') % n, partial(self.mark_as_titlepage, cn))
        m.addSeparator()
    if num > 0:
        # Actions that apply to the whole selection.
        m.addSeparator()
        if num > 1:
            m.addAction(QIcon(I('modified.png')), _('&Bulk rename the selected files'), self.request_bulk_rename)
            m.addAction(QIcon(I('modified.png')), _('Change the file extension for the selected files'), self.request_change_ext)
        m.addAction(QIcon(I('trash.png')), ngettext(
            '&Delete the selected file', '&Delete the {} selected files', num).format(num), self.request_delete)
        m.addAction(QIcon(I('edit-copy.png')), ngettext(
            '&Copy the selected file to another editor instance',
            '&Copy the {} selected files to another editor instance', num).format(num), self.copy_selected_files)
        m.addSeparator()
    md = QApplication.instance().clipboard().mimeData()
    if md.hasUrls() and md.hasFormat(FILE_COPY_MIME):
        m.addAction(_('Paste files from other editor instance'), self.paste_from_other_instance)
    # Group the selection by category so category-specific actions
    # (link stylesheets, merge) can operate on sorted name lists.
    selected_map = defaultdict(list)
    for item in sel:
        selected_map[unicode_type(item.data(0, CATEGORY_ROLE) or '')].append(unicode_type(item.data(0, NAME_ROLE) or ''))
    for items in itervalues(selected_map):
        items.sort(key=self.index_of_name)
    if selected_map['text']:
        m.addAction(QIcon(I('format-text-color.png')), _('Link &stylesheets...'), partial(self.link_stylesheets, selected_map['text']))
    if len(selected_map['text']) > 1:
        m.addAction(QIcon(I('merge.png')), _('&Merge selected text files'), partial(self.start_merge, 'text', selected_map['text']))
    if len(selected_map['styles']) > 1:
        m.addAction(QIcon(I('merge.png')), _('&Merge selected style files'), partial(self.start_merge, 'styles', selected_map['styles']))
    # Only show the menu if at least one action was added.
    if len(list(m.actions())) > 0:
        m.popup(self.mapToGlobal(point))
def item_from_name(self, name):
    # Search every category's immediate children for the tree item whose
    # NAME_ROLE data equals *name*; returns the item, or None when absent.
    for category_item in itervalues(self.categories):
        for child_idx in range(category_item.childCount()):
            child = category_item.child(child_idx)
            if unicode_type(child.data(0, NAME_ROLE) or '') == name:
                return child
def __init__(self, open_at=None, continue_reading=None, force_reload=False, calibre_book_data=None):
    '''
    Construct the main E-book viewer window: toolbars, dock widgets
    (ToC, search, lookup, bookmarks, highlights), the central web view,
    and all the signal wiring between them.

    :param open_at: optional location to jump to once a book is opened
    :param continue_reading: when truthy, reopen the last-read book
    :param force_reload: force re-preparation of the book
    :param calibre_book_data: metadata for the first book, from the calibre GUI
    '''
    MainWindow.__init__(self, None)
    self.annotations_saver = None
    self.calibre_book_data_for_first_book = calibre_book_data
    self.shutting_down = self.close_forced = self.shutdown_done = False
    self.force_reload = force_reload
    connect_lambda(self.book_preparation_started, self, lambda self: self.loading_overlay(
        _('Preparing book for first read, please wait')), type=Qt.ConnectionType.QueuedConnection)
    self.maximized_at_last_fullscreen = False
    # Debounce timer: annotations are persisted 3s after the last position change.
    self.save_pos_timer = t = QTimer(self)
    t.setSingleShot(True), t.setInterval(3000), t.setTimerType(Qt.TimerType.VeryCoarseTimer)
    connect_lambda(t.timeout, self, lambda self: self.save_annotations(in_book_file=False))
    self.pending_open_at = open_at
    self.base_window_title = _('E-book viewer')
    self.setDockOptions(QMainWindow.DockOption.AnimatedDocks | QMainWindow.DockOption.AllowTabbedDocks | QMainWindow.DockOption.AllowNestedDocks)
    self.setWindowTitle(self.base_window_title)
    self.in_full_screen_mode = None
    self.image_popup = ImagePopup(self)
    self.actions_toolbar = at = ActionsToolBar(self)
    at.open_book_at_path.connect(self.ask_for_open)
    self.addToolBar(Qt.ToolBarArea.LeftToolBarArea, at)
    try:
        os.makedirs(annotations_dir)
    except EnvironmentError:
        # Directory already exists (or is not creatable); best-effort.
        pass
    self.current_book_data = {}
    get_current_book_data(self.current_book_data)
    self.book_prepared.connect(self.load_finished, type=Qt.ConnectionType.QueuedConnection)
    self.dock_defs = dock_defs()

    def create_dock(title, name, area, areas=Qt.DockWidgetArea.LeftDockWidgetArea | Qt.DockWidgetArea.RightDockWidgetArea):
        # Helper: create a named, initially-hidden dock widget in *area*.
        ans = QDockWidget(title, self)
        ans.setObjectName(name)
        self.addDockWidget(area, ans)
        ans.setVisible(False)
        ans.visibilityChanged.connect(self.dock_visibility_changed)
        return ans

    # Create one dock per definition, exposed as self.<prefix>_dock
    # where <prefix> is the dock name up to the first hyphen.
    for dock_def in itervalues(self.dock_defs):
        setattr(
            self, '{}_dock'.format(dock_def.name.partition('-')[0]),
            create_dock(dock_def.title, dock_def.name, dock_def.initial_area, dock_def.allowed_areas))

    # --- Table of Contents dock ---
    self.toc_container = w = QWidget(self)
    w.l = QVBoxLayout(w)
    self.toc = TOCView(w)
    self.toc.clicked[QModelIndex].connect(self.toc_clicked)
    self.toc.searched.connect(self.toc_searched)
    self.toc_search = TOCSearch(self.toc, parent=w)
    w.l.addWidget(self.toc), w.l.addWidget(self.toc_search), w.l.setContentsMargins(0, 0, 0, 0)
    self.toc_dock.setWidget(w)

    # --- Search dock ---
    self.search_widget = w = SearchPanel(self)
    w.search_requested.connect(self.start_search)
    w.hide_search_panel.connect(self.search_dock.close)
    w.count_changed.connect(self.search_results_count_changed)
    w.goto_cfi.connect(self.goto_cfi)
    self.search_dock.setWidget(w)
    self.search_dock.visibilityChanged.connect(self.search_widget.visibility_changed)

    # --- Lookup (dictionary) dock ---
    self.lookup_widget = w = Lookup(self)
    self.lookup_dock.visibilityChanged.connect(self.lookup_widget.visibility_changed)
    self.lookup_dock.setWidget(w)

    # --- Bookmarks dock ---
    self.bookmarks_widget = w = BookmarkManager(self)
    connect_lambda(
        w.create_requested, self,
        lambda self: self.web_view.trigger_shortcut('new_bookmark'))
    w.edited.connect(self.bookmarks_edited)
    w.activated.connect(self.bookmark_activated)
    w.toggle_requested.connect(self.toggle_bookmarks)
    self.bookmarks_dock.setWidget(w)

    # --- Highlights dock ---
    self.highlights_widget = w = HighlightsPanel(self)
    self.highlights_dock.setWidget(w)
    w.toggle_requested.connect(self.toggle_highlights)

    # --- Central web view and its signal wiring ---
    self.web_view = WebView(self)
    self.web_view.cfi_changed.connect(self.cfi_changed)
    self.web_view.reload_book.connect(self.reload_book)
    self.web_view.toggle_toc.connect(self.toggle_toc)
    self.web_view.show_search.connect(self.show_search)
    self.web_view.find_next.connect(self.search_widget.find_next_requested)
    self.search_widget.show_search_result.connect(self.web_view.show_search_result)
    self.web_view.search_result_not_found.connect(self.search_widget.search_result_not_found)
    self.web_view.search_result_discovered.connect(self.search_widget.search_result_discovered)
    self.web_view.toggle_bookmarks.connect(self.toggle_bookmarks)
    self.web_view.toggle_highlights.connect(self.toggle_highlights)
    self.web_view.new_bookmark.connect(self.bookmarks_widget.create_new_bookmark)
    self.web_view.toggle_inspector.connect(self.toggle_inspector)
    self.web_view.toggle_lookup.connect(self.toggle_lookup)
    self.web_view.quit.connect(self.quit)
    self.web_view.update_current_toc_nodes.connect(self.toc.update_current_toc_nodes)
    self.web_view.toggle_full_screen.connect(self.toggle_full_screen)
    # Queued connections: these handlers may show modal UI or do heavy
    # work, so they must not run re-entrantly inside the signal emission.
    self.web_view.ask_for_open.connect(self.ask_for_open, type=Qt.ConnectionType.QueuedConnection)
    self.web_view.selection_changed.connect(self.lookup_widget.selected_text_changed, type=Qt.ConnectionType.QueuedConnection)
    self.web_view.selection_changed.connect(self.highlights_widget.selected_text_changed, type=Qt.ConnectionType.QueuedConnection)
    self.web_view.view_image.connect(self.view_image, type=Qt.ConnectionType.QueuedConnection)
    self.web_view.copy_image.connect(self.copy_image, type=Qt.ConnectionType.QueuedConnection)
    self.web_view.show_loading_message.connect(self.show_loading_message)
    self.web_view.show_error.connect(self.show_error)
    self.web_view.print_book.connect(self.print_book, type=Qt.ConnectionType.QueuedConnection)
    self.web_view.reset_interface.connect(self.reset_interface, type=Qt.ConnectionType.QueuedConnection)
    self.web_view.quit.connect(self.quit, type=Qt.ConnectionType.QueuedConnection)
    self.web_view.shortcuts_changed.connect(self.shortcuts_changed)
    self.web_view.scrollbar_context_menu.connect(self.scrollbar_context_menu)
    self.web_view.close_prep_finished.connect(self.close_prep_finished)
    self.web_view.highlights_changed.connect(self.highlights_changed)
    self.actions_toolbar.initialize(self.web_view, self.search_dock.toggleViewAction())
    self.setCentralWidget(self.web_view)
    self.loading_overlay = LoadingOverlay(self)
    self.restore_state()
    self.actions_toolbar.update_visibility()
    self.dock_visibility_changed()
    self.highlights_widget.request_highlight_action.connect(self.web_view.highlight_action)
    self.highlights_widget.web_action.connect(self.web_view.generic_action)
    if continue_reading:
        self.continue_reading()
    self.setup_mouse_auto_hide()
def find_icons():
    '''
    Scan the freedesktop icon theme directories and return a mapping of
    icon name -> path to the largest available variant of that icon.

    Results are memoized in the module-global ``icon_data`` and the
    per-theme-directory scans are cached on disk (keyed by directory
    mtime) so repeated calls are cheap.
    '''
    global icon_data
    if icon_data is not None:
        # Already computed in this process.
        return icon_data
    # Standard XDG icon search path, plus the legacy ~/.icons and pixmaps dirs.
    base_dirs = [(os.environ.get('XDG_DATA_HOME') or os.path.expanduser('~/.local/share')) + '/icons']
    base_dirs += [os.path.expanduser('~/.icons')]
    base_dirs += [
        os.path.join(b, 'icons') for b in os.environ.get(
            'XDG_DATA_DIRS', '/usr/local/share:/usr/share').split(os.pathsep)
    ] + ['/usr/share/pixmaps']
    ans = defaultdict(list)
    # Matches the size component of theme paths, e.g. /48x48/ or /scalable/.
    sz_pat = re.compile(r'/((?:\d+x\d+)|scalable)/')
    cache_file = os.path.join(cache_dir(), 'icon-theme-cache.calibre_msgpack')
    exts = {'.svg', '.png', '.xpm'}

    def read_icon_theme_dir(dirpath):
        # Scan one theme directory; for each icon name keep the variant
        # with the largest size (scalable counts as effectively infinite).
        ans = defaultdict(list)
        for path in walk(dirpath):
            bn = os.path.basename(path)
            name, ext = os.path.splitext(bn)
            if ext in exts:
                sz = sz_pat.findall(path)
                if sz:
                    sz = sz[-1]
                    if sz == 'scalable':
                        sz = 100000
                    else:
                        sz = int(sz.partition('x')[0])
                    idx = len(ans[name])
                    # Negative size first so sorting puts the largest variant at index 0.
                    ans[name].append((-sz, idx, sz, path))
        for icons in itervalues(ans):
            icons.sort(key=list)
        return {k: (-v[0][2], v[0][3]) for k, v in iteritems(ans)}

    # Load the on-disk cache of per-directory scan results, if valid.
    try:
        with open(cache_file, 'rb') as f:
            cache = f.read()
        cache = msgpack_loads(cache)
        mtimes, cache = defaultdict(int, cache['mtimes']), defaultdict(
            dict, cache['data'])
    except Exception:
        mtimes, cache = defaultdict(int), defaultdict(dict)
    seen_dirs = set()
    changed = False
    for loc in base_dirs:
        try:
            subdirs = os.listdir(loc)
        except EnvironmentError:
            continue
        for dname in subdirs:
            d = os.path.join(loc, dname)
            if os.path.isdir(d):
                try:
                    mtime = os.stat(d).st_mtime
                except EnvironmentError:
                    continue
                seen_dirs.add(d)
                # Rescan only directories whose mtime changed since last run.
                if mtime != mtimes[d]:
                    changed = True
                    try:
                        cache[d] = read_icon_theme_dir(d)
                    except Exception:
                        prints(
                            'Failed to read icon theme dir: %r with error:' % d)
                        import traceback
                        traceback.print_exc()
                    mtimes[d] = mtime
                for name, data in iteritems(cache[d]):
                    ans[name].append(data)
    # Drop cache entries for theme directories that no longer exist.
    for removed in set(mtimes) - seen_dirs:
        mtimes.pop(removed), cache.pop(removed)
        changed = True
    if changed:
        # Persist the updated cache; failures are non-fatal.
        data = msgpack_dumps({'data': cache, 'mtimes': mtimes})
        try:
            with open(cache_file, 'wb') as f:
                f.write(data)
        except Exception:
            import traceback
            traceback.print_exc()
    for icons in itervalues(ans):
        icons.sort(key=list)
    # Keep only the path of the best (largest) variant per icon name.
    icon_data = {k: v[0][1] for k, v in iteritems(ans)}
    return icon_data
def resolve_styles(container, name, select=None, sheet_callback=None):
    '''
    Resolve the CSS cascade for the HTML file *name* in *container*.

    Collects declarations from the user-agent stylesheet, linked and inline
    <style> sheets, and style attributes, sorts them by specificity and
    returns a 3-tuple: (resolve_property, resolve_pseudo_property, select),
    the first two being partials bound to the computed style maps.

    :param select: optional pre-built CSS selector engine for *root*
    :param sheet_callback: if given, called with (sheet, sheet_name) for
        every stylesheet processed
    '''
    root = container.parsed(name)
    select = select or Select(root, ignore_inappropriate_pseudo_classes=True)
    style_map = defaultdict(list)
    pseudo_style_map = defaultdict(list)
    rule_index_counter = count()
    # Detects selectors using pseudo classes (e.g. :hover) that cannot be
    # matched statically; their declarations go into the pseudo map.
    pseudo_pat = re.compile(
        ':{1,2}(%s)' % ('|'.join(INAPPROPRIATE_PSEUDO_CLASSES)), re.I)

    def process_sheet(sheet, sheet_name):
        # Apply every style rule in *sheet* to the matching elements.
        if sheet_callback is not None:
            sheet_callback(sheet, sheet_name)
        for rule, sheet_name, rule_index in iterrules(
                container, sheet_name, rules=sheet,
                rule_index_counter=rule_index_counter, rule_type='STYLE_RULE'):
            for selector in rule.selectorList:
                text = selector.selectorText
                try:
                    matches = tuple(select(text))
                except SelectorError as err:
                    container.log.error(
                        'Ignoring CSS rule with invalid selector: {!r} ({})'.
                        format(text, as_unicode(err)))
                    continue
                m = pseudo_pat.search(text)
                style = normalize_style_declaration(rule.style, sheet_name)
                if m is None:
                    for elem in matches:
                        style_map[elem].append(
                            StyleDeclaration(specificity(rule_index, selector), style, None))
                else:
                    for elem in matches:
                        pseudo_style_map[elem].append(
                            StyleDeclaration(specificity(rule_index, selector), style, m.group(1)))

    # The HTML user-agent stylesheet forms the base of the cascade.
    process_sheet(html_css_stylesheet(container), 'user-agent.css')

    # Then inline <style> elements and linked stylesheets, in document order.
    for elem in root.iterdescendants(XHTML('style'), XHTML('link')):
        if elem.tag.lower().endswith('style'):
            if not elem.text:
                continue
            sheet = container.parse_css(elem.text)
            sheet_name = name
        else:
            # Only follow stylesheet links with an applicable media type.
            if (elem.get('type') or 'text/css').lower() not in OEB_STYLES or \
                    (elem.get('rel') or 'stylesheet').lower() != 'stylesheet' or \
                    not media_ok(elem.get('media')):
                continue
            href = elem.get('href')
            if not href:
                continue
            sheet_name = container.href_to_name(href, name)
            if not container.has_name(sheet_name):
                continue
            sheet = container.parsed(sheet_name)
            if not isinstance(sheet, CSSStyleSheet):
                continue
        process_sheet(sheet, sheet_name)

    # Finally, style="" attributes (highest non-important precedence).
    for elem in root.xpath('//*[@style]'):
        text = elem.get('style')
        if text:
            style = container.parse_css(text, is_declaration=True)
            style_map[elem].append(
                StyleDeclaration(Specificity(1, 0, 0, 0, 0),
                                 normalize_style_declaration(style, name), None))

    # Sort declarations per element by specificity, most specific first.
    for l in (style_map, pseudo_style_map):
        for x in itervalues(l):
            x.sort(key=itemgetter(0), reverse=True)

    style_map = {
        elem: resolve_declarations(x)
        for elem, x in iteritems(style_map)
    }
    pseudo_style_map = {
        elem: resolve_pseudo_declarations(x)
        for elem, x in iteritems(pseudo_style_map)
    }

    return partial(resolve_property, style_map), partial(resolve_pseudo_property, style_map, pseudo_style_map), select
def test_remove_items(self):  # {{{
    ''' Test removal of many-(many,one) items from builtin and custom
    (#-prefixed) tag and series columns, with and without a book-id
    restriction, verifying both the live cache and a freshly-opened one. '''
    cache = self.init_cache()
    # Remove every builtin tag; books 1 and 2 are affected.
    tmap = cache.get_id_map('tags')
    self.assertEqual(cache.remove_items('tags', tmap), {1, 2})
    tmap = cache.get_id_map('#tags')
    t = {v: k for k, v in iteritems(tmap)}['My Tag Two']
    self.assertEqual(cache.remove_items('#tags', (t, )), {1, 2})

    # Same for builtin and custom series.
    smap = cache.get_id_map('series')
    self.assertEqual(cache.remove_items('series', smap), {1, 2})
    smap = cache.get_id_map('#series')
    s = {v: k for k, v in iteritems(smap)}['My Series Two']
    self.assertEqual(cache.remove_items('#series', (s, )), {1})

    # Verify against both the live cache and a re-opened one (i.e. the
    # changes were actually persisted to the db).
    for c in (cache, self.init_cache()):
        self.assertFalse(c.get_id_map('tags'))
        self.assertFalse(c.all_field_names('tags'))
        for bid in c.all_book_ids():
            self.assertFalse(c.field_for('tags', bid))

        self.assertEqual(len(c.get_id_map('#tags')), 1)
        self.assertEqual(c.all_field_names('#tags'), {'My Tag One'})
        for bid in c.all_book_ids():
            self.assertIn(c.field_for('#tags', bid), ((), ('My Tag One', )))

        # Removing a series resets series_index to the default 1.0.
        for bid in (1, 2):
            self.assertEqual(c.field_for('series_index', bid), 1.0)
        self.assertFalse(c.get_id_map('series'))
        self.assertFalse(c.all_field_names('series'))
        for bid in c.all_book_ids():
            self.assertFalse(c.field_for('series', bid))

        self.assertEqual(c.field_for('series_index', 1), 1.0)
        self.assertEqual(c.all_field_names('#series'), {'My Series One'})
        for bid in c.all_book_ids():
            self.assertIn(c.field_for('#series', bid), (None, 'My Series One'))

    # Now test with restriction
    cache = self.init_cache()
    cache.set_field('tags', {1: 'a,b,c', 2: 'b,a', 3: 'x,y,z'})
    cache.set_field('series', {1: 'a', 2: 'a', 3: 'b'})
    cache.set_field('series_index', {1: 8, 2: 9, 3: 3})
    tmap, smap = cache.get_id_map('tags'), cache.get_id_map('series')
    # An empty restriction set means nothing is removed.
    self.assertEqual(
        cache.remove_items('tags', tmap, restrict_to_book_ids=()), set())
    self.assertEqual(
        cache.remove_items('tags', tmap, restrict_to_book_ids={1}), {1})
    self.assertEqual(
        cache.remove_items('series', smap, restrict_to_book_ids=()), set())
    self.assertEqual(
        cache.remove_items('series', smap, restrict_to_book_ids=(1, )), {1})
    c2 = self.init_cache()
    for c in (cache, c2):
        # Only book 1 was affected by the restricted removals.
        self.assertEqual(c.field_for('tags', 1), ())
        self.assertEqual(c.field_for('tags', 2), ('b', 'a'))
        self.assertNotIn('c', set(itervalues(c.get_id_map('tags'))))
        self.assertEqual(c.field_for('series', 1), None)
        self.assertEqual(c.field_for('series', 2), 'a')
        self.assertEqual(c.field_for('series_index', 1), 1.0)
        self.assertEqual(c.field_for('series_index', 2), 9)
def do_paged_render(self):
    '''
    Render the current HTML item to PDF pages using the JS paged display
    code inside Qt WebKit: lay the document out in columns, render each
    column as one PDF page, and finally register links/anchors with the
    output document.
    '''
    if self.paged_js is None:
        # Lazily assemble the javascript bundle (compiled coffeescript)
        # used to drive paged layout inside the page.
        import uuid
        from calibre.utils.resources import compiled_coffeescript as cc
        self.paged_js = cc('ebooks.oeb.display.utils').decode('utf-8')
        self.paged_js += cc('ebooks.oeb.display.indexing').decode('utf-8')
        self.paged_js += cc('ebooks.oeb.display.paged').decode('utf-8')
        self.paged_js += cc('ebooks.oeb.display.mathjax').decode('utf-8')
        if self.opts.pdf_hyphenate:
            self.paged_js += P('viewer/hyphenate/Hyphenator.js', data=True).decode('utf-8')
            hjs, self.hyphenate_lang = load_hyphenator_dicts({}, self.book_language)
            self.paged_js += hjs
        # Unique id used by the header/footer machinery.
        self.hf_uuid = str(uuid.uuid4()).replace('-', '')

    self.view.page().mainFrame().addToJavaScriptWindowObject("py_bridge", self)
    self.view.page().longjs_counter = 0
    evaljs = self.view.page().mainFrame().evaluateJavaScript
    evaljs(self.paged_js)
    self.load_mathjax()
    if self.opts.pdf_hyphenate:
        self.hyphenate(evaljs)

    margin_top, margin_bottom = self.margin_top, self.margin_bottom
    page_margins = None
    if self.opts.pdf_use_document_margins:
        # The document may carry its own page margins in a data attribute.
        doc_margins = evaljs('document.documentElement.getAttribute("data-calibre-pdf-output-page-margins")')
        try:
            doc_margins = json.loads(doc_margins)
        except Exception:
            doc_margins = None
        if doc_margins and isinstance(doc_margins, dict):
            doc_margins = {k:float(v) for k, v in iteritems(doc_margins) if isinstance(v, numbers.Number) and k in {'right', 'top', 'left', 'bottom'}}
            if doc_margins:
                margin_top = margin_bottom = 0
                page_margins = self.convert_page_margins(doc_margins)

    # Lay the document out and collect all links and anchors.
    amap = json.loads(evaljs('''
        document.body.style.backgroundColor = "white";
        // Qt WebKit cannot handle opacity with the Pdf backend
        s = document.createElement('style');
        s.textContent = '* {opacity: 1 !important}';
        document.documentElement.appendChild(s);
        paged_display.set_geometry(1, %d, %d, %d);
        paged_display.layout();
        paged_display.fit_images();
        ret = book_indexing.all_links_and_anchors();
        window.scrollTo(0, 0); // This is needed as getting anchor positions could have caused the viewport to scroll
        JSON.stringify(ret);
    '''%(margin_top, 0, margin_bottom)))

    if not isinstance(amap, dict):
        amap = {'links':[], 'anchors':{}}  # Some javascript error occurred
    # Normalize column numbers (JSON may deliver them as floats).
    for val in itervalues(amap['anchors']):
        if isinstance(val, dict) and 'column' in val:
            val['column'] = int(val['column'])
    for href, val in amap['links']:
        if isinstance(val, dict) and 'column' in val:
            val['column'] = int(val['column'])
    sections = self.get_sections(amap['anchors'])
    tl_sections = self.get_sections(amap['anchors'], True)
    col = 0

    if self.header:
        evaljs('paged_display.header_template = ' + json.dumps(self.header))
    if self.footer:
        evaljs('paged_display.footer_template = ' + json.dumps(self.footer))
    if self.header or self.footer:
        evaljs('paged_display.create_header_footer("%s");'%self.hf_uuid)

    start_page = self.current_page_num

    mf = self.view.page().mainFrame()

    def set_section(col, sections, attr):
        # If this page has no section, use the section from the previous page
        idx = col if col in sections else col - 1 if col - 1 in sections else None
        if idx is not None:
            setattr(self, attr, sections[idx][0])

    from calibre.ebooks.pdf.render.toc import calculate_page_number

    # Render one column/page per iteration, advancing the viewport until
    # the JS layout reports there is no next screen location.
    while True:
        set_section(col, sections, 'current_section')
        set_section(col, tl_sections, 'current_tl_section')
        self.doc.init_page(page_margins)
        num = calculate_page_number(self.current_page_num, self.opts.pdf_page_number_map, evaljs)
        if self.header or self.footer:
            if evaljs('paged_display.update_header_footer(%d)'%num) is True:
                self.load_header_footer_images()
        self.painter.save()
        mf.render(self.painter, mf.ContentsLayer)
        self.painter.restore()
        try:
            nsl = int(evaljs('paged_display.next_screen_location()'))
        except (TypeError, ValueError):
            break
        # nsl <= 0 signals the last page of this item.
        self.doc.end_page(nsl <= 0)
        if nsl <= 0:
            break
        evaljs('window.scrollTo(%d, 0); paged_display.position_header_footer();'%nsl)
        if self.doc.errors_occurred:
            break
        col += 1

    if not self.doc.errors_occurred and self.doc.current_page_num > 1:
        self.doc.add_links(self.current_item, start_page, amap['links'], amap['anchors'])
def __iter__(self):
    # First yield the direct paragraphs, then every paragraph contained
    # in any nested sub-table.
    yield from self.paragraphs
    for table in itervalues(self.sub_tables):
        yield from table
def convert_p(self, p):
    '''
    Convert a DOCX paragraph element *p* into an HTML <p> (or <h1>-<h6>)
    element, handling runs, bookmarks (anchors), hyperlinks, TOC markers,
    heading styles, bidi direction and per-run border grouping.

    :return: the generated lxml element for the paragraph
    '''
    dest = P()
    self.object_map[dest] = p
    style = self.styles.resolve_paragraph(p)
    self.layers[p] = []
    self.frame_map[p] = style.frame
    self.add_frame(dest, style.frame)

    current_anchor = None
    current_hyperlink = None
    hl_xpath = self.namespace.XPath('ancestor::w:hyperlink[1]')

    def p_parent(x):
        # Ensure that nested <w:p> tags are handled. These can occur if a
        # textbox is present inside a paragraph.
        while True:
            x = x.getparent()
            try:
                if x.tag.endswith('}p'):
                    return x
            except AttributeError:
                break

    for x in self.namespace.descendants(p, 'w:r', 'w:bookmarkStart', 'w:hyperlink', 'w:instrText'):
        # Skip descendants belonging to a nested paragraph (textbox).
        if p_parent(x) is not p:
            continue
        if x.tag.endswith('}r'):
            span = self.convert_run(x)
            if current_anchor is not None:
                # A pending bookmark: attach its id to the paragraph itself
                # if this is the first run, else to the run's span.
                (dest if len(dest) == 0 else span).set('id', current_anchor)
                current_anchor = None
            if current_hyperlink is not None:
                try:
                    hl = hl_xpath(x)[0]
                    self.link_map[hl].append(span)
                    self.link_source_map[hl] = self.current_rels
                    x.set('is-link', '1')
                except IndexError:
                    # Run is no longer inside the hyperlink element.
                    current_hyperlink = None
            dest.append(span)
            self.layers[p].append(x)
        elif x.tag.endswith('}bookmarkStart'):
            anchor = self.namespace.get(x, 'w:name')
            if anchor and anchor not in self.anchor_map and anchor != '_GoBack':
                # _GoBack is a special bookmark inserted by Word 2010 for
                # the return to previous edit feature, we ignore it
                old_anchor = current_anchor
                self.anchor_map[anchor] = current_anchor = generate_anchor(
                    anchor, frozenset(itervalues(self.anchor_map)))
                if old_anchor is not None:
                    # The previous anchor was not applied to any element
                    for a, t in tuple(iteritems(self.anchor_map)):
                        if t == old_anchor:
                            self.anchor_map[a] = current_anchor
        elif x.tag.endswith('}hyperlink'):
            current_hyperlink = x
        elif x.tag.endswith('}instrText') and x.text and x.text.strip().startswith('TOC '):
            # A TOC field: synthesize an anchor so the generated TOC can
            # link back to this position.
            old_anchor = current_anchor
            anchor = unicode_type(uuid.uuid4())
            self.anchor_map[anchor] = current_anchor = generate_anchor(
                'toc', frozenset(itervalues(self.anchor_map)))
            self.toc_anchor = current_anchor
            if old_anchor is not None:
                # The previous anchor was not applied to any element
                for a, t in tuple(iteritems(self.anchor_map)):
                    if t == old_anchor:
                        self.anchor_map[a] = current_anchor
    if current_anchor is not None:
        # This paragraph had no <w:r> descendants
        dest.set('id', current_anchor)
        current_anchor = None

    # Map Word heading styles ("heading 1" ... ) to <h1>-<h6>.
    m = re.match(r'heading\s+(\d+)$', style.style_name or '', re.IGNORECASE)
    if m is not None:
        n = min(6, max(1, int(m.group(1))))
        dest.tag = 'h%d' % n
        dest.set('data-heading-level', unicode_type(n))

    if style.bidi is True:
        dest.set('dir', 'rtl')

    # Group consecutive runs that share the same border so they can be
    # wrapped in a single bordered <span>.
    border_runs = []
    common_borders = []
    for span in dest:
        run = self.object_map[span]
        style = self.styles.resolve_run(run)
        if not border_runs or border_runs[-1][1].same_border(style):
            border_runs.append((span, style))
        elif border_runs:
            if len(border_runs) > 1:
                common_borders.append(border_runs)
            border_runs = []

    for border_run in common_borders:
        spans = []
        bs = {}
        for span, style in border_run:
            style.get_border_css(bs)
            style.clear_border_css()
            spans.append(span)
        if bs:
            cls = self.styles.register(bs, 'text_border')
            wrapper = self.wrap_elems(spans, SPAN())
            wrapper.set('class', cls)

    if not dest.text and len(dest) == 0 and not style.has_visible_border():
        # Empty paragraph add a non-breaking space so that it is rendered
        # by WebKit
        dest.text = NBSP

    # If the last element in a block is a <br> the <br> is not rendered in
    # HTML, unless it is followed by a trailing space. Word, on the other
    # hand inserts a blank line for trailing <br>s.
    if len(dest) > 0 and not dest[-1].tail:
        if dest[-1].tag == 'br':
            dest[-1].tail = NBSP
        elif len(dest[-1]) > 0 and dest[-1][-1].tag == 'br' and not dest[-1][-1].tail:
            dest[-1][-1].tail = NBSP
    return dest
self.ids = [] self.accept() def keyPressEvent(self, ev): if ev.key() in (Qt.Key.Key_Enter, Qt.Key.Key_Return): ev.accept() return return QDialog.keyPressEvent(self, ev) if __name__ == '__main__': from calibre.gui2 import Application from calibre.library import db app = Application([]) db = db() ids = sorted(db.all_ids(), reverse=True) ids = tuple(zip(ids[0::2], ids[1::2])) gm = partial(db.get_metadata, index_is_id=True, get_cover=True, cover_as_data=True) get_metadata = lambda x: list(map(gm, ids[x])) d = CompareMany(list(range(len(ids))), get_metadata, db.field_metadata, db=db) if d.exec_() == QDialog.DialogCode.Accepted: for changed, mi in itervalues(d.accepted): if changed and mi is not None: print(mi)
def restore_defaults(self):
    # Reset every registered settings widget back to its default value.
    for s in itervalues(self.settings):
        default = self.default_value(s.name)
        s.setter(s.widget, default)
def books(self, oncard=None, end_session=True):
    '''
    Return the list of e-books on the device (or the specified card).

    Reads the on-device metadata cache, scans the configured e-book
    directories for added/changed files, drops cache entries for files no
    longer present, and syncs the booklists back to the device if anything
    changed.

    :param oncard: None for main memory, 'carda'/'cardb' for the cards
    :return: a booklist (empty when the requested card is not present)
    '''
    from calibre.ebooks.metadata.meta import path_to_ext

    debug_print('USBMS: Fetching list of books from device. Device=', self.__class__.__name__, 'oncard=', oncard)

    dummy_bl = self.booklist_class(None, None, None)

    # Requested card not present (or unknown card id): return an empty list.
    if oncard == 'carda' and not self._card_a_prefix:
        self.report_progress(1.0, _('Getting list of books on device...'))
        return dummy_bl
    elif oncard == 'cardb' and not self._card_b_prefix:
        self.report_progress(1.0, _('Getting list of books on device...'))
        return dummy_bl
    elif oncard and oncard != 'carda' and oncard != 'cardb':
        self.report_progress(1.0, _('Getting list of books on device...'))
        return dummy_bl

    prefix = self._card_a_prefix if oncard == 'carda' else \
        self._card_b_prefix if oncard == 'cardb' \
        else self._main_prefix
    ebook_dirs = self.get_carda_ebook_dir() if oncard == 'carda' else \
        self.EBOOK_DIR_CARD_B if oncard == 'cardb' else \
        self.get_main_ebook_dir()

    debug_print('USBMS: dirs are:', prefix, ebook_dirs)

    # get the metadata cache
    bl = self.booklist_class(oncard, prefix, self.settings)
    need_sync = self.parse_metadata_cache(bl, prefix, self.METADATA_CACHE)

    # make a dict cache of paths so the lookup in the loop below is faster.
    # Values are booklist indices; an entry is set to None once the
    # corresponding file has been seen on the filesystem.
    bl_cache = {}
    for idx, b in enumerate(bl):
        bl_cache[b.lpath] = idx

    all_formats = self.formats_to_scan_for()

    def update_booklist(filename, path, prefix):
        # Add *filename* to the booklist (or refresh its metadata if already
        # known). Returns True if the booklist changed.
        changed = False
        if path_to_ext(filename) in all_formats and self.is_allowed_book_file(filename, path, prefix):
            try:
                lpath = os.path.join(path, filename).partition(self.normalize_path(prefix))[2]
                if lpath.startswith(os.sep):
                    lpath = lpath[len(os.sep):]
                lpath = lpath.replace('\\', '/')
                idx = bl_cache.get(lpath, None)
                if idx is not None:
                    # Known book: mark as seen and refresh metadata.
                    bl_cache[lpath] = None
                    if self.update_metadata_item(bl[idx]):
                        changed = True
                else:
                    if bl.add_book(self.book_from_path(prefix, lpath), replace_metadata=False):
                        changed = True
            except:  # Probably a filename encoding error
                import traceback
                traceback.print_exc()
        return changed

    if isinstance(ebook_dirs, string_or_bytes):
        ebook_dirs = [ebook_dirs]
    for ebook_dir in ebook_dirs:
        ebook_dir = self.path_to_unicode(ebook_dir)
        if self.SCAN_FROM_ROOT:
            ebook_dir = self.normalize_path(prefix)
        else:
            ebook_dir = self.normalize_path(
                os.path.join(prefix, *(ebook_dir.split('/'))) if ebook_dir else prefix)
        debug_print('USBMS: scan from root', self.SCAN_FROM_ROOT, ebook_dir)
        if not os.path.exists(ebook_dir):
            continue
        # Get all books in the ebook_dir directory
        if self.SUPPORTS_SUB_DIRS or self.SUPPORTS_SUB_DIRS_FOR_SCAN:
            # build a list of files to check, so we can accurately report progress
            flist = []
            for path, dirs, files in safe_walk(ebook_dir):
                for filename in files:
                    if filename != self.METADATA_CACHE:
                        flist.append({
                            'filename': self.path_to_unicode(filename),
                            'path': self.path_to_unicode(path)
                        })
            for i, f in enumerate(flist):
                self.report_progress(
                    i / float(len(flist)),
                    _('Getting list of books on device...'))
                changed = update_booklist(f['filename'], f['path'], prefix)
                if changed:
                    need_sync = True
        else:
            paths = os.listdir(ebook_dir)
            for i, filename in enumerate(paths):
                self.report_progress(
                    (i + 1) / float(len(paths)),
                    _('Getting list of books on device...'))
                changed = update_booklist(self.path_to_unicode(filename), ebook_dir, prefix)
                if changed:
                    need_sync = True

    # Remove books that are no longer in the filesystem. Cache contains
    # indices into the booklist for books not found in the filesystem, None
    # otherwise. Filter out the None sentinels BEFORE sorting: on Python 3,
    # sorting a mix of ints and None raises TypeError. Do the deletions in
    # reverse index order so remaining indices stay valid.
    stale = (idx for idx in itervalues(bl_cache) if idx is not None)
    for idx in sorted(stale, reverse=True):
        need_sync = True
        del bl[idx]

    debug_print(
        'USBMS: count found in cache: %d, count of files in metadata: %d, need_sync: %s'
        % (len(bl_cache), len(bl), need_sync))
    if need_sync:  # self.count_found_in_bl != len(bl) or need_sync:
        if oncard == 'cardb':
            self.sync_booklists((None, None, bl))
        elif oncard == 'carda':
            self.sync_booklists((None, bl, None))
        else:
            self.sync_booklists((bl, None, None))

    self.report_progress(1.0, _('Getting list of books on device...'))
    debug_print('USBMS: Finished fetching list of books from device. oncard=', oncard)
    return bl
def __iter__(self):
    # Iterating the router yields each registered route object.
    yield from itervalues(self.routes)
def run(self, opts):
    '''
    Generate the static resource files shipped with calibre: the script
    name map, the builtin recipes XML/zip, shell-completion data for
    ebook-convert, and the JSON dumps of template/editor functions and
    user-manual translation statistics. Files are only rebuilt when their
    sources are newer than the existing output.
    '''
    from calibre.utils.serialize import msgpack_dumps
    # Map every installed script name to its type (console or gui).
    scripts = {}
    for x in ('console', 'gui'):
        for name in basenames[x]:
            if name in ('calibre-complete', 'calibre_postinstall'):
                continue
            scripts[name] = x

    dest = self.j(self.RESOURCES, 'scripts.calibre_msgpack')
    if self.newer(dest, self.j(self.SRC, 'calibre', 'linux.py')):
        self.info('\tCreating ' + self.b(dest))
        with open(dest, 'wb') as f:
            f.write(msgpack_dumps(scripts))

    from calibre.web.feeds.recipes.collection import \
        serialize_builtin_recipes, iterate_over_builtin_recipe_files

    files = [x[1] for x in iterate_over_builtin_recipe_files()]

    # Serialize all builtin recipes into a single XML file.
    dest = self.j(self.RESOURCES, 'builtin_recipes.xml')
    if self.newer(dest, files):
        self.info('\tCreating builtin_recipes.xml')
        xml = serialize_builtin_recipes()
        with open(dest, 'wb') as f:
            f.write(xml)

    # Zip up the recipes along with their icons.
    recipe_icon_dir = self.a(
        self.j(self.RESOURCES, '..', 'recipes', 'icons'))
    dest = os.path.splitext(dest)[0] + '.zip'
    files += glob.glob(self.j(recipe_icon_dir, '*.png'))
    if self.newer(dest, files):
        self.info('\tCreating builtin_recipes.zip')
        with zipfile.ZipFile(dest, 'w', zipfile.ZIP_STORED) as zf:
            for n in sorted(files, key=self.b):
                with open(n, 'rb') as f:
                    zf.writestr(self.b(n), f.read())

    # Shell completion data for ebook-convert: the valid option names for
    # every (input format, output format) combination.
    dest = self.j(self.RESOURCES, 'ebook-convert-complete.calibre_msgpack')
    files = []
    for x in os.walk(self.j(self.SRC, 'calibre')):
        for f in x[-1]:
            if f.endswith('.py'):
                files.append(self.j(x[0], f))
    if self.newer(dest, files):
        self.info('\tCreating ' + self.b(dest))
        complete = {}
        from calibre.ebooks.conversion.plumber import supported_input_formats
        complete['input_fmts'] = set(supported_input_formats())
        from calibre.web.feeds.recipes.collection import get_builtin_recipe_titles
        complete['input_recipes'] = [
            t + '.recipe ' for t in get_builtin_recipe_titles()
        ]
        from calibre.customize.ui import available_output_formats
        complete['output'] = set(available_output_formats())
        from calibre.ebooks.conversion.cli import create_option_parser
        from calibre.utils.logging import Log
        log = Log()
        # Silence the log while probing the option parsers.
        log.outputs = []
        for inf in supported_input_formats():
            if inf in ('zip', 'rar', 'oebzip'):
                continue
            for ouf in available_output_formats():
                of = ouf if ouf == 'oeb' else 'dummy.' + ouf
                p = create_option_parser(('ec', 'dummy1.' + inf, of, '-h'), log)[0]
                complete[(inf, ouf)] = [
                    x + ' ' for x in get_opts_from_parser(p)
                ]
        with open(dest, 'wb') as f:
            f.write(msgpack_dumps(only_unicode_recursive(complete)))

    # Dump the source of every builtin template function's evaluate()
    # method, for display in the template editor.
    self.info('\tCreating template-functions.json')
    dest = self.j(self.RESOURCES, 'template-functions.json')
    function_dict = {}
    import inspect
    from calibre.utils.formatter_functions import formatter_functions
    for obj in formatter_functions().get_builtins().values():
        eval_func = inspect.getmembers(
            obj, lambda x: inspect.ismethod(x) and x.__name__ == 'evaluate')
        try:
            # Strip one level (4 spaces) of indentation from the source.
            lines = [
                l[4:] for l in inspect.getsourcelines(eval_func[0][1])[0]
            ]
        except:
            continue
        lines = ''.join(lines)
        function_dict[obj.name] = lines
    dump_json(function_dict, dest)

    # Dump the source of the builtin editor search/replace functions.
    self.info('\tCreating editor-functions.json')
    dest = self.j(self.RESOURCES, 'editor-functions.json')
    function_dict = {}
    from calibre.gui2.tweak_book.function_replace import builtin_functions
    for func in builtin_functions():
        try:
            src = ''.join(inspect.getsourcelines(func)[0][1:])
        except Exception:
            continue
        src = src.replace('def ' + func.__name__, 'def replace')
        imports = [
            'from %s import %s' % (x.__module__, x.__name__)
            for x in func.imports
        ]
        if imports:
            src = '\n'.join(imports) + '\n\n' + src
        function_dict[func.name] = src
    dump_json(function_dict, dest)

    # Per-language translation completeness for the user manual.
    self.info('\tCreating user-manual-translation-stats.json')
    d = {}
    for lc, stats in iteritems(
            json.load(
                open(
                    self.j(self.d(self.SRC), 'manual', 'locale', 'completed.json')))):
        total = sum(itervalues(stats))
        d[lc] = stats['translated'] / float(total)
    dump_json(d, self.j(self.RESOURCES, 'user-manual-translation-stats.json'))
def subset_all_fonts(container, font_stats, report): remove = set() total_old = total_new = 0 changed = False for name, mt in iter_subsettable_fonts(container): chars = font_stats.get(name, set()) with container.open(name, 'rb') as f: f.seek(0, os.SEEK_END) total_old += f.tell() if not chars: remove.add(name) report(_('Removed unused font: %s') % name) continue with container.open(name, 'r+b') as f: raw = f.read() try: font_name = get_font_names(raw)[-1] except Exception as e: container.log.warning( 'Corrupted font: %s, ignoring. Error: %s' % (name, as_unicode(e))) continue warnings = [] container.log('Subsetting font: %s' % (font_name or name)) try: nraw, old_sizes, new_sizes = subset(raw, chars, warnings=warnings) except UnsupportedFont as e: container.log.warning( 'Unsupported font: %s, ignoring. Error: %s' % (name, as_unicode(e))) continue for w in warnings: container.log.warn(w) olen = sum(itervalues(old_sizes)) nlen = sum(itervalues(new_sizes)) total_new += len(nraw) if nlen == olen: report(_('The font %s was already subset') % font_name) else: report( _('Decreased the font {0} to {1} of its original size'). format(font_name, ('%.1f%%' % (nlen / olen * 100)))) changed = True f.seek(0), f.truncate(), f.write(nraw) for name in remove: container.remove_item(name) changed = True if remove: for name, mt in iteritems(container.mime_map): if mt in OEB_STYLES: sheet = container.parsed(name) if remove_font_face_rules(container, sheet, remove, name): container.dirty(name) elif mt in OEB_DOCS: for style in XPath('//h:style')(container.parsed(name)): if style.get('type', 'text/css') == 'text/css' and style.text: sheet = container.parse_css(style.text, name) if remove_font_face_rules(container, sheet, remove, name): style.text = css_text(sheet) container.dirty(name) if total_old > 0: report( _('Reduced total font size to %.1f%% of original') % (total_new / total_old * 100)) else: report(_('No embedded fonts found')) return changed
def build(self, container, preserve_state=True):
    # Rebuild the file-list tree widget from the given book container.
    # Top-level items are the fixed CATEGORIES (text, styles, images,
    # fonts, misc); children are the book's files, decorated with emblem
    # icons and tooltips for special roles (cover, OPF, ToC, spine issues).
    if container is None:
        return
    if preserve_state:
        state = self.get_state()
    self.clear()
    self.root = self.invisibleRootItem()
    self.root.setFlags(Qt.ItemIsDragEnabled)
    self.categories = {}
    for category, text, __ in CATEGORIES:
        self.categories[category] = i = QTreeWidgetItem(self.root, 0)
        i.setText(0, text)
        i.setData(0, Qt.DecorationRole, self.top_level_pixmap_cache[category])
        f = i.font(0)
        f.setBold(True)
        i.setFont(0, f)
        i.setData(0, NAME_ROLE, category)
        flags = Qt.ItemIsEnabled
        if category == 'text':
            # Only the text category accepts drops (spine re-ordering)
            flags |= Qt.ItemIsDropEnabled
        i.setFlags(flags)

    # processed: name -> created item; seen: display text -> item
    processed, seen = {}, {}

    cover_page_name = get_cover_page_name(container)
    cover_image_name = get_raster_cover_name(container)
    manifested_names = set()
    for names in itervalues(container.manifest_type_map):
        manifested_names |= set(names)

    def get_category(name, mt):
        # Map a mimetype (and, as a fallback, a file extension) to one of
        # the top-level category keys.
        category = 'misc'
        if mt.startswith('image/'):
            category = 'images'
        elif mt in OEB_FONTS:
            category = 'fonts'
        elif mt in OEB_STYLES:
            category = 'styles'
        elif mt in OEB_DOCS:
            category = 'text'
        ext = name.rpartition('.')[-1].lower()
        if ext in {'ttf', 'otf', 'woff'}:
            # Probably wrong mimetype in the OPF
            category = 'fonts'
        return category

    def set_display_name(name, item):
        # Choose the shortest unambiguous suffix of the path as display
        # text (unless full pathnames are configured), and store a
        # hex-encoded sort key in column 1 for numeric-aware sorting.
        if tprefs['file_list_shows_full_pathname']:
            text = name
        else:
            if name in processed:
                # We have an exact duplicate (can happen if there are
                # duplicates in the spine)
                item.setText(0, processed[name].text(0))
                item.setText(1, processed[name].text(1))
                return
            parts = name.split('/')
            text = parts.pop()
            while text in seen and parts:
                # Extend with parent directories until the text is unique
                text = parts.pop() + '/' + text
        seen[text] = item
        item.setText(0, text)
        item.setText(1, as_hex_unicode(numeric_sort_key(text)))

    def render_emblems(item, emblems):
        # Compose the given emblem icons side by side into a single icon
        # for the item, caching both individual pixmaps and the final
        # composite.
        emblems = tuple(emblems)
        if not emblems:
            return
        icon = self.rendered_emblem_cache.get(emblems, None)
        if icon is None:
            pixmaps = []
            for emblem in emblems:
                pm = self.emblem_cache.get(emblem, None)
                if pm is None:
                    pm = self.emblem_cache[emblem] = QIcon(I(emblem)).pixmap(self.iconSize())
                pixmaps.append(pm)
            num = len(pixmaps)
            w, h = pixmaps[0].width(), pixmaps[0].height()
            if num == 1:
                icon = self.rendered_emblem_cache[emblems] = QIcon(pixmaps[0])
            else:
                # Paint all pixmaps onto one canvas with a 2px gap
                canvas = QPixmap((num * w) + ((num-1)*2), h)
                canvas.setDevicePixelRatio(pixmaps[0].devicePixelRatio())
                canvas.fill(Qt.transparent)
                painter = QPainter(canvas)
                for i, pm in enumerate(pixmaps):
                    painter.drawPixmap(int(i * (w + 2)/canvas.devicePixelRatio()), 0, pm)
                painter.end()
                icon = self.rendered_emblem_cache[emblems] = canvas
        item.setData(0, Qt.DecorationRole, icon)

    cannot_be_renamed = container.names_that_must_not_be_changed
    ncx_mime = guess_type('a.ncx')
    nav_items = frozenset(container.manifest_items_with_property('nav'))

    def create_item(name, linear=None):
        # Create one tree item for the file `name`. `linear` is True/False
        # for spine members (its linear attribute) and None for files not
        # in the spine.
        imt = container.mime_map.get(name, guess_type(name))
        icat = get_category(name, imt)
        category = 'text' if linear is not None else ({'text':'misc'}.get(icat, icat))

        item = QTreeWidgetItem(self.categories['text' if linear is not None else category], 1)
        flags = Qt.ItemIsEnabled | Qt.ItemIsSelectable
        if category == 'text':
            flags |= Qt.ItemIsDragEnabled
        if name not in cannot_be_renamed:
            flags |= Qt.ItemIsEditable
        item.setFlags(flags)
        item.setStatusTip(0, _('Full path: ') + name)
        item.setData(0, NAME_ROLE, name)
        item.setData(0, CATEGORY_ROLE, category)
        item.setData(0, LINEAR_ROLE, bool(linear))
        item.setData(0, MIME_ROLE, imt)
        set_display_name(name, item)
        tooltips = []
        emblems = []
        if name in {cover_page_name, cover_image_name}:
            emblems.append('default_cover.png')
            tooltips.append(_('This file is the cover %s for this book') % (_('image') if name == cover_image_name else _('page')))
        if name in container.opf_name:
            emblems.append('metadata.png')
            tooltips.append(_('This file contains all the metadata and book structure information'))
        if imt == ncx_mime or name in nav_items:
            emblems.append('toc.png')
            tooltips.append(_('This file contains the metadata table of contents'))
        if name not in manifested_names and not container.ok_to_be_unmanifested(name):
            emblems.append('dialog_question.png')
            tooltips.append(_('This file is not listed in the book manifest'))
        if linear is False:
            emblems.append('arrow-down.png')
            tooltips.append(_('This file is marked as non-linear in the spine\nDrag it to the top to make it linear'))
        if linear is None and icat == 'text':
            # Text item outside spine
            emblems.append('dialog_warning.png')
            tooltips.append(_('This file is a text file that is not referenced in the spine'))
        if category == 'text' and name in processed:
            # Duplicate entry in spine
            emblems.append('dialog_error.png')
            tooltips.append(_('This file occurs more than once in the spine'))
        if category == 'fonts' and name.rpartition('.')[-1].lower() in ('ttf', 'otf'):
            fname = self.get_font_family_name(name)
            if fname:
                tooltips.append(fname)
            else:
                emblems.append('dialog_error.png')
                tooltips.append(_('Not a valid font'))

        render_emblems(item, emblems)
        if tooltips:
            item.setData(0, Qt.ToolTipRole, '\n'.join(tooltips))
        return item

    # Spine members first (they define duplicates), then everything else
    for name, linear in container.spine_names:
        processed[name] = create_item(name, linear=linear)

    for name in container.name_path_map:
        if name in processed:
            continue
        processed[name] = create_item(name)

    for name, c in iteritems(self.categories):
        c.setExpanded(True)
        if name != 'text':
            # Text keeps spine order; other categories sort by the
            # hex-encoded sort key in column 1
            c.sortChildren(1, Qt.AscendingOrder)

    if preserve_state:
        self.set_state(state)

    if self.current_edited_name:
        item = self.item_from_name(self.current_edited_name)
        if item is not None:
            self.mark_item_as_current(item)
def all_lang_names(): ans = getattr(all_lang_names, 'ans', None) if ans is None: ans = all_lang_names.ans = tuple( sorted(itervalues(lang_map_for_ui()), key=numeric_sort_key)) return ans
def toolbar_floated(self, floating): if not floating: self.save_state() for ed in itervalues(editors): if ed is not self: ed.restore_state()
def close(self): with self: for db in itervalues(self.loaded_dbs): getattr(db, 'close', lambda: None)() self.lmap, self.loaded_dbs = OrderedDict(), {}
def get_collections(self, collection_attributes):
    # Build the device collections map for this book list (SONY readers).
    # collection_attributes is a list of metadata attribute names, plus
    # special prefixed entries: 'aba:' (all-by-author collection name),
    # 'abt:' (all-by-title) and 'abs:' (all-by-something, with a sort rule).
    # Returns {collection_name: [book, ...]} with books sorted per rules.
    from calibre.devices.usbms.driver import debug_print
    from calibre.utils.config import device_prefs
    debug_print('Starting get_collections:', device_prefs['manage_device_metadata'])
    debug_print('Renaming rules:', tweaks['sony_collection_renaming_rules'])
    debug_print('Formatting template:', tweaks['sony_collection_name_template'])
    debug_print('Sorting rules:', tweaks['sony_collection_sorting_rules'])

    # Complexity: we can use renaming rules only when using automatic
    # management. Otherwise we don't always have the metadata to make the
    # right decisions
    use_renaming_rules = device_prefs['manage_device_metadata'] == 'on_connect'

    collections = {}

    # get the special collection names
    all_by_author = ''
    all_by_title = ''
    ca = []
    all_by_something = []
    for c in collection_attributes:
        if c.startswith('aba:') and c[4:].strip():
            all_by_author = c[4:].strip()
        elif c.startswith('abt:') and c[4:].strip():
            all_by_title = c[4:].strip()
        elif c.startswith('abs:') and c[4:].strip():
            name = c[4:].strip()
            sby = self.in_category_sort_rules(name)
            if sby is None:
                sby = name
            if name and sby:
                all_by_something.append((name, sby))
        else:
            ca.append(c.lower())
    collection_attributes = ca

    for book in self:
        tsval = book.get('_pb_title_sort',
                         book.get('title_sort', book.get('title', 'zzzz')))
        asval = book.get('_pb_author_sort', book.get('author_sort', ''))
        # Make sure we can identify this book via the lpath
        lpath = getattr(book, 'lpath', None)
        if lpath is None:
            continue
        # Decide how we will build the collections. The default: leave the
        # book in all existing collections. Do not add any new ones.
        attrs = ['device_collections']
        if getattr(book, '_new_book', False):
            if device_prefs['manage_device_metadata'] == 'manual':
                # Ensure that the book is in all the book's existing
                # collections plus all metadata collections
                attrs += collection_attributes
            else:
                # For new books, both 'on_send' and 'on_connect' do the same
                # thing. The book's existing collections are ignored. Put
                # the book in collections defined by its metadata.
                attrs = collection_attributes
        elif device_prefs['manage_device_metadata'] == 'on_connect':
            # For existing books, modify the collections only if the user
            # specified 'on_connect'
            attrs = collection_attributes
        for attr in attrs:
            attr = attr.strip()
            # If attr is device_collections, then we cannot use
            # format_field, because we don't know the fields where the
            # values came from.
            if attr == 'device_collections':
                doing_dc = True
                val = book.device_collections  # is a list
            else:
                doing_dc = False
                ign, val, orig_val, fm = book.format_field_extended(attr)

            if not val:
                continue
            if isbytestring(val):
                val = val.decode(preferred_encoding, 'replace')
            # Normalize val to a list of category values
            if isinstance(val, (list, tuple)):
                val = list(val)
            elif fm['datatype'] == 'series':
                val = [orig_val]
            elif fm['datatype'] == 'text' and fm['is_multiple']:
                val = orig_val
            elif fm['datatype'] == 'composite' and fm['is_multiple']:
                val = [
                    v.strip()
                    for v in val.split(fm['is_multiple']['ui_to_list'])
                ]
            else:
                val = [val]

            sort_attr = self.in_category_sort_rules(attr)
            for category in val:
                is_series = False
                if doing_dc:
                    # Attempt to determine if this value is a series by
                    # comparing it to the series name.
                    if category == book.series:
                        is_series = True
                elif fm['is_custom']:  # is a custom field
                    # Bracketed values are explicitly excluded from
                    # collections
                    if fm['datatype'] == 'text' and len(category) > 1 and \
                            category[0] == '[' and category[-1] == ']':
                        continue
                    if fm['datatype'] == 'series':
                        is_series = True
                else:  # is a standard field
                    if attr == 'tags' and len(category) > 1 and \
                            category[0] == '[' and category[-1] == ']':
                        continue
                    if attr == 'series' or \
                            ('series' in collection_attributes and
                             book.get('series', None) == category):
                        is_series = True
                if use_renaming_rules:
                    cat_name = self.compute_category_name(
                        attr, category, fm)
                else:
                    cat_name = category

                if cat_name not in collections:
                    collections[cat_name] = {}
                # Values are (book, sort_value, title_sort) tuples; the
                # second element drives none_cmp below
                if use_renaming_rules and sort_attr:
                    sort_val = book.get(sort_attr, None)
                    collections[cat_name][lpath] = (book, sort_val, tsval)
                elif is_series:
                    if doing_dc:
                        collections[cat_name][lpath] = \
                            (book, book.get('series_index', sys.maxsize), tsval)
                    else:
                        collections[cat_name][lpath] = \
                            (book, book.get(attr+'_index', sys.maxsize), tsval)
                else:
                    if lpath not in collections[cat_name]:
                        collections[cat_name][lpath] = (book, tsval, tsval)

        # All books by author
        if all_by_author:
            if all_by_author not in collections:
                collections[all_by_author] = {}
            collections[all_by_author][lpath] = (book, asval, tsval)
        # All books by title
        if all_by_title:
            if all_by_title not in collections:
                collections[all_by_title] = {}
            collections[all_by_title][lpath] = (book, tsval, asval)
        for (n, sb) in all_by_something:
            if n not in collections:
                collections[n] = {}
            collections[n][lpath] = (book, book.get(sb, ''), tsval)

    # Sort collections
    result = {}

    def none_cmp(xx, yy):
        # Compare (book, sort_value, title_sort) tuples, pushing None sort
        # values to the end and falling back to the title sort on ties.
        x = xx[1]
        y = yy[1]
        if x is None and y is None:
            # No sort_key needed here, because defaults are ascii
            return cmp(xx[2], yy[2])
        if x is None:
            return 1
        if y is None:
            return -1
        if isinstance(x, string_or_bytes) and isinstance(
                y, string_or_bytes):
            x, y = sort_key(force_unicode(x)), sort_key(force_unicode(y))
        try:
            c = cmp(x, y)
        except TypeError:
            c = 0
        if c != 0:
            return c
        # same as above -- no sort_key needed here
        try:
            return cmp(xx[2], yy[2])
        except TypeError:
            return 0

    for category, lpaths in iteritems(collections):
        books = sorted(itervalues(lpaths), key=cmp_to_key(none_cmp))
        result[category] = [x[0] for x in books]

    return result
def __init__(self, open_at=None, continue_reading=None):
    # Set up the e-book viewer main window: dock widgets (ToC, lookup,
    # bookmarks, ...), the central WebView and all of its signal wiring.
    # open_at: optional position to open the pending book at.
    # continue_reading: if truthy, immediately reopen the last read book.
    MainWindow.__init__(self, None)
    connect_lambda(self.book_preparation_started, self, lambda self: self.loading_overlay(_(
        'Preparing book for first read, please wait')), type=Qt.QueuedConnection)
    self.maximized_at_last_fullscreen = False
    self.pending_open_at = open_at
    self.base_window_title = _('E-book viewer')
    self.setWindowTitle(self.base_window_title)
    self.in_full_screen_mode = None
    self.image_popup = ImagePopup(self)
    try:
        os.makedirs(annotations_dir)
    except EnvironmentError:
        # Directory already exists (or is not creatable); best effort
        pass
    self.current_book_data = {}
    self.book_prepared.connect(self.load_finished, type=Qt.QueuedConnection)
    self.dock_defs = dock_defs()

    def create_dock(title, name, area, areas=Qt.LeftDockWidgetArea | Qt.RightDockWidgetArea):
        # Create a hidden QDockWidget and register it with the window
        ans = QDockWidget(title, self)
        ans.setObjectName(name)
        self.addDockWidget(area, ans)
        ans.setVisible(False)
        return ans

    # Create one dock per definition; e.g. the 'toc-dock' def becomes
    # self.toc_dock (name is truncated at the first '-')
    for dock_def in itervalues(self.dock_defs):
        setattr(self, '{}_dock'.format(dock_def.name.partition('-')[0]), create_dock(
            dock_def.title, dock_def.name, dock_def.initial_area, dock_def.allowed_areas))

    # Table of contents dock: tree view plus a search box
    self.toc_container = w = QWidget(self)
    w.l = QVBoxLayout(w)
    self.toc = TOCView(w)
    self.toc.clicked[QModelIndex].connect(self.toc_clicked)
    self.toc.searched.connect(self.toc_searched)
    self.toc_search = TOCSearch(self.toc, parent=w)
    w.l.addWidget(self.toc), w.l.addWidget(self.toc_search), w.l.setContentsMargins(0, 0, 0, 0)
    self.toc_dock.setWidget(w)

    # Dictionary lookup dock
    self.lookup_widget = w = Lookup(self)
    self.lookup_dock.visibilityChanged.connect(self.lookup_widget.visibility_changed)
    self.lookup_dock.setWidget(w)

    # Bookmarks dock
    self.bookmarks_widget = w = BookmarkManager(self)
    connect_lambda(
        w.create_requested, self,
        lambda self: self.web_view.get_current_cfi(self.bookmarks_widget.create_new_bookmark))
    w.edited.connect(self.bookmarks_edited)
    w.activated.connect(self.bookmark_activated)
    w.toggle_requested.connect(self.toggle_bookmarks)
    self.bookmarks_dock.setWidget(w)

    # Central web view and its signal wiring
    self.web_view = WebView(self)
    self.web_view.cfi_changed.connect(self.cfi_changed)
    self.web_view.reload_book.connect(self.reload_book)
    self.web_view.toggle_toc.connect(self.toggle_toc)
    self.web_view.toggle_bookmarks.connect(self.toggle_bookmarks)
    self.web_view.toggle_inspector.connect(self.toggle_inspector)
    self.web_view.toggle_lookup.connect(self.toggle_lookup)
    self.web_view.quit.connect(self.quit)
    self.web_view.update_current_toc_nodes.connect(self.toc.update_current_toc_nodes)
    self.web_view.toggle_full_screen.connect(self.toggle_full_screen)
    self.web_view.ask_for_open.connect(self.ask_for_open, type=Qt.QueuedConnection)
    self.web_view.selection_changed.connect(self.lookup_widget.selected_text_changed, type=Qt.QueuedConnection)
    self.web_view.view_image.connect(self.view_image, type=Qt.QueuedConnection)
    self.web_view.copy_image.connect(self.copy_image, type=Qt.QueuedConnection)
    self.web_view.show_loading_message.connect(self.show_loading_message)
    self.web_view.show_error.connect(self.show_error)
    self.web_view.print_book.connect(self.print_book, type=Qt.QueuedConnection)
    self.setCentralWidget(self.web_view)
    self.loading_overlay = LoadingOverlay(self)
    self.restore_state()
    if continue_reading:
        self.continue_reading()
def test_remove_books(self):  # {{{
    'Test removal of books'
    cl = self.cloned_library
    cache = self.init_cache()
    af, ae = self.assertFalse, self.assertEqual
    authors = cache.fields['authors'].table

    # Delete a single book, with no formats and check cleaning
    self.assertIn('Unknown', set(itervalues(authors.id_map)))
    olen = len(authors.id_map)
    item_id = {v: k for k, v in iteritems(authors.id_map)}['Unknown']
    cache.remove_books((3, ))
    # Verify both the live cache and a freshly initialized one
    for c in (cache, self.init_cache()):
        table = c.fields['authors'].table
        self.assertNotIn(3, c.all_book_ids())
        self.assertNotIn('Unknown', set(itervalues(table.id_map)))
        self.assertNotIn(item_id, table.asort_map)
        self.assertNotIn(item_id, table.alink_map)
        ae(len(table.id_map), olen - 1)

    # Check that files are removed
    fmtpath = cache.format_abspath(1, 'FMT1')
    bookpath = os.path.dirname(fmtpath)
    authorpath = os.path.dirname(bookpath)
    # Junk OS metadata files should not prevent directory removal
    os.mkdir(os.path.join(authorpath, '.DS_Store'))
    open(os.path.join(authorpath, 'Thumbs.db'), 'wb').close()
    item_id = {
        v: k
        for k, v in iteritems(cache.fields['#series'].table.id_map)
    }['My Series Two']
    cache.remove_books((1, ), permanent=True)
    for x in (fmtpath, bookpath, authorpath):
        af(os.path.exists(x), 'The file %s exists, when it should not' % x)
    for c in (cache, self.init_cache()):
        table = c.fields['authors'].table
        self.assertNotIn(1, c.all_book_ids())
        self.assertNotIn('Author Two', set(itervalues(table.id_map)))
        self.assertNotIn(6, set(itervalues(c.fields['rating'].table.id_map)))
        self.assertIn('A Series One', set(itervalues(c.fields['series'].table.id_map)))
        self.assertNotIn('My Series Two', set(itervalues(c.fields['#series'].table.id_map)))
        self.assertNotIn(item_id, c.fields['#series'].table.col_book_map)
        self.assertNotIn(1, c.fields['#series'].table.book_col_map)

    # Test emptying the db
    cache.remove_books(cache.all_book_ids(), permanent=True)
    for f in ('authors', 'series', '#series', 'tags'):
        table = cache.fields[f].table
        self.assertFalse(table.id_map)
        self.assertFalse(table.book_col_map)
        self.assertFalse(table.col_book_map)

    # Test the delete service
    from calibre.db.delete_service import delete_service
    cache = self.init_cache(cl)
    # Check that files are removed
    fmtpath = cache.format_abspath(1, 'FMT1')
    bookpath = os.path.dirname(fmtpath)
    authorpath = os.path.dirname(bookpath)
    item_id = {
        v: k
        for k, v in iteritems(cache.fields['#series'].table.id_map)
    }['My Series Two']
    cache.remove_books((1, ))
    # Non-permanent removal goes through the async delete service
    delete_service().wait()
    for x in (fmtpath, bookpath, authorpath):
        af(os.path.exists(x), 'The file %s exists, when it should not' % x)
def generate_css(self, dest_dir, docx, notes_nopb, nosupsub): ef = self.fonts.embed_fonts(dest_dir, docx) s = '''\ body { font-family: %s; font-size: %s; %s } /* In word all paragraphs have zero margins unless explicitly specified in a style */ p, h1, h2, h3, h4, h5, h6, div { margin: 0; padding: 0 } /* In word headings only have bold font if explicitly specified, similarly the font size is the body font size, unless explicitly set. */ h1, h2, h3, h4, h5, h6 { font-weight: normal; font-size: 1rem } /* Setting padding-left to zero breaks rendering of lists, so we only set the other values to zero and leave padding-left for the user-agent */ ul, ol { margin: 0; padding-top: 0; padding-bottom: 0; padding-right: 0 } /* The word hyperlink styling will set text-decoration to underline if needed */ a { text-decoration: none } sup.noteref a { text-decoration: none } h1.notes-header { page-break-before: always } dl.footnote dt { font-size: large } dl.footnote dt a { text-decoration: none } ''' if not notes_nopb: s += '''\ dl.footnote { page-break-after: always } dl.footnote:last-of-type { page-break-after: avoid } ''' s = s + '''\ span.tab { white-space: pre } p.index-entry { text-indent: 0pt; } p.index-entry a:visited { color: blue } p.index-entry a:hover { color: red } ''' if nosupsub: s = s + '''\ sup { vertical-align: top } sub { vertical-align: bottom } ''' body_color = '' if self.body_color.lower() not in ('currentcolor', 'inherit'): body_color = 'color: {};'.format(self.body_color) prefix = textwrap.dedent(s) % (self.body_font_family, self.body_font_size, body_color) if ef: prefix = ef + '\n' + prefix ans = [] for (cls, css) in sorted(itervalues(self.classes), key=lambda x: x[0]): b = ('\t%s: %s;' % (k, v) for k, v in iteritems(css)) b = '\n'.join(b) ans.append('.%s {\n%s\n}\n' % (cls, b.rstrip(';'))) return prefix + '\n' + '\n'.join(ans)
def many_one(book_id_val_map, db, field, allow_case_change, *args):
    # Writer for many-to-one fields (e.g. series, publisher): each book has
    # at most one value, each value can belong to many books. Updates both
    # the in-memory table maps and the database link table, and garbage
    # collects items that no longer belong to any book. Returns the set of
    # dirtied book ids.
    dirtied = set()
    m = field.metadata
    table = field.table
    dt = m['datatype']
    # Custom series columns store the series index in an 'extra' column
    is_custom_series = dt == 'series' and table.name.startswith('#')

    # Map values to db ids, including any new values
    kmap = safe_lower if dt in {'text', 'series'} else lambda x: x
    rid_map = {
        kmap(item): item_id
        for item_id, item in iteritems(table.id_map)
    }
    if len(rid_map) != len(table.id_map):
        # table has some entries that differ only in case, fix it
        table.fix_case_duplicates(db)
        rid_map = {
            kmap(item): item_id
            for item_id, item in iteritems(table.id_map)
        }
    val_map = {None: None}
    case_changes = {}
    for val in itervalues(book_id_val_map):
        if val is not None:
            get_db_id(val, db, m, table, kmap, rid_map, allow_case_change,
                      case_changes, val_map)

    if case_changes:
        change_case(case_changes, dirtied, db, table, m)

    book_id_item_id_map = {
        k: val_map[v]
        for k, v in iteritems(book_id_val_map)
    }

    # Ignore those items whose value is the same as the current value
    book_id_item_id_map = {
        k: v
        for k, v in iteritems(book_id_item_id_map)
        if v != table.book_col_map.get(k, None)
    }
    dirtied |= set(book_id_item_id_map)

    # Update the book->col and col->book maps
    deleted = set()
    updated = {}
    for book_id, item_id in iteritems(book_id_item_id_map):
        old_item_id = table.book_col_map.get(book_id, None)
        if old_item_id is not None:
            table.col_book_map[old_item_id].discard(book_id)
        if item_id is None:
            # Value removed from this book
            table.book_col_map.pop(book_id, None)
            deleted.add(book_id)
        else:
            table.book_col_map[book_id] = item_id
            table.col_book_map[item_id].add(book_id)
            updated[book_id] = item_id

    # Update the db link table
    if deleted:
        db.executemany('DELETE FROM %s WHERE book=?' % table.link_table,
                       ((k, ) for k in deleted))
    if updated:
        # Delete-then-insert; custom series also get a default extra of 1.0
        sql = (
            'DELETE FROM {0} WHERE book=?; INSERT INTO {0}(book,{1},extra) VALUES(?, ?, 1.0)'
            if is_custom_series else
            'DELETE FROM {0} WHERE book=?; INSERT INTO {0}(book,{1}) VALUES(?, ?)'
        )
        db.executemany(sql.format(table.link_table, m['link_column']),
                       ((book_id, book_id, item_id)
                        for book_id, item_id in iteritems(updated)))

    # Remove no longer used items
    remove = {
        item_id
        for item_id in table.id_map
        if not table.col_book_map.get(item_id, False)
    }
    if remove:
        db.executemany('DELETE FROM %s WHERE id=?' % m['table'],
                       ((item_id, ) for item_id in remove))
        for item_id in remove:
            del table.id_map[item_id]
            table.col_book_map.pop(item_id, None)

    return dirtied
def merge_metadata_results(self, merge_on_identifiers=False): ''' Merge results with identical title and authors or an identical identifier ''' # First title/author groups = {} for result in self.results: title = lower(result.title if result.title else '') key = (title, tuple(lower(x) for x in result.authors)) if key not in groups: groups[key] = [] groups[key].append(result) if len(groups) != len(self.results): self.results = [] for rgroup in itervalues(groups): rel = [r.average_source_relevance for r in rgroup] if len(rgroup) > 1: result = self.merge(rgroup, None, do_asr=False) result.average_source_relevance = sum(rel) / len(rel) else: result = rgroup[0] self.results.append(result) if merge_on_identifiers: # Now identifiers groups, empty = {}, [] for result in self.results: key = set() for typ, val in iteritems(result.identifiers): if typ and val: key.add((typ, val)) if key: key = frozenset(key) match = None for candidate in list(groups): if candidate.intersection(key): # We have at least one identifier in common match = candidate.union(key) results = groups.pop(candidate) results.append(result) groups[match] = results break if match is None: groups[key] = [result] else: empty.append(result) if len(groups) != len(self.results): self.results = [] for rgroup in itervalues(groups): rel = [r.average_source_relevance for r in rgroup] if len(rgroup) > 1: result = self.merge(rgroup, None, do_asr=False) result.average_source_relevance = sum(rel) / len(rel) elif rgroup: result = rgroup[0] self.results.append(result) if empty: self.results.extend(empty) self.results.sort(key=attrgetter('average_source_relevance'))
def many_many(book_id_val_map, db, field, allow_case_change, *args):
    # Writer for many-to-many fields (e.g. tags, authors): each book maps
    # to a tuple of item ids. Mirrors many_one() but handles multiple
    # values per book and the extra author sort/link bookkeeping. Returns
    # the set of dirtied book ids.
    dirtied = set()
    m = field.metadata
    table = field.table
    dt = m['datatype']
    is_authors = field.name == 'authors'

    # Map values to db ids, including any new values
    kmap = safe_lower if dt == 'text' else lambda x: x
    rid_map = {
        kmap(item): item_id
        for item_id, item in iteritems(table.id_map)
    }
    if len(rid_map) != len(table.id_map):
        # table has some entries that differ only in case, fix it
        table.fix_case_duplicates(db)
        rid_map = {
            kmap(item): item_id
            for item_id, item in iteritems(table.id_map)
        }
    val_map = {}
    case_changes = {}
    # Remove duplicate values (case-insensitively for text) per book
    book_id_val_map = {
        k: uniq(vals, kmap)
        for k, vals in iteritems(book_id_val_map)
    }
    for vals in itervalues(book_id_val_map):
        for val in vals:
            get_db_id(val, db, m, table, kmap, rid_map, allow_case_change,
                      case_changes, val_map, is_authors=is_authors)

    if case_changes:
        change_case(case_changes, dirtied, db, table, m,
                    is_authors=is_authors)
        if is_authors:
            for item_id, val in iteritems(case_changes):
                for book_id in table.col_book_map[item_id]:
                    current_sort = field.db_author_sort_for_book(book_id)
                    new_sort = field.author_sort_for_book(book_id)
                    if strcmp(current_sort, new_sort) == 0:
                        # The sort strings differ only by case, update the db
                        # sort
                        field.author_sort_field.writer.set_books(
                            {book_id: new_sort}, db)

    book_id_item_id_map = {
        k: tuple(val_map[v] for v in vals)
        for k, vals in iteritems(book_id_val_map)
    }

    # Ignore those items whose value is the same as the current value
    book_id_item_id_map = {
        k: v
        for k, v in iteritems(book_id_item_id_map)
        if v != table.book_col_map.get(k, None)
    }
    dirtied |= set(book_id_item_id_map)

    # Update the book->col and col->book maps
    deleted = set()
    updated = {}
    for book_id, item_ids in iteritems(book_id_item_id_map):
        old_item_ids = table.book_col_map.get(book_id, None)
        if old_item_ids:
            for old_item_id in old_item_ids:
                table.col_book_map[old_item_id].discard(book_id)
        if item_ids:
            table.book_col_map[book_id] = item_ids
            for item_id in item_ids:
                table.col_book_map[item_id].add(book_id)
            updated[book_id] = item_ids
        else:
            # No values left for this book
            table.book_col_map.pop(book_id, None)
            deleted.add(book_id)

    # Update the db link table
    if deleted:
        db.executemany('DELETE FROM %s WHERE book=?' % table.link_table,
                       ((k, ) for k in deleted))
    if updated:
        # Note: vals is a lazy generator, consumed by the INSERT after the
        # DELETE below has run
        vals = ((book_id, val) for book_id, vals in iteritems(updated)
                for val in vals)
        db.executemany('DELETE FROM %s WHERE book=?' % table.link_table,
                       ((k, ) for k in updated))
        db.executemany(
            'INSERT INTO {0}(book,{1}) VALUES(?, ?)'.format(
                table.link_table, m['link_column']), vals)
        if is_authors:
            # Recompute the author sort for every changed book
            aus_map = {
                book_id: field.author_sort_for_book(book_id)
                for book_id in updated
            }
            field.author_sort_field.writer.set_books(aus_map, db)

    # Remove no longer used items
    remove = {
        item_id
        for item_id in table.id_map
        if not table.col_book_map.get(item_id, False)
    }
    if remove:
        db.executemany('DELETE FROM %s WHERE id=?' % m['table'],
                       ((item_id, ) for item_id in remove))
        for item_id in remove:
            del table.id_map[item_id]
            table.col_book_map.pop(item_id, None)
            if is_authors:
                table.asort_map.pop(item_id, None)
                table.alink_map.pop(item_id, None)

    return dirtied
def all_files(self): return (category.child(i) for category in itervalues(self.categories) for i in range(category.childCount()))