def apply_diff(self, identical_msg, cache, syntax_map, changed_names, renamed_names, removed_names, added_names):
    # Rebuild the diff view from the four change sets, recording every
    # view.add_diff call in self.apply_diff_calls so the same diff can be
    # replayed later (presumably after a context/beautify change — confirm
    # with callers).
    self.view.clear()
    self.apply_diff_calls = calls = []

    def add(args, kwargs):
        # Forward to the view and remember the call for replay.
        self.view.add_diff(*args, **kwargs)
        calls.append((args, kwargs))

    if len(changed_names) + len(renamed_names) + len(removed_names) + len(added_names) < 1:
        info_dialog(self, _('No changes found'), identical_msg, show=True)
        return True
    # Per-name keyword arguments shared by every add_diff call
    kwargs = lambda name: {'context':self.context, 'beautify':self.beautify, 'syntax':syntax_map.get(name, None)}
    if isinstance(changed_names, dict):
        # Mapping of old name -> new name: content changed and file renamed.
        # NOTE: .iteritems() is Python-2 only.
        for name, other_name in sorted(changed_names.iteritems(), key=lambda x:numeric_sort_key(x[0])):
            args = (name, other_name, cache.left(name), cache.right(other_name))
            add(args, kwargs(name))
    else:
        for name in sorted(changed_names, key=numeric_sort_key):
            args = (name, name, cache.left(name), cache.right(name))
            add(args, kwargs(name))
    for name in sorted(added_names, key=numeric_sort_key):
        # Added files have no left-hand content
        args = (_('[%s was added]') % name, name, None, cache.right(name))
        add(args, kwargs(name))
    for name in sorted(removed_names, key=numeric_sort_key):
        # Removed files have no right-hand content
        args = (name, _('[%s was removed]') % name, cache.left(name), None)
        add(args, kwargs(name))
    for name, new_name in sorted(renamed_names.iteritems(), key=lambda x:numeric_sort_key(x[0])):
        # Pure renames: nothing to diff, both sides are None
        args = (name, new_name, None, None)
        add(args, kwargs(name))
def apply_diff(self, identical_msg, cache, syntax_map, changed_names, renamed_names, removed_names, added_names):
    # Rebuild the diff view from the four change sets, recording every
    # view.add_diff call in self.apply_diff_calls so the same diff can be
    # replayed later (presumably after a context/beautify change — confirm
    # with callers).
    self.view.clear()
    self.apply_diff_calls = calls = []

    def add(args, kwargs):
        # Forward to the view and remember the call for replay.
        self.view.add_diff(*args, **kwargs)
        calls.append((args, kwargs))

    if len(changed_names) + len(renamed_names) + len(removed_names) + len(added_names) < 1:
        # Hide the busy indicator while the modal dialog is up, then
        # restore it before returning
        self.busy.setVisible(False)
        info_dialog(self, _('No changes found'), identical_msg, show=True)
        self.busy.setVisible(True)
        return True
    # Per-name keyword arguments shared by every add_diff call
    kwargs = lambda name: {'context':self.context, 'beautify':self.beautify, 'syntax':syntax_map.get(name, None)}
    if isinstance(changed_names, dict):
        # Mapping of old name -> new name: content changed and file renamed.
        # NOTE: .iteritems() is Python-2 only.
        for name, other_name in sorted(changed_names.iteritems(), key=lambda x:numeric_sort_key(x[0])):
            args = (name, other_name, cache.left(name), cache.right(other_name))
            add(args, kwargs(name))
    else:
        for name in sorted(changed_names, key=numeric_sort_key):
            args = (name, name, cache.left(name), cache.right(name))
            add(args, kwargs(name))
    for name in sorted(added_names, key=numeric_sort_key):
        # Added files have no left-hand content
        args = (_('[%s was added]') % name, name, None, cache.right(name))
        add(args, kwargs(name))
    for name in sorted(removed_names, key=numeric_sort_key):
        # Removed files have no right-hand content
        args = (name, _('[%s was removed]') % name, cache.left(name), None)
        add(args, kwargs(name))
    for name, new_name in sorted(renamed_names.iteritems(), key=lambda x:numeric_sort_key(x[0])):
        # Pure renames: nothing to diff, both sides are None
        args = (name, new_name, None, None)
        add(args, kwargs(name))
def serialize(self, styles):
    # Serialize both style families. Within each family the "normal"
    # style is emitted first; all others follow in numeric order of id.
    def family_order(normal):
        return lambda st: (st is not normal, numeric_sort_key(st.id))

    block_normal = self.normal_block_style
    for st in sorted(self.block_styles, key=family_order(block_normal)):
        st.serialize(styles, block_normal)

    text_normal = self.normal_text_style
    for st in sorted(self.text_styles, key=family_order(text_normal)):
        st.serialize(styles, text_normal)
def test_sorting(self):
    " Test the various sorting APIs "
    german = """Sonntag Montag Dienstag Januar Februar März Fuße Fluße Flusse flusse fluße flüße flüsse""".split()
    german_good = (
        """Dienstag Februar flusse Flusse fluße Fluße flüsse flüße Fuße Januar März Montag Sonntag""".split()
    )
    french = """dimanche lundi mardi janvier février mars déjà Meme deja même dejà bpef bœg Boef Mémé bœf boef bnef pêche pèché pêché pêche pêché""".split()
    french_good = """bnef boef Boef bœf bœg bpef deja dejà déjà dimanche février janvier lundi mardi mars Meme Mémé même pèché pêche pêche pêché pêché""".split()  # noqa

    # Test corner cases
    sort_key = icu.sort_key
    s = "\U0001f431"  # astral-plane char: exercises surrogate handling
    self.ae(
        sort_key(s),
        sort_key(s.encode(sys.getdefaultencoding())),
        "UTF-8 encoded object not correctly decoded to generate sort key",
    )
    # NOTE(review): this compares a value with itself, so it can never
    # fail — looks like the left side was meant to be sort_key(...);
    # confirm against upstream.
    self.ae(s.encode("utf-16"), s.encode("utf-16"), "Undecodable bytestring not returned as itself")
    self.ae(b"", sort_key(None))
    self.ae(0, icu.strcmp(None, b""))
    self.ae(0, icu.strcmp(s, s.encode(sys.getdefaultencoding())))

    # Test locales
    with make_collation_func("dsk", "de", func="sort_key") as dsk:
        self.ae(german_good, sorted(german, key=dsk))
    with make_collation_func("dcmp", "de", template="_strcmp_template") as dcmp:
        # Pairwise comparison must agree with sort-key ordering
        for x in german:
            for y in german:
                self.ae(cmp(dsk(x), dsk(y)), dcmp(x, y))
    with make_collation_func("fsk", "fr", func="sort_key") as fsk:
        self.ae(french_good, sorted(french, key=fsk))
    with make_collation_func("fcmp", "fr", template="_strcmp_template") as fcmp:
        for x in french:
            for y in french:
                self.ae(cmp(fsk(x), fsk(y)), fcmp(x, y))
    with make_collation_func("ssk", "es", func="sort_key") as ssk:
        self.assertNotEqual(ssk("peña"), ssk("pena"))
    with make_collation_func("scmp", "es", template="_strcmp_template") as scmp:
        self.assertNotEqual(0, scmp("pena", "peña"))

    # Primary collation ignores case and diacritics.
    # NOTE: .iteritems() is Python-2 only.
    for k, v in {"pèché": "peche", "flüße": "Flusse", "Štepánek": "ŠtepaneK"}.iteritems():
        self.ae(0, icu.primary_strcmp(k, v))

    # Test different types of collation
    self.ae(icu.primary_sort_key("Aä"),
            icu.primary_sort_key("aa"))
    self.assertLess(icu.numeric_sort_key("something 2"), icu.numeric_sort_key("something 11"))
    self.assertLess(icu.case_sensitive_sort_key("A"), icu.case_sensitive_sort_key("a"))
    self.ae(0, icu.strcmp("a", "A"))
    self.ae(cmp("a", "A"), icu.case_sensitive_strcmp("a", "A"))
    self.ae(0, icu.primary_strcmp("ä", "A"))
def setup_export_panel(self):
    # Build the "export libraries" page of the stacked widget: a label
    # plus a multi-select list with one row per known calibre library.
    self.export_panel = w = QWidget(self)
    self.stack.addWidget(w)
    w.l = l = QVBoxLayout(w)
    w.la = la = QLabel(
        _('Select which libraries you want to export below'))
    la.setWordWrap(True), l.addWidget(la)
    self.lib_list = ll = QListWidget(self)
    l.addWidget(ll)
    ll.setSelectionMode(QAbstractItemView.SelectionMode.ExtendedSelection)
    ll.setStyleSheet('QListView::item { padding: 5px }')
    ll.setAlternatingRowColors(True)
    lpaths = all_known_libraries()
    # One row per library, sorted naturally by folder name; all rows
    # start out selected
    for lpath in sorted(
            lpaths, key=lambda x: numeric_sort_key(os.path.basename(x))):
        i = QListWidgetItem(self.export_lib_text(lpath), ll)
        i.setData(Qt.ItemDataRole.UserRole, lpath)
        i.setData(Qt.ItemDataRole.UserRole + 1, lpaths[lpath])
        i.setIcon(QIcon(I('lt.png')))
        i.setSelected(True)
    # Refresh a row's text once its disk usage is known; queued
    # connection — presumably the size is computed off the GUI thread,
    # confirm with the emitter
    self.update_disk_usage.connect(
        (lambda i, sz: self.lib_list.item(i).setText(
            self.export_lib_text(
                self.lib_list.item(i).data(Qt.ItemDataRole.UserRole), sz))
        ), type=Qt.ConnectionType.QueuedConnection)
def flatten_spine(self):
    # Flatten CSS for the whole spine: every computed style becomes a
    # class in a single shared stylesheet that each spine item then links.
    names = defaultdict(int)
    styles, pseudo_styles = {}, defaultdict(dict)
    for item in self.items:
        html = item.data
        stylizer = self.stylizers[item]
        if self.specializer is not None:
            self.specializer(item, stylizer)
        body = html.find(XHTML('body'))
        fsize = self.context.dest.fbase
        self.flatten_node(body, stylizer, names, styles, pseudo_styles, fsize, item.id)
    # styles maps css-text -> class-name; invert and sort by class name
    items = sorted(((key, val) for (val, key) in iteritems(styles)), key=lambda x: numeric_sort_key(x[0]))
    # :hover must come after link and :active must come after :hover
    psels = sorted(pseudo_styles, key=lambda x: {
        'hover': 1,
        'active': 2
    }.get(x, 0))
    for psel in psels:
        styles = pseudo_styles[psel]
        if not styles:
            continue
        x = sorted(((k + ':' + psel, v) for v, k in iteritems(styles)))
        items.extend(x)

    css = ''.join(".%s {\n%s;\n}\n\n" % (key, val) for key, val in items)
    href = self.replace_css(css)
    global_css = self.collect_global_css()
    for item in self.items:
        stylizer = self.stylizers[item]
        self.flatten_head(item, href, global_css[item])
def find_pages(dir, sort_on_mtime=False, verbose=False):
    '''
    Find valid comic pages in a previously un-archived comic.

    :param dir: Directory in which extracted comic lives
    :param sort_on_mtime: If True sort pages based on their last modified time.
                          Otherwise, sort alphabetically.
    '''
    valid_exts = {'jpeg', 'jpg', 'gif', 'png', 'webp'}
    pages = []
    for dirpath, _dirnames, filenames in os.walk(dir):
        for fname in filenames:
            candidate = os.path.abspath(os.path.join(dirpath, fname))
            # macOS resource-fork folders never contain real pages
            if '__MACOSX' in candidate:
                continue
            lowered = candidate.lower()
            if any(lowered.endswith('.' + ext) for ext in valid_exts):
                pages.append(candidate)
    depths = {p.replace(os.sep, '/').count('/') for p in pages}
    # Use the full path to sort unless the files are in folders of different
    # levels, in which case simply use the filenames.
    if len(depths) > 1:
        def sort_text(p):
            return os.path.basename(p)
    else:
        def sort_text(p):
            return p
    if sort_on_mtime:
        pages.sort(key=lambda p: os.stat(p).st_mtime)
    else:
        pages.sort(key=lambda p: numeric_sort_key(sort_text(p)))
    if verbose:
        prints('Found comic pages...')
        prints('\t' + '\n\t'.join(os.path.relpath(p, dir) for p in pages))
    return pages
def find_pages(dir, sort_on_mtime=False, verbose=False):
    '''
    Find valid comic pages in a previously un-archived comic.

    :param dir: Directory in which extracted comic lives
    :param sort_on_mtime: If True sort pages based on their last modified time.
                          Otherwise, sort alphabetically.
    '''
    image_suffixes = tuple('.' + x for x in ('jpeg', 'jpg', 'gif', 'png', 'webp'))
    pages = []
    for dirpath, _dirnames, filenames in os.walk(dir):
        for fname in filenames:
            full = os.path.join(dirpath, fname)
            # macOS resource-fork folders never contain real pages
            if '__MACOSX' in full:
                continue
            if full.lower().endswith(image_suffixes):
                pages.append(full)
    if sort_on_mtime:
        sort_key_func = lambda p: os.stat(p).st_mtime
    else:
        sort_key_func = lambda p: numeric_sort_key(os.path.basename(p))
    pages.sort(key=sort_key_func)
    if verbose:
        prints('Found comic pages...')
        prints('\t' + '\n\t'.join(os.path.basename(p) for p in pages))
    return pages
def find_pages(dir, sort_on_mtime=False, verbose=False):
    '''
    Find valid comic pages in a previously un-archived comic.

    :param dir: Directory in which extracted comic lives
    :param sort_on_mtime: If True sort pages based on their last modified time.
                          Otherwise, sort alphabetically.
    '''
    suffixes = ('.jpeg', '.jpg', '.gif', '.png', '.webp')

    def is_page(path):
        # Image files only; macOS resource-fork folders are junk
        return '__MACOSX' not in path and path.lower().endswith(suffixes)

    all_files = [
        os.path.abspath(os.path.join(root, fname))
        for root, _subdirs, fnames in os.walk(dir)
        for fname in fnames
    ]
    pages = [p for p in all_files if is_page(p)]

    depths = {p.replace(os.sep, '/').count('/') for p in pages}
    # Use the full path to sort unless the files are in folders of different
    # levels, in which case simply use the filenames.
    if sort_on_mtime:
        pages.sort(key=lambda p: os.stat(p).st_mtime)
    elif len(depths) > 1:
        pages.sort(key=lambda p: numeric_sort_key(os.path.basename(p)))
    else:
        pages.sort(key=numeric_sort_key)
    if verbose:
        prints('Found comic pages...')
        prints('\t' + '\n\t'.join(os.path.relpath(p, dir) for p in pages))
    return pages
def test_sorting(self):
    ' Test the various sorting APIs '
    german = '''Sonntag Montag Dienstag Januar Februar März Fuße Fluße Flusse flusse fluße flüße flüsse'''.split()
    german_good = '''Dienstag Februar flusse Flusse fluße Fluße flüsse flüße Fuße Januar März Montag Sonntag'''.split()
    french = '''dimanche lundi mardi janvier février mars déjà Meme deja même dejà bpef bœg Boef Mémé bœf boef bnef pêche pèché pêché pêche pêché'''.split()
    french_good = '''bnef boef Boef bœf bœg bpef deja dejà déjà dimanche février janvier lundi mardi mars Meme Mémé même pèché pêche pêche pêché pêché'''.split()  # noqa

    # Test corner cases
    sort_key = icu.sort_key
    s = '\U0001f431'  # astral-plane char: exercises surrogate handling
    self.ae(sort_key(s), sort_key(s.encode(sys.getdefaultencoding())), 'UTF-8 encoded object not correctly decoded to generate sort key')
    # NOTE(review): this compares a value with itself, so it can never
    # fail — looks like the left side was meant to be sort_key(...);
    # confirm against upstream.
    self.ae(s.encode('utf-16'), s.encode('utf-16'), 'Undecodable bytestring not returned as itself')
    self.ae(b'', sort_key(None))
    self.ae(0, icu.strcmp(None, b''))
    self.ae(0, icu.strcmp(s, s.encode(sys.getdefaultencoding())))

    # Test locales
    with make_collation_func('dsk', 'de', func='sort_key') as dsk:
        self.ae(german_good, sorted(german, key=dsk))
    with make_collation_func('dcmp', 'de', template='_strcmp_template') as dcmp:
        # Pairwise comparison must agree with sort-key ordering
        for x in german:
            for y in german:
                self.ae(cmp(dsk(x), dsk(y)), dcmp(x, y))
    with make_collation_func('fsk', 'fr', func='sort_key') as fsk:
        self.ae(french_good, sorted(french, key=fsk))
    with make_collation_func('fcmp', 'fr', template='_strcmp_template') as fcmp:
        for x in french:
            for y in french:
                self.ae(cmp(fsk(x), fsk(y)), fcmp(x, y))
    with make_collation_func('ssk', 'es', func='sort_key') as ssk:
        self.assertNotEqual(ssk('peña'), ssk('pena'))
    with make_collation_func('scmp', 'es', template='_strcmp_template') as scmp:
        self.assertNotEqual(0, scmp('pena', 'peña'))

    # Primary collation ignores case and diacritics.
    # NOTE: .iteritems() is Python-2 only.
    for k, v in {u'pèché': u'peche', u'flüße':u'Flusse', u'Štepánek':u'ŠtepaneK'}.iteritems():
        self.ae(0, icu.primary_strcmp(k, v))

    # Test different types of collation
    self.ae(icu.primary_sort_key('Aä'),
            icu.primary_sort_key('aa'))
    self.assertLess(icu.numeric_sort_key('something 2'), icu.numeric_sort_key('something 11'))
    self.assertLess(icu.case_sensitive_sort_key('A'), icu.case_sensitive_sort_key('a'))
    self.ae(0, icu.strcmp('a', 'A'))
    self.ae(cmp('a', 'A'), icu.case_sensitive_strcmp('a', 'A'))
    self.ae(0, icu.primary_strcmp('ä', 'A'))
def test_sorting(self):
    ' Test the various sorting APIs '
    german = '''Sonntag Montag Dienstag Januar Februar März Fuße Fluße Flusse flusse fluße flüße flüsse'''.split()
    german_good = '''Dienstag Februar flusse Flusse fluße Fluße flüsse flüße Fuße Januar März Montag Sonntag'''.split()
    french = '''dimanche lundi mardi janvier février mars déjà Meme deja même dejà bpef bœg Boef Mémé bœf boef bnef pêche pèché pêché pêche pêché'''.split()
    french_good = '''bnef boef Boef bœf bœg bpef deja dejà déjà dimanche février janvier lundi mardi mars Meme Mémé même pèché pêche pêche pêché pêché'''.split()  # noqa

    # Test corner cases
    sort_key = icu.sort_key
    s = '\U0001f431'  # astral-plane char: exercises surrogate handling
    self.ae(sort_key(s), sort_key(s.encode(sys.getdefaultencoding())), 'UTF-8 encoded object not correctly decoded to generate sort key')
    # NOTE(review): this compares a value with itself, so it can never
    # fail — looks like the left side was meant to be sort_key(...);
    # confirm against upstream.
    self.ae(s.encode('utf-16'), s.encode('utf-16'), 'Undecodable bytestring not returned as itself')
    self.ae(b'', sort_key(None))
    self.ae(0, icu.strcmp(None, b''))
    self.ae(0, icu.strcmp(s, s.encode(sys.getdefaultencoding())))

    # Test locales
    with make_collation_func('dsk', 'de', func='sort_key') as dsk:
        self.ae(german_good, sorted(german, key=dsk))
    with make_collation_func('dcmp', 'de', template='_strcmp_template') as dcmp:
        # Pairwise comparison must agree with sort-key ordering
        for x in german:
            for y in german:
                self.ae(cmp(dsk(x), dsk(y)), dcmp(x, y))
    with make_collation_func('fsk', 'fr', func='sort_key') as fsk:
        self.ae(french_good, sorted(french, key=fsk))
    with make_collation_func('fcmp', 'fr', template='_strcmp_template') as fcmp:
        for x in french:
            for y in french:
                self.ae(cmp(fsk(x), fsk(y)), fcmp(x, y))
    with make_collation_func('ssk', 'es', func='sort_key') as ssk:
        self.assertNotEqual(ssk('peña'), ssk('pena'))
    with make_collation_func('scmp', 'es', template='_strcmp_template') as scmp:
        self.assertNotEqual(0, scmp('pena', 'peña'))

    # Primary collation ignores case and diacritics; iteritems() here is
    # the py2/py3 compatibility wrapper, not the dict method
    for k, v in iteritems({u'pèché': u'peche', u'flüße':u'Flusse', u'Štepánek':u'ŠtepaneK'}):
        self.ae(0, icu.primary_strcmp(k, v))

    # Test different types of collation
    self.ae(icu.primary_sort_key('Aä'),
            icu.primary_sort_key('aa'))
    self.assertLess(icu.numeric_sort_key('something 2'), icu.numeric_sort_key('something 11'))
    self.assertLess(icu.case_sensitive_sort_key('A'), icu.case_sensitive_sort_key('a'))
    self.ae(0, icu.strcmp('a', 'A'))
    self.ae(cmp('a', 'A'), icu.case_sensitive_strcmp('a', 'A'))
    self.ae(0, icu.primary_strcmp('ä', 'A'))
def resort(self):
    # Repopulate the list widget from self.locations, sorted naturally by
    # display name when the user has alphabetical sorting enabled.
    #
    # Fix: the original used Python 2 tuple-parameter syntax
    # ``lambda (name, loc): ...`` which is a SyntaxError on Python 3
    # (tuple parameter unpacking was removed by PEP 3113). Index the
    # pair instead, matching the other variant of this method.
    if self.sort_alphabetically.isChecked():
        sorted_locations = sorted(self.locations, key=lambda name_loc: numeric_sort_key(name_loc[0]))
    else:
        sorted_locations = self.locations
    self.items.clear()
    for name, loc in sorted_locations:
        i = QListWidgetItem(name, self.items)
        i.setData(Qt.UserRole, loc)
    self.items.setCurrentRow(0)
def resort(self):
    # Refill the list widget, alphabetically (natural order) when the
    # sort checkbox is ticked, otherwise in the stored order.
    locations = self.locations
    if self.sort_alphabetically.isChecked():
        locations = sorted(locations, key=lambda pair: numeric_sort_key(pair[0]))
    self.items.clear()
    for display_name, location in locations:
        item = QListWidgetItem(display_name, self.items)
        item.setData(Qt.UserRole, location)
    self.items.setCurrentRow(0)
def setup_select_libraries_panel(self):
    # Build the panel listing every library found in the import archive:
    # one ImportLocation widget per library, each preceded by a
    # horizontal-rule frame, sorted naturally by folder name.
    self.imported_lib_widgets = []
    self.frames = []
    l = self.slp.layout()
    for lpath in sorted(self.importer.metadata['libraries'], key=lambda x:numeric_sort_key(os.path.basename(x))):
        f = QFrame(self)
        self.frames.append(f)
        l.addWidget(f)
        f.setFrameShape(f.HLine)
        w = ImportLocation(lpath, self.slp)
        l.addWidget(w)
        self.imported_lib_widgets.append(w)
    l.addStretch()
def populate_anchors(self, name):
    # Collect (lead text, fragment) pairs for every link target in the
    # given file, caching the result per file name.
    if name not in self.anchor_cache:
        from calibre.ebooks.oeb.base import XHTML_NS
        root = self.container.parsed(name)
        ac = self.anchor_cache[name] = []
        # Link targets are any element with an id, plus legacy
        # <a name="..."> anchors
        for item in set(root.xpath('//*[@id]')) | set(root.xpath('//h:a[@name]', namespaces={'h':XHTML_NS})):
            frag = item.get('id', None) or item.get('name')
            if not frag:
                continue
            text = lead_text(item, num_words=4)
            ac.append((text, frag))
        # Natural-order sort on the leading text
        ac.sort(key=lambda text_frag: numeric_sort_key(text_frag[0]))
    self.anchor_names.model().set_names(self.anchor_cache[name])
    self.update_target()
def set_display_name(name, item):
    # Decide the text shown for a file entry: the full path when the
    # preference asks for it, otherwise the shortest path suffix that is
    # unique among the entries seen so far.
    if tprefs['file_list_shows_full_pathname']:
        text = name
    else:
        if name in processed:
            # Exact duplicate (can happen if there are duplicates in the
            # spine): copy both display columns from the earlier entry.
            earlier = processed[name]
            item.setText(0, earlier.text(0))
            item.setText(1, earlier.text(1))
            return
        components = name.split('/')
        text = components.pop()
        # Prepend parent components until the suffix is unique
        while text in seen and components:
            text = components.pop() + '/' + text
    seen[text] = item
    item.setText(0, text)
    item.setText(1, as_hex_unicode(numeric_sort_key(text)))
def setup_export_panel(self):
    # Build the "export libraries" page of the stacked widget: a label
    # plus a multi-select list with one row per known calibre library.
    self.export_panel = w = QWidget(self)
    self.stack.addWidget(w)
    w.l = l = QVBoxLayout(w)
    w.la = la = QLabel(_('Select which libraries you want to export below'))
    la.setWordWrap(True), l.addWidget(la)
    self.lib_list = ll = QListWidget(self)
    l.addWidget(ll)
    ll.setSelectionMode(ll.ExtendedSelection)
    ll.setStyleSheet('QListView::item { padding: 5px }')
    ll.setAlternatingRowColors(True)
    lpaths = all_known_libraries()
    # One row per library, sorted naturally by folder name; all rows
    # start out selected
    for lpath in sorted(lpaths, key=lambda x:numeric_sort_key(os.path.basename(x))):
        i = QListWidgetItem(self.export_lib_text(lpath), ll)
        i.setData(Qt.UserRole, lpath)
        i.setData(Qt.UserRole+1, lpaths[lpath])
        i.setIcon(QIcon(I('lt.png')))
        i.setSelected(True)
    # Refresh a row's text once its disk usage is known; queued
    # connection — presumably the size is computed off the GUI thread,
    # confirm with the emitter
    self.update_disk_usage.connect((
        lambda i, sz: self.lib_list.item(i).setText(self.export_lib_text(self.lib_list.item(i).data(Qt.UserRole), sz))),
        type=Qt.QueuedConnection)
def css_data(container, book_locale, result_data, *args):
    # Analyze CSS usage across the book: which rules match which elements
    # in the spine, and which CSS classes are used where. Fills
    # result_data['classes'] and returns the list of CSSEntry results.
    import tinycss
    from tinycss.css21 import RuleSet, ImportRule

    def css_rules(file_name, rules, sourceline=0):
        # Flatten a parsed stylesheet into CSSRule entries; @import rules
        # are recorded as the imported file's name for later resolution.
        ans = []
        for rule in rules:
            if isinstance(rule, RuleSet):
                selector = rule.selector.as_css()
                ans.append(CSSRule(selector, RuleLocation(file_name, sourceline + rule.line, rule.column)))
            elif isinstance(rule, ImportRule):
                import_name = safe_href_to_name(container, rule.uri, file_name)
                if import_name and container.exists(import_name):
                    ans.append(import_name)
            elif getattr(rule, 'rules', False):
                # e.g. @media blocks: recurse into the nested rules
                ans.extend(css_rules(file_name, rule.rules, sourceline))
        return ans

    parser = tinycss.make_full_parser()
    importable_sheets = {}
    html_sheets = {}
    spine_names = {name for name, is_linear in container.spine_names}
    style_path, link_path = XPath('//h:style'), XPath('//h:link/@href')
    # NOTE: .iteritems() is Python-2 only.
    for name, mt in container.mime_map.iteritems():
        if mt in OEB_STYLES:
            importable_sheets[name] = css_rules(name, parser.parse_stylesheet(container.raw_data(name)).rules)
        elif mt in OEB_DOCS and name in spine_names:
            html_sheets[name] = []
            for style in style_path(container.parsed(name)):
                if style.get('type', 'text/css') == 'text/css' and style.text:
                    html_sheets[name].append(
                        css_rules(name, parser.parse_stylesheet(force_unicode(style.text, 'utf-8')).rules, style.sourceline - 1))

    rule_map = defaultdict(lambda : defaultdict(list))

    def rules_in_sheet(sheet):
        # Yield CSSRule entries, following @import references recursively
        for rule in sheet:
            if isinstance(rule, CSSRule):
                yield rule
            else:  # @import rule
                isheet = importable_sheets.get(rule)
                if isheet is not None:
                    for irule in rules_in_sheet(isheet):
                        yield irule

    def sheets_for_html(name, root):
        # Stylesheets linked from the HTML file via <link href=...>
        for href in link_path(root):
            tname = safe_href_to_name(container, href, name)
            sheet = importable_sheets.get(tname)
            if sheet is not None:
                yield sheet

    tt_cache = {}

    def tag_text(elem):
        # Short human-readable rendering of an element's opening tag.
        # NOTE(review): when the element has attributes the text is
        # returned without caching, and when the cached value is computed
        # there is no return statement, so the function falls through
        # returning None on that path — looks like a missing
        # ``return ans``; confirm against upstream before relying on it.
        ans = tt_cache.get(elem)
        if ans is None:
            tag = elem.tag.rpartition('}')[-1]
            if elem.attrib:
                attribs = ' '.join('%s="%s"' % (k, prepare_string_for_xml(elem.get(k, ''), True)) for k in elem.keys())
                return '<%s %s>' % (tag, attribs)
            ans = tt_cache[elem] = '<%s>' % tag

    def matches_for_selector(selector, select, class_map, rule):
        # All elements matched by selector; additionally record, for every
        # class name mentioned in the selector, which rules matched each
        # element carrying that class.
        lsel = selector.lower()
        try:
            matches = tuple(select(selector))
        except SelectorError:
            return ()

        for elem in matches:
            for cls in elem.get('class', '').split():
                if '.' + cls.lower() in lsel:
                    class_map[cls][elem].append(rule)

        return (MatchLocation(tag_text(elem), elem.sourceline) for elem in matches)

    class_map = defaultdict(lambda : defaultdict(list))

    for name, inline_sheets in html_sheets.iteritems():
        root = container.parsed(name)
        cmap = defaultdict(lambda : defaultdict(list))
        # Pre-register every class attribute so unused classes still show up
        for elem in root.xpath('//*[@class]'):
            for cls in elem.get('class', '').split():
                cmap[cls][elem] = []
        select = Select(root, ignore_inappropriate_pseudo_classes=True)
        for sheet in chain(sheets_for_html(name, root), inline_sheets):
            for rule in rules_in_sheet(sheet):
                rule_map[rule][name].extend(matches_for_selector(rule.selector, select, cmap, rule))
        for cls, elem_map in cmap.iteritems():
            class_elements = class_map[cls][name]
            for elem, usage in elem_map.iteritems():
                class_elements.append(
                    ClassElement(name, elem.sourceline, elem.get('class'), tag_text(elem), tuple(usage)))

    result_data['classes'] = ans = []
    for cls, name_map in class_map.iteritems():
        la = tuple(ClassFileMatch(name, tuple(class_elements), numeric_sort_key(name)) for name, class_elements in name_map.iteritems() if class_elements)
        num_of_matches = sum(sum(len(ce.matched_rules) for ce in cfm.class_elements) for cfm in la)
        ans.append(ClassEntry(cls, num_of_matches, la, numeric_sort_key(cls)))

    ans = []
    for rule, loc_map in rule_map.iteritems():
        la = tuple(CSSFileMatch(name, tuple(locations), numeric_sort_key(name)) for name, locations in loc_map.iteritems() if locations)
        count = sum(len(fm.locations) for fm in la)
        ans.append(CSSEntry(rule, count, la, numeric_sort_key(rule.selector)))

    return ans
def sort_key(name):
    # Known names keep their mapped rank, unknown names sort last;
    # ties are broken by natural (numeric-aware) ordering of the name.
    rank = nmap.get(name, len(nmap))
    return (rank, numeric_sort_key(name))
def text_sort_key(x):
    # Natural-order collation key; falsy values collate as the empty string.
    text = str(x) if x else ''
    return numeric_sort_key(text)
def key(account_key):
    # Natural-order collation key for the account name itself
    return numeric_sort_key(account_key)
def key(account_key):
    # Collate on the first field of the account record, coerced to text;
    # falsy values collate as the empty string.
    text = as_unicode(self.accounts[account_key][0])
    return numeric_sort_key(text or '')
def key(account_key):
    # Collate on the account's alias; missing/falsy aliases collate as ''
    alias = self.aliases.get(account_key)
    return numeric_sort_key(alias or '')
def css_data(container, book_locale, result_data, *args):
    # Analyze CSS usage across the book: which rules match which elements
    # in the spine, and which CSS classes are used where. Fills
    # result_data['classes'] and returns the list of CSSEntry results.
    import tinycss
    from tinycss.css21 import RuleSet, ImportRule

    def css_rules(file_name, rules, sourceline=0):
        # Flatten a parsed stylesheet into CSSRule entries; @import rules
        # are recorded as the imported file's name for later resolution.
        ans = []
        for rule in rules:
            if isinstance(rule, RuleSet):
                selector = rule.selector.as_css()
                ans.append(
                    CSSRule(
                        selector,
                        RuleLocation(file_name, sourceline + rule.line, rule.column)))
            elif isinstance(rule, ImportRule):
                import_name = safe_href_to_name(container, rule.uri, file_name)
                if import_name and container.exists(import_name):
                    ans.append(import_name)
            elif getattr(rule, 'rules', False):
                # e.g. @media blocks: recurse into the nested rules
                ans.extend(css_rules(file_name, rule.rules, sourceline))
        return ans

    parser = tinycss.make_full_parser()
    importable_sheets = {}
    html_sheets = {}
    spine_names = {name for name, is_linear in container.spine_names}
    style_path, link_path = XPath('//h:style'), XPath('//h:link/@href')
    for name, mt in iteritems(container.mime_map):
        if mt in OEB_STYLES:
            importable_sheets[name] = css_rules(
                name,
                parser.parse_stylesheet(container.raw_data(name)).rules)
        elif mt in OEB_DOCS and name in spine_names:
            html_sheets[name] = []
            for style in style_path(container.parsed(name)):
                if style.get('type', 'text/css') == 'text/css' and style.text:
                    html_sheets[name].append(
                        css_rules(
                            name,
                            parser.parse_stylesheet(
                                force_unicode(style.text, 'utf-8')).rules,
                            style.sourceline - 1))

    rule_map = defaultdict(lambda: defaultdict(list))

    def rules_in_sheet(sheet):
        # Yield CSSRule entries, following @import references recursively
        for rule in sheet:
            if isinstance(rule, CSSRule):
                yield rule
            else:  # @import rule
                isheet = importable_sheets.get(rule)
                if isheet is not None:
                    for irule in rules_in_sheet(isheet):
                        yield irule

    def sheets_for_html(name, root):
        # Stylesheets linked from the HTML file via <link href=...>
        for href in link_path(root):
            tname = safe_href_to_name(container, href, name)
            sheet = importable_sheets.get(tname)
            if sheet is not None:
                yield sheet

    tt_cache = {}

    def tag_text(elem):
        # Short human-readable rendering of an element's opening tag.
        # NOTE(review): when the element has attributes the text is
        # returned without caching, and when the cached value is computed
        # there is no return statement, so the function falls through
        # returning None on that path — looks like a missing
        # ``return ans``; confirm against upstream before relying on it.
        ans = tt_cache.get(elem)
        if ans is None:
            tag = elem.tag.rpartition('}')[-1]
            if elem.attrib:
                attribs = ' '.join(
                    '%s="%s"' % (k, prepare_string_for_xml(elem.get(k, ''), True))
                    for k in elem.keys())
                return '<%s %s>' % (tag, attribs)
            ans = tt_cache[elem] = '<%s>' % tag

    def matches_for_selector(selector, select, class_map, rule):
        # All elements matched by selector; additionally record, for every
        # class name mentioned in the selector, which rules matched each
        # element carrying that class.
        lsel = selector.lower()
        try:
            matches = tuple(select(selector))
        except SelectorError:
            return ()

        for elem in matches:
            for cls in elem.get('class', '').split():
                if '.' + cls.lower() in lsel:
                    class_map[cls][elem].append(rule)

        return (MatchLocation(tag_text(elem), elem.sourceline) for elem in matches)

    class_map = defaultdict(lambda: defaultdict(list))

    for name, inline_sheets in iteritems(html_sheets):
        root = container.parsed(name)
        cmap = defaultdict(lambda: defaultdict(list))
        # Pre-register every class attribute so unused classes still show up
        for elem in root.xpath('//*[@class]'):
            for cls in elem.get('class', '').split():
                cmap[cls][elem] = []
        select = Select(root, ignore_inappropriate_pseudo_classes=True)
        for sheet in chain(sheets_for_html(name, root), inline_sheets):
            for rule in rules_in_sheet(sheet):
                rule_map[rule][name].extend(
                    matches_for_selector(rule.selector, select, cmap, rule))
        for cls, elem_map in iteritems(cmap):
            class_elements = class_map[cls][name]
            for elem, usage in iteritems(elem_map):
                class_elements.append(
                    ClassElement(name, elem.sourceline, elem.get('class'), tag_text(elem), tuple(usage)))

    result_data['classes'] = ans = []
    for cls, name_map in iteritems(class_map):
        la = tuple(
            ClassFileMatch(name, tuple(class_elements), numeric_sort_key(name))
            for name, class_elements in iteritems(name_map) if class_elements)
        num_of_matches = sum(
            sum(len(ce.matched_rules) for ce in cfm.class_elements) for cfm in la)
        ans.append(ClassEntry(cls, num_of_matches, la, numeric_sort_key(cls)))

    ans = []
    for rule, loc_map in iteritems(rule_map):
        la = tuple(
            CSSFileMatch(name, tuple(locations), numeric_sort_key(name))
            for name, locations in iteritems(loc_map) if locations)
        count = sum(len(fm.locations) for fm in la)
        ans.append(CSSEntry(rule, count, la, numeric_sort_key(rule.selector)))

    return ans
def key(account_key):
    # Collate on the account's tags text; missing/falsy values collate as ''
    tags = self.tags.get(account_key)
    return numeric_sort_key(tags or '')
def serialize(self, styles):
    # Serialize both style families; within each family the "normal"
    # style comes first, the rest follow in numeric order of their ids.
    normal = self.normal_block_style
    for style in sorted(self.block_styles,
                        key=lambda s: (s is not normal, numeric_sort_key(s.id))):
        style.serialize(styles, normal)
    normal = self.normal_text_style
    for style in sorted(self.text_styles,
                        key=lambda s: (s is not normal, numeric_sort_key(s.id))):
        style.serialize(styles, normal)
def key(account_key):
    # Collate on the account's subject text; missing/falsy values collate as ''
    subject = self.subjects.get(account_key)
    return numeric_sort_key(subject or '')
def sort_key(filename):
    # Collate on (base name, extension). rpartition on a dotless name
    # yields ('', '', name), so swap to treat the whole thing as base.
    base, _sep, ext = filename.rpartition('.')
    if not base and ext:
        base, ext = ext, base
    return (numeric_sort_key(base), numeric_sort_key(ext))
def sort_key(l):
    # Order by: known-name rank (unknown names last), then natural order
    # of the name, then line number.
    name = l.name
    return (nmap.get(name, len(nmap)), numeric_sort_key(name), l.line_number)
def key(account_key):
    # Collate on the first field of the account record, coerced to text;
    # type(u'') is the native unicode string type on both py2 and py3.
    text = type(u'')(self.accounts[account_key][0])
    return numeric_sort_key(text or '')
def text_sort_key(x):
    # Natural-order collation key; falsy values collate as the empty string.
    text = unicode_type(x or '')
    return numeric_sort_key(text)
def flatten_spine(self):
    # Flatten CSS for the whole spine: every computed style becomes a
    # class in a single shared stylesheet that each spine item then links.
    names = defaultdict(int)
    styles, pseudo_styles = {}, defaultdict(dict)
    for item in self.items:
        html = item.data
        stylizer = self.stylizers[item]
        if self.specializer is not None:
            self.specializer(item, stylizer)
        body = html.find(XHTML('body'))
        fsize = self.context.dest.fbase
        self.flatten_node(body, stylizer, names, styles, pseudo_styles, fsize, item.id)
    # styles maps css-text -> class-name; invert and sort by class name
    items = sorted(((key, val) for (val, key) in iteritems(styles)), key=lambda x:numeric_sort_key(x[0]))
    # :hover must come after link and :active must come after :hover
    psels = sorted(pseudo_styles, key=lambda x : {'hover':1, 'active':2}.get(x, 0))
    for psel in psels:
        styles = pseudo_styles[psel]
        if not styles:
            continue
        x = sorted(((k+':'+psel, v) for v, k in iteritems(styles)))
        items.extend(x)

    css = ''.join(".%s {\n%s;\n}\n\n" % (key, val) for key, val in items)
    href = self.replace_css(css)
    global_css = self.collect_global_css()
    for item in self.items:
        stylizer = self.stylizers[item]
        self.flatten_head(item, href, global_css[item])
def key(account_key):
    # Collate on the first field of the account record; falsy values
    # collate as the empty string.
    first_field = self.accounts[account_key][0]
    return numeric_sort_key(first_field or '')
def text_sort_key(x):
    # Natural-order collation key; falsy values collate as the empty string.
    #
    # Fix: the original called the Python 2-only builtin ``unicode``,
    # which raises NameError on Python 3. ``str`` is the equivalent text
    # type there, matching the str-based variant of this function used
    # elsewhere in this code base.
    return numeric_sort_key(str(x or ''))
def key(account_key):
    # Collate on the first field of the account record, coerced to text;
    # type(u'') is the native unicode string type on both py2 and py3.
    record = self.accounts[account_key]
    return numeric_sort_key(type(u'')(record[0]) or '')
def css_data(container, book_locale):
    # Analyze CSS rule usage across the book: for every rule in every
    # stylesheet (linked or inline), find the elements it matches and
    # return a list of CSSEntry results with match counts and locations.
    import tinycss
    from tinycss.css21 import RuleSet, ImportRule

    def css_rules(file_name, rules, sourceline=0):
        # Flatten a parsed stylesheet into CSSRule entries; @import rules
        # are recorded as the imported file's name for later resolution.
        ans = []
        for rule in rules:
            if isinstance(rule, RuleSet):
                selector = rule.selector.as_css()
                ans.append(CSSRule(selector, RuleLocation(file_name, sourceline + rule.line, rule.column)))
            elif isinstance(rule, ImportRule):
                import_name = safe_href_to_name(container, rule.uri, file_name)
                if import_name and container.exists(import_name):
                    ans.append(import_name)
            elif getattr(rule, 'rules', False):
                # e.g. @media blocks: recurse into the nested rules
                ans.extend(css_rules(file_name, rule.rules, sourceline))
        return ans

    parser = tinycss.make_full_parser()
    importable_sheets = {}
    html_sheets = {}
    spine_names = {name for name, is_linear in container.spine_names}
    style_path, link_path = XPath('//h:style'), XPath('//h:link/@href')
    # NOTE: .iteritems() is Python-2 only.
    for name, mt in container.mime_map.iteritems():
        if mt in OEB_STYLES:
            importable_sheets[name] = css_rules(name, parser.parse_stylesheet(container.raw_data(name)).rules)
        elif mt in OEB_DOCS and name in spine_names:
            html_sheets[name] = []
            for style in style_path(container.parsed(name)):
                if style.get('type', 'text/css') == 'text/css' and style.text:
                    html_sheets[name].append(
                        css_rules(name, parser.parse_stylesheet(force_unicode(style.text, 'utf-8')).rules, style.sourceline - 1))

    rule_map = defaultdict(lambda : defaultdict(list))
    pseudo_pat = re.compile(PSEUDO_PAT, re.I)
    # Cache of compiled selectors, keyed on (case_sensitive, selector)
    cache = {}

    def rules_in_sheet(sheet):
        # Yield CSSRule entries, following @import references recursively
        for rule in sheet:
            if isinstance(rule, CSSRule):
                yield rule
            else:  # @import rule
                isheet = importable_sheets.get(rule)
                if isheet is not None:
                    for irule in rules_in_sheet(isheet):
                        yield irule

    def sheets_for_html(name, root):
        # Stylesheets linked from the HTML file via <link href=...>
        for href in link_path(root):
            tname = safe_href_to_name(container, href, name)
            sheet = importable_sheets.get(tname)
            if sheet is not None:
                yield sheet

    def tag_text(elem):
        # Short human-readable rendering of an element's opening tag
        tag = elem.tag.rpartition('}')[-1]
        if elem.attrib:
            attribs = ' '.join('%s="%s"' % (k, prepare_string_for_xml(elem.get(k, ''), True)) for k in elem.keys())
            return '<%s %s>' % (tag,
                                attribs)
        return '<%s>' % tag

    def matches_for_selector(selector, root):
        # Strip pseudo-classes and collapse whitespace, then match
        # case-sensitively, falling back to a case-insensitive match when
        # nothing is found; selector compilation errors yield no matches.
        selector = pseudo_pat.sub('', selector)
        selector = MIN_SPACE_RE.sub(r'\1', selector)
        try:
            xp = cache[(True, selector)]
        except KeyError:
            xp = cache[(True, selector)] = build_selector(selector)
        try:
            matches = xp(root)
        except Exception:
            return ()
        if not matches:
            try:
                xp = cache[(False, selector)]
            except KeyError:
                xp = cache[(False, selector)] = build_selector(selector, case_sensitive=False)
            try:
                matches = xp(root)
            except Exception:
                return ()
        return (MatchLocation(tag_text(elem), elem.sourceline) for elem in matches)

    for name, inline_sheets in html_sheets.iteritems():
        root = container.parsed(name)
        for sheet in chain(sheets_for_html(name, root), inline_sheets):
            for rule in rules_in_sheet(sheet):
                rule_map[rule][name].extend(matches_for_selector(rule.selector, root))

    ans = []
    for rule, loc_map in rule_map.iteritems():
        la = tuple(CSSFileMatch(name, tuple(locations), numeric_sort_key(name)) for name, locations in loc_map.iteritems() if locations)
        count = sum(len(fm.locations) for fm in la)
        ans.append(CSSEntry(rule, count, la, numeric_sort_key(rule.selector)))
    return ans
def key(account_key):
    # Collate on the first field of the account record, coerced to text;
    # falsy values collate as the empty string.
    first_field = as_unicode(self.accounts[account_key][0])
    return numeric_sort_key(first_field or '')