Code example #1
File: normalize_css.py Project: artbycrunk/calibre
def normalize_edge(name, cssvalue):
    style = {}
    if isinstance(cssvalue, PropertyValue):
        primitives = [v.cssText for v in cssvalue]
    else:
        primitives = [cssvalue.cssText]
    if len(primitives) == 1:
        value, = primitives
        values = (value, value, value, value)
    elif len(primitives) == 2:
        vert, horiz = primitives
        values = (vert, horiz, vert, horiz)
    elif len(primitives) == 3:
        top, horiz, bottom = primitives
        values = (top, horiz, bottom, horiz)
    else:
        values = primitives[:4]
    if '-' in name:
        l, _, r = name.partition('-')
        for edge, value in zip(EDGES, values):
            style['%s-%s-%s' % (l, edge, r)] = value
    else:
        for edge, value in zip(EDGES, values):
            style['%s-%s' % (name, edge)] = value
    return style
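The function above implements the standard CSS 1/2/3/4-value shorthand expansion. Below is a minimal, self-contained sketch of the same rule using plain strings instead of the parser's PropertyValue objects; the ('top', 'right', 'bottom', 'left') edge order is an assumption here, not taken from the source.

EDGES = ('top', 'right', 'bottom', 'left')  # assumed edge order

def expand_shorthand(name, values):
    # Standard CSS shorthand expansion: 1 value applies to all edges,
    # 2 values are (vertical, horizontal), 3 are (top, horizontal, bottom).
    if len(values) == 1:
        values = values * 4
    elif len(values) == 2:
        vert, horiz = values
        values = (vert, horiz, vert, horiz)
    elif len(values) == 3:
        top, horiz, bottom = values
        values = (top, horiz, bottom, horiz)
    else:
        values = tuple(values[:4])
    return {'%s-%s' % (name, edge): v for edge, v in zip(EDGES, values)}

# expand_shorthand('margin', ('1px', '2px')) ->
# {'margin-top': '1px', 'margin-right': '2px',
#  'margin-bottom': '1px', 'margin-left': '2px'}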
Code example #2
File: engine.py Project: j-howell/calibre
    def drawTextItem(self, point, text_item):
        # return super(PdfEngine, self).drawTextItem(point, text_item)
        self.apply_graphics_state()
        gi = GlyphInfo(*self.qt_hack.get_glyphs(point, text_item))
        if not gi.indices:
            return
        metrics = self.fonts.get(gi.name)
        if metrics is None:
            from calibre.utils.fonts.utils import get_all_font_names
            try:
                names = get_all_font_names(gi.name, True)
                names = ' '.join('%s=%s'%(k, names[k]) for k in sorted(names))
            except Exception:
                names = 'Unknown'
            self.debug('Loading font: %s' % names)
            try:
                self.fonts[gi.name] = metrics = self.create_sfnt(text_item)
            except UnsupportedFont:
                self.debug('Failed to load font: %s, drawing text as outlines...' % names)
                return super(PdfEngine, self).drawTextItem(point, text_item)
        indices, positions = [], []
        ignore_glyphs = metrics.ignore_glyphs
        for glyph_id, gpos in zip(gi.indices, gi.positions):
            if glyph_id not in ignore_glyphs:
                indices.append(glyph_id), positions.append(gpos)
        for glyph_id in indices:
            try:
                metrics.glyph_map[glyph_id] = metrics.full_glyph_map[glyph_id]
            except (KeyError, ValueError):
                pass
        glyphs = []
        last_x = last_y = 0
        for glyph_index, (x, y) in zip(indices, positions):
            glyphs.append((x-last_x, last_y - y, glyph_index))
            last_x, last_y = x, y

        if not self.content_written_to_current_page:
            dy = self.graphics.current_state.transform.dy()
            ypositions = [y + dy for x, y in positions]
            miny = min(ypositions or (0,))
            maxy = max(ypositions or (self.pixel_height,))
            page_top = self.header_height if self.has_headers else 0
            page_bottom = self.pixel_height - (self.footer_height if self.has_footers else 0)
            if page_top <= miny <= page_bottom or page_top <= maxy <= page_bottom:
                self.content_written_to_current_page = 'drawTextItem'
            else:
                self.debug('Text in header/footer: miny=%s maxy=%s page_top=%s page_bottom=%s'% (
                    miny, maxy, page_top, page_bottom))
        self.pdf.draw_glyph_run([gi.stretch, 0, 0, -1, 0, 0], gi.size, metrics,
                                glyphs)
Code example #3
File: driver.py Project: j-howell/calibre
    def upload_books(self, files, names, on_card=None, end_session=True,
                     metadata=None):
        debug_print('USBMS: uploading %d books'%(len(files)))

        path = self._sanity_check(on_card, files)

        paths = []
        names = iter(names)
        metadata = iter(metadata)

        for i, infile in enumerate(files):
            mdata, fname = next(metadata), next(names)
            filepath = self.normalize_path(self.create_upload_path(path, mdata, fname))
            if not hasattr(infile, 'read'):
                infile = self.normalize_path(infile)
            filepath = self.put_file(infile, filepath, replace_file=True)
            paths.append(filepath)
            try:
                self.upload_cover(os.path.dirname(filepath),
                                  os.path.splitext(os.path.basename(filepath))[0],
                                  mdata, filepath)
            except:  # Failure to upload cover is not catastrophic
                import traceback
                traceback.print_exc()

            self.report_progress((i+1) / float(len(files)), _('Transferring books to device...'))

        self.report_progress(1.0, _('Transferring books to device...'))
        debug_print('USBMS: finished uploading %d books'%(len(files)))
        return list(zip(paths, cycle([on_card])))
Code example #4
File: driver.py Project: JimmXinu/calibre
 def filesystem_cache(self):
     if self._filesystem_cache is None:
         debug('Loading filesystem metadata...')
         st = time.time()
         from calibre.devices.mtp.filesystem_cache import FilesystemCache
         ts = self.total_space()
         all_storage = []
         items = []
         for storage_id, capacity in zip([self._main_id, self._carda_id,
             self._cardb_id], ts):
             if storage_id is None:
                 continue
             name = _('Unknown')
             for s in self.dev.data['storage']:
                 if s['id'] == storage_id:
                     name = s['name']
                     break
             storage = {'id':storage_id, 'size':capacity, 'name':name,
                     'is_folder':True, 'can_delete':False, 'is_system':True}
             self._currently_getting_sid = unicode_type(storage_id)
             id_map = self.dev.get_filesystem(storage_id, partial(
                     self._filesystem_callback, {}))
             for x in itervalues(id_map):
                 x['storage_id'] = storage_id
             all_storage.append(storage)
             items.append(itervalues(id_map))
         self._filesystem_cache = FilesystemCache(all_storage, chain(*items))
         debug('Filesystem metadata loaded in %g seconds (%d objects)'%(
             time.time()-st, len(self._filesystem_cache)))
     return self._filesystem_cache
Code example #5
File: head.py Project: JimmXinu/calibre
    def __init__(self, *args, **kwargs):
        super(HeadTable, self).__init__(*args, **kwargs)

        field_types = (
                '_version_number' , 'l',
                '_font_revision'  , 'l',
                'checksum_adjustment' , 'L',
                'magic_number' , 'L',
                'flags' , 'H',
                'units_per_em' , 'H',
                '_created' , 'q',
                '_modified' , 'q',
                'x_min' , 'h',
                'y_min' , 'h',
                'x_max' , 'h',
                'y_max' , 'h',
                'mac_style' , 'H',
                'lowest_rec_ppem' , 'H',
                'font_direction_hint' , 'h',
                'index_to_loc_format' , 'h',
                'glyph_data_format'   , 'h'
        )

        self._fmt = ('>%s'%(''.join(field_types[1::2]))).encode('ascii')
        self._fields = field_types[0::2]

        for f, val in zip(self._fields, unpack_from(self._fmt, self.raw)):
            setattr(self, f, val)
Code example #6
File: reader.py Project: j-howell/calibre
 def _toc_from_spine(self, opf):
     self.log.warn('Generating default TOC from spine...')
     toc = self.oeb.toc
     titles = []
     headers = []
     for item in self.oeb.spine:
         if not item.linear:
             continue
         html = item.data
         title = ''.join(xpath(html, '/h:html/h:head/h:title/text()'))
         title = COLLAPSE_RE.sub(' ', title.strip())
         if title:
             titles.append(title)
         headers.append('(unlabled)')
         for tag in ('h1', 'h2', 'h3', 'h4', 'h5', 'strong'):
             expr = '/h:html/h:body//h:%s[position()=1]/text()'
             header = ''.join(xpath(html, expr % tag))
             header = COLLAPSE_RE.sub(' ', header.strip())
             if header:
                 headers[-1] = header
                 break
     use = titles
     if len(titles) > len(set(titles)):
         use = headers
     for title, item in zip(use, self.oeb.spine):
         if not item.linear:
             continue
         toc.add(title, item.href)
     return True
Code example #7
File: dbus_service.py Project: kovidgoyal/calibre
    def _reflect_on_method(cls, func):
        args = func._dbus_args

        if func._dbus_in_signature:
            # convert signature into a tuple so length refers to number of
            # types, not number of characters. the length is checked by
            # the decorator to make sure it matches the length of args.
            in_sig = tuple(Signature(func._dbus_in_signature))
        else:
            # magic iterator which returns as many v's as we need
            in_sig = _VariantSignature()

        if func._dbus_out_signature:
            out_sig = Signature(func._dbus_out_signature)
        else:
            # it's tempting to default to Signature('v'), but
            # for methods that return nothing, providing incorrect
            # introspection data is worse than providing none at all
            out_sig = []

        reflection_data = '    <method name="%s">\n' % (func.__name__)
        for pair in zip(in_sig, args):
            reflection_data += '      <arg direction="in"  type="%s" name="%s" />\n' % pair
        for type in out_sig:
            reflection_data += '      <arg direction="out" type="%s" />\n' % type
        reflection_data += '    </method>\n'

        return reflection_data
Code example #8
File: index.py Project: j-howell/calibre
def parse_indx_header(data):
    check_signature(data, b'INDX')
    words = INDEX_HEADER_FIELDS
    num = len(words)
    values = struct.unpack('>%dL' % num, data[4:4*(num+1)])
    ans = dict(zip(words, values))
    ordt1, ordt2 = ans['ordt1'], ans['ordt2']
    ans['ordt1_raw'], ans['ordt2_raw'] = [], []
    ans['ordt_map'] = ''

    if ordt1 > 0 and data[ordt1:ordt1+4] == b'ORDT':
        # I don't know what this is, but using it seems to be unnecessary, so
        # just leave it as the raw bytestring
        ans['ordt1_raw'] = data[ordt1+4:ordt1+4+ans['oentries']]
    if ordt2 > 0 and data[ordt2:ordt2+4] == b'ORDT':
        ans['ordt2_raw'] = raw = bytearray(data[ordt2+4:ordt2+4+2*ans['oentries']])
        if ans['code'] == 65002:
            # This appears to be EBCDIC-UTF (65002) encoded. I can't be
            # bothered to write a decoder for this (see
            # http://www.unicode.org/reports/tr16/) Just how stupid is Amazon?
            # Instead, we use a weird hack that seems to do the trick for all
            # the books with this type of ORDT record that I have come across.
            # Some EBSP book samples in KF8 format from Amazon have this type
            # of encoding.
            # Basically we try to interpret every second byte as a printable
            # ascii character. If we cannot, we map to the ? char.

            parsed = bytearray(ans['oentries'])
            for i in range(0, 2*ans['oentries'], 2):
                parsed[i//2] = raw[i+1] if 0x20 < raw[i+1] < 0x7f else ord(b'?')
            ans['ordt_map'] = bytes(parsed).decode('ascii')
        else:
            ans['ordt_map'] = '?'*ans['oentries']

    return ans
Code example #9
File: parse.py Project: j-howell/calibre
    def parse_text_assertion(self, raw, ans):
        oraw = raw
        if not raw.startswith('['):
            return oraw
        raw = raw[1:]
        ta = {}
        m, raw = self.do_match(self.ta1_pat, raw)
        if m is not None:
            before, after = m.groups()
            ta['before'] = self.unescape(before)
            if after is not None:
                ta['after'] = self.unescape(after)
        else:
            m, raw = self.do_match(self.ta2_pat, raw)
            if m is not None:
                ta['after'] = self.unescape(m.group(1))

        # parse parameters
        m, raw = self.do_match(self.parameters_pat, raw)
        if m is not None:
            params = {}
            for name, value in zip(m.captures(1), m.captures(2)):
                params[name] = tuple(map(self.unescape, self.csv_pat.match(value).captures(1)))
            if params:
                ta['params'] = params

        if not raw.startswith(']'):
            return oraw  # no closing ] or extra content in the assertion

        if ta:
            ans['text_assertion'] = ta
        return raw[1:]
Code example #10
File: write.py Project: artbycrunk/calibre
def uniq(vals, kmap=lambda x:x):
    ''' Remove all duplicates from vals, while preserving order. kmap must be a
    callable that returns a hashable value for every item in vals '''
    vals = vals or ()
    lvals = (kmap(x) for x in vals)
    seen = set()
    seen_add = seen.add
    return tuple(x for x, k in zip(vals, lvals) if k not in seen and not seen_add(k))
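A quick illustration of uniq() with made-up values: order is preserved, the first occurrence of each key wins, and kmap decides what counts as a duplicate.

print(uniq([3, 1, 3, 2, 1]))                        # (3, 1, 2)
print(uniq(['Foo', 'bar', 'FOO'], kmap=str.lower))  # ('Foo', 'bar')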
Code example #11
File: profiles.py Project: j-howell/calibre
 def __init__(self, *args, **kwargs):
     _Plugin.__init__(self, *args, **kwargs)
     self.width, self.height = self.screen_size
     fsizes = list(self.fsizes)
     self.fkey = list(self.fsizes)
     self.fsizes = []
     for (name, num), size in zip(FONT_SIZES, fsizes):
         self.fsizes.append((name, num, float(size)))
     self.fnames = dict((name, sz) for name, _, sz in self.fsizes if name)
     self.fnums = dict((num, sz) for _, num, sz in self.fsizes if num)
     self.width_pts = self.width * 72./self.dpi
     self.height_pts = self.height * 72./self.dpi
Code example #12
File: mobi8.py Project: j-howell/calibre
 def __init__(self, raw):
     if raw[:4] != b'FDST':
         raise ValueError('KF8 does not have a valid FDST record')
     self.sec_off, self.num_sections = struct.unpack_from(b'>LL', raw, 4)
     if self.sec_off != 12:
         raise ValueError('FDST record has unknown extra fields')
     secf = b'>%dL' % (self.num_sections*2)
     secs = struct.unpack_from(secf, raw, self.sec_off)
     rest = raw[self.sec_off+struct.calcsize(secf):]
     if rest:
         raise ValueError('FDST record has trailing data: '
                 '%s'%format_bytes(rest))
     self.sections = tuple(zip(secs[::2], secs[1::2]))
Code example #13
File: view.py Project: j-howell/calibre
    def calculate_length(self):
        delta = 0
        line_number_changes = ([], [])
        for v, lmap, changes in zip((self.view.left, self.view.right), ({}, {}), line_number_changes):
            b = v.document().firstBlock()
            ebl = v.document().documentLayout().ensureBlockLayout
            last_line_count = 0
            while b.isValid():
                ebl(b)
                lmap[b.blockNumber()] = last_line_count
                last_line_count += b.layout().lineCount()
                b = b.next()
            for top, bot, kind in v.changes:
                changes.append((lmap[top], lmap[bot], kind))

        changes = []
        for (l_top, l_bot, kind), (r_top, r_bot, kind) in zip(*line_number_changes):
            height = max(l_bot - l_top, r_bot - r_top)
            top = delta + l_top
            changes.append((top, top + height, kind))
            delta = top + height - l_bot
        self.changes, self.delta = (changes,) + line_number_changes, delta
Code example #14
File: widgets.py Project: JimmXinu/calibre
 def __call__(self, results):
     if results:
         self.current_result = 0
         prefixes = [QStaticText('<b>%s</b>' % os.path.basename(x)) for x in results]
         [(p.setTextFormat(Qt.RichText), p.setTextOption(self.text_option)) for p in prefixes]
         self.maxwidth = max([x.size().width() for x in prefixes])
         self.results = tuple((prefix, self.make_text(text, positions), text)
             for prefix, (text, positions) in zip(prefixes, iteritems(results)))
     else:
         self.results = ()
         self.current_result = -1
     self.max_result = min(10, len(self.results) - 1)
     self.mouse_hover_result = -1
     self.update()
Code example #15
File: legacy.py Project: j-howell/calibre
 def add_books(self, paths, formats, metadata, add_duplicates=True, return_ids=False):
     books = [(mi, {fmt:path}) for mi, path, fmt in zip(metadata, paths, formats)]
     book_ids, duplicates = self.new_api.add_books(books, add_duplicates=add_duplicates, dbapi=self)
     if duplicates:
         paths, formats, metadata = [], [], []
         for mi, format_map in duplicates:
             metadata.append(mi)
             for fmt, path in iteritems(format_map):
                 formats.append(fmt)
                 paths.append(path)
         duplicates = (paths, formats, metadata)
     ids = book_ids if return_ids else len(book_ids)
     if book_ids:
         self.data.books_added(book_ids)
     return duplicates or None, ids
Code example #16
File: head.py Project: JimmXinu/calibre
    def read_data(self):
        if hasattr(self, 'char_width'):
            return
        ver, = unpack_from(b'>H', self.raw)
        field_types = [
            'version' , 'H',
            'average_char_width', 'h',
            'weight_class', 'H',
            'width_class', 'H',
            'fs_type', 'H',
            'subscript_x_size', 'h',
            'subscript_y_size', 'h',
            'subscript_x_offset', 'h',
            'subscript_y_offset', 'h',
            'superscript_x_size', 'h',
            'superscript_y_size', 'h',
            'superscript_x_offset', 'h',
            'superscript_y_offset', 'h',
            'strikeout_size', 'h',
            'strikeout_position', 'h',
            'family_class', 'h',
            'panose', '10s',
            'ranges', '16s',
            'vendor_id', '4s',
            'selection', 'H',
            'first_char_index', 'H',
            'last_char_index', 'H',
            'typo_ascender', 'h',
            'typo_descender', 'h',
            'typo_line_gap', 'h',
            'win_ascent', 'H',
            'win_descent', 'H',
        ]
        if ver > 1:
            field_types += [
                'code_page_range', '8s',
                'x_height', 'h',
                'cap_height', 'h',
                'default_char', 'H',
                'break_char', 'H',
                'max_context', 'H',
            ]

        self._fmt = ('>%s'%(''.join(field_types[1::2]))).encode('ascii')
        self._fields = field_types[0::2]

        for f, val in zip(self._fields, unpack_from(self._fmt, self.raw)):
            setattr(self, f, val)
Code example #17
File: dbus_service.py Project: kovidgoyal/calibre
    def _reflect_on_signal(cls, func):
        args = func._dbus_args

        if func._dbus_signature:
            # convert signature into a tuple so length refers to number of
            # types, not number of characters
            sig = tuple(Signature(func._dbus_signature))
        else:
            # magic iterator which returns as many v's as we need
            sig = _VariantSignature()

        reflection_data = '    <signal name="%s">\n' % (func.__name__)
        for pair in zip(sig, args):
            reflection_data = reflection_data + '      <arg type="%s" name="%s" />\n' % pair
        reflection_data = reflection_data + '    </signal>\n'

        return reflection_data
Code example #18
File: driver.py Project: JimmXinu/calibre
    def upload_books(self, files, names, on_card=None, end_session=True,
                     metadata=None):
        debug('upload_books() called')
        from calibre.devices.utils import sanity_check
        sanity_check(on_card, files, self.card_prefix(), self.free_space())
        prefix = self.prefix_for_location(on_card)
        sid = {'carda':self._carda_id, 'cardb':self._cardb_id}.get(on_card,
                self._main_id)
        bl_idx = {'carda':1, 'cardb':2}.get(on_card, 0)
        storage = self.filesystem_cache.storage(sid)

        ans = []
        self.report_progress(0, _('Transferring books to device...'))
        i, total = 0, len(files)

        routing = {fmt:dest for fmt,dest in self.get_pref('rules')}

        for infile, fname, mi in zip(files, names, metadata):
            path = self.create_upload_path(prefix, mi, fname, routing)
            if path and self.is_folder_ignored(storage, path):
                raise MTPInvalidSendPathError('/'.join(path))
            parent = self.ensure_parent(storage, path)
            if hasattr(infile, 'read'):
                pos = infile.tell()
                infile.seek(0, 2)
                sz = infile.tell()
                infile.seek(pos)
                stream = infile
                close = False
            else:
                sz = os.path.getsize(infile)
                stream = lopen(infile, 'rb')
                close = True
            try:
                mtp_file = self.put_file(parent, path[-1], stream, sz)
            finally:
                if close:
                    stream.close()
            ans.append((mtp_file, bl_idx))
            i += 1
            self.report_progress(i/total, _('Transferred %s to device')%mi.title)

        self.report_progress(1, _('Transfer to device finished...'))
        debug('upload_books() ended')
        return ans
Code example #19
File: epub_output.py Project: j-howell/calibre
    def encrypt_fonts(self, uris, tdir, uuid):  # {{{
        from polyglot.binary import from_hex_bytes

        key = re.sub(r'[^a-fA-F0-9]', '', uuid)
        if len(key) < 16:
            raise ValueError('UUID identifier %r is invalid'%uuid)
        key = bytearray(from_hex_bytes((key + key)[:32]))
        paths = []
        with CurrentDir(tdir):
            paths = [os.path.join(*x.split('/')) for x in uris]
            uris = dict(zip(uris, paths))
            fonts = []
            for uri in list(uris.keys()):
                path = uris[uri]
                if not os.path.exists(path):
                    uris.pop(uri)
                    continue
                self.log.debug('Encrypting font:', uri)
                with lopen(path, 'r+b') as f:
                    data = f.read(1024)
                    if len(data) >= 1024:
                        f.seek(0)
                        for i in range(1024):
                            f.write(chr(ord(data[i]) ^ key[i%16]))
                    else:
                        self.log.warn('Font', path, 'is invalid, ignoring')
                if not isinstance(uri, unicode_type):
                    uri = uri.decode('utf-8')
                fonts.append(u'''
                <enc:EncryptedData>
                    <enc:EncryptionMethod Algorithm="http://ns.adobe.com/pdf/enc#RC"/>
                    <enc:CipherData>
                    <enc:CipherReference URI="%s"/>
                    </enc:CipherData>
                </enc:EncryptedData>
                '''%(uri.replace('"', '\\"')))
            if fonts:
                ans = '''<encryption
                    xmlns="urn:oasis:names:tc:opendocument:xmlns:container"
                    xmlns:enc="http://www.w3.org/2001/04/xmlenc#"
                    xmlns:deenc="http://ns.adobe.com/digitaleditions/enc">
                    '''
                ans += (u'\n'.join(fonts)).encode('utf-8')
                ans += '\n</encryption>'
                return ans
Code example #20
File: view.py Project: j-howell/calibre
 def get_state(self):
     state_map = {}
     expanded_categories = []
     hide_empty_categories = self.model().prefs['tag_browser_hide_empty_categories']
     crmap = self._model.category_row_map()
     for category in self._model.category_nodes:
         if (category.category_key in self.hidden_categories or (
             hide_empty_categories and len(category.child_tags()) == 0)):
             continue
         row = crmap.get(category.category_key)
         if row is not None:
             index = self._model.index(row, 0, QModelIndex())
             if self.isExpanded(index):
                 expanded_categories.append(category.category_key)
         states = [c.tag.state for c in category.child_tags()]
         names = [(c.tag.name, c.tag.category) for c in category.child_tags()]
         state_map[category.category_key] = dict(zip(names, states))
     return expanded_categories, state_map
Code example #21
File: covers.py Project: artbycrunk/calibre
def calibre_cover2(title, author_string='', series_string='', prefs=None, as_qimage=False, logo_path=None):
    init_environment()
    title, subtitle, footer = '<b>' + escape_formatting(title), '<i>' + escape_formatting(series_string), '<b>' + escape_formatting(author_string)
    prefs = prefs or cprefs
    prefs = {k:prefs.get(k) for k in cprefs.defaults}
    scale = 800. / prefs['cover_height']
    scale_cover(prefs, scale)
    prefs = Prefs(**prefs)
    img = QImage(prefs.cover_width, prefs.cover_height, QImage.Format_ARGB32)
    img.fill(Qt.white)
    # colors = to_theme('ffffff ffffff 000000 000000')
    color_theme = theme_to_colors(fallback_colors)

    class CalibeLogoStyle(Style):
        NAME = GUI_NAME = 'calibre'

        def __call__(self, painter, rect, color_theme, title_block, subtitle_block, footer_block):
            top = title_block.position.y + 10
            extra_spacing = subtitle_block.line_spacing // 2 if subtitle_block.line_spacing else title_block.line_spacing // 3
            height = title_block.height + subtitle_block.height + extra_spacing + title_block.leading
            top += height + 25
            bottom = footer_block.position.y - 50
            logo = QImage(logo_path or I('library.png'))
            pwidth, pheight = rect.width(), bottom - top
            scaled, width, height = fit_image(logo.width(), logo.height(), pwidth, pheight)
            x, y = (pwidth - width) // 2, (pheight - height) // 2
            rect = QRect(x, top + y, width, height)
            painter.setRenderHint(QPainter.SmoothPixmapTransform)
            painter.drawImage(rect, logo)
            return self.ccolor1, self.ccolor1, self.ccolor1
    style = CalibeLogoStyle(color_theme, prefs)
    title_block, subtitle_block, footer_block = layout_text(
        prefs, img, title, subtitle, footer, img.height() // 3, style)
    p = QPainter(img)
    rect = QRect(0, 0, img.width(), img.height())
    colors = style(p, rect, color_theme, title_block, subtitle_block, footer_block)
    for block, color in zip((title_block, subtitle_block, footer_block), colors):
        p.setPen(color)
        block.draw(p)
    p.end()
    img.setText('Generated cover', '%s %s' % (__appname__, __version__))
    if as_qimage:
        return img
    return pixmap_to_data(img)
Code example #22
File: driver.py Project: JimmXinu/calibre
    def add_books_to_metadata(self, mtp_files, metadata, booklists):
        debug('add_books_to_metadata() called')
        from calibre.devices.mtp.books import Book

        i, total = 0, len(mtp_files)
        self.report_progress(0, _('Adding books to device metadata listing...'))
        for x, mi in zip(mtp_files, metadata):
            mtp_file, bl_idx = x
            bl = booklists[bl_idx]
            book = Book(mtp_file.storage_id, '/'.join(mtp_file.mtp_relpath),
                    other=mi)
            book = bl.add_book(book, replace_metadata=True)
            if book is not None:
                book.size = mtp_file.size
                book.datetime = mtp_file.last_modified.timetuple()
                book.path = mtp_file.mtp_id_path
            i += 1
            self.report_progress(i/total, _('Added %s')%mi.title)

        self.report_progress(1, _('Adding complete'))
        debug('add_books_to_metadata() ended')
Code example #23
File: normalize_css.py Project: artbycrunk/calibre
 def test_border_condensation(self):
     vals = 'red solid 5px'
     css = '; '.join('border-%s-%s: %s' % (edge, p, v) for edge in EDGES for p, v in zip(BORDER_PROPS, vals.split()))
     style = parseStyle(css)
     condense_rule(style)
     for e, p in product(EDGES, BORDER_PROPS):
         self.assertFalse(style.getProperty('border-%s-%s' % (e, p)))
         self.assertFalse(style.getProperty('border-%s' % e))
         self.assertFalse(style.getProperty('border-%s' % p))
     self.assertEqual(style.getProperty('border').value, vals)
     css = '; '.join('border-%s-%s: %s' % (edge, p, v) for edge in ('top',) for p, v in zip(BORDER_PROPS, vals.split()))
     style = parseStyle(css)
     condense_rule(style)
     self.assertEqual(style.cssText, 'border-top: %s' % vals)
     css += ';' + '; '.join('border-%s-%s: %s' % (edge, p, v) for edge in ('right', 'left', 'bottom') for p, v in
                      zip(BORDER_PROPS, vals.replace('red', 'green').split()))
     style = parseStyle(css)
     condense_rule(style)
     self.assertEqual(len(style.getProperties()), 4)
     self.assertEqual(style.getProperty('border-top').value, vals)
     self.assertEqual(style.getProperty('border-left').value, vals.replace('red', 'green'))
Code example #24
File: fonts.py Project: j-howell/calibre
    def write_widths(self, objects):
        glyphs = sorted(self.used_glyphs|{0})
        widths = {g:self.metrics.pdf_scale(w) for g, w in zip(glyphs,
                                        self.metrics.glyph_widths(glyphs))}
        counter = Counter()
        for g, w in iteritems(widths):
            counter[w] += 1
        most_common = counter.most_common(1)[0][0]
        self.descendant_font['DW'] = most_common
        widths = {g:w for g, w in iteritems(widths) if w != most_common}

        groups = Array()
        for k, g in groupby(enumerate(widths), lambda i_x:i_x[0]-i_x[1]):
            group = list(map(itemgetter(1), g))
            gwidths = [widths[g] for g in group]
            if len(set(gwidths)) == 1 and len(group) > 1:
                w = (min(group), max(group), gwidths[0])
            else:
                w = (min(group), Array(gwidths))
            groups.extend(w)
        self.descendant_font['W'] = objects.add(groups)
Code example #25
File: head.py Project: JimmXinu/calibre
    def read_data(self, hmtx):
        if hasattr(self, 'ascender'):
            return
        field_types = (
            '_version_number' , 'l',
            'ascender', 'h',
            'descender', 'h',
            'line_gap', 'h',
            'advance_width_max', 'H',
            'min_left_size_bearing', 'h',
            'min_right_side_bearing', 'h',
            'x_max_extent', 'h',
            'caret_slope_rise', 'h',
            'caret_slop_run', 'h',
            'caret_offset', 'h',
            'r1', 'h',
            'r2', 'h',
            'r3', 'h',
            'r4', 'h',
            'metric_data_format', 'h',
            'number_of_h_metrics', 'H',
        )

        self._fmt = ('>%s'%(''.join(field_types[1::2]))).encode('ascii')
        self._fields = field_types[0::2]

        for f, val in zip(self._fields, unpack_from(self._fmt, self.raw)):
            setattr(self, f, val)

        raw = hmtx.raw
        num = self.number_of_h_metrics
        if len(raw) < 4*num:
            raise UnsupportedFont('The hmtx table has insufficient data')
        long_hor_metric = raw[:4*num]
        fmt = '>%dH'%(2*num)
        entries = unpack_from(fmt.encode('ascii'), long_hor_metric)
        self.advance_widths = entries[0::2]
        fmt = '>%dh'%(2*num)
        entries = unpack_from(fmt.encode('ascii'), long_hor_metric)
        self.left_side_bearings = entries[1::2]
Code example #26
File: covers.py Project: artbycrunk/calibre
def generate_cover(mi, prefs=None, as_qimage=False):
    init_environment()
    prefs = prefs or cprefs
    prefs = {k:prefs.get(k) for k in cprefs.defaults}
    prefs = Prefs(**prefs)
    color_theme = random.choice(load_color_themes(prefs))
    style = random.choice(load_styles(prefs))(color_theme, prefs)
    title, subtitle, footer = format_text(mi, prefs)
    img = QImage(prefs.cover_width, prefs.cover_height, QImage.Format_ARGB32)
    title_block, subtitle_block, footer_block = layout_text(
        prefs, img, title, subtitle, footer, img.height() // 3, style)
    p = QPainter(img)
    rect = QRect(0, 0, img.width(), img.height())
    colors = style(p, rect, color_theme, title_block, subtitle_block, footer_block)
    for block, color in zip((title_block, subtitle_block, footer_block), colors):
        p.setPen(color)
        block.draw(p)
    p.end()
    img.setText('Generated cover', '%s %s' % (__appname__, __version__))
    if as_qimage:
        return img
    return pixmap_to_data(img)
Code example #27
File: routes.py Project: JimmXinu/calibre
    def matches(self, path):
        args_map = self.defaults.copy()
        num = 0
        for component, (name, matched) in zip(path, self.matchers):
            num += 1
            if matched is True:
                args_map[name] = component
            elif not matched(component):
                return False
        if self.soak_up_extra and num < len(path):
            args_map[self.soak_up_extra] += '/' + '/'.join(path[num:])
            num = len(path)
        if num < len(path):
            return False

        def check(tc, val):
            try:
                return tc(val)
            except Exception:
                raise HTTPNotFound('Argument of incorrect type')
        for name, tc in iteritems(self.type_checkers):
            args_map[name] = check(tc, args_map[name])
        return (args_map[name] for name in self.names)
Code example #28
    def write_widths(self, objects):
        glyphs = sorted(self.used_glyphs | {0})
        widths = {
            g: self.metrics.pdf_scale(w)
            for g, w in zip(glyphs, self.metrics.glyph_widths(glyphs))
        }
        counter = Counter()
        for g, w in widths.iteritems():
            counter[w] += 1
        most_common = counter.most_common(1)[0][0]
        self.descendant_font['DW'] = most_common
        widths = {g: w for g, w in widths.iteritems() if w != most_common}

        groups = Array()
        for k, g in groupby(enumerate(widths.iterkeys()),
                            lambda i_x: i_x[0] - i_x[1]):
            group = list(map(itemgetter(1), g))
            gwidths = [widths[g] for g in group]
            if len(set(gwidths)) == 1 and len(group) > 1:
                w = (min(group), max(group), gwidths[0])
            else:
                w = (min(group), Array(gwidths))
            groups.extend(w)
        self.descendant_font['W'] = objects.add(groups)
Code example #29
File: routes.py Project: onyx-Sean/calibre
    def matches(self, path):
        args_map = self.defaults.copy()
        num = 0
        for component, (name, matched) in zip(path, self.matchers):
            num += 1
            if matched is True:
                args_map[name] = component
            elif not matched(component):
                return False
        if self.soak_up_extra and num < len(path):
            args_map[self.soak_up_extra] += '/' + '/'.join(path[num:])
            num = len(path)
        if num < len(path):
            return False

        def check(tc, val):
            try:
                return tc(val)
            except Exception:
                raise HTTPNotFound('Argument of incorrect type')

        for name, tc in iteritems(self.type_checkers):
            args_map[name] = check(tc, args_map[name])
        return (args_map[name] for name in self.names)
Code example #30
File: ajax.py Project: yandong2023/calibre
def search_result(ctx, rd, db, query, num, offset, sort, sort_order, vl=''):
    multisort = [(sanitize_sort_field_name(db.field_metadata, s), ensure_val(o, 'asc', 'desc') == 'asc')
                 for s, o in zip(sort.split(','), cycle(sort_order.split(',')))]
    skeys = db.field_metadata.sortable_field_keys()
    for sfield, sorder in multisort:
        if sfield not in skeys:
            raise HTTPNotFound('%s is not a valid sort field'%sort)

    ids, parse_error = ctx.search(rd, db, query, vl=vl, report_restriction_errors=True)
    ids = db.multisort(fields=multisort, ids_to_sort=ids)
    total_num = len(ids)
    ids = ids[offset:offset+num]
    ans = {
        'total_num': total_num, 'sort_order':sort_order,
        'offset':offset, 'num':len(ids), 'sort':sort,
        'base_url':ctx.url_for(search, library_id=db.server_library_id),
        'query': query,
        'library_id': db.server_library_id,
        'book_ids':ids,
        'vl': vl,
    }
    if parse_error is not None:
        ans['bad_restriction'] = unicode(parse_error)
    return ans
Code example #31
File: ajax.py Project: j-howell/calibre
def search_result(ctx, rd, db, query, num, offset, sort, sort_order, vl=''):
    multisort = [(sanitize_sort_field_name(db.field_metadata, s), ensure_val(o, 'asc', 'desc') == 'asc')
                 for s, o in zip(sort.split(','), cycle(sort_order.split(',')))]
    skeys = db.field_metadata.sortable_field_keys()
    for sfield, sorder in multisort:
        if sfield not in skeys:
            raise HTTPNotFound('%s is not a valid sort field'%sort)

    ids, parse_error = ctx.search(rd, db, query, vl=vl, report_restriction_errors=True)
    ids = db.multisort(fields=multisort, ids_to_sort=ids)
    total_num = len(ids)
    ids = ids[offset:offset+num]
    ans = {
        'total_num': total_num, 'sort_order':sort_order,
        'offset':offset, 'num':len(ids), 'sort':sort,
        'base_url':ctx.url_for(search, library_id=db.server_library_id),
        'query': query,
        'library_id': db.server_library_id,
        'book_ids':ids,
        'vl': vl,
    }
    if parse_error is not None:
        ans['bad_restriction'] = unicode_type(parse_error)
    return ans
Code example #32
File: legacy.py Project: qving11/calibre
    def set_custom_bulk(self, ids, val, label=None, num=None,
                   append=False, notify=True, extras=None):
        if extras is not None and len(extras) != len(ids):
            raise ValueError('Length of ids and extras is not the same')
        field = self.custom_field_name(label, num)
        data = self.backend.custom_field_metadata(label, num)
        if data['datatype'] == 'composite':
            return set()
        if data['datatype'] == 'enumeration' and (
                val and val not in data['display']['enum_values']):
            return
        if not data['editable']:
            raise ValueError('Column %r is not editable'%data['label'])

        if append:
            for book_id in ids:
                self.set_custom(book_id, val, label=label, num=num, append=True, notify=False)
        else:
            with self.new_api.write_lock:
                self.new_api._set_field(field, {book_id:val for book_id in ids}, allow_case_change=False)
            if extras is not None:
                self.new_api._set_field(field + '_index', {book_id:val for book_id, val in zip(ids, extras)})
        if notify:
            self.notify('metadata', list(ids))
Code example #33
File: covers.py Project: artbycrunk/calibre
def to_theme(x):
    return {k:v for k, v in zip(ColorTheme._fields[:4], x.split())}
Code example #34
    def paintEvent(self, event):
        QSplitterHandle.paintEvent(self, event)
        left, right = self.parent().left, self.parent().right
        painter = QPainter(self)
        painter.setClipRect(event.rect())
        w = self.width()
        h = self.height()
        painter.setRenderHints(QPainter.Antialiasing, True)

        C = 16  # Curve factor.

        def create_line(ly, ry, right_to_left=False):
            ' Create path that represents upper or lower line of change marker '
            line = QPainterPath()
            if not right_to_left:
                line.moveTo(0, ly)
                line.cubicTo(C, ly, w - C, ry, w, ry)
            else:
                line.moveTo(w, ry)
                line.cubicTo(w - C, ry, C, ly, 0, ly)
            return line

        ldoc, rdoc = left.document(), right.document()
        lorigin, rorigin = left.contentOffset(), right.contentOffset()
        lfv, rfv = left.firstVisibleBlock().blockNumber(), right.firstVisibleBlock().blockNumber()
        lines = []

        for (ltop, lbot, kind), (rtop, rbot, kind) in zip(left.changes, right.changes):
            if lbot < lfv and rbot < rfv:
                continue
            ly_top = left.blockBoundingGeometry(ldoc.findBlockByNumber(ltop)).translated(lorigin).y()
            ly_bot = left.blockBoundingGeometry(ldoc.findBlockByNumber(lbot)).translated(lorigin).y()
            ry_top = right.blockBoundingGeometry(rdoc.findBlockByNumber(rtop)).translated(rorigin).y()
            ry_bot = right.blockBoundingGeometry(rdoc.findBlockByNumber(rbot)).translated(rorigin).y()
            if max(ly_top, ly_bot, ry_top, ry_bot) < 0:
                continue
            if min(ly_top, ly_bot, ry_top, ry_bot) > h:
                break

            upper_line = create_line(ly_top, ry_top)
            lower_line = create_line(ly_bot, ry_bot, True)

            region = QPainterPath()
            region.moveTo(0, ly_top)
            region.connectPath(upper_line)
            region.lineTo(w, ry_bot)
            region.connectPath(lower_line)
            region.closeSubpath()

            painter.fillPath(region, left.diff_backgrounds[kind])
            for path, aa in zip((upper_line, lower_line), (ly_top != ry_top, ly_bot != ry_bot)):
                lines.append((kind, path, aa))

        for kind, path, aa in sorted(lines, key=lambda x:{'replace':0}.get(x[0], 1)):
            painter.setPen(left.diff_foregrounds[kind])
            painter.setRenderHints(QPainter.Antialiasing, aa)
            painter.drawPath(path)

        painter.setFont(left.heading_font)
        for (lnum, text), (rnum, text) in zip(left.headers, right.headers):
            ltop, lbot, rtop, rbot = lnum, lnum + 3, rnum, rnum + 3
            if lbot < lfv and rbot < rfv:
                continue
            ly_top = left.blockBoundingGeometry(ldoc.findBlockByNumber(ltop)).translated(lorigin).y()
            ly_bot = left.blockBoundingGeometry(ldoc.findBlockByNumber(lbot)).translated(lorigin).y()
            ry_top = right.blockBoundingGeometry(rdoc.findBlockByNumber(rtop)).translated(rorigin).y()
            ry_bot = right.blockBoundingGeometry(rdoc.findBlockByNumber(rbot)).translated(rorigin).y()
            if max(ly_top, ly_bot, ry_top, ry_bot) < 0:
                continue
            if min(ly_top, ly_bot, ry_top, ry_bot) > h:
                break
            ly = painter.boundingRect(3, ly_top, left.width(), ly_bot - ly_top - 5, Qt.TextSingleLine, text).bottom() + 3
            ry = painter.boundingRect(3, ry_top, right.width(), ry_bot - ry_top - 5, Qt.TextSingleLine, text).bottom() + 3
            line = create_line(ly, ry)
            painter.setPen(QPen(left.palette().text(), 2))
            painter.setRenderHints(QPainter.Antialiasing, ly != ry)
            painter.drawPath(line)

        painter.end()
        # Paint the splitter without the change lines if the mouse is over the
        # splitter
        if getattr(self, 'hover', False):
            QSplitterHandle.paintEvent(self, event)
Code example #35
File: index.py Project: jimman2003/calibre
    def __call__(self):
        self.control_bytes = self.calculate_control_bytes_for_each_entry(
            self.entries)

        index_blocks, idxt_blocks, record_counts, last_indices = [BytesIO()], [
            BytesIO()
        ], [0], [b'']
        buf = BytesIO()

        RECORD_LIMIT = 0x10000 - self.HEADER_LENGTH - 1048  # kindlegen uses 1048 (there has to be some margin because of block alignment)

        for i, (index_num, tags) in enumerate(self.entries):
            control_bytes = self.control_bytes[i]
            buf.seek(0), buf.truncate(0)
            index_num = (index_num.encode('utf-8') if isinstance(
                index_num, unicode_type) else index_num)
            raw = bytearray(index_num)
            raw.insert(0, len(index_num))
            buf.write(bytes(raw))
            buf.write(bytes(bytearray(control_bytes)))
            for tag in self.tag_types:
                values = tags.get(tag.name, None)
                if values is None:
                    continue
                try:
                    len(values)
                except TypeError:
                    values = [values]
                if values:
                    for val in values:
                        try:
                            buf.write(encint(val))
                        except ValueError:
                            raise ValueError('Invalid values for %r: %r' %
                                             (tag, values))
            raw = buf.getvalue()
            offset = index_blocks[-1].tell()
            idxt_pos = idxt_blocks[-1].tell()
            if offset + idxt_pos + len(raw) + 2 > RECORD_LIMIT:
                index_blocks.append(BytesIO())
                idxt_blocks.append(BytesIO())
                record_counts.append(0)
                offset = idxt_pos = 0
                last_indices.append(b'')
            record_counts[-1] += 1
            idxt_blocks[-1].write(pack(b'>H', self.HEADER_LENGTH + offset))
            index_blocks[-1].write(raw)
            last_indices[-1] = index_num

        index_records = []
        for index_block, idxt_block, record_count in zip(
                index_blocks, idxt_blocks, record_counts):
            index_block = align_block(index_block.getvalue())
            idxt_block = align_block(b'IDXT' + idxt_block.getvalue())
            # Create header for this index record
            header = b'INDX'
            buf.seek(0), buf.truncate(0)
            buf.write(pack(b'>I', self.HEADER_LENGTH))
            buf.write(b'\0' * 4)  # Unknown
            buf.write(
                pack(b'>I', 1)
            )  # Header type (0 for Index header record and 1 for Index records)
            buf.write(b'\0' * 4)  # Unknown

            # IDXT block offset
            buf.write(pack(b'>I', self.HEADER_LENGTH + len(index_block)))

            # Number of index entries in this record
            buf.write(pack(b'>I', record_count))

            buf.write(b'\xff' * 8)  # Unknown

            buf.write(b'\0' * 156)  # Unknown

            header += buf.getvalue()
            index_records.append(header + index_block + idxt_block)
            if len(index_records[-1]) > 0x10000:
                raise ValueError(
                    'Failed to rollover index blocks for very large index.')

        # Create the Index Header record
        tagx = self.generate_tagx()

        # Geometry of the index records is written as index entries pointed to
        # by the IDXT records
        buf.seek(0), buf.truncate()
        idxt = [b'IDXT']
        pos = IndexHeader.HEADER_LENGTH + len(tagx)
        for last_idx, num in zip(last_indices, record_counts):
            start = buf.tell()
            idxt.append(pack(b'>H', pos))
            buf.write(bytes(bytearray([len(last_idx)])) + last_idx)
            buf.write(pack(b'>H', num))
            pos += buf.tell() - start

        header = {
            'num_of_entries': sum(r for r in record_counts),
            'num_of_records': len(index_records),
            'num_of_cncx': len(self.cncx),
            'tagx': align_block(tagx),
            'geometry': align_block(buf.getvalue()),
            'idxt': align_block(b''.join(idxt)),
        }
        header = IndexHeader()(**header)
        self.records = [header] + index_records
        self.records.extend(self.cncx.records)
        return self.records
Code example #36
File: normalize_css.py Project: qving11/calibre
 def test_edge_condensation(self):
     for s, v in iteritems({
         (1, 1, 3) : None,
         (1, 2, 3, 4) : '2pt 3pt 4pt 1pt',
         (1, 2, 3, 2) : '2pt 3pt 2pt 1pt',
         (1, 2, 1, 3) : '2pt 1pt 3pt',
         (1, 2, 1, 2) : '2pt 1pt',
         (1, 1, 1, 1) : '1pt',
         ('2%', '2%', '2%', '2%') : '2%',
         tuple('0 0 0 0'.split()) : '0',
     }):
         for prefix in ('margin', 'padding'):
             css = {'%s-%s' % (prefix, x) : str(y)+'pt' if isinstance(y, numbers.Number) else y for x, y in zip(('left', 'top', 'right', 'bottom'), s)}
             css = '; '.join(('%s:%s' % (k, v) for k, v in iteritems(css)))
             style = parseStyle(css)
             condense_rule(style)
             val = getattr(style.getProperty(prefix), 'value', None)
             self.assertEqual(v, val)
             if val is not None:
                 for edge in EDGES:
                     self.assertFalse(getattr(style.getProperty('%s-%s' % (prefix, edge)), 'value', None))
Code example #37
def to_theme(x):
    return {k:v for k, v in zip(ColorTheme._fields[:4], x.split())}
Code example #38
def get_defaults(spec):
    num = len(spec.defaults or ())
    if not num:
        return {}
    return dict(zip(spec.args[-num:], spec.defaults))
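A short usage sketch, assuming spec is the value returned by inspect.getfullargspec(), which provides the args and defaults attributes this helper reads:

import inspect

def example(a, b, c=3, d=4):
    pass

spec = inspect.getfullargspec(example)
print(get_defaults(spec))  # {'c': 3, 'd': 4}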
Code example #39
 def _build_dchunks(self):
     ddata = []
     directory = list(self._directory)
     directory.sort(key=lambda x: x.name.lower())
     qrn = 1 + (1 << 2)
     dchunk = io.BytesIO()
     dcount = 0
     quickref = []
     name = directory[0].name
     for entry in directory:
         en = entry.name
         if not isinstance(en, bytes):
             en = en.encode('utf-8')
         nxt = b''.join([
             decint(len(en)), en,
             decint(entry.section),
             decint(entry.offset),
             decint(entry.size)
         ])
         usedlen = dchunk.tell() + len(nxt) + (len(quickref) * 2) + 52
         if usedlen >= DCHUNK_SIZE:
             ddata.append((dchunk.getvalue(), quickref, dcount, name))
             dchunk = io.BytesIO()
             dcount = 0
             quickref = []
             name = en
         if (dcount % qrn) == 0:
             quickref.append(dchunk.tell())
         dchunk.write(nxt)
         dcount = dcount + 1
     ddata.append((dchunk.getvalue(), quickref, dcount, name))
     cidmax = len(ddata) - 1
     rdcount = 0
     dchunks = []
     dcounts = []
     ichunk = None
     if len(ddata) > 1:
         ichunk = io.BytesIO()
     for cid, (content, quickref, dcount, name) in zip(count(), ddata):
         dchunk = io.BytesIO()
         prev = cid - 1 if cid > 0 else ULL_NEG1
         next = cid + 1 if cid < cidmax else ULL_NEG1
         rem = DCHUNK_SIZE - (len(content) + 50)
         pad = rem - (len(quickref) * 2)
         dchunk.write(b'AOLL')
         dchunk.write(pack('<IQQQQQ', rem, cid, prev, next, rdcount, 1))
         dchunk.write(content)
         dchunk.write(b'\0' * pad)
         for ref in reversed(quickref):
             dchunk.write(pack('<H', ref))
         dchunk.write(pack('<H', dcount))
         rdcount = rdcount + dcount
         dchunks.append(dchunk.getvalue())
         dcounts.append(dcount)
         if ichunk:
             if not isinstance(name, bytes):
                 name = name.encode('utf-8')
             ichunk.write(decint(len(name)))
             ichunk.write(name)
             ichunk.write(decint(cid))
     if ichunk:
         rem = DCHUNK_SIZE - (ichunk.tell() + 16)
         pad = rem - 2
         ichunk = b''.join([
             b'AOLI',
             pack('<IQ', rem, len(dchunks)),
             ichunk.getvalue(), (b'\0' * pad),
             pack('<H', len(dchunks))
         ])
     return dcounts, dchunks, ichunk
Code example #40
File: rapydscript.py Project: zwlistu/calibre
def to_dict(obj):
    return dict(zip(list(obj.keys()), list(obj.values())))
Code example #41
File: head.py Project: zwlistu/calibre
    def read_data(self):
        if hasattr(self, 'char_width'):
            return
        ver, = unpack_from(b'>H', self.raw)
        field_types = [
            'version',
            'H',
            'average_char_width',
            'h',
            'weight_class',
            'H',
            'width_class',
            'H',
            'fs_type',
            'H',
            'subscript_x_size',
            'h',
            'subscript_y_size',
            'h',
            'subscript_x_offset',
            'h',
            'subscript_y_offset',
            'h',
            'superscript_x_size',
            'h',
            'superscript_y_size',
            'h',
            'superscript_x_offset',
            'h',
            'superscript_y_offset',
            'h',
            'strikeout_size',
            'h',
            'strikeout_position',
            'h',
            'family_class',
            'h',
            'panose',
            '10s',
            'ranges',
            '16s',
            'vendor_id',
            '4s',
            'selection',
            'H',
            'first_char_index',
            'H',
            'last_char_index',
            'H',
            'typo_ascender',
            'h',
            'typo_descender',
            'h',
            'typo_line_gap',
            'h',
            'win_ascent',
            'H',
            'win_descent',
            'H',
        ]
        if ver > 1:
            field_types += [
                'code_page_range',
                '8s',
                'x_height',
                'h',
                'cap_height',
                'h',
                'default_char',
                'H',
                'break_char',
                'H',
                'max_context',
                'H',
            ]

        self._fmt = ('>%s' % (''.join(field_types[1::2]))).encode('ascii')
        self._fields = field_types[0::2]

        for f, val in zip(self._fields, unpack_from(self._fmt, self.raw)):
            setattr(self, f, val)
Code example #42
 def __call__(self, query):
     scores, positions = self.m.calculate_scores(query)
     for score, pos in zip(scores, positions):
         yield score, pos
Code example #43
File: terminal.py Project: zyhong/calibre
    import ctypes.wintypes

    class CONSOLE_SCREEN_BUFFER_INFO(ctypes.Structure):
        _fields_ = [('dwSize', ctypes.wintypes._COORD),
                    ('dwCursorPosition', ctypes.wintypes._COORD),
                    ('wAttributes', ctypes.wintypes.WORD),
                    ('srWindow', ctypes.wintypes._SMALL_RECT),
                    ('dwMaximumWindowSize', ctypes.wintypes._COORD)]


def fmt(code):
    return '\033[%dm' % code


RATTRIBUTES = dict(
    zip(range(1, 9), ('bold', 'dark', '', 'underline', 'blink', '', 'reverse',
                      'concealed')))
ATTRIBUTES = {v: fmt(k) for k, v in iteritems(RATTRIBUTES)}
del ATTRIBUTES['']

RBACKGROUNDS = dict(
    zip(
        range(41, 48),
        ('red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white'),
    ))
BACKGROUNDS = {v: fmt(k) for k, v in iteritems(RBACKGROUNDS)}

RCOLORS = dict(
    zip(
        range(31, 38),
        (
            'red',
Code example #44
    def _write_content(self):
        # Build content sections
        self._build_sections()

        # Build directory chunks
        dcounts, dchunks, ichunk = self._build_dchunks()

        # Write headers
        self._write(LIT_MAGIC)
        self._write(pack('<IIII', 1, PRIMARY_SIZE, 5, SECONDARY_SIZE))
        self._write(packguid(LITFILE_GUID))
        offset = self._tell()
        pieces = list(range(offset, offset + (PIECE_SIZE * 5), PIECE_SIZE))
        self._write((5 * PIECE_SIZE) * b'\0')
        aoli1 = len(dchunks) if ichunk else ULL_NEG1
        last = len(dchunks) - 1
        ddepth = 2 if ichunk else 1
        self._write(
            pack('<IIQQQQIIIIQIIQQQQIIIIQIIIIQ', 2, 0x98,
                 aoli1, 0, last, 0, DCHUNK_SIZE, 2, 0, ddepth, 0,
                 len(self._directory), 0, ULL_NEG1, 0, 0, 0, CCHUNK_SIZE, 2, 0,
                 1, 0, len(dcounts), 0, 0x100000, 0x20000, 0))
        self._write(BLOCK_CAOL)
        self._write(BLOCK_ITSF)
        conoff_offset = self._tell()
        timestamp = int(time.time())
        self._write(pack('<QII', 0, timestamp, 0x409))

        # Piece #0
        piece0_offset = self._tell()
        self._write(pack('<II', 0x1fe, 0))
        filesz_offset = self._tell()
        self._write(pack('<QQ', 0, 0))
        self._writeat(pieces[0],
                      pack('<QQ', piece0_offset,
                           self._tell() - piece0_offset))

        # Piece #1: Directory chunks
        piece1_offset = self._tell()
        number = len(dchunks) + ((ichunk and 1) or 0)
        self._write(b'IFCM',
                    pack('<IIIQQ', 1, DCHUNK_SIZE, 0x100000, ULL_NEG1, number))
        for dchunk in dchunks:
            self._write(dchunk)
        if ichunk:
            self._write(ichunk)
        self._writeat(pieces[1],
                      pack('<QQ', piece1_offset,
                           self._tell() - piece1_offset))

        # Piece #2: Count chunks
        piece2_offset = self._tell()
        self._write(b'IFCM',
                    pack('<IIIQQ', 1, CCHUNK_SIZE, 0x20000, ULL_NEG1, 1))
        cchunk = io.BytesIO()
        last = 0
        for i, dcount in zip(count(), dcounts):
            cchunk.write(decint(last))
            cchunk.write(decint(dcount))
            cchunk.write(decint(i))
            last = dcount
        cchunk = cchunk.getvalue()
        rem = CCHUNK_SIZE - (len(cchunk) + 50)
        self._write(b'AOLL', pack('<IQQQQQ', rem, 0, ULL_NEG1, ULL_NEG1, 0, 1))
        filler = b'\0' * rem
        self._write(cchunk, filler, pack('<H', len(dcounts)))
        self._writeat(pieces[2],
                      pack('<QQ', piece2_offset,
                           self._tell() - piece2_offset))

        # Piece #3: GUID3
        piece3_offset = self._tell()
        self._write(packguid(PIECE3_GUID))
        self._writeat(pieces[3],
                      pack('<QQ', piece3_offset,
                           self._tell() - piece3_offset))

        # Piece #4: GUID4
        piece4_offset = self._tell()
        self._write(packguid(PIECE4_GUID))
        self._writeat(pieces[4],
                      pack('<QQ', piece4_offset,
                           self._tell() - piece4_offset))

        # The actual section content
        content_offset = self._tell()
        self._writeat(conoff_offset, pack('<Q', content_offset))
        self._write(self._sections[0].getvalue())
        self._writeat(filesz_offset, pack('<Q', self._tell()))
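
Each "piece" above follows the same reserve-then-backpatch pattern: space for an (offset, size) pair is written as zeros, the piece is emitted, and `_writeat` patches the reserved slot once the final size is known. A self-contained sketch of that pattern over an in-memory buffer (the `_write`/`_writeat`/`_tell` helpers here are simplified stand-ins, not the writer's real implementation):

import io
from struct import pack

buf = io.BytesIO()

def _tell():
    return buf.tell()

def _write(*chunks):
    for c in chunks:
        buf.write(c)

def _writeat(pos, data):
    # Back-patch previously reserved bytes without disturbing the write head.
    saved = buf.tell()
    buf.seek(pos)
    buf.write(data)
    buf.seek(saved)

# Reserve 16 bytes for an (offset, size) pair, write the piece, then patch.
slot = _tell()
_write(pack('<QQ', 0, 0))
piece_offset = _tell()
_write(b'piece payload')
_writeat(slot, pack('<QQ', piece_offset, _tell() - piece_offset))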
Code example #45
File: __init__.py  Project: gitfmc/calibre
    match = get_title_sort_pat(lang).search(title)
    if match:
        try:
            prep = match.group(1)
        except IndexError:
            pass
        else:
            title = title[len(prep):] + ', ' + prep
            if title[0] in _ignore_starts:
                title = title[1:]
    return title.strip()


coding = list(
    zip([1000, 900, 500, 400, 100, 90, 50, 40, 10, 9, 5, 4, 1], [
        "M", "CM", "D", "CD", "C", "XC", "L", "XL", "X", "IX", "V", "IV", "I"
    ]))


def roman(num):
    if num <= 0 or num >= 4000 or int(num) != num:
        return str(num)
    result = []
    for d, r in coding:
        while num >= d:
            result.append(r)
            num -= d
    return ''.join(result)


def fmt_sidx(i, fmt='%.2f', use_roman=False):
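
The `coding` table pairs descending values with Roman tokens so `roman()` can greedily subtract; a quick check using only the functions shown above:

print(roman(14))     # XIV
print(roman(1999))   # MCMXCIX
print(roman(4000))   # '4000' -- falls back to str() outside the supported 1-3999 range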
Code example #46
            return
        self.next_item(False)
        for id_ in self.ids:
            self.rejected_ids.add(id_)
            oldmi, newmi = self.get_metadata(id_)
            self.accepted[id_] = (False, None)
        self.ids = []
        self.accept()

    def keyPressEvent(self, ev):
        if ev.key() in (Qt.Key_Enter, Qt.Key_Return):
            ev.accept()
            return
        return QDialog.keyPressEvent(self, ev)


if __name__ == '__main__':
    from calibre.gui2 import Application
    from calibre.library import db
    app = Application([])
    db = db()
    ids = sorted(db.all_ids(), reverse=True)
    ids = tuple(zip(ids[0::2], ids[1::2]))
    gm = partial(db.get_metadata, index_is_id=True, get_cover=True, cover_as_data=True)
    get_metadata = lambda x:map(gm, ids[x])
    d = CompareMany(list(range(len(ids))), get_metadata, db.field_metadata, db=db)
    if d.exec_() == d.Accepted:
        for changed, mi in d.accepted.itervalues():
            if changed and mi is not None:
                print(mi)
Code example #47
File: diff.py  Project: j-howell/calibre
            return
        self.next_item(False)
        for id_ in self.ids:
            self.rejected_ids.add(id_)
            oldmi, newmi = self.get_metadata(id_)
            self.accepted[id_] = (False, None)
        self.ids = []
        self.accept()

    def keyPressEvent(self, ev):
        if ev.key() in (Qt.Key_Enter, Qt.Key_Return):
            ev.accept()
            return
        return QDialog.keyPressEvent(self, ev)


if __name__ == '__main__':
    from calibre.gui2 import Application
    from calibre.library import db
    app = Application([])
    db = db()
    ids = sorted(db.all_ids(), reverse=True)
    ids = tuple(zip(ids[0::2], ids[1::2]))
    gm = partial(db.get_metadata, index_is_id=True, get_cover=True, cover_as_data=True)
    get_metadata = lambda x:list(map(gm, ids[x]))
    d = CompareMany(list(range(len(ids))), get_metadata, db.field_metadata, db=db)
    if d.exec_() == d.Accepted:
        for changed, mi in itervalues(d.accepted):
            if changed and mi is not None:
                print(mi)
Code example #48
 def edge_dict(prefix, expected):
     return {
         '%s-%s' % (prefix, edge): x
         for edge, x in zip(EDGES, expected)
     }
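
Assuming `EDGES` follows the usual CSS shorthand order `('top', 'right', 'bottom', 'left')` (an assumption; it is defined outside this excerpt), the helper expands a prefix into per-edge keys:

EDGES = ('top', 'right', 'bottom', 'left')  # assumed definition, not shown in the excerpt

print(edge_dict('margin', ('1pt', '2pt', '3pt', '4pt')))
# {'margin-top': '1pt', 'margin-right': '2pt', 'margin-bottom': '3pt', 'margin-left': '4pt'}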
Code example #49
from polyglot.builtins import zip, unicode_type

DAY_MAP = dict(Sun=0, Mon=1, Tue=2, Wed=3, Thu=4, Fri=5, Sat=6)
MONTH_MAP = dict(Jan=1,
                 Feb=2,
                 Mar=3,
                 Apr=4,
                 May=5,
                 Jun=6,
                 Jul=7,
                 Aug=8,
                 Sep=9,
                 Oct=10,
                 Nov=11,
                 Dec=12)
INVERSE_DAY_MAP = dict(zip(DAY_MAP.values(), DAY_MAP.keys()))
INVERSE_MONTH_MAP = dict(zip(MONTH_MAP.values(), MONTH_MAP.keys()))


def strptime(src):
    src = src.strip()
    src = src.split()
    src[0] = unicode_type(DAY_MAP[src[0][:-1]]) + ','
    src[2] = unicode_type(MONTH_MAP[src[2]])
    return time.strptime(' '.join(src), '%w, %d %m %Y %H:%M:%S %Z')


def strftime(epoch, zone=time.gmtime):
    src = time.strftime("%w, %d %m %Y %H:%M:%S GMT", zone(epoch)).split()
    src[0] = INVERSE_DAY_MAP[int(src[0][:-1])] + ','
    src[2] = INVERSE_MONTH_MAP[int(src[2])]
    return ' '.join(src)
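
A hedged round-trip check of these helpers for the Unix epoch (the `%Z` handling of `GMT` in `strptime` can vary by platform, so treat the second call as illustrative):

print(strftime(0))
# 'Thu, 01 Jan 1970 00:00:00 GMT'
print(strptime('Thu, 01 Jan 1970 00:00:00 GMT')[:6])
# (1970, 1, 1, 0, 0, 0)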
Code example #50
    def test_legacy_direct(self):  # {{{
        'Test read-only methods that are directly equivalent in the old and new interface'
        from calibre.ebooks.metadata.book.base import Metadata
        from datetime import timedelta
        ndb = self.init_legacy(self.cloned_library)
        db = self.init_old()
        newstag = ndb.new_api.get_item_id('tags', 'news')

        self.assertEqual(dict(db.prefs), dict(ndb.prefs))

        for meth, args in iteritems({
                'find_identical_books': [(Metadata('title one',
                                                   ['author one']), ),
                                         (Metadata('unknown'), ),
                                         (Metadata('xxxx'), )],
                'get_books_for_category': [('tags', newstag),
                                           ('#formats', 'FMT1')],
                'get_next_series_num_for': [('A Series One', )],
                'get_id_from_uuid': [('ddddd', ), (db.uuid(1, True), )],
                'cover': [(0, ), (1, ), (2, )],
                'get_author_id': [('author one', ), ('unknown', ),
                                  ('xxxxx', )],
                'series_id': [(0, ), (1, ), (2, )],
                'publisher_id': [(0, ), (1, ), (2, )],
                '@tags_older_than': [
                    ('News', None),
                    ('Tag One', None),
                    ('xxxx', None),
                    ('Tag One', None, 'News'),
                    ('News', None, 'xxxx'),
                    ('News', None, None, ['xxxxxxx']),
                    ('News', None, 'Tag One', ['Author Two', 'Author One']),
                    ('News', timedelta(0), None, None),
                    ('News', timedelta(100000)),
                ],
                'format': [(1, 'FMT1', True), (2, 'FMT1', True),
                           (0, 'xxxxxx')],
                'has_format': [(1, 'FMT1', True), (2, 'FMT1', True),
                               (0, 'xxxxxx')],
                'sizeof_format': [(1, 'FMT1', True), (2, 'FMT1', True),
                                  (0, 'xxxxxx')],
                '@format_files': [(0, ), (1, ), (2, )],
                'formats': [(0, ), (1, ), (2, )],
                'max_size': [(0, ), (1, ), (2, )],
                'format_hash': [(1, 'FMT1'), (1, 'FMT2'), (2, 'FMT1')],
                'author_sort_from_authors':
            [(['Author One', 'Author Two', 'Unknown'], )],
                'has_book': [(Metadata('title one'), ),
                             (Metadata('xxxx1111'), )],
                'has_id': [(1, ), (2, ), (3, ), (9999, )],
                'id': [
                    (1, ),
                    (2, ),
                    (0, ),
                ],
                'index': [
                    (1, ),
                    (2, ),
                    (3, ),
                ],
                'row': [
                    (1, ),
                    (2, ),
                    (3, ),
                ],
                'is_empty': [()],
                'count': [()],
                'all_author_names': [()],
                'all_tag_names': [()],
                'all_series_names': [()],
                'all_publisher_names': [()],
                '!all_authors': [()],
                '!all_tags2': [()],
                '@all_tags': [()],
                '@get_all_identifier_types': [()],
                '!all_publishers': [()],
                '!all_titles': [()],
                '!all_series': [()],
                'standard_field_keys': [()],
                'all_field_keys': [()],
                'searchable_fields': [()],
                'search_term_to_field_key': [('author', ), ('tag', )],
                'metadata_for_field': [('title', ), ('tags', )],
                'sortable_field_keys': [()],
                'custom_field_keys': [(True, ), (False, )],
                '!get_usage_count_by_id': [('authors', ), ('tags', ),
                                           ('series', ), ('publisher', ),
                                           ('#tags', ), ('languages', )],
                'get_field': [(1, 'title'), (2, 'tags'), (0, 'rating'),
                              (1, 'authors'), (2, 'series'), (1, '#tags')],
                'all_formats': [()],
                'get_authors_with_ids': [()],
                '!get_tags_with_ids': [()],
                '!get_series_with_ids': [()],
                '!get_publishers_with_ids': [()],
                '!get_ratings_with_ids': [()],
                '!get_languages_with_ids': [()],
                'tag_name': [(3, )],
                'author_name': [(3, )],
                'series_name': [(3, )],
                'authors_sort_strings': [(0, ), (1, ), (2, )],
                'author_sort_from_book': [(0, ), (1, ), (2, )],
                'authors_with_sort_strings': [(0, ), (1, ), (2, )],
                'book_on_device_string': [(1, ), (2, ), (3, )],
                'books_in_series_of': [(0, ), (1, ), (2, )],
                'books_with_same_title': [(Metadata(db.title(0)), ),
                                          (Metadata(db.title(1)), ),
                                          (Metadata('1234'), )],
        }):
            fmt = lambda x: x
            if meth[0] in {'!', '@'}:
                fmt = {'!': dict, '@': frozenset}[meth[0]]
                meth = meth[1:]
            elif meth == 'get_authors_with_ids':
                fmt = lambda val: {x[0]: tuple(x[1:]) for x in val}
            for a in args:
                self.assertEqual(
                    fmt(getattr(db, meth)(*a)), fmt(getattr(ndb, meth)(*a)),
                    'The method: %s() returned different results for argument %s'
                    % (meth, a))

        def f(x, y):
            # get_top_level_move_items is broken in the old db on case-insensitive file systems
            x.discard('metadata_db_prefs_backup.json')
            return x, y

        self.assertEqual(f(*db.get_top_level_move_items()),
                         f(*ndb.get_top_level_move_items()))
        d1, d2 = BytesIO(), BytesIO()
        db.copy_cover_to(1, d1, True)
        ndb.copy_cover_to(1, d2, True)
        self.assertTrue(d1.getvalue() == d2.getvalue())
        d1, d2 = BytesIO(), BytesIO()
        db.copy_format_to(1, 'FMT1', d1, True)
        ndb.copy_format_to(1, 'FMT1', d2, True)
        self.assertTrue(d1.getvalue() == d2.getvalue())
        old = db.get_data_as_dict(prefix='test-prefix')
        new = ndb.get_data_as_dict(prefix='test-prefix')
        for o, n in zip(old, new):
            o = {
                unicode_type(k) if isinstance(k, bytes) else k:
                set(v) if isinstance(v, list) else v
                for k, v in iteritems(o)
            }
            n = {
                k: set(v) if isinstance(v, list) else v
                for k, v in iteritems(n)
            }
            self.assertEqual(o, n)

        ndb.search('title:Unknown')
        db.search('title:Unknown')
        self.assertEqual(db.row(3), ndb.row(3))
        self.assertRaises(ValueError, ndb.row, 2)
        self.assertRaises(ValueError, db.row, 2)
        db.close()
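
A minimal sketch of the core name-prefix convention the test uses when comparing results (restated from the loop above for readability; the assert is only an illustration):

def comparator_for(meth):
    # '!name' compares results as dicts, '@name' as frozensets, plain names as-is.
    fmt = lambda x: x
    if meth[0] in {'!', '@'}:
        fmt = {'!': dict, '@': frozenset}[meth[0]]
        meth = meth[1:]
    return meth, fmt

assert comparator_for('@all_tags') == ('all_tags', frozenset)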
Code example #51
    def create_oebbook(self, htmlpath, basedir, opts, log, mi):
        import uuid
        from calibre.ebooks.conversion.plumber import create_oebbook
        from calibre.ebooks.oeb.base import (DirContainer, rewrite_links,
                                             urlnormalize, urldefrag,
                                             BINARY_MIME, OEB_STYLES, xpath,
                                             urlquote)
        from calibre import guess_type
        from calibre.ebooks.oeb.transforms.metadata import \
            meta_info_to_oeb_metadata
        from calibre.ebooks.html.input import get_filelist
        from calibre.ebooks.metadata import string_to_authors
        from calibre.utils.localization import canonicalize_lang
        import css_parser, logging
        css_parser.log.setLevel(logging.WARN)
        self.OEB_STYLES = OEB_STYLES
        oeb = create_oebbook(log,
                             None,
                             opts,
                             self,
                             encoding=opts.input_encoding,
                             populate=False)
        self.oeb = oeb

        metadata = oeb.metadata
        meta_info_to_oeb_metadata(mi, metadata, log)
        if not metadata.language:
            l = canonicalize_lang(getattr(opts, 'language', None))
            if not l:
                oeb.logger.warn('Language not specified')
                l = get_lang().replace('_', '-')
            metadata.add('language', l)
        if not metadata.creator:
            a = getattr(opts, 'authors', None)
            if a:
                a = string_to_authors(a)
            if not a:
                oeb.logger.warn('Creator not specified')
                a = [self.oeb.translate(__('Unknown'))]
            for aut in a:
                metadata.add('creator', aut)
        if not metadata.title:
            oeb.logger.warn('Title not specified')
            metadata.add('title', self.oeb.translate(__('Unknown')))
        bookid = unicode_type(uuid.uuid4())
        metadata.add('identifier', bookid, id='uuid_id', scheme='uuid')
        for ident in metadata.identifier:
            if 'id' in ident.attrib:
                self.oeb.uid = metadata.identifier[0]
                break

        filelist = get_filelist(htmlpath, basedir, opts, log)
        filelist = [f for f in filelist if not f.is_binary]
        htmlfile_map = {}
        for f in filelist:
            path = f.path
            oeb.container = DirContainer(os.path.dirname(path),
                                         log,
                                         ignore_opf=True)
            bname = os.path.basename(path)
            id, href = oeb.manifest.generate(id='html',
                                             href=sanitize_file_name(bname))
            htmlfile_map[path] = href
            item = oeb.manifest.add(id, href, 'text/html')
            if path == htmlpath and '%' in path:
                bname = urlquote(bname)
            item.html_input_href = bname
            oeb.spine.add(item, True)

        self.added_resources = {}
        self.log = log
        self.log('Normalizing filename cases')
        for path, href in htmlfile_map.items():
            if not self.is_case_sensitive(path):
                path = path.lower()
            self.added_resources[path] = href
        self.urlnormalize, self.DirContainer = urlnormalize, DirContainer
        self.urldefrag = urldefrag
        self.guess_type, self.BINARY_MIME = guess_type, BINARY_MIME

        self.log('Rewriting HTML links')
        for f in filelist:
            path = f.path
            dpath = os.path.dirname(path)
            oeb.container = DirContainer(dpath, log, ignore_opf=True)
            href = htmlfile_map[path]
            try:
                item = oeb.manifest.hrefs[href]
            except KeyError:
                item = oeb.manifest.hrefs[urlnormalize(href)]
            rewrite_links(item.data, partial(self.resource_adder, base=dpath))

        for item in oeb.manifest.values():
            if item.media_type in self.OEB_STYLES:
                dpath = None
                for path, href in self.added_resources.items():
                    if href == item.href:
                        dpath = os.path.dirname(path)
                        break
                css_parser.replaceUrls(
                    item.data, partial(self.resource_adder, base=dpath))

        toc = self.oeb.toc
        self.oeb.auto_generated_toc = True
        titles = []
        headers = []
        for item in self.oeb.spine:
            if not item.linear:
                continue
            html = item.data
            title = ''.join(xpath(html, '/h:html/h:head/h:title/text()'))
            title = re.sub(r'\s+', ' ', title.strip())
            if title:
                titles.append(title)
            headers.append('(unlabeled)')
            for tag in ('h1', 'h2', 'h3', 'h4', 'h5', 'strong'):
                expr = '/h:html/h:body//h:%s[position()=1]/text()'
                header = ''.join(xpath(html, expr % tag))
                header = re.sub(r'\s+', ' ', header.strip())
                if header:
                    headers[-1] = header
                    break
        use = titles
        if len(titles) > len(set(titles)):
            use = headers
        for title, item in zip(use, self.oeb.spine):
            if not item.linear:
                continue
            toc.add(title, item.href)

        oeb.container = DirContainer(getcwd(), oeb.log, ignore_opf=True)
        return oeb
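
The TOC fallback near the end is easy to miss: per-file <title> texts are preferred, but if they are not unique, the first heading found in each file is used instead. A small standalone illustration of just that decision, with made-up values:

titles = ['My Book', 'My Book', 'My Book']            # duplicated <title> texts
headers = ['Chapter 1', 'Chapter 2', '(unlabeled)']   # first heading per spine item
use = headers if len(titles) > len(set(titles)) else titles
print(use)  # ['Chapter 1', 'Chapter 2', '(unlabeled)']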
Code example #52
 def edge_dict(prefix, expected):
     return {
         '{}-{}'.format(prefix, edge): x
         for edge, x in zip(EDGES, expected)
     }