Example 1
def get_metadata(stream, extract_cover=True):
    zin = zipfile.ZipFile(stream, 'r')
    odfs = odfmetaparser()
    parser = xml.sax.make_parser()
    parser.setFeature(xml.sax.handler.feature_namespaces, 1)
    parser.setContentHandler(odfs)
    content = zin.read('meta.xml')
    parser.parse(StringIO(content))
    data = odfs.seenfields
    mi = MetaInformation(None, [])
    if 'title' in data:
        mi.title = data['title']
    if data.get('initial-creator', '').strip():
        mi.authors = string_to_authors(data['initial-creator'])
    elif 'creator' in data:
        mi.authors = string_to_authors(data['creator'])
    if 'description' in data:
        mi.comments = data['description']
    if 'language' in data:
        mi.language = data['language']
    if data.get('keywords', ''):
        mi.tags = [x.strip() for x in data['keywords'].split(',') if x.strip()]
    opfmeta = False  # we need this later for the cover
    opfnocover = False
    if data.get('opf.metadata','') == 'true':
        # custom metadata contains OPF information
        opfmeta = True
        if data.get('opf.titlesort', ''):
            mi.title_sort = data['opf.titlesort']
        if data.get('opf.authors', ''):
            mi.authors = string_to_authors(data['opf.authors'])
        if data.get('opf.authorsort', ''):
            mi.author_sort = data['opf.authorsort']
        if data.get('opf.isbn', ''):
            isbn = check_isbn(data['opf.isbn'])
            if isbn is not None:
                mi.isbn = isbn
        if data.get('opf.publisher', ''):
            mi.publisher = data['opf.publisher']
        if data.get('opf.pubdate', ''):
            mi.pubdate = parse_date(data['opf.pubdate'], assume_utc=True)
        if data.get('opf.series', ''):
            mi.series = data['opf.series']
            if data.get('opf.seriesindex', ''):
                try:
                    mi.series_index = float(data['opf.seriesindex'])
                except ValueError:
                    mi.series_index = 1.0
        if data.get('opf.language', ''):
            cl = canonicalize_lang(data['opf.language'])
            if cl:
                mi.languages = [cl]
        opfnocover = data.get('opf.nocover', 'false') == 'true'
    if not opfnocover:
        try:
            read_cover(stream, zin, mi, opfmeta, extract_cover)
        except:
            pass  # Do not let an error reading the cover prevent reading other data

    return mi
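
For reference, a minimal sketch of how this ODT get_metadata() might be called; the import path below is the conventional calibre location for the ODT metadata reader and is an assumption, not something shown in the examples themselves.

# Usage sketch (assumed import path; the function itself is Example 1 above)
from calibre.ebooks.metadata.odt import get_metadata

with open('book.odt', 'rb') as stream:
    mi = get_metadata(stream, extract_cover=False)

print(mi.title, mi.authors)
if mi.tags:
    print('Tags:', ', '.join(mi.tags))
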
Example 2
File: odt.py Project: sss/calibre
def get_metadata(stream, extract_cover=True):
    zin = zipfile.ZipFile(stream, 'r')
    odfs = odfmetaparser()
    parser = xml.sax.make_parser()
    parser.setFeature(xml.sax.handler.feature_namespaces, 1)
    parser.setContentHandler(odfs)
    content = zin.read('meta.xml')
    parser.parse(StringIO(content))
    data = odfs.seenfields
    mi = MetaInformation(None, [])
    if 'title' in data:
        mi.title = data['title']
    if data.get('initial-creator', '').strip():
        mi.authors = string_to_authors(data['initial-creator'])
    elif 'creator' in data:
        mi.authors = string_to_authors(data['creator'])
    if 'description' in data:
        mi.comments = data['description']
    if 'language' in data:
        mi.language = data['language']
    if data.get('keywords', ''):
        mi.tags = [x.strip() for x in data['keywords'].split(',') if x.strip()]
    opfmeta = False # we need this later for the cover
    opfnocover = False
    if data.get('opf.metadata','') == 'true':
        # custom metadata contains OPF information
        opfmeta = True
        if data.get('opf.titlesort', ''):
            mi.title_sort = data['opf.titlesort']
        if data.get('opf.authors', ''):
            mi.authors = string_to_authors(data['opf.authors'])
        if data.get('opf.authorsort', ''):
            mi.author_sort = data['opf.authorsort']
        if data.get('opf.isbn', ''):
            isbn = check_isbn(data['opf.isbn'])
            if isbn is not None:
                mi.isbn = isbn
        if data.get('opf.publisher', ''):
            mi.publisher = data['opf.publisher']
        if data.get('opf.pubdate', ''):
            mi.pubdate = parse_date(data['opf.pubdate'], assume_utc=True)
        if data.get('opf.series', ''):
            mi.series = data['opf.series']
            if data.get('opf.seriesindex', ''):
                try:
                    mi.series_index = float(data['opf.seriesindex'])
                except ValueError:
                    mi.series_index = 1.0
        if data.get('opf.language', ''):
            cl = canonicalize_lang(data['opf.language'])
            if cl:
                mi.languages = [cl]
        opfnocover = data.get('opf.nocover', 'false') == 'true'
    if not opfnocover:
        try:
            read_cover(stream, zin, mi, opfmeta, extract_cover)
        except:
            pass # Do not let an error reading the cover prevent reading other data

    return mi
Example 3
def set_metadata_opf2(root, cover_prefix, mi, opf_version,
                      cover_data=None, apply_null=False, update_timestamp=False, force_identifiers=False, add_missing_cover=True):
    mi = MetaInformation(mi)
    for x in ('guide', 'toc', 'manifest', 'spine'):
        setattr(mi, x, None)
    opf = OPF(None, preparsed_opf=root, read_toc=False)
    if mi.languages:
        mi.languages = normalize_languages(list(opf.raw_languages) or [], mi.languages)

    opf.smart_update(mi, apply_null=apply_null)
    if getattr(mi, 'uuid', None):
        opf.application_id = mi.uuid
    if apply_null or force_identifiers:
        opf.set_identifiers(mi.get_identifiers())
    else:
        orig = opf.get_identifiers()
        orig.update(mi.get_identifiers())
        opf.set_identifiers({k:v for k, v in orig.iteritems() if k and v})
    if update_timestamp and mi.timestamp is not None:
        opf.timestamp = mi.timestamp
    raster_cover = opf.raster_cover
    if raster_cover is None and cover_data is not None and add_missing_cover:
        guide_raster_cover = opf.guide_raster_cover
        i = None
        if guide_raster_cover is not None:
            i = guide_raster_cover
            raster_cover = i.get('href')
        else:
            if cover_prefix and not cover_prefix.endswith('/'):
                cover_prefix += '/'
            name = cover_prefix + 'cover.jpg'
            i = create_manifest_item(opf.root, name, 'cover')
            if i is not None:
                raster_cover = name
        if i is not None:
            if opf_version.major < 3:
                [x.getparent().remove(x) for x in opf.root.xpath('//*[local-name()="meta" and @name="cover"]')]
                m = opf.create_metadata_element('meta', is_dc=False)
                m.set('name', 'cover'), m.set('content', i.get('id'))
            else:
                for x in opf.root.xpath('//*[local-name()="item" and contains(@properties, "cover-image")]'):
                    x.set('properties', x.get('properties').replace('cover-image', '').strip())
                i.set('properties', 'cover-image')

    with pretty_print:
        return opf.render(), raster_cover
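
A hedged sketch of how set_metadata_opf2() above might be driven. The lxml parsing step, the opf_bytes/cover_bytes placeholders, and the opf_version object (anything exposing a .major integer, as the cover branch implies) are assumptions for illustration; mi is a MetaInformation built elsewhere.

# Illustrative only: parse an existing OPF and rewrite its metadata.
# opf_bytes, cover_bytes, mi and opf_version are placeholders, not values
# taken from the examples above.
from lxml import etree

root = etree.fromstring(opf_bytes)   # raw OPF XML read from the book
rendered_opf, raster_cover = set_metadata_opf2(
    root, 'images', mi, opf_version,
    cover_data=cover_bytes, apply_null=False,
    update_timestamp=True, add_missing_cover=True)
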
Example 4
def set_metadata_opf2(root, cover_prefix, mi, opf_version,
                      cover_data=None, apply_null=False, update_timestamp=False, force_identifiers=False, add_missing_cover=True):
    mi = MetaInformation(mi)
    for x in ('guide', 'toc', 'manifest', 'spine'):
        setattr(mi, x, None)
    opf = OPF(None, preparsed_opf=root, read_toc=False)
    if mi.languages:
        mi.languages = normalize_languages(list(opf.raw_languages) or [], mi.languages)

    opf.smart_update(mi, apply_null=apply_null)
    if getattr(mi, 'uuid', None):
        opf.application_id = mi.uuid
    if apply_null or force_identifiers:
        opf.set_identifiers(mi.get_identifiers())
    else:
        orig = opf.get_identifiers()
        orig.update(mi.get_identifiers())
        opf.set_identifiers({k:v for k, v in orig.items() if k and v})
    if update_timestamp and mi.timestamp is not None:
        opf.timestamp = mi.timestamp
    raster_cover = opf.raster_cover
    if raster_cover is None and cover_data is not None and add_missing_cover:
        guide_raster_cover = opf.guide_raster_cover
        i = None
        if guide_raster_cover is not None:
            i = guide_raster_cover
            raster_cover = i.get('href')
        else:
            if cover_prefix and not cover_prefix.endswith('/'):
                cover_prefix += '/'
            name = cover_prefix + 'cover.jpg'
            i = create_manifest_item(opf.root, name, 'cover')
            if i is not None:
                raster_cover = name
        if i is not None:
            if opf_version.major < 3:
                [x.getparent().remove(x) for x in opf.root.xpath('//*[local-name()="meta" and @name="cover"]')]
                m = opf.create_metadata_element('meta', is_dc=False)
                m.set('name', 'cover'), m.set('content', i.get('id'))
            else:
                for x in opf.root.xpath('//*[local-name()="item" and contains(@properties, "cover-image")]'):
                    x.set('properties', x.get('properties').replace('cover-image', '').strip())
                i.set('properties', 'cover-image')

    with pretty_print:
        return opf.render(), raster_cover
Example 5
def do_add_empty(dbctx, title, authors, isbn, tags, series, series_index,
                 cover, identifiers, languages):
    mi = MetaInformation(None)
    if title is not None:
        mi.title = title
    if authors:
        mi.authors = authors
    if identifiers:
        mi.set_identifiers(identifiers)
    if isbn:
        mi.isbn = isbn
    if tags:
        mi.tags = tags
    if series:
        mi.series, mi.series_index = series, series_index
    if cover:
        mi.cover = cover
    if languages:
        mi.languages = languages
    ids, duplicates = dbctx.run('add', 'empty', read_cover(mi))
    prints(_('Added book ids: %s') % ','.join(map(str, ids)))
Example 6
def do_add_empty(
    dbctx, title, authors, isbn, tags, series, series_index, cover, identifiers,
    languages
):
    mi = MetaInformation(None)
    if title is not None:
        mi.title = title
    if authors:
        mi.authors = authors
    if identifiers:
        mi.set_identifiers(identifiers)
    if isbn:
        mi.isbn = isbn
    if tags:
        mi.tags = tags
    if series:
        mi.series, mi.series_index = series, series_index
    if cover:
        mi.cover = cover
    if languages:
        mi.languages = languages
    ids, duplicates = dbctx.run('add', 'empty', read_cover(mi))
    prints(_('Added book ids: %s') % ','.join(map(str, ids)))
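
The MetaInformation population pattern used by do_add_empty() above, as a standalone sketch; the import path is the usual calibre one and is an assumption, and all field values are placeholders.

# Sketch only: build a MetaInformation the way do_add_empty() does.
from calibre.ebooks.metadata import MetaInformation  # assumed import path

mi = MetaInformation(None)                     # empty record, no title yet
mi.title = 'Placeholder Title'
mi.authors = ['First Author', 'Second Author']
mi.tags = ['sample', 'placeholder']
mi.series, mi.series_index = 'Sample Series', 1.0
mi.languages = ['eng']
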
Example 7
def set_metadata(stream, mi, apply_null=False, update_timestamp=False):
    stream.seek(0)
    reader = OCFZipReader(stream, root=os.getcwdu())
    raster_cover = reader.opf.raster_cover
    mi = MetaInformation(mi)
    new_cdata = None
    replacements = {}
    try:
        new_cdata = mi.cover_data[1]
        if not new_cdata:
            raise Exception('no cover')
    except:
        try:
            new_cdata = open(mi.cover, 'rb').read()
        except:
            pass
    new_cover = cpath = None
    if new_cdata and raster_cover:
        try:
            cpath = posixpath.join(posixpath.dirname(reader.opf_path),
                                   raster_cover)
            cover_replacable = not reader.encryption_meta.is_encrypted(cpath) and \
                    os.path.splitext(cpath)[1].lower() in ('.png', '.jpg', '.jpeg')
            if cover_replacable:
                new_cover = _write_new_cover(new_cdata, cpath)
                replacements[cpath] = open(new_cover.name, 'rb')
        except:
            import traceback
            traceback.print_exc()

    for x in ('guide', 'toc', 'manifest', 'spine'):
        setattr(mi, x, None)
    if mi.languages:
        langs = []
        for lc in mi.languages:
            lc2 = lang_as_iso639_1(lc)
            if lc2: lc = lc2
            langs.append(lc)
        mi.languages = langs

    reader.opf.smart_update(mi)
    if apply_null:
        if not getattr(mi, 'series', None):
            reader.opf.series = None
        if not getattr(mi, 'tags', []):
            reader.opf.tags = []
        if not getattr(mi, 'isbn', None):
            reader.opf.isbn = None
    if update_timestamp and mi.timestamp is not None:
        reader.opf.timestamp = mi.timestamp

    newopf = StringIO(reader.opf.render())
    safe_replace(stream,
                 reader.container[OPF.MIMETYPE],
                 newopf,
                 extra_replacements=replacements)
    try:
        if cpath is not None:
            replacements[cpath].close()
            os.remove(replacements[cpath].name)
    except:
        pass
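
A sketch of invoking the EPUB set_metadata() shown above on a file opened for in-place update; both import paths are the conventional calibre locations and are assumptions here.

from calibre.ebooks.metadata import MetaInformation       # assumed path
from calibre.ebooks.metadata.epub import set_metadata     # assumed path

mi = MetaInformation('New Title', ['New Author'])
with open('book.epub', 'r+b') as f:
    # Rewrites the OPF (and, when possible, the cover) inside the EPUB zip.
    set_metadata(f, mi, apply_null=False, update_timestamp=False)
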
Example 8
    def _start_merge(self, book_list):
        db = self.gui.current_db
        self.previous = self.gui.library_view.currentIndex()
        # if any bad, bail.
        bad_list = filter(lambda x: not x['good'], book_list)
        if len(bad_list) > 0:
            d = error_dialog(self.gui,
                             _('Cannot Merge Epubs'),
                             _('%s books failed.') % len(bad_list),
                             det_msg='\n'.join(
                                 map(lambda x: x['error'], bad_list)))
            d.exec_()
        else:
            d = OrderEPUBsDialog(
                self.gui,
                _('Order EPUBs to Merge'),
                prefs,
                self.qaction.icon(),
                book_list,
            )
            d.exec_()
            if d.result() != d.Accepted:
                return

            book_list = d.get_books()

            logger.debug("2:%s" % (time.time() - self.t))
            self.t = time.time()

            deftitle = "%s %s" % (book_list[0]['title'], prefs['mergeword'])
            mi = MetaInformation(deftitle, ["Temp Author"])

            # if all same series, use series for name.  But only if all.
            serieslist = map(lambda x: x['series'],
                             filter(lambda x: x['series'] != None, book_list))
            if len(serieslist) == len(book_list):
                mi.title = serieslist[0]
                for sr in serieslist:
                    if mi.title != sr:
                        mi.title = deftitle
                        break

            # logger.debug("======================= mi.title:\n%s\n========================="%mi.title)

            mi.authors = list()
            authorslists = map(lambda x: x['authors'], book_list)
            for l in authorslists:
                for a in l:
                    if a not in mi.authors:
                        mi.authors.append(a)
            #mi.authors = [item for sublist in authorslists for item in sublist]

            # logger.debug("======================= mi.authors:\n%s\n========================="%mi.authors)

            #mi.author_sort = ' & '.join(map(lambda x : x['author_sort'], book_list))

            # logger.debug("======================= mi.author_sort:\n%s\n========================="%mi.author_sort)

            # set publisher if all from same publisher.
            publishers = set(map(lambda x: x['publisher'], book_list))
            if len(publishers) == 1:
                mi.publisher = publishers.pop()

            # logger.debug("======================= mi.publisher:\n%s\n========================="%mi.publisher)

            tagslists = map(lambda x: x['tags'], book_list)
            mi.tags = [item for sublist in tagslists for item in sublist]
            mi.tags.extend(prefs['mergetags'].split(','))

            # logger.debug("======================= mergetags:\n%s\n========================="%prefs['mergetags'])
            # logger.debug("======================= m.tags:\n%s\n========================="%mi.tags)

            languageslists = map(lambda x: x['languages'], book_list)
            mi.languages = [
                item for sublist in languageslists for item in sublist
            ]

            mi.series = ''

            # ======================= make book comments =========================

            if len(mi.authors) > 1:
                booktitle = lambda x: _("%s by %s") % (x['title'], ' & '.join(
                    x['authors']))
            else:
                booktitle = lambda x: x['title']

            mi.comments = (_("%s containing:") + "\n\n") % prefs['mergeword']

            if prefs['includecomments']:

                def bookcomments(x):
                    if x['comments']:
                        return '<b>%s</b>\n\n%s' % (booktitle(x),
                                                    x['comments'])
                    else:
                        return '<b>%s</b>\n' % booktitle(x)

                mi.comments += ('<div class="mergedbook">' +
                                '<hr></div><div class="mergedbook">'.join(
                                    [bookcomments(x)
                                     for x in book_list]) + '</div>')
            else:
                mi.comments += '\n'.join([booktitle(x) for x in book_list])

            # ======================= make book entry =========================

            book_id = db.create_book_entry(mi, add_duplicates=True)

            # set default cover to same as first book
            coverdata = db.cover(book_list[0]['calibre_id'], index_is_id=True)
            if coverdata:
                db.set_cover(book_id, coverdata)

            # ======================= custom columns ===================

            logger.debug("3:%s" % (time.time() - self.t))
            self.t = time.time()

            # have to get custom from db for each book.
            idslist = map(lambda x: x['calibre_id'], book_list)

            custom_columns = self.gui.library_view.model().custom_columns
            for col, action in prefs['custom_cols'].iteritems():
                #logger.debug("col: %s action: %s"%(col,action))

                if col not in custom_columns:
                    logger.debug("%s not an existing column, skipping." % col)
                    continue

                coldef = custom_columns[col]
                #logger.debug("coldef:%s"%coldef)

                if action not in permitted_values[coldef['datatype']]:
                    logger.debug(
                        "%s not a valid column type for %s, skipping." %
                        (col, action))
                    continue

                label = coldef['label']

                found = False
                value = None
                idx = None
                if action == 'first':
                    idx = 0

                if action == 'last':
                    idx = -1

                if action in ['first', 'last']:
                    value = db.get_custom(idslist[idx],
                                          label=label,
                                          index_is_id=True)
                    if coldef['datatype'] == 'series' and value != None:
                        # get the number-in-series, too.
                        value = "%s [%s]" % (
                            value,
                            db.get_custom_extra(
                                idslist[idx], label=label, index_is_id=True))
                    found = True

                if action in ('add', 'average', 'averageall'):
                    value = 0.0
                    count = 0
                    for bid in idslist:
                        try:
                            value += db.get_custom(bid,
                                                   label=label,
                                                   index_is_id=True)
                            found = True
                            # only count ones with values unless averageall
                            count += 1
                        except:
                            # if not set, it's None and fails.
                            # only count ones with values unless averageall
                            if action == 'averageall':
                                count += 1

                    if found and action in ('average', 'averageall'):
                        value = value / count

                    if coldef['datatype'] == 'int':
                        value += 0.5  # so int rounds instead of truncs.

                if action == 'and':
                    value = True
                    for bid in idslist:
                        try:
                            value = value and db.get_custom(
                                bid, label=label, index_is_id=True)
                            found = True
                        except:
                            # if not set, it's None and fails.
                            pass

                if action == 'or':
                    value = False
                    for bid in idslist:
                        try:
                            value = value or db.get_custom(
                                bid, label=label, index_is_id=True)
                            found = True
                        except:
                            # if not set, it's None and fails.
                            pass

                if action == 'newest':
                    value = None
                    for bid in idslist:
                        try:
                            ivalue = db.get_custom(bid,
                                                   label=label,
                                                   index_is_id=True)
                            if not value or ivalue > value:
                                value = ivalue
                                found = True
                        except:
                            # if not set, it's None and fails.
                            pass

                if action == 'oldest':
                    value = None
                    for bid in idslist:
                        try:
                            ivalue = db.get_custom(bid,
                                                   label=label,
                                                   index_is_id=True)
                            if not value or ivalue < value:
                                value = ivalue
                                found = True
                        except:
                            # if not set, it's None and fails.
                            pass

                if action == 'union':
                    if not coldef['is_multiple']:
                        action = 'concat'
                    else:
                        value = set()
                        for bid in idslist:
                            try:
                                value = value.union(
                                    db.get_custom(bid,
                                                  label=label,
                                                  index_is_id=True))
                                found = True
                            except:
                                # if not set, it's None and fails.
                                pass

                if action == 'concat':
                    value = ""
                    for bid in idslist:
                        try:
                            value = value + ' ' + db.get_custom(
                                bid, label=label, index_is_id=True)
                            found = True
                        except:
                            # if not set, it's None and fails.
                            pass
                    value = value.strip()

                if found and value != None:
                    db.set_custom(book_id, value, label=label, commit=False)

            db.commit()

            logger.debug("4:%s" % (time.time() - self.t))
            self.t = time.time()

            self.gui.library_view.model().books_added(1)
            self.gui.library_view.select_rows([book_id])

            logger.debug("5:%s" % (time.time() - self.t))
            self.t = time.time()

            confirm(
                '\n' +
                _('''The book for the new Merged EPUB has been created and default metadata filled in.

However, the EPUB will *not* be created until after you've reviewed, edited, and closed the metadata dialog that follows.'''
                  ), 'epubmerge_created_now_edit_again', self.gui)

            self.gui.iactions['Edit Metadata'].edit_metadata(False)

            logger.debug("5:%s" % (time.time() - self.t))
            self.t = time.time()
            self.gui.tags_view.recount()

            totalsize = sum(map(lambda x: x['epub_size'], book_list))

            logger.debug("merging %s EPUBs totaling %s" %
                         (len(book_list), gethumanreadable(totalsize)))
            if len(book_list) > 100 or totalsize > 5 * 1024 * 1024:
                confirm(
                    '\n' +
                    _('''You're merging %s EPUBs totaling %s.  Calibre will be locked until the merge is finished.'''
                      ) % (len(book_list), gethumanreadable(totalsize)),
                    'epubmerge_edited_now_merge_again', self.gui)

            self.gui.status_bar.show_message(
                _('Merging %s EPUBs...') % len(book_list), 60000)

            mi = db.get_metadata(book_id, index_is_id=True)

            mergedepub = PersistentTemporaryFile(suffix='.epub')
            epubstomerge = map(lambda x: x['epub'], book_list)

            coverjpgpath = None
            if mi.has_cover:
                # grab the path to the real image.
                coverjpgpath = os.path.join(db.library_path,
                                            db.path(book_id, index_is_id=True),
                                            'cover.jpg')

            self.do_merge(mergedepub,
                          epubstomerge,
                          authoropts=mi.authors,
                          titleopt=mi.title,
                          descopt=mi.comments,
                          tags=mi.tags,
                          languages=mi.languages,
                          titlenavpoints=prefs['titlenavpoints'],
                          flattentoc=prefs['flattentoc'],
                          printtimes=True,
                          coverjpgpath=coverjpgpath,
                          keepmetadatafiles=prefs['keepmeta'])

            logger.debug("6:%s" % (time.time() - self.t))
            logger.debug(_("Merge finished, output in:\n%s") % mergedepub.name)
            self.t = time.time()
            db.add_format_with_hooks(book_id,
                                     'EPUB',
                                     mergedepub,
                                     index_is_id=True)

            logger.debug("7:%s" % (time.time() - self.t))
            self.t = time.time()

            self.gui.status_bar.show_message(
                _('Finished merging %s EPUBs.') % len(book_list), 3000)
            self.gui.library_view.model().refresh_ids([book_id])
            self.gui.tags_view.recount()
            current = self.gui.library_view.currentIndex()
            self.gui.library_view.model().current_changed(
                current, self.previous)
Example 9
    def _start_merge(self,book_list,tdir=None):
        db=self.gui.current_db
        self.previous = self.gui.library_view.currentIndex()
        # if any bad, bail.
        bad_list = [ x for x in book_list if not x['good'] ]
        if len(bad_list) > 0:
            d = error_dialog(self.gui,
                             _('Cannot Merge Epubs'),
                             _('%s books failed.')%len(bad_list),
                             det_msg='\n'.join( [ x['error'] for x in bad_list ]))
            d.exec_()
        else:
            d = OrderEPUBsDialog(self.gui,
                                 _('Order EPUBs to Merge'),
                                 prefs,
                                 self.qaction.icon(),
                                 book_list,
                                 )
            d.exec_()
            if d.result() != d.Accepted:
                return

            book_list = d.get_books()

            logger.debug("2:%s"%(time.time()-self.t))
            self.t = time.time()

            deftitle = "%s %s" % (book_list[0]['title'],prefs['mergeword'])
            mi = MetaInformation(deftitle,["Temp Author"])

            # if all same series, use series for name.  But only if all.
            serieslist = [ x['series'] for x in book_list if x['series'] != None ]
            if len(serieslist) == len(book_list):
                mi.title = serieslist[0]
                for sr in serieslist:
                    if mi.title != sr:
                        mi.title = deftitle;
                        break

            # logger.debug("======================= mi.title:\n%s\n========================="%mi.title)

            mi.authors = list()
            authorslists = [ x['authors'] for x in book_list ]
            for l in authorslists:
                for a in l:
                    if a not in mi.authors:
                        mi.authors.append(a)
            #mi.authors = [item for sublist in authorslists for item in sublist]

            # logger.debug("======================= mi.authors:\n%s\n========================="%mi.authors)

            #mi.author_sort = ' & '.join([ x['author_sort'] for x in book_list ])

            # logger.debug("======================= mi.author_sort:\n%s\n========================="%mi.author_sort)

            # set publisher if all from same publisher.
            publishers = set([ x['publisher'] for x in book_list ])
            if len(publishers) == 1:
                mi.publisher = publishers.pop()

            # logger.debug("======================= mi.publisher:\n%s\n========================="%mi.publisher)

            tagslists = [ x['tags'] for x in book_list ]
            mi.tags = [item for sublist in tagslists for item in sublist]
            mi.tags.extend(prefs['mergetags'].split(','))

            # logger.debug("======================= mergetags:\n%s\n========================="%prefs['mergetags'])
            # logger.debug("======================= m.tags:\n%s\n========================="%mi.tags)

            languageslists = [ x['languages'] for x in book_list ]
            mi.languages = [item for sublist in languageslists for item in sublist]

            mi.series = ''
            if prefs['firstseries'] and book_list[0]['series']:
                mi.series = book_list[0]['series']
                mi.series_index = book_list[0]['series_index']

            # ======================= make book comments =========================

            if len(mi.authors) > 1:
                booktitle = lambda x : _("%s by %s") % (x['title'],' & '.join(x['authors']))
            else:
                booktitle = lambda x : x['title']

            mi.comments = ("<p>"+_("%s containing:")+"</p>") % prefs['mergeword']

            if prefs['includecomments']:
                def bookcomments(x):
                    if x['comments']:
                        return '<p><b>%s</b></p>%s'%(booktitle(x),x['comments'])
                    else:
                        return '<b>%s</b><br/>'%booktitle(x)

                mi.comments += ('<div class="mergedbook">' +
                                '<hr></div><div class="mergedbook">'.join([ bookcomments(x) for x in book_list]) +
                                '</div>')
            else:
                mi.comments += '<br/>'.join( [ booktitle(x) for x in book_list ] )

            # ======================= make book entry =========================

            book_id = db.create_book_entry(mi,
                                           add_duplicates=True)

            # set default cover to same as first book
            coverdata = db.cover(book_list[0]['calibre_id'],index_is_id=True)
            if coverdata:
                db.set_cover(book_id, coverdata)

            # ======================= custom columns ===================

            logger.debug("3:%s"%(time.time()-self.t))
            self.t = time.time()

            # have to get custom from db for each book.
            idslist = [ x['calibre_id'] for x in book_list ]

            custom_columns = self.gui.library_view.model().custom_columns
            for col, action in six.iteritems(prefs['custom_cols']):
                #logger.debug("col: %s action: %s"%(col,action))

                if col not in custom_columns:
                    logger.debug("%s not an existing column, skipping."%col)
                    continue

                coldef = custom_columns[col]
                #logger.debug("coldef:%s"%coldef)

                if action not in permitted_values[coldef['datatype']]:
                    logger.debug("%s not a valid column type for %s, skipping."%(col,action))
                    continue

                label = coldef['label']

                found = False
                value = None
                idx = None
                if action == 'first':
                    idx = 0

                if action == 'last':
                    idx = -1

                if action in ['first','last']:
                    value = db.get_custom(idslist[idx], label=label, index_is_id=True)
                    if coldef['datatype'] == 'series' and value != None:
                        # get the number-in-series, too.
                        value = "%s [%s]"%(value, db.get_custom_extra(idslist[idx], label=label, index_is_id=True))
                    found = True

                if action in ('add','average','averageall'):
                    value = 0.0
                    count = 0
                    for bid in idslist:
                        try:
                            value += db.get_custom(bid, label=label, index_is_id=True)
                            found = True
                            # only count ones with values unless averageall
                            count += 1
                        except:
                            # if not set, it's None and fails.
                            # only count ones with values unless averageall
                            if action == 'averageall':
                                count += 1

                    if found and action in ('average','averageall'):
                        value = value / count

                    if coldef['datatype'] == 'int':
                        value += 0.5 # so int rounds instead of truncs.

                if action == 'and':
                    value = True
                    for bid in idslist:
                        try:
                            value = value and db.get_custom(bid, label=label, index_is_id=True)
                            found = True
                        except:
                            # if not set, it's None and fails.
                            pass

                if action == 'or':
                    value = False
                    for bid in idslist:
                        try:
                            value = value or db.get_custom(bid, label=label, index_is_id=True)
                            found = True
                        except:
                            # if not set, it's None and fails.
                            pass

                if action == 'newest':
                    value = None
                    for bid in idslist:
                        try:
                            ivalue = db.get_custom(bid, label=label, index_is_id=True)
                            if not value or  ivalue > value:
                                value = ivalue
                                found = True
                        except:
                            # if not set, it's None and fails.
                            pass

                if action == 'oldest':
                    value = None
                    for bid in idslist:
                        try:
                            ivalue = db.get_custom(bid, label=label, index_is_id=True)
                            if not value or  ivalue < value:
                                value = ivalue
                                found = True
                        except:
                            # if not set, it's None and fails.
                            pass

                if action == 'union':
                    if not coldef['is_multiple']:
                        action = 'concat'
                    else:
                        value = set()
                        for bid in idslist:
                            try:
                                value = value.union(db.get_custom(bid, label=label, index_is_id=True))
                                found = True
                            except:
                                # if not set, it's None and fails.
                                pass

                if action == 'concat':
                    value = ""
                    for bid in idslist:
                        try:
                            value = value + ' ' + db.get_custom(bid, label=label, index_is_id=True)
                            found = True
                        except:
                            # if not set, it's None and fails.
                            pass
                    value = value.strip()

                if action == 'now':
                    value = datetime.now()
                    found = True
                    logger.debug("now: %s"%value)

                if found and value != None:
                    logger.debug("value: %s"%value)
                    db.set_custom(book_id,value,label=label,commit=False)

            db.commit()

            logger.debug("4:%s"%(time.time()-self.t))
            self.t = time.time()

            self.gui.library_view.model().books_added(1)
            self.gui.library_view.select_rows([book_id])

            logger.debug("5:%s"%(time.time()-self.t))
            self.t = time.time()

            confirm('\n'+_('''The book for the new Merged EPUB has been created and default metadata filled in.

However, the EPUB will *not* be created until after you've reviewed, edited, and closed the metadata dialog that follows.'''),
                    'epubmerge_created_now_edit_again',
                    self.gui,
                    title=_("EpubMerge"),
                    show_cancel_button=False)

            self.gui.iactions['Edit Metadata'].edit_metadata(False)

            logger.debug("5:%s"%(time.time()-self.t))
            self.t = time.time()
            self.gui.tags_view.recount()

            totalsize = sum([ x['epub_size'] for x in book_list ])
            logger.debug("merging %s EPUBs totaling %s"%(len(book_list),gethumanreadable(totalsize)))
            confirm('\n'+_('''EpubMerge will be done in a Background job.  The merged EPUB will not appear in the Library until finished.

You are merging %s EPUBs totaling %s.''')%(len(book_list),gethumanreadable(totalsize)),
                    'epubmerge_background_merge_again',
                    self.gui,
                    title=_("EpubMerge"),
                    show_cancel_button=False)

            # if len(book_list) > 100 or totalsize > 5*1024*1024:
            #     confirm('\n'+_('''You're merging %s EPUBs totaling %s.  Calibre will be locked until the merge is finished.''')%(len(book_list),gethumanreadable(totalsize)),
            #             'epubmerge_edited_now_merge_again',
            #             self.gui)

            self.gui.status_bar.show_message(_('Merging %s EPUBs...')%len(book_list), 60000)

            mi = db.get_metadata(book_id,index_is_id=True)

            mergedepub = PersistentTemporaryFile(prefix="output_",
                                                 suffix='.epub',
                                                 dir=tdir)
            epubstomerge = [ x['epub'] for x in book_list ]
            epubtitles = {}
            for x in book_list:
                # save titles indexed by epub for reporting from BG
                epubtitles[x['epub']]=_("%s by %s") % (x['title'],' & '.join(x['authors']))

            coverjpgpath = None
            if mi.has_cover:
                # grab the path to the real image.
                coverjpgpath = os.path.join(db.library_path, db.path(book_id, index_is_id=True), 'cover.jpg')


            func = 'arbitrary_n'
            cpus = self.gui.job_manager.server.pool_size
            args = ['calibre_plugins.epubmerge.jobs',
                    'do_merge_bg',
                    ({'book_id':book_id,
                      'book_count':len(book_list),
                      'tdir':tdir,
                      'outputepubfn':mergedepub.name,
                      'inputepubfns':epubstomerge, # already .name'ed
                      'epubtitles':epubtitles, # for reporting
                      'authoropts':mi.authors,
                      'titleopt':mi.title,
                      'descopt':mi.comments,
                      'tags':mi.tags,
                      'languages':mi.languages,
                      'titlenavpoints':prefs['titlenavpoints'],
                      'originalnavpoints':prefs['originalnavpoints'],
                      'flattentoc':prefs['flattentoc'],
                      'printtimes':True,
                      'coverjpgpath':coverjpgpath,
                      'keepmetadatafiles':prefs['keepmeta']
                      },
                     cpus)]
            desc = _('EpubMerge: %s')%mi.title
            job = self.gui.job_manager.run_job(
                self.Dispatcher(self.merge_done),
                func, args=args,
                description=desc)

            self.gui.jobs_pointer.start()
            self.gui.status_bar.show_message(_('Starting EpubMerge'),3000)
Example 10
    def _start_merge(self,book_list):
        db=self.gui.current_db
        self.previous = self.gui.library_view.currentIndex()
        # if any bad, bail.
        bad_list = filter(lambda x : not x['good'], book_list)
        if len(bad_list) > 0:
            d = error_dialog(self.gui,
                             _('Cannot Merge Epubs'),
                             _('%s books failed.')%len(bad_list),
                             det_msg='\n'.join(map(lambda x : x['error'] , bad_list)))
            d.exec_()
        else:
            d = OrderEPUBsDialog(self.gui,
                                 _('Order EPUBs to Merge'),
                                 prefs,
                                 self.qaction.icon(),
                                 book_list,
                                 )
            d.exec_()
            if d.result() != d.Accepted:
                return

            book_list = d.get_books()
            
            print("2:%s"%(time.time()-self.t))
            self.t = time.time()

            deftitle = "%s %s" % (book_list[0]['title'],prefs['mergeword'])
            mi = MetaInformation(deftitle,["Temp Author"])

            # if all same series, use series for name.  But only if all.
            serieslist = map(lambda x : x['series'], filter(lambda x : x['series'] != None, book_list))
            if len(serieslist) == len(book_list):
                mi.title = serieslist[0]
                for sr in serieslist:
                    if mi.title != sr:
                        mi.title = deftitle;
                        break
                
            # print("======================= mi.title:\n%s\n========================="%mi.title)

            mi.authors = list()
            authorslists = map(lambda x : x['authors'], book_list)
            for l in authorslists:
                for a in l:
                    if a not in mi.authors:
                        mi.authors.append(a)
            #mi.authors = [item for sublist in authorslists for item in sublist]

            # print("======================= mi.authors:\n%s\n========================="%mi.authors)
            
            #mi.author_sort = ' & '.join(map(lambda x : x['author_sort'], book_list))

            # print("======================= mi.author_sort:\n%s\n========================="%mi.author_sort)

            # set publisher if all from same publisher.
            publishers = set(map(lambda x : x['publisher'], book_list))
            if len(publishers) == 1:
                mi.publisher = publishers.pop()
            
            # print("======================= mi.publisher:\n%s\n========================="%mi.publisher)

            tagslists = map(lambda x : x['tags'], book_list)
            mi.tags = [item for sublist in tagslists for item in sublist]
            mi.tags.extend(prefs['mergetags'].split(','))

            # print("======================= mergetags:\n%s\n========================="%prefs['mergetags'])
            # print("======================= m.tags:\n%s\n========================="%mi.tags)
            
            languageslists = map(lambda x : x['languages'], book_list)
            mi.languages = [item for sublist in languageslists for item in sublist]

            mi.series = ''

            # ======================= make book comments =========================
            
            if len(mi.authors) > 1:
                booktitle = lambda x : _("%s by %s") % (x['title'],' & '.join(x['authors']))
            else:
                booktitle = lambda x : x['title']
                
            mi.comments = (_("%s containing:")+"\n\n") % prefs['mergeword']
            
            if prefs['includecomments']:
                def bookcomments(x):
                    if x['comments']:
                        return '<b>%s</b>\n\n%s'%(booktitle(x),x['comments'])
                    else:
                        return '<b>%s</b>\n'%booktitle(x)
                    
                mi.comments += ('<div class="mergedbook">' +
                                '<hr></div><div class="mergedbook">'.join([ bookcomments(x) for x in book_list]) +
                                '</div>')
            else:
                mi.comments += '\n'.join( [ booktitle(x) for x in book_list ] )
                
            # ======================= make book entry =========================

            book_id = db.create_book_entry(mi,
                                           add_duplicates=True)

            # set default cover to same as first book
            coverdata = db.cover(book_list[0]['calibre_id'],index_is_id=True)
            if coverdata:
                db.set_cover(book_id, coverdata)
            
            # ======================= custom columns ===================

            print("3:%s"%(time.time()-self.t))
            self.t = time.time()

            # have to get custom from db for each book.
            idslist = map(lambda x : x['calibre_id'], book_list)
            
            custom_columns = self.gui.library_view.model().custom_columns
            for col, action in prefs['custom_cols'].iteritems():
                #print("col: %s action: %s"%(col,action))
                
                if col not in custom_columns:
                    print("%s not an existing column, skipping."%col)
                    continue
                
                coldef = custom_columns[col]
                #print("coldef:%s"%coldef)
                
                if action not in permitted_values[coldef['datatype']]:
                    print("%s not a valid column type for %s, skipping."%(col,action))
                    continue
                
                label = coldef['label']

                found = False
                value = None
                idx = None
                if action == 'first':
                    idx = 0

                if action == 'last':
                    idx = -1

                if action in ['first','last']:
                    value = db.get_custom(idslist[idx], label=label, index_is_id=True)
                    if coldef['datatype'] == 'series' and value != None:
                        # get the number-in-series, too.
                        value = "%s [%s]"%(value, db.get_custom_extra(idslist[idx], label=label, index_is_id=True))
                    found = True

                if action in ('add','average','averageall'):
                    value = 0.0
                    count = 0
                    for bid in idslist:
                        try:
                            value += db.get_custom(bid, label=label, index_is_id=True)
                            found = True
                            # only count ones with values unless averageall
                            count += 1
                        except:
                            # if not set, it's None and fails.
                            # only count ones with values unless averageall
                            if action == 'averageall':
                                count += 1
                                
                    if found and action in ('average','averageall'):
                        value = value / count
                        
                    if coldef['datatype'] == 'int':
                        value += 0.5 # so int rounds instead of truncs.
                
                if action == 'and':
                    value = True
                    for bid in idslist:
                        try:
                            value = value and db.get_custom(bid, label=label, index_is_id=True)
                            found = True
                        except:
                            # if not set, it's None and fails.
                            pass
                
                if action == 'or':
                    value = False
                    for bid in idslist:
                        try:
                            value = value or db.get_custom(bid, label=label, index_is_id=True)
                            found = True
                        except:
                            # if not set, it's None and fails.
                            pass
                
                if action == 'newest':
                    value = None
                    for bid in idslist:
                        try:
                            ivalue = db.get_custom(bid, label=label, index_is_id=True)
                            if not value or  ivalue > value:
                                value = ivalue
                                found = True
                        except:
                            # if not set, it's None and fails.
                            pass
                    
                if action == 'oldest':
                    value = None
                    for bid in idslist:
                        try:
                            ivalue = db.get_custom(bid, label=label, index_is_id=True)
                            if not value or  ivalue < value:
                                value = ivalue
                                found = True
                        except:
                            # if not set, it's None and fails.
                            pass
                    
                if action == 'union':
                    if not coldef['is_multiple']:
                        action = 'concat'
                    else:
                        value = set()
                        for bid in idslist:
                            try:
                                value = value.union(db.get_custom(bid, label=label, index_is_id=True))
                                found = True
                            except:
                                # if not set, it's None and fails.
                                pass
                        
                if action == 'concat':
                    value = ""
                    for bid in idslist:
                        try:
                            value = value + ' ' + db.get_custom(bid, label=label, index_is_id=True)
                            found = True
                        except:
                            # if not set, it's None and fails.
                            pass
                    value = value.strip()
                    
                if found and value != None:
                    db.set_custom(book_id,value,label=label,commit=False)
                
            db.commit()
            
            print("4:%s"%(time.time()-self.t))
            self.t = time.time()
            
            self.gui.library_view.model().books_added(1)
            self.gui.library_view.select_rows([book_id])
            
            print("5:%s"%(time.time()-self.t))
            self.t = time.time()
            
            confirm('\n'+_('''The book for the new Merged EPUB has been created and default metadata filled in.

However, the EPUB will *not* be created until after you've reviewed, edited, and closed the metadata dialog that follows.'''),
                    'epubmerge_created_now_edit_again',
                    self.gui)
            
            self.gui.iactions['Edit Metadata'].edit_metadata(False)

            print("5:%s"%(time.time()-self.t))
            self.t = time.time()
            self.gui.tags_view.recount()

            totalsize = sum(map(lambda x : x['epub_size'], book_list))

            print("merging %s EPUBs totaling %s"%(len(book_list),gethumanreadable(totalsize)))
            if len(book_list) > 100 or totalsize > 5*1024*1024:
                confirm('\n'+_('''You're merging %s EPUBs totaling %s.  Calibre will be locked until the merge is finished.''')%(len(book_list),gethumanreadable(totalsize)),
                        'epubmerge_edited_now_merge_again',
                        self.gui)
            
            self.gui.status_bar.show_message(_('Merging %s EPUBs...')%len(book_list), 60000)

            mi = db.get_metadata(book_id,index_is_id=True)
            
            mergedepub = PersistentTemporaryFile(suffix='.epub')
            epubstomerge = map(lambda x : x['epub'] , book_list)
            
            coverjpgpath = None
            if mi.has_cover:
                # grab the path to the real image.
                coverjpgpath = os.path.join(db.library_path, db.path(book_id, index_is_id=True), 'cover.jpg')
                
            self.do_merge( mergedepub,
                           epubstomerge,
                           authoropts=mi.authors,
                           titleopt=mi.title,
                           descopt=mi.comments,
                           tags=mi.tags,
                           languages=mi.languages,
                           titlenavpoints=prefs['titlenavpoints'],
                           flattentoc=prefs['flattentoc'],
                           printtimes=True,
                           coverjpgpath=coverjpgpath,
                           keepmetadatafiles=prefs['keepmeta'] )
                 
            print("6:%s"%(time.time()-self.t))
            print(_("Merge finished, output in:\n%s")%mergedepub.name)
            self.t = time.time()
            db.add_format_with_hooks(book_id,
                                     'EPUB',
                                     mergedepub, index_is_id=True)
            
            print("7:%s"%(time.time()-self.t))
            self.t = time.time()
            
            self.gui.status_bar.show_message(_('Finished merging %s EPUBs.')%len(book_list), 3000)
            self.gui.library_view.model().refresh_ids([book_id])
            self.gui.tags_view.recount()
            current = self.gui.library_view.currentIndex()
            self.gui.library_view.model().current_changed(current, self.previous)
Example 11
def get_metadata(stream, extract_cover=True):
    whitespace = re.compile(r'\s+')

    def normalize(s):
        return whitespace.sub(' ', s).strip()

    with ZipFile(stream) as zf:
        meta = zf.read('meta.xml')
        root = fromstring(meta)

        def find(field):
            ns, tag = fields[field]
            ans = root.xpath('//ns0:{}'.format(tag), namespaces={'ns0': ns})
            if ans:
                return normalize(
                    tostring(ans[0],
                             method='text',
                             encoding='unicode',
                             with_tail=False)).strip()

        mi = MetaInformation(None, [])
        title = find('title')
        if title:
            mi.title = title
        creator = find('initial-creator') or find('creator')
        if creator:
            mi.authors = string_to_authors(creator)
        desc = find('description')
        if desc:
            mi.comments = desc
        lang = find('language')
        if lang and canonicalize_lang(lang):
            mi.languages = [canonicalize_lang(lang)]
        kw = find('keyword') or find('keywords')
        if kw:
            mi.tags = [x.strip() for x in kw.split(',') if x.strip()]
        data = {}
        for tag in root.xpath('//ns0:user-defined',
                              namespaces={'ns0': fields['user-defined'][0]}):
            name = (tag.get('{%s}name' % METANS) or '').lower()
            vtype = tag.get('{%s}value-type' % METANS) or 'string'
            val = tag.text
            if name and val:
                if vtype == 'boolean':
                    val = val == 'true'
                data[name] = val
        opfmeta = False  # we need this later for the cover
        opfnocover = False
        if data.get('opf.metadata'):
            # custom metadata contains OPF information
            opfmeta = True
            if data.get('opf.titlesort', ''):
                mi.title_sort = data['opf.titlesort']
            if data.get('opf.authors', ''):
                mi.authors = string_to_authors(data['opf.authors'])
            if data.get('opf.authorsort', ''):
                mi.author_sort = data['opf.authorsort']
            if data.get('opf.isbn', ''):
                isbn = check_isbn(data['opf.isbn'])
                if isbn is not None:
                    mi.isbn = isbn
            if data.get('opf.publisher', ''):
                mi.publisher = data['opf.publisher']
            if data.get('opf.pubdate', ''):
                mi.pubdate = parse_date(data['opf.pubdate'], assume_utc=True)
            if data.get('opf.identifiers'):
                try:
                    mi.identifiers = json.loads(data['opf.identifiers'])
                except Exception:
                    pass
            if data.get('opf.rating'):
                try:
                    mi.rating = max(0, min(float(data['opf.rating']), 10))
                except Exception:
                    pass
            if data.get('opf.series', ''):
                mi.series = data['opf.series']
                if data.get('opf.seriesindex', ''):
                    try:
                        mi.series_index = float(data['opf.seriesindex'])
                    except Exception:
                        mi.series_index = 1.0
            if data.get('opf.language', ''):
                cl = canonicalize_lang(data['opf.language'])
                if cl:
                    mi.languages = [cl]
            opfnocover = data.get('opf.nocover', False)
        if not opfnocover:
            try:
                read_cover(stream, zf, mi, opfmeta, extract_cover)
            except Exception:
                pass  # Do not let an error reading the cover prevent reading other data

    return mi
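
The find() helper and the user-defined loop above rely on two names defined elsewhere in the module: METANS (the ODF meta namespace) and a fields mapping from logical field names to (namespace, tag) pairs. A hedged sketch of what those definitions could look like, using the standard ODF and Dublin Core namespaces; the real calibre module may differ in detail.

# Assumed constants for illustration only.
DCNS = 'http://purl.org/dc/elements/1.1/'
METANS = 'urn:oasis:names:tc:opendocument:xmlns:meta:1.0'

fields = {
    'title':           (DCNS,   'title'),
    'description':     (DCNS,   'description'),
    'language':        (DCNS,   'language'),
    'creator':         (DCNS,   'creator'),
    'initial-creator': (METANS, 'initial-creator'),
    'keyword':         (METANS, 'keyword'),
    'keywords':        (METANS, 'keywords'),
    'user-defined':    (METANS, 'user-defined'),
}

A minimal usage sketch for get_metadata() itself, assuming it is imported from calibre's ODT metadata module together with the names it uses (ZipFile, fromstring, MetaInformation, and so on); the file path is only a placeholder.

# Hypothetical usage; 'book.odt' is a placeholder path.
with open('book.odt', 'rb') as stream:
    mi = get_metadata(stream, extract_cover=False)
    print(mi.title)
    print(mi.authors)
    print(mi.tags)
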
Esempio n. 12
0
    def _do_split(self,
                  db,
                  source_id,
                  misource,
                  splitepub,
                  origlines,
                  newspecs,
                  deftitle=None):

        linenums, changedtocs, checkedalways = newspecs
        # logger.debug("updated tocs:%s"%changedtocs)
        if not self.has_lines(linenums):
            return
        #logger.debug("2:%s"%(time.time()-self.t))
        self.t = time.time()

        #logger.debug("linenums:%s"%linenums)

        defauthors = None

        if not deftitle and prefs['copytoctitle']:
            if linenums[0] in changedtocs:
                deftitle=changedtocs[linenums[0]][0] # already unicoded()'ed
            elif len(origlines[linenums[0]]['toc']) > 0:
                deftitle=unicode(origlines[linenums[0]]['toc'][0])
            #logger.debug("deftitle:%s"%deftitle)

        if not deftitle and prefs['copytitle']:
            deftitle = _("%s Split") % misource.title

        if prefs['copyauthors']:
            defauthors = misource.authors

        mi = MetaInformation(deftitle,defauthors)

        if prefs['copytags']:
            mi.tags = misource.tags # [item for sublist in tagslists for item in sublist]

        if prefs['copylanguages']:
            mi.languages = misource.languages

        if prefs['copyseries']:
            mi.series = misource.series

        if prefs['copydate']:
            mi.timestamp = misource.timestamp

        if prefs['copyrating']:
            mi.rating = misource.rating

        if prefs['copypubdate']:
            mi.pubdate = misource.pubdate

        if prefs['copypublisher']:
            mi.publisher = misource.publisher

        if prefs['copyidentifiers']:
            mi.set_identifiers(misource.get_identifiers())

        if prefs['copycomments'] and misource.comments:
            mi.comments = "<p>"+_("Split from:")+"</p>" + misource.comments

        #logger.debug("mi:%s"%mi)
        book_id = db.create_book_entry(mi,
                                       add_duplicates=True)

        if prefs['copycover'] and misource.has_cover:
            db.set_cover(book_id, db.cover(source_id,index_is_id=True))

        #logger.debug("3:%s"%(time.time()-self.t))
        self.t = time.time()

        custom_columns = self.gui.library_view.model().custom_columns
        for col, action in prefs['custom_cols'].iteritems():
            #logger.debug("col: %s action: %s"%(col,action))

            if col not in custom_columns:
                #logger.debug("%s not an existing column, skipping."%col)
                continue

            coldef = custom_columns[col]
            #logger.debug("coldef:%s"%coldef)
            label = coldef['label']
            value = db.get_custom(source_id, label=label, index_is_id=True)
            if value:
                db.set_custom(book_id,value,label=label,commit=False)

        #logger.debug("3.5:%s"%(time.time()-self.t))
        self.t = time.time()

        if prefs['sourcecol'] != '' and prefs['sourcecol'] in custom_columns \
                and prefs['sourcetemplate']:
            val = SafeFormat().safe_format(prefs['sourcetemplate'], misource, 'EpubSplit Source Template Error', misource)
            #logger.debug("Attempting to set %s to %s"%(prefs['sourcecol'],val))
            label = custom_columns[prefs['sourcecol']]['label']
            if custom_columns[prefs['sourcecol']]['datatype'] == 'series':
                val = val + (" [%s]"%self.book_count)
            db.set_custom(book_id, val, label=label, commit=False)
        self.book_count = self.book_count+1
        db.commit()

        #logger.debug("4:%s"%(time.time()-self.t))
        self.t = time.time()

        self.gui.library_view.model().books_added(1)
        self.gui.library_view.select_rows([book_id])

        #logger.debug("5:%s"%(time.time()-self.t))
        self.t = time.time()

        editconfig_txt = _('You can enable or disable Edit Metadata in Preferences > Plugins > EpubSplit.')
        if prefs['editmetadata']:
            confirm('\n'+_('''The book for the new Split EPUB has been created and default metadata filled in.

However, the EPUB will *not* be created until after you've reviewed, edited, and closed the metadata dialog that follows.

You can fill in the metadata yourself, or use download metadata for known books.

If you download or add a cover image, it will be included in the generated EPUB.''')+'\n\n'+
                    editconfig_txt+'\n',
                    'epubsplit_created_now_edit_again',
                    self.gui)
            self.gui.iactions['Edit Metadata'].edit_metadata(False)

        try:
            QApplication.setOverrideCursor(QCursor(Qt.WaitCursor))
            #logger.debug("5:%s"%(time.time()-self.t))
            self.t = time.time()
            self.gui.tags_view.recount()

            self.gui.status_bar.show_message(_('Splitting off from EPUB...'), 60000)

            mi = db.get_metadata(book_id,index_is_id=True)

            outputepub = PersistentTemporaryFile(suffix='.epub')

            coverjpgpath = None
            if mi.has_cover:
                # grab the path to the real image.
                coverjpgpath = os.path.join(db.library_path, db.path(book_id, index_is_id=True), 'cover.jpg')

            outlist = list(set(linenums + checkedalways))
            outlist.sort()
            splitepub.write_split_epub(outputepub,
                                       outlist,
                                       changedtocs=changedtocs,
                                       authoropts=mi.authors,
                                       titleopt=mi.title,
                                       descopt=mi.comments,
                                       tags=mi.tags,
                                       languages=mi.languages,
                                       coverjpgpath=coverjpgpath)

            #logger.debug("6:%s"%(time.time()-self.t))
            self.t = time.time()
            db.add_format_with_hooks(book_id,
                                     'EPUB',
                                     outputepub, index_is_id=True)

            #logger.debug("7:%s"%(time.time()-self.t))
            self.t = time.time()

            self.gui.status_bar.show_message(_('Finished splitting off EPUB.'), 3000)
            self.gui.library_view.model().refresh_ids([book_id])
            self.gui.tags_view.recount()
            current = self.gui.library_view.currentIndex()
            self.gui.library_view.model().current_changed(current, self.previous)
        finally:
            QApplication.restoreOverrideCursor()

        if not prefs['editmetadata']:
            confirm('<p>'+
                    '</p><p>'.join([_('<b><u>%s</u> by %s</b> has been created and default metadata filled in.')%(mi.title,', '.join(mi.authors)),
                                   _('EpubSplit now skips the Edit Metadata step by default.'),
                                   editconfig_txt])+
                    '</p>',
                    'epubsplit_created_now_no_edit_again',
                    self.gui)
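
_do_split() above is driven by a set of copy* switches read from the plugin's prefs. Calibre plugins usually keep such settings in a JSONConfig; the sketch below only illustrates plausible defaults for the keys used in this snippet and is not the plugin's actual configuration.

from calibre.utils.config import JSONConfig

# Illustrative defaults; key names are taken from the _do_split() code above.
prefs = JSONConfig('plugins/EpubSplit')
prefs.defaults['copytoctitle'] = True
prefs.defaults['copytitle'] = True
prefs.defaults['copyauthors'] = True
prefs.defaults['copytags'] = True
prefs.defaults['copylanguages'] = True
prefs.defaults['copyseries'] = True
prefs.defaults['copydate'] = True
prefs.defaults['copyrating'] = True
prefs.defaults['copypubdate'] = True
prefs.defaults['copypublisher'] = True
prefs.defaults['copyidentifiers'] = True
prefs.defaults['copycomments'] = True
prefs.defaults['copycover'] = True
prefs.defaults['editmetadata'] = True
prefs.defaults['custom_cols'] = {}
prefs.defaults['sourcecol'] = ''
prefs.defaults['sourcetemplate'] = ''
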
Esempio n. 13
0
    def _do_split(self,
                  db,
                  source_id,
                  misource,
                  splitepub,
                  newspecs,
                  deftitle=None,
                  editmeta=True):

        linenums, changedtocs = newspecs
        # logger.debug("updated tocs:%s"%changedtocs)

        # logger.debug("2:%s"%(time.time()-self.t))
        self.t = time.time()

        # logger.debug("linenums:%s"%linenums)

        defauthors = None

        if not deftitle and prefs['copytitle']:
            deftitle = _("نمونه %s") % misource.title

        if prefs['copyauthors']:
            defauthors = misource.authors

        mi = MetaInformation(deftitle, defauthors)

        if prefs['copytags']:
            mi.tags = misource.tags  # [item for sublist in tagslists for item in sublist]

        if prefs['copylanguages']:
            mi.languages = misource.languages

        if prefs['copyseries']:
            mi.series = misource.series

        if prefs['copydate']:
            mi.timestamp = misource.timestamp

        if prefs['copyrating']:
            mi.rating = misource.rating

        if prefs['copypubdate']:
            mi.pubdate = misource.pubdate

        if prefs['copypublisher']:
            mi.publisher = misource.publisher

        if prefs['copyidentifiers']:
            mi.set_identifiers(misource.get_identifiers())

        if prefs['copycomments'] and misource.comments:
            mi.comments = _("Split from:") + "\n\n" + misource.comments

        # logger.debug("mi:%s"%mi)
        book_id = db.create_book_entry(mi,
                                       add_duplicates=True)

        if prefs['copycover'] and misource.has_cover:
            db.set_cover(book_id, db.cover(source_id, index_is_id=True))

        # logger.debug("3:%s"%(time.time()-self.t))
        self.t = time.time()

        custom_columns = self.gui.library_view.model().custom_columns
        for col, action in prefs['custom_cols'].iteritems():
            # logger.debug("col: %s action: %s"%(col,action))

            if col not in custom_columns:
                # logger.debug("%s not an existing column, skipping."%col)
                continue

            coldef = custom_columns[col]
            # logger.debug("coldef:%s"%coldef)
            label = coldef['label']
            value = db.get_custom(source_id, label=label, index_is_id=True)
            if value:
                db.set_custom(book_id, value, label=label, commit=False)

        # logger.debug("3.5:%s"%(time.time()-self.t))
        self.t = time.time()

        if prefs['sourcecol'] != '' and prefs['sourcecol'] in custom_columns \
                and prefs['sourcetemplate']:
            val = SafeFormat().safe_format(prefs['sourcetemplate'], misource, 'EpubSplit Source Template Error',
                                           misource)
            # logger.debug("Attempting to set %s to %s"%(prefs['sourcecol'],val))
            label = custom_columns[prefs['sourcecol']]['label']
            db.set_custom(book_id, val, label=label, commit=False)

        db.commit()

        # logger.debug("4:%s"%(time.time()-self.t))
        self.t = time.time()

        self.gui.library_view.model().books_added(1)
        self.gui.library_view.select_rows([book_id])

        # logger.debug("5:%s"%(time.time()-self.t))
        self.t = time.time()

        # if editmeta:
        #     confirm('\n'+_('کتاب نمونه ساخته شود؟')+'\n',  # Persian: "Create the sample book?"
        #             'epubsplit_created_now_edit_again',
        #             self.gui)
        #
        #     self.gui.iactions['Edit Metadata'].edit_metadata(False)

        # logger.debug("5:%s"%(time.time()-self.t))
        self.t = time.time()
        self.gui.tags_view.recount()

        self.gui.status_bar.show_message(_('فایل نمونه ساخته شد'), 60000)  # Persian: "Sample file created"

        mi = db.get_metadata(book_id, index_is_id=True)

        outputepub = PersistentTemporaryFile(suffix='.epub')

        coverjpgpath = None
        # if mi.has_cover:
        #     # grab the path to the real image.
        #     coverjpgpath = os.path.join(db.library_path, db.path(book_id, index_is_id=True), 'cover.jpg')

        splitepub.write_split_epub(outputepub,
                                   linenums,
                                   changedtocs=changedtocs,
                                   authoropts=mi.authors,
                                   titleopt=mi.title,
                                   descopt=mi.comments,
                                   tags=mi.tags,
                                   languages=mi.languages,
                                   coverjpgpath=coverjpgpath)

        # logger.debug("6:%s"%(time.time()-self.t))
        self.t = time.time()
        db.add_format_with_hooks(book_id,
                                 'EPUB',
                                 outputepub, index_is_id=True)

        # logger.debug("7:%s"%(time.time()-self.t))
        self.t = time.time()

        self.gui.status_bar.show_message(_('Finished splitting off EPUB.'), 3000)
        self.gui.library_view.model().refresh_ids([book_id])
        self.gui.tags_view.recount()
        current = self.gui.library_view.currentIndex()
        self.gui.library_view.model().current_changed(current, self.previous)
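
Note that this fork unpacks newspecs as a two-tuple (linenums, changedtocs) and drops the has_lines() check and checkedalways handling found in the other variants. A hedged sketch of how a caller inside the same action class might assemble the arguments; all values are placeholders, not the plugin's real call site.

# Hypothetical call from another method of the same action class.
linenums = [0, 2, 3]                      # split-point lines chosen in the dialog
changedtocs = {0: ['Sample Chapter 1']}   # per-line TOC title overrides
newspecs = (linenums, changedtocs)
self._do_split(db, source_id, misource, splitepub, newspecs,
               deftitle=None, editmeta=False)
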
Esempio n. 14
0
    def _do_split(self,
                  db,
                  source_id,
                  misource,
                  splitepub,
                  origlines,
                  newspecs,
                  deftitle=None):

        linenums, changedtocs, checkedalways = newspecs
        # logger.debug("updated tocs:%s"%changedtocs)
        if not self.has_lines(linenums):
            return
        #logger.debug("2:%s"%(time.time()-self.t))
        self.t = time.time()

        #logger.debug("linenums:%s"%linenums)

        defauthors = None

        if not deftitle and prefs['copytoctitle']:
            if linenums[0] in changedtocs:
                deftitle = changedtocs[linenums[0]][0]  # already unicoded()'ed
            elif len(origlines[linenums[0]]['toc']) > 0:
                deftitle = unicode(origlines[linenums[0]]['toc'][0])
            #logger.debug("deftitle:%s"%deftitle)

        if not deftitle and prefs['copytitle']:
            deftitle = _("%s Split") % misource.title

        if prefs['copyauthors']:
            defauthors = misource.authors

        mi = MetaInformation(deftitle, defauthors)

        if prefs['copytags']:
            mi.tags = misource.tags  # [item for sublist in tagslists for item in sublist]

        if prefs['copylanguages']:
            mi.languages = misource.languages

        if prefs['copyseries']:
            mi.series = misource.series

        if prefs['copydate']:
            mi.timestamp = misource.timestamp

        if prefs['copyrating']:
            mi.rating = misource.rating

        if prefs['copypubdate']:
            mi.pubdate = misource.pubdate

        if prefs['copypublisher']:
            mi.publisher = misource.publisher

        if prefs['copyidentifiers']:
            mi.set_identifiers(misource.get_identifiers())

        if prefs['copycomments'] and misource.comments:
            mi.comments = "<p>" + _("Split from:") + "</p>" + misource.comments

        #logger.debug("mi:%s"%mi)
        book_id = db.create_book_entry(mi, add_duplicates=True)

        if prefs['copycover'] and misource.has_cover:
            db.set_cover(book_id, db.cover(source_id, index_is_id=True))

        #logger.debug("3:%s"%(time.time()-self.t))
        self.t = time.time()

        custom_columns = self.gui.library_view.model().custom_columns
        for col, action in six.iteritems(prefs['custom_cols']):
            #logger.debug("col: %s action: %s"%(col,action))

            if col not in custom_columns:
                #logger.debug("%s not an existing column, skipping."%col)
                continue

            coldef = custom_columns[col]
            #logger.debug("coldef:%s"%coldef)
            label = coldef['label']
            value = db.get_custom(source_id, label=label, index_is_id=True)
            if value:
                db.set_custom(book_id, value, label=label, commit=False)

        #logger.debug("3.5:%s"%(time.time()-self.t))
        self.t = time.time()

        if prefs['sourcecol'] != '' and prefs['sourcecol'] in custom_columns \
                and prefs['sourcetemplate']:
            val = SafeFormat().safe_format(prefs['sourcetemplate'], misource,
                                           'EpubSplit Source Template Error',
                                           misource)
            #logger.debug("Attempting to set %s to %s"%(prefs['sourcecol'],val))
            label = custom_columns[prefs['sourcecol']]['label']
            if custom_columns[prefs['sourcecol']]['datatype'] == 'series':
                val = val + (" [%s]" % self.book_count)
            db.set_custom(book_id, val, label=label, commit=False)
        self.book_count = self.book_count + 1
        db.commit()

        #logger.debug("4:%s"%(time.time()-self.t))
        self.t = time.time()

        self.gui.library_view.model().books_added(1)
        self.gui.library_view.select_rows([book_id])

        #logger.debug("5:%s"%(time.time()-self.t))
        self.t = time.time()

        editconfig_txt = _(
            'You can enable or disable Edit Metadata in Preferences > Plugins > EpubSplit.'
        )
        if prefs['editmetadata']:
            confirm(
                '\n' +
                _('''The book for the new Split EPUB has been created and default metadata filled in.

However, the EPUB will *not* be created until after you've reviewed, edited, and closed the metadata dialog that follows.

You can fill in the metadata yourself, or use download metadata for known books.

If you download or add a cover image, it will be included in the generated EPUB.'''
                  ) + '\n\n' + editconfig_txt + '\n',
                'epubsplit_created_now_edit_again', self.gui)
            self.gui.iactions['Edit Metadata'].edit_metadata(False)

        try:
            QApplication.setOverrideCursor(QCursor(Qt.WaitCursor))
            #logger.debug("5:%s"%(time.time()-self.t))
            self.t = time.time()
            self.gui.tags_view.recount()

            self.gui.status_bar.show_message(_('Splitting off from EPUB...'),
                                             60000)

            mi = db.get_metadata(book_id, index_is_id=True)

            outputepub = PersistentTemporaryFile(suffix='.epub')

            coverjpgpath = None
            if mi.has_cover:
                # grab the path to the real image.
                coverjpgpath = os.path.join(db.library_path,
                                            db.path(book_id, index_is_id=True),
                                            'cover.jpg')

            outlist = list(set(linenums + checkedalways))
            outlist.sort()
            splitepub.write_split_epub(outputepub,
                                       outlist,
                                       changedtocs=changedtocs,
                                       authoropts=mi.authors,
                                       titleopt=mi.title,
                                       descopt=mi.comments,
                                       tags=mi.tags,
                                       languages=mi.languages,
                                       coverjpgpath=coverjpgpath)

            #logger.debug("6:%s"%(time.time()-self.t))
            self.t = time.time()
            db.add_format_with_hooks(book_id,
                                     'EPUB',
                                     outputepub,
                                     index_is_id=True)

            #logger.debug("7:%s"%(time.time()-self.t))
            self.t = time.time()

            self.gui.status_bar.show_message(_('Finished splitting off EPUB.'),
                                             3000)
            self.gui.library_view.model().refresh_ids([book_id])
            self.gui.tags_view.recount()
            current = self.gui.library_view.currentIndex()
            self.gui.library_view.model().current_changed(
                current, self.previous)
            if self.gui.cover_flow:
                self.gui.cover_flow.dataChanged()
        finally:
            QApplication.restoreOverrideCursor()

        if not prefs['editmetadata']:
            confirm(
                '<p>' + '</p><p>'.join([
                    _('<b><u>%s</u> by %s</b> has been created and default metadata filled in.'
                      ) % (mi.title, ', '.join(mi.authors)),
                    _('EpubSplit now skips the Edit Metadata step by default.'
                      ), editconfig_txt
                ]) + '</p>', 'epubsplit_created_now_no_edit_again', self.gui)
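
The source-column branch above fills a user-selected custom column from a template via SafeFormat().safe_format(...). A hedged sketch of that call with an example template, mirroring the argument order used in the snippet; the import path is the one calibre plugins commonly use, and the template string is only illustrative.

from calibre.ebooks.metadata.book.formatter import SafeFormat

sourcetemplate = '{title} by {authors}'  # example template, not a plugin default
val = SafeFormat().safe_format(sourcetemplate, misource,
                               'EpubSplit Source Template Error', misource)
# For a 'series'-type column the code above then appends a running count,
# e.g. "Some Title by Some Author [3]".
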
Esempio n. 15
0
def set_metadata(stream, mi, apply_null=False, update_timestamp=False):
    stream.seek(0)
    reader = OCFZipReader(stream, root=os.getcwdu())
    raster_cover = reader.opf.raster_cover
    mi = MetaInformation(mi)
    new_cdata = None
    replacements = {}
    try:
        new_cdata = mi.cover_data[1]
        if not new_cdata:
            raise Exception('no cover')
    except:
        try:
            new_cdata = open(mi.cover, 'rb').read()
        except:
            pass
    new_cover = cpath = None
    if new_cdata and raster_cover:
        try:
            cpath = posixpath.join(posixpath.dirname(reader.opf_path),
                    raster_cover)
            cover_replaceable = not reader.encryption_meta.is_encrypted(cpath) and \
                    os.path.splitext(cpath)[1].lower() in ('.png', '.jpg', '.jpeg')
            if cover_replaceable:
                new_cover = _write_new_cover(new_cdata, cpath)
                replacements[cpath] = open(new_cover.name, 'rb')
        except:
            import traceback
            traceback.print_exc()

    for x in ('guide', 'toc', 'manifest', 'spine'):
        setattr(mi, x, None)
    if mi.languages:
        langs = []
        for lc in mi.languages:
            lc2 = lang_as_iso639_1(lc)
            if lc2:
                lc = lc2
            langs.append(lc)
        mi.languages = langs


    reader.opf.smart_update(mi)
    if apply_null:
        if not getattr(mi, 'series', None):
            reader.opf.series = None
        if not getattr(mi, 'tags', []):
            reader.opf.tags = []
        if not getattr(mi, 'isbn', None):
            reader.opf.isbn = None
    if update_timestamp and mi.timestamp is not None:
        reader.opf.timestamp = mi.timestamp

    newopf = StringIO(reader.opf.render())
    safe_replace(stream, reader.container[OPF.MIMETYPE], newopf,
            extra_replacements=replacements)
    try:
        if cpath is not None:
            replacements[cpath].close()
            os.remove(replacements[cpath].name)
    except:
        pass
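
A minimal usage sketch for set_metadata() above, assuming the surrounding EPUB metadata module provides OCFZipReader, safe_replace and the other names it uses; the path and field values are placeholders.

# Hypothetical usage; 'book.epub' is a placeholder path.
mi = MetaInformation('New Title', ['Some Author'])
mi.tags = ['fiction', 'example']
with open('book.epub', 'r+b') as stream:
    set_metadata(stream, mi, apply_null=False, update_timestamp=True)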