Example #1
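This `process_result` method appears to come from calibre's GUI book-adding code. It consumes the result of a background metadata-read job for one group of files: on failure it falls back to the metadata produced by the failure handler; otherwise it parses the returned OPF, normalizes the title, tags, and authors via the configured mapping rules, and then either stages the book for a deferred add (`self.db is None`), merges it into identical existing books, or records it as a duplicate.
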
    def process_result(self, group_id, result):
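        # result.err means the background metadata-read job failed for this
        # group; report it and continue with whatever metadata the failure
        # handler returns.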
        if result.err:
            mi = self.report_metadata_failure(group_id, result.traceback)
            paths = self.file_groups[group_id]
            has_cover = False
            duplicate_info = set() if self.add_formats_to_existing else False
        else:
            paths, opf, has_cover, duplicate_info = result.value
            try:
                mi = OPF(BytesIO(opf), basedir=self.tdir, populate_spine=False, try_to_guess_cover=False).to_book_metadata()
                mi.read_metadata_failed = False
            except Exception:
                mi = self.report_metadata_failure(group_id, traceback.format_exc())

        if mi.is_null('title'):
            if paths:
                # Fall back to the first file's name (sans extension) as the title
                mi.title = os.path.splitext(os.path.basename(paths[0]))[0]
        if mi.application_id == '__calibre_dummy__':
            mi.application_id = None
        if gprefs.get('tag_map_on_add_rules'):
            from calibre.ebooks.metadata.tag_mapper import map_tags
            mi.tags = map_tags(mi.tags, gprefs['tag_map_on_add_rules'])
        if self.author_map_rules:
            from calibre.ebooks.metadata.author_mapper import map_authors
            new_authors = map_authors(mi.authors, self.author_map_rules)
            if new_authors != mi.authors:
                mi.authors = new_authors
                if self.db is None:
                    mi.author_sort = authors_to_sort_string(mi.authors)
                else:
                    mi.author_sort = self.db.author_sort_from_authors(mi.authors)

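        # Update the progress dialog (self.pd) message to the current title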
        self.pd.msg = mi.title

        cover_path = os.path.join(self.tdir, '%s.cdata' % group_id) if has_cover else None

        if self.db is None:
            if paths:
                self.items.append((mi, cover_path, paths))
            return

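        # "Add formats to existing" mode: merge into any identical books already
        # in the library; otherwise add as a new book, diverting duplicates.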
        if self.add_formats_to_existing:
            identical_book_ids = find_identical_books(mi, self.find_identical_books_data)
            if identical_book_ids:
                try:
                    self.merge_books(mi, cover_path, paths, identical_book_ids)
                except Exception:
                    a = self.report.append
                    a('')
                    a('-' * 70)
                    a(_('Failed to merge the book: ') + mi.title)
                    for f in paths:
                        a('\t' + f)
                    a(_('With error:'))
                    a(traceback.format_exc())
            else:
                self.add_book(mi, cover_path, paths)
        else:
            if duplicate_info or icu_lower(mi.title or _('Unknown')) in self.added_duplicate_info:
                self.duplicates.append((mi, cover_path, paths))
            else:
                self.add_book(mi, cover_path, paths)
Example #2
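This `do_add` method appears to come from calibre's auto-add feature, the folder watcher that imports e-books dropped into a directory. For each staged file it verifies that the size recorded in `size.txt` still matches (scheduling a rescan otherwise), loads the pre-extracted `metadata.opf`, applies the configured tag- and author-mapping rules, adds the book through the library view's model, and defers all duplicates to a single `DuplicatesQuestion` dialog before cleaning up the staging directories.
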
    def do_add(self, data):
        from calibre.ebooks.metadata.opf2 import OPF

        gui = self.parent()
        if gui is None:
            return
        m = gui.library_view.model()
        count = 0

        needs_rescan = False
        duplicates = []
        added_ids = set()

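        # data pairs each staged file name with the temporary directory that
        # holds its pre-extracted metadata (metadata.opf, size.txt, ...)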
        for fname, tdir in data:
            path_to_remove = os.path.join(self.worker.path, fname)
            paths = [path_to_remove]
            fpath = os.path.join(tdir, 'file_changed_by_plugins')
            if os.path.exists(fpath):
                with open(fpath) as f:
                    paths[0] = f.read()
            sz = os.path.join(tdir, 'size.txt')
            try:
                with open(sz, 'rb') as f:
                    sz = int(f.read())
                if sz != os.stat(paths[0]).st_size:
                    raise Exception('Looks like the file was written to after'
                            ' we tried to read metadata')
            except Exception:
                needs_rescan = True
                try:
                    self.worker.staging.remove(fname)
                except KeyError:
                    pass

                continue

            opf_path = os.path.join(tdir, 'metadata.opf')
            if not os.access(opf_path, os.R_OK):
                continue
            with open(opf_path, 'rb') as f:
                mi = OPF(f, tdir, populate_spine=False).to_book_metadata()
            if gprefs.get('tag_map_on_add_rules'):
                from calibre.ebooks.metadata.tag_mapper import map_tags
                mi.tags = map_tags(mi.tags, gprefs['tag_map_on_add_rules'])
            if gprefs.get('author_map_on_add_rules'):
                from calibre.ebooks.metadata.author_mapper import (
                    compile_rules, map_authors
                )
                new_authors = map_authors(mi.authors, compile_rules(gprefs['author_map_on_add_rules']))
                if new_authors != mi.authors:
                    mi.authors = new_authors
                    mi.author_sort = gui.current_db.new_api.author_sort_from_authors(mi.authors)
            dups, ids = m.add_books(paths,
                    [os.path.splitext(fname)[1][1:].upper()], [mi],
                    add_duplicates=not gprefs['auto_add_check_for_duplicates'],
                    return_ids=True)
            added_ids |= set(ids)
            num = len(ids)
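            # When the model reports duplicates, copy the file into tdir as a
            # dup_cache so the original can be removed before the user decides.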
            if dups:
                path = dups[0][0]
                with open(os.path.join(tdir, 'dup_cache.'+dups[1][0].lower()),
                        'wb') as dest, open(path, 'rb') as src:
                    shutil.copyfileobj(src, dest)
                    dups[0][0] = dest.name
                duplicates.append(dups)

            try:
                os.remove(path_to_remove)
                self.worker.staging.remove(fname)
            except Exception:
                import traceback
                traceback.print_exc()
            count += num

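        # Resolve all collected duplicates with one DuplicatesQuestion dialog
        # and force-add whichever entries the user confirms.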
        if duplicates:
            paths, formats, metadata = [], [], []
            for p, f, mis in duplicates:
                paths.extend(p)
                formats.extend(f)
                metadata.extend(mis)
            dups = [(mic, mic.cover, [p]) for mic, p in zip(metadata, paths)]
            d = DuplicatesQuestion(m.db, dups, parent=gui)
            dups = tuple(d.duplicates)
            if dups:
                paths, formats, metadata = [], [], []
                for mi, cover, book_paths in dups:
                    paths.extend(book_paths)
                    formats.extend([p.rpartition('.')[-1] for p in book_paths])
                    metadata.extend([mi] * len(book_paths))
                ids = m.add_books(paths, formats, metadata,
                        add_duplicates=True, return_ids=True)[1]
                added_ids |= set(ids)
                num = len(ids)
                count += num

        for fname, tdir in data:
            try:
                shutil.rmtree(tdir)
            except Exception:
                pass

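        # Optionally queue the newly added books for automatic conversion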
        if added_ids and gprefs['auto_add_auto_convert']:
            self.auto_convert.emit(added_ids)

        if count > 0:
            m.books_added(count)
            gui.status_bar.show_message(
                (_('Added a book automatically from {src}') if count == 1 else _('Added {num} books automatically from {src}')).format(
                    num=count, src=self.worker.path), 2000)
            gui.refresh_cover_browser()

        if needs_rescan:
            QTimer.singleShot(2000, self.dir_changed)
Example #3
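An apparently older revision of the same `do_add` method: the logic matches Example #2 except that it lacks the `file_changed_by_plugins` hook, so the staged file's original path is always used as-is.
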
    def do_add(self, data):
        from calibre.ebooks.metadata.opf2 import OPF

        gui = self.parent()
        if gui is None:
            return
        m = gui.library_view.model()
        count = 0

        needs_rescan = False
        duplicates = []
        added_ids = set()

        for fname, tdir in data:
            paths = [os.path.join(self.worker.path, fname)]
            sz = os.path.join(tdir, 'size.txt')
            try:
                with open(sz, 'rb') as f:
                    sz = int(f.read())
                if sz != os.stat(paths[0]).st_size:
                    raise Exception('Looks like the file was written to after'
                            ' we tried to read metadata')
            except Exception:
                needs_rescan = True
                try:
                    self.worker.staging.remove(fname)
                except KeyError:
                    pass

                continue

            opf_path = os.path.join(tdir, 'metadata.opf')
            if not os.access(opf_path, os.R_OK):
                continue
            with open(opf_path, 'rb') as f:
                mi = OPF(f, tdir, populate_spine=False).to_book_metadata()
            if gprefs.get('tag_map_on_add_rules'):
                from calibre.ebooks.metadata.tag_mapper import map_tags
                mi.tags = map_tags(mi.tags, gprefs['tag_map_on_add_rules'])
            if gprefs.get('author_map_on_add_rules'):
                from calibre.ebooks.metadata.author_mapper import map_authors, compile_rules
                new_authors = map_authors(mi.authors, compile_rules(gprefs['author_map_on_add_rules']))
                if new_authors != mi.authors:
                    mi.authors = new_authors
                    mi.author_sort = gui.current_db.new_api.author_sort_from_authors(mi.authors)
            dups, ids = m.add_books(paths,
                    [os.path.splitext(fname)[1][1:].upper()], [mi],
                    add_duplicates=not gprefs['auto_add_check_for_duplicates'],
                    return_ids=True)
            added_ids |= set(ids)
            num = len(ids)
            if dups:
                path = dups[0][0]
                with open(os.path.join(tdir, 'dup_cache.'+dups[1][0].lower()),
                        'wb') as dest, open(path, 'rb') as src:
                    shutil.copyfileobj(src, dest)
                    dups[0][0] = dest.name
                duplicates.append(dups)

            try:
                os.remove(paths[0])
                self.worker.staging.remove(fname)
            except Exception:
                import traceback
                traceback.print_exc()
            count += num

        if duplicates:
            paths, formats, metadata = [], [], []
            for p, f, mis in duplicates:
                paths.extend(p)
                formats.extend(f)
                metadata.extend(mis)
            dups = [(mic, mic.cover, [p]) for mic, p in zip(metadata, paths)]
            d = DuplicatesQuestion(m.db, dups, parent=gui)
            dups = tuple(d.duplicates)
            if dups:
                paths, formats, metadata = [], [], []
                for mi, cover, book_paths in dups:
                    paths.extend(book_paths)
                    formats.extend([p.rpartition('.')[-1] for p in book_paths])
                    metadata.extend([mi] * len(book_paths))
                ids = m.add_books(paths, formats, metadata,
                        add_duplicates=True, return_ids=True)[1]
                added_ids |= set(ids)
                num = len(ids)
                count += num

        for fname, tdir in data:
            try:
                shutil.rmtree(tdir)
            except Exception:
                pass

        if added_ids and gprefs['auto_add_auto_convert']:
            self.auto_convert.emit(added_ids)

        if count > 0:
            m.books_added(count)
            gui.status_bar.show_message(
                (_('Added a book automatically from {src}') if count == 1 else _('Added {num} books automatically from {src}')).format(
                    num=count, src=self.worker.path), 2000)
            gui.refresh_cover_browser()

        if needs_rescan:
            QTimer.singleShot(2000, self.dir_changed)