Python now() Examples

The examples below are collected uses of now() from calibre.utils.date, as found in the calibre code base and its plugins.

Example No. 1
 def setEditorData(self, editor, index):
     if check_key_modifier(Qt.ControlModifier):
         val = UNDEFINED_QDATETIME
     elif check_key_modifier(Qt.ShiftModifier | Qt.ControlModifier):
         val = now()
     else:
         val = index.data(Qt.EditRole)
         if is_date_undefined(val):
             val = now()
     editor.setDateTime(val)
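
This delegate pattern relies on a few helpers: now() supplies the current time, UNDEFINED_QDATETIME marks an unset date, and check_key_modifier tests the held keyboard modifiers. A minimal sketch of the date helpers from calibre.utils.date, assuming calibre is importable:

    from calibre.utils.date import UNDEFINED_DATE, isoformat, now

    ts = now()                    # timezone-aware datetime in the local zone
    print(isoformat(ts))          # ISO-8601 string, rendered as UTC by default
    print(ts == UNDEFINED_DATE)   # False for any real timestamp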
Example No. 2
 def setEditorData(self, editor, index):
     if check_key_modifier(Qt.ControlModifier):
         val = UNDEFINED_QDATETIME
     elif check_key_modifier(Qt.ShiftModifier | Qt.ControlModifier):
         val = now()
     else:
         m = index.model()
         # db col is not named for the field, but for the table number. To get it,
         # gui column -> column label -> table number -> db column
         val = m.db.data[index.row()][m.custom_columns[m.column_map[index.column()]]['rec_index']]
         if val is None:
             val = now()
     editor.setDateTime(val)
Example No. 3
    def __init__(self, parent, db):
        QDialog.__init__(self, parent)
        self.setupUi(self)
        for val, text in [(0, '')] + [(i, date(2010, i, 1).strftime('%B')) for i in xrange(1, 13)]:
            self.date_month.addItem(text, val)
        for val, text in [('today', _('Today')), ('yesterday', _('Yesterday')), ('thismonth', _('This month'))]:
            self.date_human.addItem(text, val)
        self.date_year.setValue(now().year)
        self.date_day.setSpecialValueText(u' \xa0')
        vals = [((v['search_terms'] or [k])[0], v['name'] or k) for k, v in db.field_metadata.iteritems() if v.get('datatype', None) == 'datetime']
        for k, v in sorted(vals, key=lambda (k, v): sort_key(v)):
            self.date_field.addItem(v, k)

        self.date_year.valueChanged.connect(lambda : self.sel_date.setChecked(True))
        self.date_month.currentIndexChanged.connect(lambda : self.sel_date.setChecked(True))
        self.date_day.valueChanged.connect(lambda : self.sel_date.setChecked(True))
        self.date_daysago.valueChanged.connect(lambda : self.sel_daysago.setChecked(True))
        self.date_human.currentIndexChanged.connect(lambda : self.sel_human.setChecked(True))
        init_dateop(self.dateop_date)
        self.sel_date.setChecked(True)
        self.mc = ''
        searchables = sorted(db.field_metadata.searchable_fields(),
                             key=lambda x: sort_key(x if x[0] != '#' else x[1:]))
        self.general_combo.addItems(searchables)

        all_authors = db.all_authors()
        all_authors.sort(key=lambda x : sort_key(x[1]))
        self.authors_box.setEditText('')
        self.authors_box.set_separator('&')
        self.authors_box.set_space_before_sep(True)
        self.authors_box.set_add_separator(tweaks['authors_completer_append_separator'])
        self.authors_box.update_items_cache(db.all_author_names())

        all_series = db.all_series()
        all_series.sort(key=lambda x : sort_key(x[1]))
        self.series_box.set_separator(None)
        self.series_box.update_items_cache([x[1] for x in all_series])
        self.series_box.show_initial_value('')

        all_tags = db.all_tags()
        self.tags_box.update_items_cache(all_tags)

        self.box_last_values = copy.deepcopy(box_values)
        if self.box_last_values:
            for k,v in self.box_last_values.items():
                if k == 'general_index':
                    continue
                getattr(self, k).setText(v)
            self.general_combo.setCurrentIndex(
                    self.general_combo.findText(self.box_last_values['general_index']))

        self.clear_button.clicked.connect(self.clear_button_pushed)

        current_tab = gprefs.get('advanced search dialog current tab', 0)
        self.tabWidget.setCurrentIndex(current_tab)
        if current_tab == 1:
            self.matchkind.setCurrentIndex(last_matchkind)

        self.tabWidget.currentChanged[int].connect(self.tab_changed)
        self.tab_changed(current_tab)
Example No. 4
 def _update_drive_info(self, storage, location_code, name=None):
     from calibre.utils.date import isoformat, now
     from calibre.utils.config import from_json, to_json
     import uuid
     f = storage.find_path(self.calibre_file_paths['driveinfo'].split('/'))
     dinfo = {}
     if f is not None:
         try:
             stream = self.get_mtp_file(f)
             dinfo = json.load(stream, object_hook=from_json)
         except:
             prints('Failed to load existing driveinfo.calibre file, with error:')
             traceback.print_exc()
             dinfo = {}
     if dinfo.get('device_store_uuid', None) is None:
         dinfo['device_store_uuid'] = unicode(uuid.uuid4())
     if dinfo.get('device_name', None) is None:
         dinfo['device_name'] = self.current_friendly_name
     if name is not None:
         dinfo['device_name'] = name
     dinfo['location_code'] = location_code
     dinfo['last_library_uuid'] = getattr(self, 'current_library_uuid', None)
     dinfo['calibre_version'] = '.'.join([unicode(i) for i in numeric_version])
     dinfo['date_last_connected'] = isoformat(now())
     dinfo['mtp_prefix'] = storage.storage_prefix
     raw = json.dumps(dinfo, default=to_json)
     self.put_calibre_file(storage, 'driveinfo', BytesIO(raw), len(raw))
     self.driveinfo[location_code] = dinfo
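
The driveinfo record round-trips through JSON using calibre's to_json/from_json hooks, which encode values plain JSON cannot represent (datetime and binary objects). A minimal sketch of that round-trip, assuming calibre is importable:

    import json

    from calibre.utils.config import from_json, to_json
    from calibre.utils.date import isoformat, now

    record = {'location_code': 'main', 'date_last_connected': isoformat(now())}
    raw = json.dumps(record, default=to_json)
    assert json.loads(raw, object_hook=from_json)['location_code'] == 'main'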
Example No. 5
 def setEditorData(self, editor, index):
     m = index.model()
     # db col is not named for the field, but for the table number. To get it,
     # gui column -> column label -> table number -> db column
     val = m.db.data[index.row()][m.custom_columns[m.column_map[index.column()]]['rec_index']]
     if val is None:
         val = now()
     editor.setDateTime(val)
Example No. 6
 def setEditorData(self, editor, index):
     if check_key_modifier(Qt.KeyboardModifier.ControlModifier):
         val = UNDEFINED_QDATETIME
     elif check_key_modifier(Qt.KeyboardModifier.ShiftModifier | Qt.KeyboardModifier.ControlModifier):
         val = now()
     else:
         val = index.data(Qt.ItemDataRole.EditRole)
     editor.setDateTime(val)
Example No. 7
 def __init__(self, date_read, is_read_only=False, default_to_today=False):
     if date_read == UNDEFINED_DATE and default_to_today:
         date_read = now()
     if is_read_only:
         QTableWidgetItem.__init__(self, format_date(date_read, None), QtGui.QTableWidgetItem.UserType)
         self.setFlags(Qt.ItemIsSelectable|Qt.ItemIsEnabled)
     else:
         QTableWidgetItem.__init__(self, '', QtGui.QTableWidgetItem.UserType)
         self.setData(Qt.DisplayRole, QDateTime(date_read))
Example No. 8
 def setEditorData(self, editor, index):
     val = index.data(Qt.EditRole)
     if is_date_undefined(val) or check_key_modifier(Qt.ControlModifier):
         val = QDate(2000, 1, 1)
     elif check_key_modifier(Qt.ShiftModifier):
         val = now()
     if isinstance(val, QDateTime):
         val = val.date()
     editor.setDate(val)
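
check_key_modifier, used throughout these delegates, comes from calibre.gui2; a rough sketch of its behavior (an assumption about the implementation, not calibre's exact code):

    from qt.core import QApplication, Qt  # calibre's Qt compatibility shim

    def check_key_modifier(which):
        # True only when exactly this modifier combination is currently held
        mask = (Qt.KeyboardModifier.ControlModifier | Qt.KeyboardModifier.ShiftModifier |
                Qt.KeyboardModifier.AltModifier | Qt.KeyboardModifier.MetaModifier)
        return (QApplication.keyboardModifiers() & mask) == which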
Example No. 9
 def do_test(cache, book_id):
     for field in ('path', 'uuid', 'author_sort', 'timestamp', 'pubdate', 'title', 'authors', 'series_index', 'sort'):
         self.assertTrue(cache.field_for(field, book_id))
     for field in ('size', 'cover'):
         self.assertFalse(cache.field_for(field, book_id))
     self.assertEqual(book_id, cache.fields['uuid'].table.uuid_to_id_map[cache.field_for('uuid', book_id)])
     self.assertLess(now() - cache.field_for('timestamp', book_id), timedelta(seconds=30))
     self.assertEqual(('Created One', ('Creator One', 'Creator Two')), (cache.field_for('title', book_id), cache.field_for('authors', book_id)))
     self.assertEqual(cache.field_for('series_index', book_id), 1.0)
     self.assertEqual(cache.field_for('pubdate', book_id), UNDEFINED_DATE)
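
The assertLess on the timestamp works because now() is timezone-aware: subtracting it from another aware datetime yields a plain timedelta, while mixing in a naive datetime would raise TypeError. A quick sketch:

    from datetime import timedelta

    from calibre.utils.date import now, utcnow

    delta = now() - utcnow()  # both aware, so cross-zone arithmetic is defined
    assert abs(delta) < timedelta(seconds=1)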
Example No. 10
 def setEditorData(self, editor, index):
     val = index.data(Qt.EditRole)
     if check_key_modifier(Qt.ControlModifier):
         val = UNDEFINED_QDATETIME
     elif check_key_modifier(Qt.ShiftModifier | Qt.ControlModifier):
         val = now()
     elif is_date_undefined(val):
         val = QDate(2000, 1, 1)
     if isinstance(val, QDateTime):
         val = val.date()
     editor.setDate(val)
Example No. 11
 def setEditorData(self, editor, index):
     val = index.data(Qt.ItemDataRole.EditRole)
     if check_key_modifier(Qt.KeyboardModifier.ControlModifier):
         val = UNDEFINED_QDATETIME
     elif check_key_modifier(Qt.KeyboardModifier.ShiftModifier | Qt.KeyboardModifier.ControlModifier):
         val = now()
     elif is_date_undefined(val):
         val = QDate.currentDate()
     if isinstance(val, QDateTime):
         val = val.date()
     editor.setDate(val)
Example No. 12
    def __init__(self, date_read, is_read_only=False, default_to_today=False, fmt=None):
#        debug_print("DateTableWidgetItem:__init__ - date_read=", date_read)
        if date_read is None or (date_read == UNDEFINED_DATE and default_to_today):
            date_read = now()
        if is_read_only:
            super(DateTableWidgetItem, self).__init__(format_date(date_read, fmt))
            self.setFlags(Qt.ItemIsSelectable|Qt.ItemIsEnabled)
            self.setData(Qt.DisplayRole, QDateTime(date_read))
        else:
            super(DateTableWidgetItem, self).__init__('')
            self.setData(Qt.DisplayRole, QDateTime(date_read))
Example No. 13
    def __init__(self, date_read, is_read_only=False, default_to_today=False, fmt=None):
#        debug_print("DateTableWidgetItem:__init__ - date_read=", date_read)
        if date_read is None or (date_read == UNDEFINED_DATE and default_to_today):
            date_read = now()
        if is_read_only:
            QTableWidgetItem.__init__(self, format_date(date_read, fmt), QTableWidgetItem.UserType)
            self.setFlags(Qt.ItemIsSelectable|Qt.ItemIsEnabled)
            self.setData(Qt.DisplayRole, QDateTime(date_read))
        else:
            QTableWidgetItem.__init__(self, '', QTableWidgetItem.UserType)
            self.setData(Qt.DisplayRole, QDateTime(date_read))
Example No. 14
 def setEditorData(self, editor, index):
     val = index.data(Qt.EditRole)
     if check_key_modifier(Qt.ControlModifier):
         val = UNDEFINED_QDATETIME
     elif check_key_modifier(Qt.ShiftModifier | Qt.ControlModifier):
         val = now()
     elif is_date_undefined(val):
         val = QDate.currentDate()
     if isinstance(val, QDateTime):
         val = val.date()
     editor.setDate(val)
Example No. 15
 def default_mi(self):
     from calibre.ebooks.metadata.book.base import Metadata
     mi = Metadata(_('A sample book'), [_('Author One'), _('Author Two')])
     mi.series = _('A series of samples')
     mi.series_index = 4
     mi.tags = [_('Tag One'), _('Tag Two')]
     mi.publisher = _('Some publisher')
     mi.rating = 4
     mi.identifiers = {'isbn':'123456789', 'url': 'https://calibre-ebook.com'}
     mi.languages = ['eng', 'fra']
     mi.pubdate = mi.timestamp = now()
     return mi
Example No. 16
 def default_mi(self):
     from calibre.ebooks.metadata.book.base import Metadata
     mi = Metadata(_('A sample book'), [_('Author One'), _('Author Two')])
     mi.series = _('A series of samples')
     mi.series_index = 4
     mi.tags = [_('Tag One'), _('Tag Two')]
     mi.publisher = _('Some publisher')
     mi.rating = 4
     mi.identifiers = {'isbn':'123456789', 'url': 'http://calibre-ebook.com'}
     mi.languages = ['eng', 'fra']
     mi.pubdate = mi.timestamp = now()
     return mi
Example No. 17
    def _doit(self, newdb):
        for i, x in enumerate(self.ids):
            mi = self.db.get_metadata(x,
                                      index_is_id=True,
                                      get_cover=True,
                                      cover_as_data=True)
            if not gprefs['preserve_date_on_ctl']:
                mi.timestamp = now()
            self.progress(i, mi.title)
            fmts = self.db.formats(x, index_is_id=True)
            if not fmts:
                fmts = []
            else:
                fmts = fmts.split(',')
            identical_book_list = set()
            paths = []
            for fmt in fmts:
                p = self.db.format(x, fmt, index_is_id=True, as_path=True)
                if p:
                    paths.append(p)
            try:
                if not self.add_duplicates:
                    if prefs['add_formats_to_existing'] or prefs[
                            'check_for_dupes_on_ctl']:
                        # Scanning for dupes can be slow on a large library so
                        # only do it if the option is set
                        identical_book_list = newdb.find_identical_books(mi)
                    if identical_book_list:  # books with same author and nearly same title exist in newdb
                        if prefs['add_formats_to_existing']:
                            self.automerge_book(x, mi, identical_book_list,
                                                paths, newdb)
                        else:  # Report duplicates for later processing
                            self.duplicate_ids[x] = (mi.title, mi.authors)
                        continue

                newdb.import_book(
                    mi,
                    paths,
                    notify=False,
                    import_hooks=False,
                    apply_import_tags=tweaks[
                        'add_new_book_tags_when_importing_books'],
                    preserve_uuid=self.delete_after)
                co = self.db.conversion_options(x, 'PIPE')
                if co is not None:
                    newdb.set_conversion_options(x, 'PIPE', co)
                self.processed.add(x)
            finally:
                for path in paths:
                    try:
                        os.remove(path)
                    except:
                        pass
Example No. 18
 def _update_driveinfo_record(self, dinfo, prefix, location_code, name=None):
     import uuid
     if not isinstance(dinfo, dict):
         dinfo = {}
     if dinfo.get('device_store_uuid', None) is None:
         dinfo['device_store_uuid'] = unicode(uuid.uuid4())
     if dinfo.get('device_name') is None:
         dinfo['device_name'] = self.get_gui_name()
     if name is not None:
         dinfo['device_name'] = name
     dinfo['location_code'] = location_code
     dinfo['last_library_uuid'] = getattr(self, 'current_library_uuid', None)
     dinfo['calibre_version'] = '.'.join([unicode(i) for i in numeric_version])
     dinfo['date_last_connected'] = isoformat(now())
     dinfo['prefix'] = prefix.replace('\\', '/')
     return dinfo
Example No. 19
    def _doit(self, newdb):
        for i, x in enumerate(self.ids):
            mi = self.db.get_metadata(x, index_is_id=True, get_cover=True,
                    cover_as_data=True)
            if not gprefs['preserve_date_on_ctl']:
                mi.timestamp = now()
            self.progress(i, mi.title)
            fmts = self.db.formats(x, index_is_id=True)
            if not fmts:
                fmts = []
            else:
                fmts = fmts.split(',')
            identical_book_list = set()
            paths = []
            for fmt in fmts:
                p = self.db.format(x, fmt, index_is_id=True,
                    as_path=True)
                if p:
                    paths.append(p)
            try:
                if not self.add_duplicates:
                    if prefs['add_formats_to_existing'] or prefs['check_for_dupes_on_ctl']:
                        # Scanning for dupes can be slow on a large library so
                        # only do it if the option is set
                        identical_book_list = newdb.find_identical_books(mi)
                    if identical_book_list:  # books with same author and nearly same title exist in newdb
                        if prefs['add_formats_to_existing']:
                            self.automerge_book(x, mi, identical_book_list, paths, newdb)
                        else:  # Report duplicates for later processing
                            self.duplicate_ids[x] = (mi.title, mi.authors)
                        continue

                newdb.import_book(mi, paths, notify=False, import_hooks=False,
                    apply_import_tags=tweaks['add_new_book_tags_when_importing_books'],
                    preserve_uuid=self.delete_after)
                co = self.db.conversion_options(x, 'PIPE')
                if co is not None:
                    newdb.set_conversion_options(x, 'PIPE', co)
                self.processed.add(x)
            finally:
                for path in paths:
                    try:
                        os.remove(path)
                    except:
                        pass
Example No. 20
def copy_one_book(
        book_id, src_db, dest_db, duplicate_action='add', automerge_action='overwrite',
        preserve_date=True, identical_books_data=None, preserve_uuid=False):
    db = src_db.new_api
    newdb = dest_db.new_api
    with db.safe_read_lock, newdb.write_lock:
        mi = db.get_metadata(book_id, get_cover=True, cover_as_data=True)
        if not preserve_date:
            mi.timestamp = now()
        format_map = {}
        fmts = list(db.formats(book_id, verify_formats=False))
        for fmt in fmts:
            path = db.format_abspath(book_id, fmt)
            if path:
                format_map[fmt.upper()] = path
        identical_book_list = set()
        new_authors = {k for k, v in iteritems(newdb.get_item_ids('authors', mi.authors)) if v is None}
        new_book_id = None
        return_data = {
                'book_id': book_id, 'title': mi.title, 'authors': mi.authors, 'author': mi.format_field('authors')[1],
                'action': 'add', 'new_book_id': None
        }
        if duplicate_action != 'add':
            # Scanning for dupes can be slow on a large library so
            # only do it if the option is set
            if identical_books_data is None:
                identical_books_data = newdb.data_for_find_identical_books()
            identical_book_list = find_identical_books(mi, identical_books_data)
            if identical_book_list:  # books with same author and nearly same title exist in newdb
                if duplicate_action == 'add_formats_to_existing':
                    new_book_id = automerge_book(automerge_action, book_id, mi, identical_book_list, newdb, format_map)
                    return_data['action'] = 'automerge'
                    return_data['new_book_id'] = new_book_id
                    postprocess_copy(book_id, new_book_id, new_authors, db, newdb, identical_books_data, duplicate_action)
                else:
                    return_data['action'] = 'duplicate'
                return return_data

        new_book_id = newdb.add_books(
            [(mi, format_map)], add_duplicates=True, apply_import_tags=tweaks['add_new_book_tags_when_importing_books'],
            preserve_uuid=preserve_uuid, run_hooks=False)[0][0]
        postprocess_copy(book_id, new_book_id, new_authors, db, newdb, identical_books_data, duplicate_action)
        return_data['new_book_id'] = new_book_id
        return return_data
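
A sketch of how copy_one_book above might be driven from caller code; src and dest are hypothetical open calibre database objects:

    # Hypothetical driver: src and dest stand in for real LibraryDatabase objects
    for book_id in (1, 2, 3):
        result = copy_one_book(
            book_id, src, dest,
            duplicate_action='add_formats_to_existing', automerge_action='overwrite')
        print(result['action'], result['book_id'], '->', result['new_book_id'])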
Example No. 21
 def do_test(cache, book_id):
     for field in (
         "path",
         "uuid",
         "author_sort",
         "timestamp",
         "pubdate",
         "title",
         "authors",
         "series_index",
         "sort",
     ):
         self.assertTrue(cache.field_for(field, book_id))
     for field in ("size", "cover"):
         self.assertFalse(cache.field_for(field, book_id))
     self.assertEqual(book_id, cache.fields["uuid"].table.uuid_to_id_map[cache.field_for("uuid", book_id)])
     self.assertLess(now() - cache.field_for("timestamp", book_id), timedelta(seconds=30))
     self.assertEqual(
         ("Created One", ("Creator One", "Creator Two")),
         (cache.field_for("title", book_id), cache.field_for("authors", book_id)),
     )
     self.assertEqual(cache.field_for("series_index", book_id), 1.0)
     self.assertEqual(cache.field_for("pubdate", book_id), UNDEFINED_DATE)
Example No. 22
    def test_legacy_setters(self):  # {{{
        'Test methods that are directly equivalent in the old and new interface'
        from calibre.ebooks.metadata.book.base import Metadata
        from calibre.utils.date import now
        n = now()
        ndb = self.init_legacy(self.cloned_library)
        amap = ndb.new_api.get_id_map('authors')
        sorts = [(aid, 's%d' % aid) for aid in amap]
        db = self.init_old(self.cloned_library)
        run_funcs(self, db, ndb, (
            ('+format_metadata', 1, 'FMT1', itemgetter('size')),
            ('+format_metadata', 1, 'FMT2', itemgetter('size')),
            ('+format_metadata', 2, 'FMT1', itemgetter('size')),
            ('get_tags', 0),
            ('get_tags', 1),
            ('get_tags', 2),
            ('is_tag_used', 'News'),
            ('is_tag_used', 'xchkjgfh'),
            ('bulk_modify_tags', (1, ), ['t1'], ['News']),
            ('bulk_modify_tags', (2, ), ['t1'], ['Tag One', 'Tag Two']),
            ('bulk_modify_tags', (3, ), ['t1', 't2', 't3']),
            (db.clean, ),
            ('@all_tags', ),
            ('@tags', 0),
            ('@tags', 1),
            ('@tags', 2),
            ('unapply_tags', 1, ['t1']),
            ('unapply_tags', 2, ['xxxx']),
            ('unapply_tags', 3, ['t2', 't3']),
            (db.clean, ),
            ('@all_tags', ),
            ('@tags', 0),
            ('@tags', 1),
            ('@tags', 2),
            ('update_last_modified', (1, ), True, n),
            ('update_last_modified', (3, ), True, n),
            ('metadata_last_modified', 1, True),
            ('metadata_last_modified', 3, True),
            ('set_sort_field_for_author', sorts[0][0], sorts[0][1]),
            ('set_sort_field_for_author', sorts[1][0], sorts[1][1]),
            ('set_sort_field_for_author', sorts[2][0], sorts[2][1]),
            ('set_link_field_for_author', sorts[0][0], sorts[0][1]),
            ('set_link_field_for_author', sorts[1][0], sorts[1][1]),
            ('set_link_field_for_author', sorts[2][0], sorts[2][1]),
            (db.refresh, ),
            ('author_sort', 0),
            ('author_sort', 1),
            ('author_sort', 2),
        ))
        omi = [db.get_metadata(x) for x in (0, 1, 2)]
        nmi = [ndb.get_metadata(x) for x in (0, 1, 2)]
        self.assertEqual([x.author_sort_map for x in omi],
                         [x.author_sort_map for x in nmi])
        self.assertEqual([x.author_link_map for x in omi],
                         [x.author_link_map for x in nmi])
        db.close()

        ndb = self.init_legacy(self.cloned_library)
        db = self.init_old(self.cloned_library)

        run_funcs(self, db, ndb, (
            (
                'set_authors',
                1,
                ('author one', ),
            ),
            ('set_authors', 2, ('author two', ), True, True, True),
            ('set_author_sort', 3, 'new_aus'),
            ('set_comment', 1, ''),
            ('set_comment', 2, None),
            ('set_comment', 3, '<p>a comment</p>'),
            ('set_has_cover', 1, True),
            ('set_has_cover', 2, True),
            ('set_has_cover', 3, 1),
            ('set_identifiers', 2, {
                'test': '',
                'a': 'b'
            }),
            ('set_identifiers', 3, {
                'id': '1',
                'isbn': '9783161484100'
            }),
            ('set_identifiers', 1, {}),
            ('set_languages', 1, ('en', )),
            ('set_languages', 2, ()),
            ('set_languages', 3, ('deu', 'spa', 'fra')),
            ('set_pubdate', 1, None),
            ('set_pubdate', 2, '2011-1-7'),
            ('set_series', 1, 'a series one'),
            ('set_series', 2, 'another series [7]'),
            ('set_series', 3, 'a third series'),
            ('set_publisher', 1, 'publisher two'),
            ('set_publisher', 2, None),
            ('set_publisher', 3, 'a third puB'),
            ('set_rating', 1, 2.3),
            ('set_rating', 2, 0),
            ('set_rating', 3, 8),
            ('set_timestamp', 1, None),
            ('set_timestamp', 2, '2011-1-7'),
            ('set_uuid', 1, None),
            ('set_uuid', 2, 'a test uuid'),
            ('set_title', 1, 'title two'),
            ('set_title', 2, None),
            ('set_title', 3, 'The Test Title'),
            ('set_tags', 1, ['a1', 'a2'], True),
            ('set_tags', 2, ['b1', 'tag one'], False, False, False, True),
            ('set_tags', 3, ['A1']),
            (db.refresh, ),
            ('title', 0),
            ('title', 1),
            ('title', 2),
            ('title_sort', 0),
            ('title_sort', 1),
            ('title_sort', 2),
            ('authors', 0),
            ('authors', 1),
            ('authors', 2),
            ('author_sort', 0),
            ('author_sort', 1),
            ('author_sort', 2),
            ('has_cover', 3),
            ('has_cover', 1),
            ('has_cover', 2),
            ('get_identifiers', 0),
            ('get_identifiers', 1),
            ('get_identifiers', 2),
            ('pubdate', 0),
            ('pubdate', 1),
            ('pubdate', 2),
            ('timestamp', 0),
            ('timestamp', 1),
            ('timestamp', 2),
            ('publisher', 0),
            ('publisher', 1),
            ('publisher', 2),
            ('rating', 0),
            ('+rating', 1, lambda x: x or 0),
            ('rating', 2),
            ('series', 0),
            ('series', 1),
            ('series', 2),
            ('series_index', 0),
            ('series_index', 1),
            ('series_index', 2),
            ('uuid', 0),
            ('uuid', 1),
            ('uuid', 2),
            ('isbn', 0),
            ('isbn', 1),
            ('isbn', 2),
            ('@tags', 0),
            ('@tags', 1),
            ('@tags', 2),
            ('@all_tags', ),
            ('@get_all_identifier_types', ),
            ('set_title_sort', 1, 'Title Two'),
            ('set_title_sort', 2, None),
            ('set_title_sort', 3, 'The Test Title_sort'),
            ('set_series_index', 1, 2.3),
            ('set_series_index', 2, 0),
            ('set_series_index', 3, 8),
            ('set_identifier', 1, 'moose', 'val'),
            ('set_identifier', 2, 'test', ''),
            ('set_identifier', 3, '', ''),
            (db.refresh, ),
            ('series_index', 0),
            ('series_index', 1),
            ('series_index', 2),
            ('title_sort', 0),
            ('title_sort', 1),
            ('title_sort', 2),
            ('get_identifiers', 0),
            ('get_identifiers', 1),
            ('get_identifiers', 2),
            ('@get_all_identifier_types', ),
            ('set_metadata', 1, Metadata(
                'title', ('a1', )), False, False, False, True, True),
            ('set_metadata', 3, Metadata('title', ('a1', ))),
            (db.refresh, ),
            ('title', 0),
            ('title', 1),
            ('title', 2),
            ('title_sort', 0),
            ('title_sort', 1),
            ('title_sort', 2),
            ('authors', 0),
            ('authors', 1),
            ('authors', 2),
            ('author_sort', 0),
            ('author_sort', 1),
            ('author_sort', 2),
            ('@tags', 0),
            ('@tags', 1),
            ('@tags', 2),
            ('@all_tags', ),
            ('@get_all_identifier_types', ),
        ))
        db.close()

        ndb = self.init_legacy(self.cloned_library)
        db = self.init_old(self.cloned_library)

        run_funcs(self, db, ndb, (
            ('set', 0, 'title', 'newtitle'),
            ('set', 0, 'tags', 't1,t2,tag one', True),
            ('set', 0, 'authors', 'author one & Author Two', True),
            ('set', 0, 'rating', 3.2),
            ('set', 0, 'publisher', 'publisher one', False),
            (db.refresh, ),
            ('title', 0),
            ('rating', 0),
            ('#tags', 0),
            ('#tags', 1),
            ('#tags', 2),
            ('authors', 0),
            ('authors', 1),
            ('authors', 2),
            ('publisher', 0),
            ('publisher', 1),
            ('publisher', 2),
            ('delete_tag', 'T1'),
            ('delete_tag', 'T2'),
            ('delete_tag', 'Tag one'),
            ('delete_tag', 'News'),
            (db.clean, ),
            (db.refresh, ),
            ('@all_tags', ),
            ('#tags', 0),
            ('#tags', 1),
            ('#tags', 2),
        ))
        db.close()

        ndb = self.init_legacy(self.cloned_library)
        db = self.init_old(self.cloned_library)
        run_funcs(self, db, ndb, (
            ('remove_all_tags', (1, 2, 3)),
            (db.clean, ),
            ('@all_tags', ),
            ('@tags', 0),
            ('@tags', 1),
            ('@tags', 2),
        ))
        db.close()

        ndb = self.init_legacy(self.cloned_library)
        db = self.init_old(self.cloned_library)
        a = {v: k
             for k, v in ndb.new_api.get_id_map('authors').iteritems()
             }['Author One']
        t = {v: k
             for k, v in ndb.new_api.get_id_map('tags').iteritems()}['Tag One']
        s = {v: k
             for k, v in ndb.new_api.get_id_map('series').iteritems()
             }['A Series One']
        p = {v: k
             for k, v in ndb.new_api.get_id_map('publisher').iteritems()
             }['Publisher One']
        run_funcs(self, db, ndb, (
            ('rename_author', a, 'Author Two'),
            ('rename_tag', t, 'News'),
            ('rename_series', s, 'ss'),
            ('rename_publisher', p, 'publisher one'),
            (db.clean, ),
            (db.refresh, ),
            ('@all_tags', ),
            ('tags', 0),
            ('tags', 1),
            ('tags', 2),
            ('series', 0),
            ('series', 1),
            ('series', 2),
            ('publisher', 0),
            ('publisher', 1),
            ('publisher', 2),
            ('series_index', 0),
            ('series_index', 1),
            ('series_index', 2),
            ('authors', 0),
            ('authors', 1),
            ('authors', 2),
            ('author_sort', 0),
            ('author_sort', 1),
            ('author_sort', 2),
        ))
        db.close()
Example No. 23
 def evaluate(self, formatter, kwargs, mi, locals):
     return format_date(now(), 'iso')
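
format_date accepts the special 'iso' format used above, or a template built from date fields; a small sketch:

    from calibre.utils.date import format_date, now

    print(format_date(now(), 'iso'))          # full ISO-8601 timestamp
    print(format_date(now(), 'dd MMM yyyy'))  # e.g. '07 Jan 2011'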
Example No. 24
def metadata_to_xmp_packet(mi):
    A = ElementMaker(namespace=NS_MAP["x"], nsmap=nsmap("x"))
    R = ElementMaker(namespace=NS_MAP["rdf"], nsmap=nsmap("rdf"))
    root = A.xmpmeta(R.RDF)
    rdf = root[0]
    dc = rdf.makeelement(expand("rdf:Description"), nsmap=nsmap("dc"))
    dc.set(expand("rdf:about"), "")
    rdf.append(dc)
    for prop, tag in {"title": "dc:title", "comments": "dc:description"}.iteritems():
        val = mi.get(prop) or ""
        create_alt_property(dc, tag, val)
    for prop, (tag, ordered) in {
        "authors": ("dc:creator", True),
        "tags": ("dc:subject", False),
        "publisher": ("dc:publisher", False),
    }.iteritems():
        val = mi.get(prop) or ()
        if isinstance(val, basestring):
            val = [val]
        create_sequence_property(dc, tag, val, ordered)
    if not mi.is_null("pubdate"):
        create_sequence_property(
            dc, "dc:date", [isoformat(mi.pubdate, as_utc=False)]
        )  # Adobe spec recommends local time
    if not mi.is_null("languages"):
        langs = filter(None, map(lambda x: lang_as_iso639_1(x) or canonicalize_lang(x), mi.languages))
        if langs:
            create_sequence_property(dc, "dc:language", langs, ordered=False)

    xmp = rdf.makeelement(expand("rdf:Description"), nsmap=nsmap("xmp", "xmpidq"))
    xmp.set(expand("rdf:about"), "")
    rdf.append(xmp)
    extra_ids = {}
    for x in ("prism", "pdfx"):
        p = extra_ids[x] = rdf.makeelement(expand("rdf:Description"), nsmap=nsmap(x))
        p.set(expand("rdf:about"), "")
        rdf.append(p)

    identifiers = mi.get_identifiers()
    if identifiers:
        create_identifiers(xmp, identifiers)
        for scheme, val in identifiers.iteritems():
            if scheme in {"isbn", "doi"}:
                for prefix, parent in extra_ids.iteritems():
                    ie = parent.makeelement(expand("%s:%s" % (prefix, scheme)))
                    ie.text = val
                    parent.append(ie)

    d = xmp.makeelement(expand("xmp:MetadataDate"))
    d.text = isoformat(now(), as_utc=False)
    xmp.append(d)

    calibre = rdf.makeelement(expand("rdf:Description"), nsmap=nsmap("calibre", "calibreSI", "calibreCC"))
    calibre.set(expand("rdf:about"), "")
    rdf.append(calibre)
    if not mi.is_null("rating"):
        try:
            r = float(mi.rating)
        except (TypeError, ValueError):
            pass
        else:
            create_simple_property(calibre, "calibre:rating", "%g" % r)
    if not mi.is_null("series"):
        create_series(calibre, mi.series, mi.series_index)
    if not mi.is_null("timestamp"):
        create_simple_property(calibre, "calibre:timestamp", isoformat(mi.timestamp, as_utc=False))
    for x in ("author_link_map", "user_categories"):
        val = getattr(mi, x, None)
        if val:
            create_simple_property(calibre, "calibre:" + x, dump_dict(val))

    for x in ("title_sort", "author_sort"):
        if not mi.is_null(x):
            create_simple_property(calibre, "calibre:" + x, getattr(mi, x))

    all_user_metadata = mi.get_all_user_metadata(True)
    if all_user_metadata:
        create_user_metadata(calibre, all_user_metadata)
    return serialize_xmp_packet(root)
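
A sketch of invoking the packet builder, assuming it is importable from calibre.ebooks.metadata.xmp; the result is a serialized XMP packet suitable for embedding in a PDF:

    from calibre.ebooks.metadata.book.base import Metadata
    from calibre.ebooks.metadata.xmp import metadata_to_xmp_packet

    mi = Metadata('A sample book', ['Author One'])
    packet = metadata_to_xmp_packet(mi)
    print(packet[:80])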
Example No. 25
    def do_one(self, num, book_id, newdb):
        mi = self.db.get_metadata(book_id,
                                  index_is_id=True,
                                  get_cover=True,
                                  cover_as_data=True)
        if not gprefs['preserve_date_on_ctl']:
            mi.timestamp = now()
        self.progress(num, mi.title)
        fmts = self.db.formats(book_id, index_is_id=True)
        if not fmts:
            fmts = []
        else:
            fmts = fmts.split(',')
        identical_book_list = set()
        paths = []
        for fmt in fmts:
            p = self.db.format(book_id, fmt, index_is_id=True, as_path=True)
            if p:
                paths.append(p)
        try:
            if self.check_for_duplicates:
                # Scanning for dupes can be slow on a large library so
                # only do it if the option is set
                identical_book_list = find_identical_books(
                    mi, self.find_identical_books_data)
                if identical_book_list:  # books with same author and nearly same title exist in newdb
                    if prefs['add_formats_to_existing']:
                        self.automerge_book(book_id, mi, identical_book_list,
                                            paths, newdb)
                    else:  # Report duplicates for later processing
                        self.duplicate_ids[book_id] = (mi.title, mi.authors)
                    return

            new_authors = {
                k
                for k, v in newdb.new_api.get_item_ids(
                    'authors', mi.authors).iteritems() if v is None
            }
            new_book_id = newdb.import_book(
                mi,
                paths,
                notify=False,
                import_hooks=False,
                apply_import_tags=tweaks[
                    'add_new_book_tags_when_importing_books'],
                preserve_uuid=self.delete_after)
            if new_authors:
                author_id_map = self.db.new_api.get_item_ids(
                    'authors', new_authors)
                sort_map, link_map = {}, {}
                for author, aid in author_id_map.iteritems():
                    if aid is not None:
                        adata = self.db.new_api.author_data((aid, )).get(aid)
                        if adata is not None:
                            aid = newdb.new_api.get_item_id('authors', author)
                            if aid is not None:
                                asv = adata.get('sort')
                                if asv:
                                    sort_map[aid] = asv
                                alv = adata.get('link')
                                if alv:
                                    link_map[aid] = alv
                if sort_map:
                    newdb.new_api.set_sort_for_authors(sort_map,
                                                       update_books=False)
                if link_map:
                    newdb.new_api.set_link_for_authors(link_map)

            co = self.db.conversion_options(book_id, 'PIPE')
            if co is not None:
                newdb.set_conversion_options(new_book_id, 'PIPE', co)
            if self.check_for_duplicates:
                newdb.new_api.update_data_for_find_identical_books(
                    new_book_id, self.find_identical_books_data)
            self.processed.add(book_id)
        finally:
            for path in paths:
                try:
                    os.remove(path)
                except:
                    pass
Example No. 26
    def test_legacy_setters(self):  # {{{
        'Test methods that are directly equivalent in the old and new interface'
        from calibre.ebooks.metadata.book.base import Metadata
        from calibre.utils.date import now
        n = now()
        ndb = self.init_legacy(self.cloned_library)
        amap = ndb.new_api.get_id_map('authors')
        sorts = [(aid, 's%d' % aid) for aid in amap]
        db = self.init_old(self.cloned_library)
        run_funcs(self, db, ndb, (
            ('+format_metadata', 1, 'FMT1', itemgetter('size')),
            ('+format_metadata', 1, 'FMT2', itemgetter('size')),
            ('+format_metadata', 2, 'FMT1', itemgetter('size')),
            ('get_tags', 0), ('get_tags', 1), ('get_tags', 2),
            ('is_tag_used', 'News'), ('is_tag_used', 'xchkjgfh'),
            ('bulk_modify_tags', (1,), ['t1'], ['News']),
            ('bulk_modify_tags', (2,), ['t1'], ['Tag One', 'Tag Two']),
            ('bulk_modify_tags', (3,), ['t1', 't2', 't3']),
            (db.clean,),
            ('@all_tags',),
            ('@tags', 0), ('@tags', 1), ('@tags', 2),

            ('unapply_tags', 1, ['t1']),
            ('unapply_tags', 2, ['xxxx']),
            ('unapply_tags', 3, ['t2', 't3']),
            (db.clean,),
            ('@all_tags',),
            ('@tags', 0), ('@tags', 1), ('@tags', 2),

            ('update_last_modified', (1,), True, n), ('update_last_modified', (3,), True, n),
            ('metadata_last_modified', 1, True), ('metadata_last_modified', 3, True),
            ('set_sort_field_for_author', sorts[0][0], sorts[0][1]),
            ('set_sort_field_for_author', sorts[1][0], sorts[1][1]),
            ('set_sort_field_for_author', sorts[2][0], sorts[2][1]),
            ('set_link_field_for_author', sorts[0][0], sorts[0][1]),
            ('set_link_field_for_author', sorts[1][0], sorts[1][1]),
            ('set_link_field_for_author', sorts[2][0], sorts[2][1]),
            (db.refresh,),
            ('author_sort', 0), ('author_sort', 1), ('author_sort', 2),
        ))
        omi = [db.get_metadata(x) for x in (0, 1, 2)]
        nmi = [ndb.get_metadata(x) for x in (0, 1, 2)]
        self.assertEqual([x.author_sort_map for x in omi], [x.author_sort_map for x in nmi])
        self.assertEqual([x.author_link_map for x in omi], [x.author_link_map for x in nmi])
        db.close()

        ndb = self.init_legacy(self.cloned_library)
        db = self.init_old(self.cloned_library)

        run_funcs(self, db, ndb, (
            ('set_authors', 1, ('author one',),), ('set_authors', 2, ('author two',), True, True, True),
            ('set_author_sort', 3, 'new_aus'),
            ('set_comment', 1, ''), ('set_comment', 2, None), ('set_comment', 3, '<p>a comment</p>'),
            ('set_has_cover', 1, True), ('set_has_cover', 2, True), ('set_has_cover', 3, 1),
            ('set_identifiers', 2, {'test':'', 'a':'b'}), ('set_identifiers', 3, {'id':'1', 'isbn':'9783161484100'}), ('set_identifiers', 1, {}),
            ('set_languages', 1, ('en',)),
            ('set_languages', 2, ()),
            ('set_languages', 3, ('deu', 'spa', 'fra')),
            ('set_pubdate', 1, None), ('set_pubdate', 2, '2011-1-7'),
            ('set_series', 1, 'a series one'), ('set_series', 2, 'another series [7]'), ('set_series', 3, 'a third series'),
            ('set_publisher', 1, 'publisher two'), ('set_publisher', 2, None), ('set_publisher', 3, 'a third puB'),
            ('set_rating', 1, 2.3), ('set_rating', 2, 0), ('set_rating', 3, 8),
            ('set_timestamp', 1, None), ('set_timestamp', 2, '2011-1-7'),
            ('set_uuid', 1, None), ('set_uuid', 2, 'a test uuid'),
            ('set_title', 1, 'title two'), ('set_title', 2, None), ('set_title', 3, 'The Test Title'),
            ('set_tags', 1, ['a1', 'a2'], True), ('set_tags', 2, ['b1', 'tag one'], False, False, False, True), ('set_tags', 3, ['A1']),
            (db.refresh,),
            ('title', 0), ('title', 1), ('title', 2),
            ('title_sort', 0), ('title_sort', 1), ('title_sort', 2),
            ('authors', 0), ('authors', 1), ('authors', 2),
            ('author_sort', 0), ('author_sort', 1), ('author_sort', 2),
            ('has_cover', 3), ('has_cover', 1), ('has_cover', 2),
            ('get_identifiers', 0), ('get_identifiers', 1), ('get_identifiers', 2),
            ('pubdate', 0), ('pubdate', 1), ('pubdate', 2),
            ('timestamp', 0), ('timestamp', 1), ('timestamp', 2),
            ('publisher', 0), ('publisher', 1), ('publisher', 2),
            ('rating', 0), ('+rating', 1, lambda x: x or 0), ('rating', 2),
            ('series', 0), ('series', 1), ('series', 2),
            ('series_index', 0), ('series_index', 1), ('series_index', 2),
            ('uuid', 0), ('uuid', 1), ('uuid', 2),
            ('isbn', 0), ('isbn', 1), ('isbn', 2),
            ('@tags', 0), ('@tags', 1), ('@tags', 2),
            ('@all_tags',),
            ('@get_all_identifier_types',),

            ('set_title_sort', 1, 'Title Two'), ('set_title_sort', 2, None), ('set_title_sort', 3, 'The Test Title_sort'),
            ('set_series_index', 1, 2.3), ('set_series_index', 2, 0), ('set_series_index', 3, 8),
            ('set_identifier', 1, 'moose', 'val'), ('set_identifier', 2, 'test', ''), ('set_identifier', 3, '', ''),
            (db.refresh,),
            ('series_index', 0), ('series_index', 1), ('series_index', 2),
            ('title_sort', 0), ('title_sort', 1), ('title_sort', 2),
            ('get_identifiers', 0), ('get_identifiers', 1), ('get_identifiers', 2),
            ('@get_all_identifier_types',),

            ('set_metadata', 1, Metadata('title', ('a1',)), False, False, False, True, True),
            ('set_metadata', 3, Metadata('title', ('a1',))),
            (db.refresh,),
            ('title', 0), ('title', 1), ('title', 2),
            ('title_sort', 0), ('title_sort', 1), ('title_sort', 2),
            ('authors', 0), ('authors', 1), ('authors', 2),
            ('author_sort', 0), ('author_sort', 1), ('author_sort', 2),
            ('@tags', 0), ('@tags', 1), ('@tags', 2),
            ('@all_tags',),
            ('@get_all_identifier_types',),
        ))
        db.close()

        ndb = self.init_legacy(self.cloned_library)
        db = self.init_old(self.cloned_library)

        run_funcs(self, db, ndb, (
            ('set', 0, 'title', 'newtitle'),
            ('set', 0, 'tags', 't1,t2,tag one', True),
            ('set', 0, 'authors', 'author one & Author Two', True),
            ('set', 0, 'rating', 3.2),
            ('set', 0, 'publisher', 'publisher one', False),
            (db.refresh,),
            ('title', 0),
            ('rating', 0),
            ('#tags', 0), ('#tags', 1), ('#tags', 2),
            ('authors', 0), ('authors', 1), ('authors', 2),
            ('publisher', 0), ('publisher', 1), ('publisher', 2),
            ('delete_tag', 'T1'), ('delete_tag', 'T2'), ('delete_tag', 'Tag one'), ('delete_tag', 'News'),
            (db.clean,), (db.refresh,),
            ('@all_tags',),
            ('#tags', 0), ('#tags', 1), ('#tags', 2),
        ))
        db.close()

        ndb = self.init_legacy(self.cloned_library)
        db = self.init_old(self.cloned_library)
        run_funcs(self, db, ndb, (
            ('remove_all_tags', (1, 2, 3)),
            (db.clean,),
            ('@all_tags',),
            ('@tags', 0), ('@tags', 1), ('@tags', 2),
        ))
        db.close()

        ndb = self.init_legacy(self.cloned_library)
        db = self.init_old(self.cloned_library)
        a = {v:k for k, v in ndb.new_api.get_id_map('authors').iteritems()}['Author One']
        t = {v:k for k, v in ndb.new_api.get_id_map('tags').iteritems()}['Tag One']
        s = {v:k for k, v in ndb.new_api.get_id_map('series').iteritems()}['A Series One']
        p = {v:k for k, v in ndb.new_api.get_id_map('publisher').iteritems()}['Publisher One']
        run_funcs(self, db, ndb, (
            ('rename_author', a, 'Author Two'),
            ('rename_tag', t, 'News'),
            ('rename_series', s, 'ss'),
            ('rename_publisher', p, 'publisher one'),
            (db.clean,),
            (db.refresh,),
            ('@all_tags',),
            ('tags', 0), ('tags', 1), ('tags', 2),
            ('series', 0), ('series', 1), ('series', 2),
            ('publisher', 0), ('publisher', 1), ('publisher', 2),
            ('series_index', 0), ('series_index', 1), ('series_index', 2),
            ('authors', 0), ('authors', 1), ('authors', 2),
            ('author_sort', 0), ('author_sort', 1), ('author_sort', 2),
        ))
        db.close()
Example No. 27
    def test_legacy_setters(self):  # {{{
        "Test methods that are directly equivalent in the old and new interface"
        from calibre.ebooks.metadata.book.base import Metadata
        from calibre.utils.date import now

        n = now()
        ndb = self.init_legacy(self.cloned_library)
        amap = ndb.new_api.get_id_map("authors")
        sorts = [(aid, "s%d" % aid) for aid in amap]
        db = self.init_old(self.cloned_library)
        run_funcs(
            self,
            db,
            ndb,
            (
                ("+format_metadata", 1, "FMT1", itemgetter("size")),
                ("+format_metadata", 1, "FMT2", itemgetter("size")),
                ("+format_metadata", 2, "FMT1", itemgetter("size")),
                ("get_tags", 0),
                ("get_tags", 1),
                ("get_tags", 2),
                ("is_tag_used", "News"),
                ("is_tag_used", "xchkjgfh"),
                ("bulk_modify_tags", (1,), ["t1"], ["News"]),
                ("bulk_modify_tags", (2,), ["t1"], ["Tag One", "Tag Two"]),
                ("bulk_modify_tags", (3,), ["t1", "t2", "t3"]),
                (db.clean,),
                ("@all_tags",),
                ("@tags", 0),
                ("@tags", 1),
                ("@tags", 2),
                ("unapply_tags", 1, ["t1"]),
                ("unapply_tags", 2, ["xxxx"]),
                ("unapply_tags", 3, ["t2", "t3"]),
                (db.clean,),
                ("@all_tags",),
                ("@tags", 0),
                ("@tags", 1),
                ("@tags", 2),
                ("update_last_modified", (1,), True, n),
                ("update_last_modified", (3,), True, n),
                ("metadata_last_modified", 1, True),
                ("metadata_last_modified", 3, True),
                ("set_sort_field_for_author", sorts[0][0], sorts[0][1]),
                ("set_sort_field_for_author", sorts[1][0], sorts[1][1]),
                ("set_sort_field_for_author", sorts[2][0], sorts[2][1]),
                ("set_link_field_for_author", sorts[0][0], sorts[0][1]),
                ("set_link_field_for_author", sorts[1][0], sorts[1][1]),
                ("set_link_field_for_author", sorts[2][0], sorts[2][1]),
                (db.refresh,),
                ("author_sort", 0),
                ("author_sort", 1),
                ("author_sort", 2),
            ),
        )
        omi = [db.get_metadata(x) for x in (0, 1, 2)]
        nmi = [ndb.get_metadata(x) for x in (0, 1, 2)]
        self.assertEqual([x.author_sort_map for x in omi], [x.author_sort_map for x in nmi])
        self.assertEqual([x.author_link_map for x in omi], [x.author_link_map for x in nmi])
        db.close()

        ndb = self.init_legacy(self.cloned_library)
        db = self.init_old(self.cloned_library)

        run_funcs(
            self,
            db,
            ndb,
            (
                ("set_authors", 1, ("author one",)),
                ("set_authors", 2, ("author two",), True, True, True),
                ("set_author_sort", 3, "new_aus"),
                ("set_comment", 1, ""),
                ("set_comment", 2, None),
                ("set_comment", 3, "<p>a comment</p>"),
                ("set_has_cover", 1, True),
                ("set_has_cover", 2, True),
                ("set_has_cover", 3, 1),
                ("set_identifiers", 2, {"test": "", "a": "b"}),
                ("set_identifiers", 3, {"id": "1", "isbn": "9783161484100"}),
                ("set_identifiers", 1, {}),
                ("set_languages", 1, ("en",)),
                ("set_languages", 2, ()),
                ("set_languages", 3, ("deu", "spa", "fra")),
                ("set_pubdate", 1, None),
                ("set_pubdate", 2, "2011-1-7"),
                ("set_series", 1, "a series one"),
                ("set_series", 2, "another series [7]"),
                ("set_series", 3, "a third series"),
                ("set_publisher", 1, "publisher two"),
                ("set_publisher", 2, None),
                ("set_publisher", 3, "a third puB"),
                ("set_rating", 1, 2.3),
                ("set_rating", 2, 0),
                ("set_rating", 3, 8),
                ("set_timestamp", 1, None),
                ("set_timestamp", 2, "2011-1-7"),
                ("set_uuid", 1, None),
                ("set_uuid", 2, "a test uuid"),
                ("set_title", 1, "title two"),
                ("set_title", 2, None),
                ("set_title", 3, "The Test Title"),
                ("set_tags", 1, ["a1", "a2"], True),
                ("set_tags", 2, ["b1", "tag one"], False, False, False, True),
                ("set_tags", 3, ["A1"]),
                (db.refresh,),
                ("title", 0),
                ("title", 1),
                ("title", 2),
                ("title_sort", 0),
                ("title_sort", 1),
                ("title_sort", 2),
                ("authors", 0),
                ("authors", 1),
                ("authors", 2),
                ("author_sort", 0),
                ("author_sort", 1),
                ("author_sort", 2),
                ("has_cover", 3),
                ("has_cover", 1),
                ("has_cover", 2),
                ("get_identifiers", 0),
                ("get_identifiers", 1),
                ("get_identifiers", 2),
                ("pubdate", 0),
                ("pubdate", 1),
                ("pubdate", 2),
                ("timestamp", 0),
                ("timestamp", 1),
                ("timestamp", 2),
                ("publisher", 0),
                ("publisher", 1),
                ("publisher", 2),
                ("rating", 0),
                ("+rating", 1, lambda x: x or 0),
                ("rating", 2),
                ("series", 0),
                ("series", 1),
                ("series", 2),
                ("series_index", 0),
                ("series_index", 1),
                ("series_index", 2),
                ("uuid", 0),
                ("uuid", 1),
                ("uuid", 2),
                ("isbn", 0),
                ("isbn", 1),
                ("isbn", 2),
                ("@tags", 0),
                ("@tags", 1),
                ("@tags", 2),
                ("@all_tags",),
                ("@get_all_identifier_types",),
                ("set_title_sort", 1, "Title Two"),
                ("set_title_sort", 2, None),
                ("set_title_sort", 3, "The Test Title_sort"),
                ("set_series_index", 1, 2.3),
                ("set_series_index", 2, 0),
                ("set_series_index", 3, 8),
                ("set_identifier", 1, "moose", "val"),
                ("set_identifier", 2, "test", ""),
                ("set_identifier", 3, "", ""),
                (db.refresh,),
                ("series_index", 0),
                ("series_index", 1),
                ("series_index", 2),
                ("title_sort", 0),
                ("title_sort", 1),
                ("title_sort", 2),
                ("get_identifiers", 0),
                ("get_identifiers", 1),
                ("get_identifiers", 2),
                ("@get_all_identifier_types",),
                ("set_metadata", 1, Metadata("title", ("a1",)), False, False, False, True, True),
                ("set_metadata", 3, Metadata("title", ("a1",))),
                (db.refresh,),
                ("title", 0),
                ("title", 1),
                ("title", 2),
                ("title_sort", 0),
                ("title_sort", 1),
                ("title_sort", 2),
                ("authors", 0),
                ("authors", 1),
                ("authors", 2),
                ("author_sort", 0),
                ("author_sort", 1),
                ("author_sort", 2),
                ("@tags", 0),
                ("@tags", 1),
                ("@tags", 2),
                ("@all_tags",),
                ("@get_all_identifier_types",),
            ),
        )
        db.close()

        ndb = self.init_legacy(self.cloned_library)
        db = self.init_old(self.cloned_library)

        run_funcs(
            self,
            db,
            ndb,
            (
                ("set", 0, "title", "newtitle"),
                ("set", 0, "tags", "t1,t2,tag one", True),
                ("set", 0, "authors", "author one & Author Two", True),
                ("set", 0, "rating", 3.2),
                ("set", 0, "publisher", "publisher one", False),
                (db.refresh,),
                ("title", 0),
                ("rating", 0),
                ("#tags", 0),
                ("#tags", 1),
                ("#tags", 2),
                ("authors", 0),
                ("authors", 1),
                ("authors", 2),
                ("publisher", 0),
                ("publisher", 1),
                ("publisher", 2),
                ("delete_tag", "T1"),
                ("delete_tag", "T2"),
                ("delete_tag", "Tag one"),
                ("delete_tag", "News"),
                (db.clean,),
                (db.refresh,),
                ("@all_tags",),
                ("#tags", 0),
                ("#tags", 1),
                ("#tags", 2),
            ),
        )
        db.close()

        ndb = self.init_legacy(self.cloned_library)
        db = self.init_old(self.cloned_library)
        run_funcs(
            self,
            db,
            ndb,
            (("remove_all_tags", (1, 2, 3)), (db.clean,), ("@all_tags",), ("@tags", 0), ("@tags", 1), ("@tags", 2)),
        )
        db.close()

        ndb = self.init_legacy(self.cloned_library)
        db = self.init_old(self.cloned_library)
        a = {v: k for k, v in ndb.new_api.get_id_map("authors").iteritems()}["Author One"]
        t = {v: k for k, v in ndb.new_api.get_id_map("tags").iteritems()}["Tag One"]
        s = {v: k for k, v in ndb.new_api.get_id_map("series").iteritems()}["A Series One"]
        p = {v: k for k, v in ndb.new_api.get_id_map("publisher").iteritems()}["Publisher One"]
        run_funcs(
            self,
            db,
            ndb,
            (
                ("rename_author", a, "Author Two"),
                ("rename_tag", t, "News"),
                ("rename_series", s, "ss"),
                ("rename_publisher", p, "publisher one"),
                (db.clean,),
                (db.refresh,),
                ("@all_tags",),
                ("tags", 0),
                ("tags", 1),
                ("tags", 2),
                ("series", 0),
                ("series", 1),
                ("series", 2),
                ("publisher", 0),
                ("publisher", 1),
                ("publisher", 2),
                ("series_index", 0),
                ("series_index", 1),
                ("series_index", 2),
                ("authors", 0),
                ("authors", 1),
                ("authors", 2),
                ("author_sort", 0),
                ("author_sort", 1),
                ("author_sort", 2),
            ),
        )
        db.close()
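Throughout these tests, run_funcs drives the same call against both the legacy
interface (db) and the new one (ndb) and asserts that the two return the same
result. The helper itself is not shown in these examples; the following is a
minimal sketch of what it presumably does, with the '+', '@' and '#' prefix
semantics inferred from how it is called above (a hypothetical reconstruction,
not the real test helper):

def run_funcs(test, db, ndb, funcs):
    for func in funcs:
        meth, args = func[0], list(func[1:])
        if callable(meth):
            # Entries like (db.refresh,) are invoked directly, for side effects
            meth(*args)
            continue
        fmt = lambda x: x
        if meth.startswith('+'):
            # '+name': the last argument normalises the result before comparing
            fmt, args = args[-1], args[:-1]
            meth = meth[1:]
        elif meth.startswith('@'):
            # '@name': compare as an unordered collection
            fmt = lambda x: frozenset(x or ())
            meth = meth[1:]
        elif meth.startswith('#'):
            # '#name': compare a comma-separated string as a set
            fmt = lambda x: set((x or '').split(','))
            meth = meth[1:]
        test.assertEqual(fmt(getattr(db, meth)(*args)),
                         fmt(getattr(ndb, meth)(*args)),
                         'Mismatch for %s with args %r' % (meth, tuple(args)))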
Example No. 34
    def do_one(self, num, book_id, newdb):
        mi = self.db.get_metadata(book_id, index_is_id=True, get_cover=True, cover_as_data=True)
        if not gprefs['preserve_date_on_ctl']:
            mi.timestamp = now()
        self.progress(num, mi.title)
        fmts = self.db.formats(book_id, index_is_id=True)
        if not fmts:
            fmts = []
        else:
            fmts = fmts.split(',')
        identical_book_list = set()
        paths = []
        for fmt in fmts:
            p = self.db.format(book_id, fmt, index_is_id=True,
                as_path=True)
            if p:
                paths.append(p)
        try:
            if self.check_for_duplicates:
                # Scanning for dupes can be slow on a large library so
                # only do it if the option is set
                identical_book_list = find_identical_books(mi, self.find_identical_books_data)
                if identical_book_list:  # books with same author and nearly same title exist in newdb
                    if prefs['add_formats_to_existing']:
                        self.automerge_book(book_id, mi, identical_book_list, paths, newdb)
                    else:  # Report duplicates for later processing
                        self.duplicate_ids[book_id] = (mi.title, mi.authors)
                    return

            new_authors = {k for k, v in newdb.new_api.get_item_ids('authors', mi.authors).iteritems() if v is None}
            new_book_id = newdb.import_book(mi, paths, notify=False, import_hooks=False,
                apply_import_tags=tweaks['add_new_book_tags_when_importing_books'],
                preserve_uuid=self.delete_after)
            if new_authors:
                author_id_map = self.db.new_api.get_item_ids('authors', new_authors)
                sort_map, link_map = {}, {}
                for author, aid in author_id_map.iteritems():
                    if aid is not None:
                        adata = self.db.new_api.author_data((aid,)).get(aid)
                        if adata is not None:
                            # Map the author to its id in the destination library
                            dest_aid = newdb.new_api.get_item_id('authors', author)
                            if dest_aid is not None:
                                asv = adata.get('sort')
                                if asv:
                                    sort_map[dest_aid] = asv
                                alv = adata.get('link')
                                if alv:
                                    link_map[dest_aid] = alv
                if sort_map:
                    newdb.new_api.set_sort_for_authors(sort_map, update_books=False)
                if link_map:
                    newdb.new_api.set_link_for_authors(link_map)

            co = self.db.conversion_options(book_id, 'PIPE')
            if co is not None:
                newdb.set_conversion_options(new_book_id, 'PIPE', co)
            if self.check_for_duplicates:
                newdb.new_api.update_data_for_find_identical_books(new_book_id, self.find_identical_books_data)
            self.processed.add(book_id)
        finally:
            for path in paths:
                try:
                    os.remove(path)
                except:
                    pass
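The duplicate check above depends on state prepared elsewhere:
find_identical_books (presumably the helper from calibre.db.utils) and
self.find_identical_books_data, a snapshot of title/author data for the
destination library that is kept current via
update_data_for_find_identical_books() after each import. A hedged sketch of
how that state is plausibly initialised, once per copy job (the method name
and call site here are assumptions):

def setup_duplicate_check(self, newdb, check_for_duplicates):
    # check_for_duplicates reflects however the caller is configured
    self.check_for_duplicates = check_for_duplicates
    self.duplicate_ids = {}
    self.processed = set()
    if check_for_duplicates:
        # One-time snapshot of the destination library's title/author data
        self.find_identical_books_data = newdb.new_api.data_for_find_identical_books()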
Example No. 35
def metadata_to_xmp_packet(mi):
    A = ElementMaker(namespace=NS_MAP['x'], nsmap=nsmap('x'))
    R = ElementMaker(namespace=NS_MAP['rdf'], nsmap=nsmap('rdf'))
    root = A.xmpmeta(R.RDF)
    rdf = root[0]
    dc = rdf.makeelement(expand('rdf:Description'), nsmap=nsmap('dc'))
    dc.set(expand('rdf:about'), '')
    rdf.append(dc)
    for prop, tag in iteritems({
            'title': 'dc:title',
            'comments': 'dc:description'
    }):
        val = mi.get(prop) or ''
        create_alt_property(dc, tag, val)
    for prop, (tag, ordered) in iteritems({
            'authors': ('dc:creator', True),
            'tags': ('dc:subject', False),
            'publisher': ('dc:publisher', False),
    }):
        val = mi.get(prop) or ()
        if isinstance(val, string_or_bytes):
            val = [val]
        create_sequence_property(dc, tag, val, ordered)
    if not mi.is_null('pubdate'):
        create_sequence_property(dc, 'dc:date',
                                 [isoformat(mi.pubdate, as_utc=False)
                                  ])  # Adobe spec recommends local time
    if not mi.is_null('languages'):
        langs = list(
            filter(
                None,
                map(lambda x: lang_as_iso639_1(x) or canonicalize_lang(x),
                    mi.languages)))
        if langs:
            create_sequence_property(dc, 'dc:language', langs, ordered=False)

    xmp = rdf.makeelement(expand('rdf:Description'),
                          nsmap=nsmap('xmp', 'xmpidq'))
    xmp.set(expand('rdf:about'), '')
    rdf.append(xmp)
    extra_ids = {}
    for x in ('prism', 'pdfx'):
        p = extra_ids[x] = rdf.makeelement(expand('rdf:Description'),
                                           nsmap=nsmap(x))
        p.set(expand('rdf:about'), '')
        rdf.append(p)

    identifiers = mi.get_identifiers()
    if identifiers:
        create_identifiers(xmp, identifiers)
        for scheme, val in iteritems(identifiers):
            if scheme in {'isbn', 'doi'}:
                for prefix, parent in iteritems(extra_ids):
                    ie = parent.makeelement(expand('%s:%s' % (prefix, scheme)))
                    ie.text = val
                    parent.append(ie)

    d = xmp.makeelement(expand('xmp:MetadataDate'))
    d.text = isoformat(now(), as_utc=False)
    xmp.append(d)

    calibre = rdf.makeelement(expand('rdf:Description'),
                              nsmap=nsmap('calibre', 'calibreSI', 'calibreCC'))
    calibre.set(expand('rdf:about'), '')
    rdf.append(calibre)
    if not mi.is_null('rating'):
        try:
            r = float(mi.rating)
        except (TypeError, ValueError):
            pass
        else:
            create_simple_property(calibre, 'calibre:rating', '%g' % r)
    if not mi.is_null('series'):
        create_series(calibre, mi.series, mi.series_index)
    if not mi.is_null('timestamp'):
        create_simple_property(calibre, 'calibre:timestamp',
                               isoformat(mi.timestamp, as_utc=False))
    for x in ('author_link_map', 'user_categories'):
        val = getattr(mi, x, None)
        if val:
            create_simple_property(calibre, 'calibre:' + x, dump_dict(val))

    for x in ('title_sort', 'author_sort'):
        if not mi.is_null(x):
            create_simple_property(calibre, 'calibre:' + x, getattr(mi, x))

    all_user_metadata = mi.get_all_user_metadata(True)
    if all_user_metadata:
        create_user_metadata(calibre, all_user_metadata)
    return serialize_xmp_packet(root)
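A short usage sketch for the function above, assuming it is calibre's
calibre.ebooks.metadata.xmp.metadata_to_xmp_packet (the Metadata class is the
standard calibre one):

from calibre.ebooks.metadata.book.base import Metadata

mi = Metadata('A Sample Title', ['Author One'])
mi.tags = ['fiction', 'sample']
mi.publisher = 'Sample House'
packet = metadata_to_xmp_packet(mi)  # serialized XMP packet, ready to embed
with open('metadata.xmp', 'wb') as f:
    f.write(packet)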
Example No. 36
    def _doit(self, newdb):
        for i, x in enumerate(self.ids):
            mi = self.db.get_metadata(x, index_is_id=True, get_cover=True,
                    cover_as_data=True)
            if not gprefs['preserve_date_on_ctl']:
                mi.timestamp = now()
            self.progress(i, mi.title)
            fmts = self.db.formats(x, index_is_id=True)
            if not fmts:
                fmts = []
            else:
                fmts = fmts.split(',')
            paths = []
            for fmt in fmts:
                p = self.db.format(x, fmt, index_is_id=True,
                    as_path=True)
                if p:
                    paths.append(p)
            automerged = False
            if prefs['add_formats_to_existing']:
                identical_book_list = newdb.find_identical_books(mi)
                if identical_book_list:  # books with same author and nearly same title exist in newdb
                    self.auto_merged_ids[x] = _('%(title)s by %(author)s') % dict(
                            title=mi.title, author=mi.format_field('authors')[1])
                    automerged = True
                    seen_fmts = set()
                    for identical_book in identical_book_list:
                        ib_fmts = newdb.formats(identical_book, index_is_id=True)
                        if ib_fmts:
                            seen_fmts |= set(ib_fmts.split(','))
                        replace = gprefs['automerge'] == 'overwrite'
                        self.add_formats(identical_book, paths, newdb,
                                replace=replace)

                    if gprefs['automerge'] == 'new record':
                        incoming_fmts = {os.path.splitext(path)[-1].replace('.', '').upper()
                                         for path in paths}

                        if incoming_fmts.intersection(seen_fmts):
                            # There was at least one duplicate format
                            # so create a new record and put the
                            # incoming formats into it
                            # We should arguably put only the duplicate
                            # formats, but no real harm is done by having
                            # all formats
                            newdb.import_book(mi, paths, notify=False, import_hooks=False,
                                apply_import_tags=tweaks['add_new_book_tags_when_importing_books'],
                                preserve_uuid=False)

            if not automerged:
                new_book_id = newdb.import_book(mi, paths, notify=False, import_hooks=False,
                    apply_import_tags=tweaks['add_new_book_tags_when_importing_books'],
                    preserve_uuid=self.delete_after)
                co = self.db.conversion_options(x, 'PIPE')
                if co is not None:
                    # The conversion options belong to the record just created
                    # in newdb, not to the source library id
                    newdb.set_conversion_options(new_book_id, 'PIPE', co)
            self.processed.add(x)
            for path in paths:
                try:
                    os.remove(path)
                except:
                    pass
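The automerge decision above turns on whether any incoming file duplicates a
format already present on the matched books. The same test, extracted as a
standalone sketch (the helper name is hypothetical):

import os

def formats_overlap(paths, seen_fmts):
    # True if the format of any incoming file (e.g. 'EPUB') is already
    # present on the matched book(s); mirrors the intersection test above
    incoming_fmts = {os.path.splitext(path)[-1].replace('.', '').upper()
                     for path in paths}
    return bool(incoming_fmts & set(seen_fmts))

# formats_overlap(['/tmp/book.epub'], {'EPUB', 'MOBI'}) -> True
# formats_overlap(['/tmp/book.azw3'], {'EPUB', 'MOBI'}) -> False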
Example No. 37
    def __call__(self, query, field_iter):
        matches = set()
        if len(query) < 2:
            return matches

        if query == 'false':
            for v, book_ids in field_iter():
                if isinstance(v, (str, unicode)):
                    v = parse_date(v)
                if v is None or v <= UNDEFINED_DATE:
                    matches |= book_ids
            return matches

        if query == 'true':
            for v, book_ids in field_iter():
                if isinstance(v, (str, unicode)):
                    v = parse_date(v)
                if v is not None and v > UNDEFINED_DATE:
                    matches |= book_ids
            return matches

        relop = None
        # Try two-character operators ('>=', '<=', '!=') before their
        # one-character prefixes and stop at the first match, so that e.g.
        # '>=' is never consumed as '>' followed by '='
        for k in sorted(self.operators, key=len, reverse=True):
            if query.startswith(k):
                p, relop = self.operators[k]
                query = query[p:]
                break
        if relop is None:
            relop = self.operators['='][-1]

        if query in self.local_today:
            qd = now()
            field_count = 3
        elif query in self.local_yesterday:
            qd = now() - timedelta(1)
            field_count = 3
        elif query in self.local_thismonth:
            qd = now()
            field_count = 2
        else:
            m = self.daysago_pat.search(query)
            if m is not None:
                num = query[:-len(m.group(1))]
                try:
                    qd = now() - timedelta(int(num))
                except:
                    raise ParseException(_('Number conversion error: {0}').format(num))
                field_count = 3
            else:
                try:
                    qd = parse_date(query, as_utc=False)
                except:
                    raise ParseException(_('Date conversion error: {0}').format(query))
                if '-' in query:
                    field_count = query.count('-') + 1
                else:
                    field_count = query.count('/') + 1

        for v, book_ids in field_iter():
            if isinstance(v, (str, unicode)):
                v = parse_date(v)
            if v is not None and relop(dt_as_local(v), qd, field_count):
                matches |= book_ids

        return matches
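self.operators is not shown in this example. From the way it is unpacked, each
value is a pair (p, relop), where p is the number of characters to strip from
the front of the query and relop compares only as many date components as the
query actually specified. A hypothetical reconstruction:

from functools import partial

def _compare(test, v, qd, field_count):
    # Compare year, then month, then day, but only the first field_count
    # components; test() turns the signed difference into a boolean
    for attr, pos in (('year', 1), ('month', 2), ('day', 3)):
        if pos > field_count:
            break
        d = getattr(v, attr) - getattr(qd, attr)
        if d != 0:
            return test(d)
    return test(0)

operators = {
    '=':  (1, partial(_compare, lambda d: d == 0)),
    '>':  (1, partial(_compare, lambda d: d > 0)),
    '<':  (1, partial(_compare, lambda d: d < 0)),
    '!=': (2, partial(_compare, lambda d: d != 0)),
    '>=': (2, partial(_compare, lambda d: d >= 0)),
    '<=': (2, partial(_compare, lambda d: d <= 0)),
}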
Example No. 38
    def __call__(self, query, field_iter):
        matches = set()
        if len(query) < 2:
            return matches

        if query == 'false':
            for v, book_ids in field_iter():
                if isinstance(v, (str, unicode)):
                    v = parse_date(v)
                if v is None or v <= UNDEFINED_DATE:
                    matches |= book_ids
            return matches

        if query == 'true':
            for v, book_ids in field_iter():
                if isinstance(v, (str, unicode)):
                    v = parse_date(v)
                if v is not None and v > UNDEFINED_DATE:
                    matches |= book_ids
            return matches

        # Try two-character operators before their one-character prefixes,
        # so that e.g. '>=' is never consumed as '>' followed by '='
        for k in sorted(self.operators, key=len, reverse=True):
            if query.startswith(k):
                relop = self.operators[k]
                query = query[len(k):]
                break
        else:
            relop = self.operators['=']

        if query in self.local_today:
            qd = now()
            field_count = 3
        elif query in self.local_yesterday:
            qd = now() - timedelta(1)
            field_count = 3
        elif query in self.local_thismonth:
            qd = now()
            field_count = 2
        else:
            m = self.daysago_pat.search(query)
            if m is not None:
                num = query[:-len(m.group(1))]
                try:
                    qd = now() - timedelta(int(num))
                except:
                    raise ParseException(
                        _('Number conversion error: {0}').format(num))
                field_count = 3
            else:
                try:
                    qd = parse_date(query, as_utc=False)
                except:
                    raise ParseException(
                        _('Date conversion error: {0}').format(query))
                if '-' in query:
                    field_count = query.count('-') + 1
                else:
                    field_count = query.count('/') + 1

        for v, book_ids in field_iter():
            if isinstance(v, (str, unicode)):
                v = parse_date(v)
            if v is not None and relop(dt_as_local(v), qd, field_count):
                matches |= book_ids

        return matches
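This variant stores the comparison callables directly in self.operators (no
strip count), but is otherwise the same search as Example No. 37. The
daysago_pat regular expression, also not shown, presumably captures the
(possibly localized) 'daysago' suffix so the leading number can be sliced off:

import re

# Hypothetical shape of daysago_pat: group(1) is the suffix that is removed
# to leave the bare number of days
daysago_pat = re.compile(r'([a-z_]*daysago)$')

query = '10daysago'
m = daysago_pat.search(query)
if m is not None:
    num = query[:-len(m.group(1))]  # -> '10'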
Example No. 39
 def set_to_clear(self):
     # Set a real value first so that the subsequent change to
     # UNDEFINED_QDATETIME is always registered as a change (presumably to
     # guarantee the widget's changed signal fires even when the field was
     # already undefined)
     self.setDateTime(now())
     self.setDateTime(UNDEFINED_QDATETIME)
Example No. 40
    def _get_metadata(self, book_id, get_user_categories=True): # {{{
        mi = Metadata(None, template_cache=self.formatter_template_cache)
        author_ids = self._field_ids_for('authors', book_id)
        aut_list = [self._author_data(i) for i in author_ids]
        aum = []
        aus = {}
        aul = {}
        for rec in aut_list:
            aut = rec['name']
            aum.append(aut)
            aus[aut] = rec['sort']
            aul[aut] = rec['link']
        mi.title       = self._field_for('title', book_id,
                default_value=_('Unknown'))
        mi.authors     = aum
        mi.author_sort = self._field_for('author_sort', book_id,
                default_value=_('Unknown'))
        mi.author_sort_map = aus
        mi.author_link_map = aul
        mi.comments    = self._field_for('comments', book_id)
        mi.publisher   = self._field_for('publisher', book_id)
        n = now()
        mi.timestamp   = self._field_for('timestamp', book_id, default_value=n)
        mi.pubdate     = self._field_for('pubdate', book_id, default_value=n)
        mi.uuid        = self._field_for('uuid', book_id,
                default_value='dummy')
        mi.title_sort  = self._field_for('sort', book_id,
                default_value=_('Unknown'))
        mi.book_size   = self._field_for('size', book_id, default_value=0)
        mi.ondevice_col = self._field_for('ondevice', book_id, default_value='')
        mi.last_modified = self._field_for('last_modified', book_id,
                default_value=n)
        formats = self._field_for('formats', book_id)
        mi.format_metadata = {}
        mi.languages = list(self._field_for('languages', book_id))
        if not formats:
            good_formats = None
        else:
            mi.format_metadata = FormatMetadata(self, book_id, formats)
            good_formats = FormatsList(formats, mi.format_metadata)
        mi.formats = good_formats
        mi.has_cover = _('Yes') if self._field_for('cover', book_id,
                default_value=False) else ''
        mi.tags = list(self._field_for('tags', book_id, default_value=()))
        mi.series = self._field_for('series', book_id)
        if mi.series:
            mi.series_index = self._field_for('series_index', book_id,
                    default_value=1.0)
        mi.rating = self._field_for('rating', book_id)
        mi.set_identifiers(self._field_for('identifiers', book_id,
            default_value={}))
        mi.application_id = book_id
        mi.id = book_id
        composites = []
        for key, meta in self.field_metadata.custom_iteritems():
            mi.set_user_metadata(key, meta)
            if meta['datatype'] == 'composite':
                composites.append(key)
            else:
                val = self._field_for(key, book_id)
                if isinstance(val, tuple):
                    val = list(val)
                extra = self._field_for(key+'_index', book_id)
                mi.set(key, val=val, extra=extra)
        for key in composites:
            mi.set(key, val=self._composite_for(key, book_id, mi))

        user_cat_vals = {}
        if get_user_categories:
            user_cats = self.backend.prefs['user_categories']
            for ucat in user_cats:
                res = []
                for name,cat,ign in user_cats[ucat]:
                    v = mi.get(cat, None)
                    if isinstance(v, list):
                        if name in v:
                            res.append([name,cat])
                    elif name == v:
                        res.append([name,cat])
                user_cat_vals[ucat] = res
        mi.user_categories = user_cat_vals

        return mi
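_get_metadata is the private worker; with the new database API, callers
presumably reach it through the public get_metadata wrapper, along these lines
(a usage sketch, details vary by version):

def describe(db, book_id):
    # db is assumed to be a calibre new-API Cache instance
    mi = db.get_metadata(book_id)  # public wrapper around _get_metadata
    return '%s by %s' % (mi.title, mi.format_field('authors')[1])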
Example No. 41
def metadata_to_xmp_packet(mi):
    A = ElementMaker(namespace=NS_MAP['x'], nsmap=nsmap('x'))
    R = ElementMaker(namespace=NS_MAP['rdf'], nsmap=nsmap('rdf'))
    root = A.xmpmeta(R.RDF)
    rdf = root[0]
    dc = rdf.makeelement(expand('rdf:Description'), nsmap=nsmap('dc'))
    dc.set(expand('rdf:about'), '')
    rdf.append(dc)
    for prop, tag in {'title':'dc:title', 'comments':'dc:description'}.iteritems():
        val = mi.get(prop) or ''
        create_alt_property(dc, tag, val)
    for prop, (tag, ordered) in {
        'authors':('dc:creator', True), 'tags':('dc:subject', False), 'publisher':('dc:publisher', False),
    }.iteritems():
        val = mi.get(prop) or ()
        if isinstance(val, basestring):
            val = [val]
        create_sequence_property(dc, tag, val, ordered)
    if not mi.is_null('pubdate'):
        create_sequence_property(dc, 'dc:date', [isoformat(mi.pubdate, as_utc=False)])  # Adobe spec recommends local time
    if not mi.is_null('languages'):
        langs = filter(None, map(lambda x:lang_as_iso639_1(x) or canonicalize_lang(x), mi.languages))
        if langs:
            create_sequence_property(dc, 'dc:language', langs, ordered=False)

    xmp = rdf.makeelement(expand('rdf:Description'), nsmap=nsmap('xmp', 'xmpidq'))
    xmp.set(expand('rdf:about'), '')
    rdf.append(xmp)
    extra_ids = {}
    for x in ('prism', 'pdfx'):
        p = extra_ids[x] = rdf.makeelement(expand('rdf:Description'), nsmap=nsmap(x))
        p.set(expand('rdf:about'), '')
        rdf.append(p)

    identifiers = mi.get_identifiers()
    if identifiers:
        create_identifiers(xmp, identifiers)
        for scheme, val in identifiers.iteritems():
            if scheme in {'isbn', 'doi'}:
                for prefix, parent in extra_ids.iteritems():
                    ie = parent.makeelement(expand('%s:%s'%(prefix, scheme)))
                    ie.text = val
                    parent.append(ie)

    d = xmp.makeelement(expand('xmp:MetadataDate'))
    d.text = isoformat(now(), as_utc=False)
    xmp.append(d)

    calibre = rdf.makeelement(expand('rdf:Description'), nsmap=nsmap('calibre', 'calibreSI', 'calibreCC'))
    calibre.set(expand('rdf:about'), '')
    rdf.append(calibre)
    if not mi.is_null('rating'):
        try:
            r = float(mi.rating)
        except (TypeError, ValueError):
            pass
        else:
            create_simple_property(calibre, 'calibre:rating', '%g' % r)
    if not mi.is_null('series'):
        create_series(calibre, mi.series, mi.series_index)
    if not mi.is_null('timestamp'):
        create_simple_property(calibre, 'calibre:timestamp', isoformat(mi.timestamp, as_utc=False))
    for x in ('author_link_map', 'user_categories'):
        val = getattr(mi, x, None)
        if val:
            create_simple_property(calibre, 'calibre:'+x, dump_dict(val))

    for x in ('title_sort', 'author_sort'):
        if not mi.is_null(x):
            create_simple_property(calibre, 'calibre:'+x, getattr(mi, x))

    all_user_metadata = mi.get_all_user_metadata(True)
    if all_user_metadata:
        create_user_metadata(calibre, all_user_metadata)
    return serialize_xmp_packet(root)
Example No. 42
 def evaluate(self, formatter, kwargs, mi, locals):
     return format_date(now(), "iso")
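This looks like the body of a calibre template formatter function that returns
the current time in ISO format. A sketch of the class it plausibly belongs to
(the class and template-function names here are illustrative, not calibre's):

from calibre.utils.date import format_date, now
from calibre.utils.formatter_functions import BuiltinFormatterFunction

class BuiltinCurrentTime(BuiltinFormatterFunction):
    name = 'current_time'  # illustrative name
    arg_count = 0

    def evaluate(self, formatter, kwargs, mi, locals):
        # Current local time, rendered in ISO format for use in templates
        return format_date(now(), 'iso')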
Example No. 43
 def setEditorData(self, editor, index):
     val = index.model().data(index, Qt.DisplayRole).toDateTime()
     if val is None or val == UNDEFINED_QDATETIME:
         val = now()
     editor.setDateTime(val)
Example No. 44
def meta_info_to_oeb_metadata(mi, m, log, override_input_metadata=False):
    from calibre.ebooks.oeb.base import OPF
    if not mi.is_null('title'):
        m.clear('title')
        m.add('title', mi.title)
    if mi.title_sort:
        if not m.title:
            m.add('title', mi.title_sort)
        m.clear('title_sort')
        m.add('title_sort', mi.title_sort)
    if not mi.is_null('authors'):
        m.filter('creator', lambda x : x.role.lower() in ['aut', ''])
        for a in mi.authors:
            attrib = {'role':'aut'}
            if mi.author_sort:
                attrib[OPF('file-as')] = mi.author_sort
            m.add('creator', a, attrib=attrib)
    if not mi.is_null('book_producer'):
        m.filter('contributor', lambda x : x.role.lower() == 'bkp')
        m.add('contributor', mi.book_producer, role='bkp')
    elif override_input_metadata:
        m.filter('contributor', lambda x : x.role.lower() == 'bkp')
    if not mi.is_null('comments'):
        m.clear('description')
        m.add('description', mi.comments)
    elif override_input_metadata:
        m.clear('description')
    if not mi.is_null('publisher'):
        m.clear('publisher')
        m.add('publisher', mi.publisher)
    elif override_input_metadata:
        m.clear('publisher')
    if not mi.is_null('series'):
        m.clear('series')
        m.add('series', mi.series)
    elif override_input_metadata:
        m.clear('series')
    identifiers = mi.get_identifiers()
    set_isbn = False
    for typ, val in identifiers.iteritems():
        has = False
        if typ.lower() == 'isbn':
            set_isbn = True
        for x in m.identifier:
            if x.scheme.lower() == typ.lower():
                x.content = val
                has = True
        if not has:
            m.add('identifier', val, scheme=typ.upper())
    if override_input_metadata and not set_isbn:
        m.filter('identifier', lambda x: x.scheme.lower() == 'isbn')
    if not mi.is_null('languages'):
        m.clear('language')
        for lang in mi.languages:
            if lang and lang.lower() not in ('und', ''):
                m.add('language', lang)
    if not mi.is_null('series_index'):
        m.clear('series_index')
        m.add('series_index', mi.format_series_index())
    elif override_input_metadata:
        m.clear('series_index')
    if not mi.is_null('rating'):
        m.clear('rating')
        m.add('rating', '%.2f'%mi.rating)
    elif override_input_metadata:
        m.clear('rating')
    if not mi.is_null('tags'):
        m.clear('subject')
        for t in mi.tags:
            m.add('subject', t)
    elif override_input_metadata:
        m.clear('subject')
    if not mi.is_null('pubdate'):
        m.clear('date')
        m.add('date', isoformat(mi.pubdate))
    if not mi.is_null('timestamp'):
        m.clear('timestamp')
        m.add('timestamp', isoformat(mi.timestamp))
    if not mi.is_null('rights'):
        m.clear('rights')
        m.add('rights', mi.rights)
    if not mi.is_null('publication_type'):
        m.clear('publication_type')
        m.add('publication_type', mi.publication_type)

    if not m.timestamp:
        m.add('timestamp', isoformat(now()))
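A hedged usage sketch for the function above: during conversion it copies
user-level metadata onto the in-memory OEB book, where the m argument is the
book's OPF metadata object:

def apply_metadata(oeb, mi, log):
    # Overwrite the OEB book's metadata with the values set in mi; fields
    # that are unset in mi are left alone unless override_input_metadata
    # forces them to be cleared
    meta_info_to_oeb_metadata(mi, oeb.metadata, log,
                              override_input_metadata=True)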
Example No. 46
 def set_to_today(self):
     self.setDateTime(now())

 def setEditorData(self, editor, index):
     val = index.model().data(index, Qt.DisplayRole).toDateTime()
     if val is None or val == UNDEFINED_QDATETIME:
         val = now()
     editor.setDateTime(val)
Example No. 48
    def get_dates_matches(self, location, query, candidates):
        matches = set([])
        if len(query) < 2:
            return matches

        if location == 'date':
            location = 'timestamp'
        loc = self.field_metadata[location]['rec_index']

        if query == 'false':
            for id_ in candidates:
                item = self._data[id_]
                if item is None:
                    continue
                v = item[loc]
                if isinstance(v, (str, unicode)):
                    v = parse_date(v)
                if v is None or v <= UNDEFINED_DATE:
                    matches.add(item[0])
            return matches
        if query == 'true':
            for id_ in candidates:
                item = self._data[id_]
                if item is None:
                    continue
                v = item[loc]
                if isinstance(v, (str, unicode)):
                    v = parse_date(v)
                if v is not None and v > UNDEFINED_DATE:
                    matches.add(item[0])
            return matches

        relop = None
        # Try two-character operators before their one-character prefixes
        # and stop at the first match
        for k in sorted(self.date_search_relops, key=len, reverse=True):
            if query.startswith(k):
                (p, relop) = self.date_search_relops[k]
                query = query[p:]
                break
        if relop is None:
            (p, relop) = self.date_search_relops['=']

        if query in self.local_today:
            qd = now()
            field_count = 3
        elif query in self.local_yesterday:
            qd = now() - timedelta(1)
            field_count = 3
        elif query in self.local_thismonth:
            qd = now()
            field_count = 2
        elif query.endswith(self.local_daysago) or query.endswith(self.untrans_daysago):
            num = query[0:-(self.local_daysago_len if query.endswith(self.local_daysago) else self.untrans_daysago_len)]
            try:
                qd = now() - timedelta(int(num))
            except:
                raise ParseException(_('Number conversion error: {0}').format(num))
            field_count = 3
        else:
            try:
                qd = parse_date(query, as_utc=False)
            except:
                raise ParseException(_('Date conversion error: {0}').format(query))
            if '-' in query:
                field_count = query.count('-') + 1
            else:
                field_count = query.count('/') + 1
        for id_ in candidates:
            item = self._data[id_]
            if item is None or item[loc] is None:
                continue
            v = item[loc]
            if isinstance(v, (str, unicode)):
                v = parse_date(v)
            if relop(v, qd, field_count):
                matches.add(item[0])
        return matches
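This is the older, in-memory cache version of the date search in Examples
No. 37 and No. 38; note that here the stored value is compared directly,
without the dt_as_local() conversion used there. The field_count derivation is
the same in all three versions: it simply counts how many date components the
user typed, for example:

query = '2011-01'
field_count = query.count('-') + 1  # -> 2: compare only year and month

query = '2011/01/07'
field_count = query.count('/') + 1  # -> 3: compare year, month and day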
Example No. 49
    def __init__(self, parent, db):
        QDialog.__init__(self, parent)
        self.setupUi(self)
        self.um_label.setText(self.um_label.text() % localize_user_manual_link(
            'https://manual.calibre-ebook.com/gui.html#the-search-interface'))
        for val, text in [(0, '')] + [
            (i, strftime('%B',
                         date(2010, i, 1).timetuple())) for i in xrange(1, 13)
        ]:
            self.date_month.addItem(text, val)
        for val, text in [('today', _('Today')), ('yesterday', _('Yesterday')),
                          ('thismonth', _('This month'))]:
            self.date_human.addItem(text, val)
        self.date_year.setValue(now().year)
        self.date_day.setSpecialValueText(u' \xa0')
        vals = [((v['search_terms'] or [k])[0], v['name'] or k)
                for k, v in db.field_metadata.iteritems()
                if v.get('datatype', None) == 'datetime']
        for k, v in sorted(vals, key=lambda (k, v): sort_key(v)):
            self.date_field.addItem(v, k)

        self.date_year.valueChanged.connect(
            lambda: self.sel_date.setChecked(True))
        self.date_month.currentIndexChanged.connect(
            lambda: self.sel_date.setChecked(True))
        self.date_day.valueChanged.connect(
            lambda: self.sel_date.setChecked(True))
        self.date_daysago.valueChanged.connect(
            lambda: self.sel_daysago.setChecked(True))
        self.date_ago_type.addItems(
            [_('days'), _('weeks'),
             _('months'), _('years')])
        self.date_human.currentIndexChanged.connect(
            lambda: self.sel_human.setChecked(True))
        init_dateop(self.dateop_date)
        self.sel_date.setChecked(True)
        self.mc = ''
        searchables = sorted(db.field_metadata.searchable_fields(),
                             key=lambda x: sort_key(x if x[0] != '#' else x[1:]))
        self.general_combo.addItems(searchables)

        all_authors = db.all_authors()
        all_authors.sort(key=lambda x: sort_key(x[1]))
        self.authors_box.setEditText('')
        self.authors_box.set_separator('&')
        self.authors_box.set_space_before_sep(True)
        self.authors_box.set_add_separator(
            tweaks['authors_completer_append_separator'])
        self.authors_box.update_items_cache(db.all_author_names())

        all_series = db.all_series()
        all_series.sort(key=lambda x: sort_key(x[1]))
        self.series_box.set_separator(None)
        self.series_box.update_items_cache([x[1] for x in all_series])
        self.series_box.show_initial_value('')

        all_tags = db.all_tags()
        self.tags_box.update_items_cache(all_tags)

        self.box_last_values = copy.deepcopy(box_values)
        if self.box_last_values:
            for k, v in self.box_last_values.items():
                if k == 'general_index':
                    continue
                getattr(self, k).setText(v)
            self.general_combo.setCurrentIndex(
                self.general_combo.findText(
                    self.box_last_values['general_index']))

        self.clear_button.clicked.connect(self.clear_button_pushed)

        current_tab = gprefs.get('advanced search dialog current tab', 0)
        self.tabWidget.setCurrentIndex(current_tab)
        if current_tab == 1:
            self.matchkind.setCurrentIndex(last_matchkind)

        self.tabWidget.currentChanged[int].connect(self.tab_changed)
        self.tab_changed(current_tab)
        self.resize(self.sizeHint())
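init_dateop is not shown in this example; presumably it populates the operator
combo box with the relational operators accepted by the date searches in
Examples No. 37 and No. 38. A hypothetical sketch:

def init_dateop(combo):
    # Operator tokens match the prefixes recognised by the date search
    for op, text in [('=', _('equal to')), ('<', _('before')),
                     ('>', _('after')), ('<=', _('on or before')),
                     ('>=', _('on or after'))]:
        combo.addItem(text, op)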