Example #1
 def cell_changed(self, row, col):
     id_ = int(self.table.item(row, 0).data(Qt.ItemDataRole.UserRole))
     if col == 0:
         item = self.table.item(row, 0)
         aut = str(item.text()).strip()
         aut_list = string_to_authors(aut)
         if len(aut_list) != 1:
             error_dialog(
                 self.parent(), _('Invalid author name'),
                 _('You cannot change an author to multiple authors.')
             ).exec()
             aut = ' % '.join(aut_list)
             self.table.item(row, 0).setText(aut)
         item.set_sort_key()
         self.authors[id_]['name'] = aut
         self.set_icon(item, id_)
         c = self.table.item(row, 1)
         txt = author_to_author_sort(aut)
         self.authors[id_]['sort'] = txt
         c.setText(txt)  # This triggers another cellChanged event
         item = c
     else:
         item = self.table.item(row, col)
         item.set_sort_key()
         self.set_icon(item, id_)
         self.authors[id_][self.get_column_name(col)] = str(item.text())
     self.table.setCurrentItem(item)
     self.table.scrollToItem(item)
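For reference, the core transformation used by cell_changed can be exercised on its own; a minimal, hedged sketch (sample names are illustrative), assuming calibre is importable:

    from calibre.ebooks.metadata import author_to_author_sort, string_to_authors

    # string_to_authors() splits a display string on '&' into individual names;
    # author_to_author_sort() builds the 'Last, First' style sort value from a name.
    for name in string_to_authors('Kovid Goyal & Charles Haley'):
        print(name, '->', author_to_author_sort(name))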
Example #2
    def __init__(self, prefix, lpath, title=None, authors=None, mime=None, date=None, ContentType=None,
                 thumbnail_name=None, size=None, other=None):
        from calibre.utils.date import parse_date
#         debug_print('Book::__init__ - title=', title)
        show_debug = title is not None and title.lower().find("xxxxx") >= 0
        if other is not None:
            other.title = title
            other.published_date = date
        if show_debug:
            debug_print("Book::__init__ - title=", title, 'authors=', authors)
            debug_print("Book::__init__ - other=", other)
        super(Book, self).__init__(prefix, lpath, size, other)

        if title is not None and len(title) > 0:
            self.title = title

        if authors is not None and len(authors) > 0:
            self.authors_from_string(authors)
            if self.author_sort is None or self.author_sort == "Unknown":
                self.author_sort = author_to_author_sort(authors)

        self.mime = mime

        self.size = size  # will be set later if None

        if ContentType == '6' and date is not None:
            try:
                self.datetime = time.strptime(date, "%Y-%m-%dT%H:%M:%S.%f")
            except:
                try:
                    self.datetime = time.strptime(date.split('+')[0], "%Y-%m-%dT%H:%M:%S")
                except:
                    try:
                        self.datetime = time.strptime(date.split('+')[0], "%Y-%m-%d")
                    except:
                        try:
                            self.datetime = parse_date(date,
                                    assume_utc=True).timetuple()
                        except:
                            try:
                                self.datetime = time.gmtime(os.path.getctime(self.path))
                            except:
                                self.datetime = time.gmtime()

        self.kobo_metadata = Metadata(title, self.authors)
        self.contentID          = None
        self.current_shelves    = []
        self.kobo_collections   = []
        self.can_put_on_shelves = True
        self.kobo_series        = None
        self.kobo_series_number = None  # Kobo stores the series number as string. And it can have a leading "#".
        self.kobo_series_id     = None
        self.kobo_subtitle      = None

        if thumbnail_name is not None:
            self.thumbnail = ImageWrapper(thumbnail_name)

        if show_debug:
            debug_print("Book::__init__ end - self=", self)
            debug_print("Book::__init__ end - title=", title, 'authors=', authors)
Example #3
    def __init__(self,
                 title,
                 rm_id,
                 authors=[],
                 size=0,
                 tags=[],
                 other=None,
                 datetime=time.gmtime(),
                 collections=[]):
        super().__init__(title, authors=authors, other=other)
        self.rm_id = rm_id
        self.path = rm_id
        self.datetime = datetime
        self.author_sort = author_to_author_sort(self.authors)

        if not self.size:
            if size:
                self.size = size
            else:
                self.size = 0

        if tags:
            self.tags = tags
        if authors:
            self.authors = authors
        if collections:
            self.device_collections = collections
Example #4
    def _get_metadata(self, book, pdf_stats):
        """
        Return a populated Book object with available metadata
        """
        from calibre.ebooks.metadata import author_to_author_sort, authors_to_string, title_sort
        from calibre.ebooks.metadata.pdf import get_metadata

        self._log_location(repr(book))

        pdf_path = os.path.join(self.temp_dir, pdf_stats["path"])
        with open(pdf_path, "rb") as f:
            stream = cStringIO.StringIO(f.read())

        mi = get_metadata(stream, cover=False)
        this_book = Book(mi.title, " & ".join(mi.authors))
        this_book.author_sort = author_to_author_sort(mi.authors[0])
        this_book.datetime = datetime.fromtimestamp(int(pdf_stats["stats"]["st_birthtime"])).timetuple()
        this_book.dateadded = int(pdf_stats["stats"]["st_birthtime"])
        this_book.path = book
        this_book.size = int(pdf_stats["stats"]["st_size"])
        this_book.thumbnail = self._get_goodreader_thumb(book)
        if this_book.thumbnail:
            this_book.thumb_data = base64.b64encode(this_book.thumbnail)
        else:
            this_book.thumb_data = None
        this_book.title_sort = title_sort(mi.title)
        this_book.uuid = None

        return this_book
Example #5
    def __init__(self, prefix, lpath, title=None, authors=None, date=None, finished=False, current_page=None, size=None, other=None):

        Book.__init__(self, prefix, lpath, size, other)

        self.current_page = current_page
        self.finished = finished
        self.bookeen_id = None

        self.size = size
        if title is not None and len(title) > 0:
            self.title = title
        else:
            self.title = "(No title)"

        if authors is not None and len(authors) > 0:
            self.authors_from_string(authors)
            if self.author_sort is None or self.author_sort == "Unknown":
                self.author_sort = author_to_author_sort(authors)

        self.date = date
        self._new_book = False
        self.device_collections = []
        self.path = os.path.join(prefix, lpath)
        if os.sep == '\\':
            self.path = self.path.replace('/', '\\')
            self.lpath = lpath.replace('\\', '/')
        else:
            self.lpath = lpath
Example #6
    def _get_metadata(self, book, pdf_stats):
        '''
        Return a populated Book object with available metadata
        '''
        from calibre.ebooks.metadata import author_to_author_sort, authors_to_string, title_sort
        from calibre.ebooks.metadata.pdf import get_metadata
        self._log_location(repr(book))

        pdf_path = os.path.join(self.temp_dir, pdf_stats['path'])
        with open(pdf_path, 'rb') as f:
            stream = cStringIO.StringIO(f.read())

        mi = get_metadata(stream, cover=False)
        this_book = Book(mi.title, ' & '.join(mi.authors))
        this_book.author_sort = author_to_author_sort(mi.authors[0])
        this_book.datetime = datetime.fromtimestamp(int(pdf_stats['stats']['st_birthtime'])).timetuple()
        this_book.dateadded = int(pdf_stats['stats']['st_birthtime'])
        this_book.path = book
        this_book.size = int(pdf_stats['stats']['st_size'])
        this_book.thumbnail = self._get_goodreader_thumb(book)
        if this_book.thumbnail:
            this_book.thumb_data = base64.b64encode(this_book.thumbnail)
        else:
            this_book.thumb_data = None
        this_book.title_sort = title_sort(mi.title)
        this_book.uuid = None

        return this_book
Example #7
 def read_id_maps(self, db):
     self.alink_map = {}
     self.asort_map = {}
     for row in db.conn.execute("SELECT id, name, sort, link FROM authors"):
         self.id_map[row[0]] = self.unserialize(row[1])
         self.asort_map[row[0]] = row[2] if row[2] else author_to_author_sort(row[1])
         self.alink_map[row[0]] = row[3]
Example #8
	def add(self, book_id,  mi, formats, one_liner=''):
		self.one_liner = one_liner
		self.title = mi.title
		# authors
		authors = []
		for x in mi.authors:
			authors.append(author_to_author_sort(x))
		self.authors = '|'.join(authors)
		# issues
		issues = []
		um = mi.get_all_user_metadata(False)
		if '#issue' in um:
			issue_strs = um['#issue']['#value#']
			for issue_str in issue_strs:
				issue_id = issue_str.rpartition('(')[-1].partition(')')[0]
				issues.append(issue_id)
		self.issues = '|'.join(issues)
		# etc
		self.description = mi.comments
		# opf
		self.opf = metadata_to_opf(mi)
		# file to upload
		for format, file_loc in formats.items():
			self.file = file_loc
		# metadata
		self.mi = mi
		self.book_id = book_id
		self._start()
Example #9
    def update_author(self):
        ''' Gets all metadata for an author from Casanova '''
        from calibre.ebooks.metadata import author_to_author_sort
        row = self.get_selected_row()
        authors = []
        
        if self.gui.current_view() is self.gui.library_view:
            a = self.gui.library_view.model().authors(row)
            authors = a.split(',')
        else:
            mi = self.gui.current_view().model().get_book_display_info(row)
            authors = mi.authors
        
        corrected_authors = {}
        for x in authors:
            corrected_authors[x] = author_to_author_sort(x)
        
        result = {'added':0, 'updated':0}
        if len(corrected_authors)>1:
            choose_dialog = ChooseAuthorsToUpdateDialog(self.gui, self.mm, corrected_authors)
            choose_dialog.exec_()
            if choose_dialog.result() != choose_dialog.Accepted:
                return
            if choose_dialog.selected_authors is None:
                return error_dialog(self.gui, 'Unable to Sync',
                                    'Unable to retrieve updates to selected issues.', show=True)
            # @todo: put this in a Dispatcher job
            selected_authors_string = '|'.join(choose_dialog.selected_authors)
            result = self.mm.author_sync(selected_authors_string)
        if len(corrected_authors) == 1:
            for k,v in corrected_authors.items():
                result = self.mm.author_sync(v) 

        return info_dialog(self.gui, 'Metadata retrieved',
                                unicode(result['added']) + ' added and ' + unicode(result['updated']) + ' updated', show=True)               
Example #10
def get_db_id(val,
              db,
              m,
              table,
              kmap,
              rid_map,
              allow_case_change,
              case_changes,
              val_map,
              is_authors=False):
    ''' Get the db id for the value val. If val does not exist in the db it is
    inserted into the db. '''
    kval = kmap(val)
    item_id = rid_map.get(kval, None)
    if item_id is None:
        if is_authors:
            aus = author_to_author_sort(val)
            db.execute('INSERT INTO authors(name,sort) VALUES (?,?)',
                       (val.replace(',', '|'), aus))
        else:
            db.execute(
                'INSERT INTO %s(%s) VALUES (?)' % (m['table'], m['column']),
                (val, ))
        item_id = rid_map[kval] = db.last_insert_rowid()
        table.id_map[item_id] = val
        table.col_book_map[item_id] = set()
        if is_authors:
            table.asort_map[item_id] = aus
            table.alink_map[item_id] = ''
    elif allow_case_change and val != table.id_map[item_id]:
        case_changes[item_id] = val
    val_map[val] = item_id
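A self-contained sketch of the same insert-or-reuse pattern, using stdlib sqlite3 in place of calibre's db wrapper (calibre itself is still assumed importable for author_to_author_sort); the schema, the comma-to-'|' substitution and the computed sort mirror the INSERT above, the rest is illustrative:

    import sqlite3

    from calibre.ebooks.metadata import author_to_author_sort

    con = sqlite3.connect(':memory:')
    con.execute('CREATE TABLE authors(id INTEGER PRIMARY KEY, name TEXT, sort TEXT)')

    def author_id(con, name, rid_map):
        # Reuse an existing row when possible; otherwise insert the name (commas
        # stored as '|', as in the authors INSERT above) with its computed sort.
        key = name.lower()
        item_id = rid_map.get(key)
        if item_id is None:
            cur = con.execute('INSERT INTO authors(name,sort) VALUES (?,?)',
                              (name.replace(',', '|'), author_to_author_sort(name)))
            item_id = rid_map[key] = cur.lastrowid
        return item_id

    rid_map = {}
    print(author_id(con, 'Kovid Goyal', rid_map))  # inserts a new row
    print(author_id(con, 'Kovid Goyal', rid_map))  # reuses the cached id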
Example #11
    def __init__(self, prefix, lpath, title=None, authors=None, mime=None, date=None, ContentType=None,
                 thumbnail_name=None, size=None, other=None):
        from calibre.utils.date import parse_date
#         debug_print('Book::__init__ - title=', title)
        show_debug = title is not None and title.lower().find("xxxxx") >= 0
        if other is not None:
            other.title = title
            other.published_date = date
        if show_debug:
            debug_print("Book::__init__ - title=", title, 'authors=', authors)
            debug_print("Book::__init__ - other=", other)
        super(Book, self).__init__(prefix, lpath, size, other)

        if title is not None and len(title) > 0:
            self.title = title

        if authors is not None and len(authors) > 0:
            self.authors_from_string(authors)
            if self.author_sort is None or self.author_sort == "Unknown":
                self.author_sort = author_to_author_sort(authors)

        self.mime = mime

        self.size = size  # will be set later if None

        if ContentType == '6' and date is not None:
            try:
                self.datetime = time.strptime(date, "%Y-%m-%dT%H:%M:%S.%f")
            except:
                try:
                    self.datetime = time.strptime(date.split('+')[0], "%Y-%m-%dT%H:%M:%S")
                except:
                    try:
                        self.datetime = time.strptime(date.split('+')[0], "%Y-%m-%d")
                    except:
                        try:
                            self.datetime = parse_date(date,
                                    assume_utc=True).timetuple()
                        except:
                            try:
                                self.datetime = time.gmtime(os.path.getctime(self.path))
                            except:
                                self.datetime = time.gmtime()

        self.kobo_metadata = Metadata(title, self.authors)
        self.contentID          = None
        self.current_shelves    = []
        self.kobo_collections   = []
        self.can_put_on_shelves = True
        self.kobo_series        = None
        self.kobo_series_number = None  # Kobo stores the series number as string. And it can have a leading "#".
        self.kobo_subtitle      = None

        if thumbnail_name is not None:
            self.thumbnail = ImageWrapper(thumbnail_name)

        if show_debug:
            debug_print("Book::__init__ end - self=", self)
            debug_print("Book::__init__ end - title=", title, 'authors=', authors)
Example #12
 def read_id_maps(self, db):
     self.alink_map = {}
     self.asort_map = {}
     for row in db.conn.execute('SELECT id, name, sort, link FROM authors'):
         self.id_map[row[0]] = row[1]
         self.asort_map[row[0]] = (row[2] if row[2] else
                                   author_to_author_sort(row[1]))
         self.alink_map[row[0]] = row[3]
Example #13
    def __init__(self, name, table, bools_are_tristate,
                 get_template_functions):
        self.name, self.table = name, table
        dt = self.metadata['datatype']
        self.has_text_data = dt in {
            'text', 'comments', 'series', 'enumeration'
        }
        self.table_type = self.table.table_type
        self._sort_key = (sort_key if dt in ('text', 'series',
                                             'enumeration') else IDENTITY)

        # This will be compared to the output of sort_key() which is a
        # bytestring, therefore it is safer to have it be a bytestring.
        # Coercing an empty bytestring to unicode will never fail, but the
        # output of sort_key cannot be coerced to unicode
        self._default_sort_key = b''

        if dt in {'int', 'float', 'rating'}:
            self._default_sort_key = 0
            self._sort_key = numeric_sort_key
        elif dt == 'bool':
            self._default_sort_key = None
            self._sort_key = bool_sort_key(bools_are_tristate)
        elif dt == 'datetime':
            self._default_sort_key = UNDEFINED_DATE
            if tweaks['sort_dates_using_visible_fields']:
                fmt = None
                if name in {'timestamp', 'pubdate', 'last_modified'}:
                    fmt = tweaks['gui_%s_display_format' % name]
                elif self.metadata['is_custom']:
                    fmt = self.metadata.get('display',
                                            {}).get('date_format', None)
                self._sort_key = partial(clean_date_for_sort, fmt=fmt)
        elif dt == 'comments' or name == 'identifiers':
            self._default_sort_key = ''

        if self.name == 'languages':
            self._sort_key = lambda x: sort_key(calibre_langcode_to_name(x))
        self.is_multiple = (bool(self.metadata['is_multiple'])
                            or self.name == 'formats')
        self.sort_sort_key = True
        if self.is_multiple and '&' in self.metadata['is_multiple'][
                'list_to_ui']:
            self._sort_key = lambda x: sort_key(author_to_author_sort(x))
            self.sort_sort_key = False
        self.default_value = {} if name == 'identifiers' else (
        ) if self.is_multiple else None
        self.category_formatter = unicode_type
        if dt == 'rating':
            if self.metadata['display'].get('allow_half_stars', False):
                self.category_formatter = lambda x: rating_to_stars(x, True)
            else:
                self.category_formatter = rating_to_stars
        elif name == 'languages':
            self.category_formatter = calibre_langcode_to_name
        self.writer = Writer(self)
        self.series_field = None
        self.get_template_functions = get_template_functions
Example #14
    def sync_booklists(self, booklists, end_session=True):
        '''
        Update metadata on device.
        @param booklists: A tuple containing the result of calls to
                                (L{books}(oncard=None), L{books}(oncard='carda'),
                                L{books}(oncard='cardb')).

        prefs['manage_device_metadata']: ['manual'|'on_send'|'on_connect']

        booklist will reflect library metadata only when
        manage_device_metadata=='on_connect', otherwise booklist metadata comes from
        device
        '''
        from calibre.ebooks.metadata import author_to_author_sort, authors_to_string, title_sort

        self._log_location()

        for booklist in booklists:
            if not booklist:
                continue

            # Update db title/author from booklist title/author
            con = sqlite3.connect(self.local_metadata)
            with con:
                con.row_factory = sqlite3.Row
                cur = con.cursor()
                for book in booklist:
                    cur.execute('''SELECT
                                    authors,
                                    filename,
                                    title
                                   FROM metadata
                                   WHERE filename = {0}
                                '''.format(
                        self._quote_sqlite_identifier(book.path)))
                    cached_book = cur.fetchone()
                    if cached_book:
                        if (book.title != cached_book[b'title'] or
                            book.authors != [cached_book[b'authors']]):
                            self._log("updating metadata for %s" % repr(book.path))
                            cur.execute('''UPDATE metadata
                                           SET authors = "{0}",
                                               author_sort = "{1}",
                                               title = "{2}",
                                               title_sort = "{3}"
                                           WHERE filename = {4}
                                        '''.format(self._escape_delimiters(' & '.join(book.authors)),
                                                   self._escape_delimiters(author_to_author_sort(book.authors[0])),
                                                   self._escape_delimiters(book.title),
                                                   self._escape_delimiters(title_sort(book.title)),
                                                   self._quote_sqlite_identifier(book.path)))

                con.commit()

            # Copy the updated db to the iDevice
            self._log("updating remote_metadata")
            self.ios.copy_to_idevice(str(self.local_metadata), str(self.remote_metadata))
Example #15
    def sync_booklists(self, booklists, end_session=True):
        '''
        Update metadata on device.
        @param booklists: A tuple containing the result of calls to
                                (L{books}(oncard=None), L{books}(oncard='carda'),
                                L{books}(oncard='cardb')).

        prefs['manage_device_metadata']: ['manual'|'on_send'|'on_connect']

        booklist will reflect library metadata only when
        manage_device_metadata=='on_connect', otherwise booklist metadata comes from
        device
        '''
        from calibre.ebooks.metadata import author_to_author_sort, authors_to_string, title_sort

        self._log_location()

        for booklist in booklists:
            if not booklist:
                continue

            # Update db title/author from booklist title/author
            con = sqlite3.connect(self.local_metadata)
            with con:
                con.row_factory = sqlite3.Row
                cur = con.cursor()
                for book in booklist:
                    cur.execute('''SELECT
                                    authors,
                                    filename,
                                    title
                                   FROM metadata
                                   WHERE filename = {0}
                                '''.format(json.dumps(book.path)))
                    cached_book = cur.fetchone()
                    if cached_book:
                        if (book.title != cached_book[b'title'] or
                            book.authors != [cached_book[b'authors']]):
                            self._log("updating metadata for %s" % repr(book.path))
                            cur.execute('''UPDATE metadata
                                           SET authors = "{0}",
                                               author_sort = "{1}",
                                               title = "{2}",
                                               title_sort = "{3}"
                                           WHERE filename = {4}
                                        '''.format(self._escape_delimiters('; '.join(book.authors)),
                                                   self._escape_delimiters(author_to_author_sort(book.authors[0])),
                                                   self._escape_delimiters(book.title),
                                                   self._escape_delimiters(title_sort(book.title)),
                                                   json.dumps(book.path)))

                con.commit()

            # Copy the updated db to the iDevice
            self._log("updating remote_metadata")
            self.ios.copy_to_idevice(str(self.local_metadata), str(self.remote_metadata))
Example #16
 def do_recalc_author_sort(self):
     self.table.cellChanged.disconnect()
     for row in range(0,self.table.rowCount()):
         item = self.table.item(row, 0)
         aut  = unicode(item.text()).strip()
         c = self.table.item(row, 1)
         # Sometimes trailing commas are left by changing between copy algs
         c.setText(author_to_author_sort(aut).rstrip(','))
     self.table.setFocus(Qt.OtherFocusReason)
     self.table.cellChanged.connect(self.cell_changed)
Example #17
    def rename_item(self, item_id, new_name, db):
        ret = ManyToManyTable.rename_item(self, item_id, new_name, db)
        if item_id not in self.id_map:
            self.alink_map.pop(item_id, None)
            self.asort_map.pop(item_id, None)
        else:
            # Was a simple rename, update the author sort value
            self.set_sort_names({item_id:author_to_author_sort(new_name)}, db)

        return ret
Example #18
 def read_id_maps(self, db):
     self.alink_map = lm = {}
     self.asort_map = sm = {}
     self.id_map = im = {}
     us = self.unserialize
     for aid, name, sort, link in db.conn.execute(
             'SELECT id, name, sort, link FROM authors'):
         name = us(name)
         im[aid] = name
         sm[aid] = (sort or author_to_author_sort(name))
         lm[aid] = link
Example #19
 def author_sort_from_authors(self, authors):
     '''Given a list of authors, return the author_sort string for the authors,
     preferring the author sort associated with the author over the computed
     string. '''
     table = self.fields['authors'].table
     result = []
     rmap = {icu_lower(v):k for k, v in table.id_map.iteritems()}
     for aut in authors:
         aid = rmap.get(icu_lower(aut), None)
         result.append(author_to_author_sort(aut) if aid is None else table.asort_map[aid])
     return ' & '.join(result)
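The prefer-stored-sort-else-compute logic above reduces to a small helper; a hedged sketch with plain dicts standing in for table.id_map and table.asort_map (str.lower() stands in for icu_lower):

    from calibre.ebooks.metadata import author_to_author_sort

    def author_sort_from_authors(authors, id_map, asort_map):
        # id_map: author id -> name, asort_map: author id -> stored sort value.
        rmap = {name.lower(): aid for aid, name in id_map.items()}
        result = []
        for aut in authors:
            aid = rmap.get(aut.lower())
            result.append(author_to_author_sort(aut) if aid is None else asort_map[aid])
        return ' & '.join(result)

    # The stored sort wins for 'An Author'; 'Kovid Goyal' falls back to the computed one.
    print(author_sort_from_authors(['Kovid Goyal', 'An Author'],
                                   {1: 'An Author'}, {1: 'Author, An'}))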
Example #20
def get_role(role, credits):
    '''
    Gets a list of persons with the given role.
    '''
    from calibre.ebooks.metadata import author_to_author_sort

    if prefs['swap_names']:
        return [author_to_author_sort(credit['person']) for credit in credits
                if credit['role'].lower() in role]
    return [credit['person'] for credit in credits
            if credit['role'].lower() in role]
Example #21
def get_role(role, credits):
    '''
    Gets a list of persons with the given role.
    First primary persons, then all others, alphabetically
    '''
    from calibre.ebooks.metadata import author_to_author_sort

    if prefs['swap_names']:
        return [author_to_author_sort(credit['person']) for credit in credits
                if credit['role'].lower() in role]
    return [credit['person'] for credit in credits
            if credit['role'].lower() in role]
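The effect of the swap_names preference in both get_role variants can be seen with an in-memory credits list; a hedged sketch with the pref replaced by a plain keyword argument and illustrative credit data:

    from calibre.ebooks.metadata import author_to_author_sort

    credits = [
        {'person': 'Alan Moore', 'role': 'writer'},
        {'person': 'Dave Gibbons', 'role': 'artist'},
    ]

    def get_role(role, credits, swap_names=False):
        # Return the people credited with the given role, optionally in
        # 'Last, First' form (what prefs['swap_names'] toggles above).
        people = [c['person'] for c in credits if c['role'].lower() in role]
        if swap_names:
            people = [author_to_author_sort(p) for p in people]
        return people

    print(get_role('writer', credits))                   # ['Alan Moore']
    print(get_role('writer', credits, swap_names=True))  # ['Moore, Alan']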
Example #22
    def __init__(self, prefix, lpath, title=None, authors=None, mime=None, date=None, ContentType=None,
                 thumbnail_name=None, size=None, other=None):
#        debug_print('Book::__init__ - title=', title)
        show_debug = title is not None and title.lower().find("xxxxx") >= 0
        if show_debug:
            debug_print("Book::__init__ - title=", title, 'authors=', authors)
            debug_print("Book::__init__ - other=", other)
        Book_.__init__(self, prefix, lpath, size, other)

        if title is not None and len(title) > 0:
            self.title = title

        if authors is not None and len(authors) > 0:
            self.authors_from_string(authors)
            if self.author_sort is None or self.author_sort == "Unknown":
                self.author_sort = author_to_author_sort(authors)

        self.mime = mime

        self.size = size # will be set later if None

        if ContentType == '6' and date is not None:
            try:
                self.datetime = time.strptime(date, "%Y-%m-%dT%H:%M:%S.%f")
            except:
                try:
                    self.datetime = time.strptime(date.split('+')[0], "%Y-%m-%dT%H:%M:%S")
                except:
                    try:
                        self.datetime = time.strptime(date.split('+')[0], "%Y-%m-%d")
                    except:
                        try:
                            self.datetime = parse_date(date,
                                    assume_utc=True).timetuple()
                        except:
                            try:
                                self.datetime = time.gmtime(os.path.getctime(self.path))
                            except:
                                self.datetime = time.gmtime()

        self.contentID          = None
        self.current_shelves    = []
        self.kobo_collections   = []
        self.kobo_series        = None
        self.kobo_series_number = None

        if thumbnail_name is not None:
            self.thumbnail = ImageWrapper(thumbnail_name)

        if show_debug:
            debug_print("Book::__init__ end - self=", self)
            debug_print("Book::__init__ end - title=", title, 'authors=', authors)
Example #23
def change_case(case_changes, dirtied, db, table, m, is_authors=False):
    if is_authors:
        vals = ((val.replace(',', '|'), item_id) for item_id, val in
                case_changes.iteritems())
    else:
        vals = ((val, item_id) for item_id, val in case_changes.iteritems())
    db.executemany(
        'UPDATE %s SET %s=? WHERE id=?'%(m['table'], m['column']), vals)
    for item_id, val in case_changes.iteritems():
        table.id_map[item_id] = val
        dirtied.update(table.col_book_map[item_id])
        if is_authors:
            table.asort_map[item_id] = author_to_author_sort(val)
Example #24
def change_case(case_changes, dirtied, db, table, m, is_authors=False):
    if is_authors:
        vals = ((val.replace(',', '|'), item_id) for item_id, val in
                case_changes.iteritems())
    else:
        vals = ((val, item_id) for item_id, val in case_changes.iteritems())
    db.executemany(
        'UPDATE %s SET %s=? WHERE id=?'%(m['table'], m['column']), vals)
    for item_id, val in case_changes.iteritems():
        table.id_map[item_id] = val
        dirtied.update(table.col_book_map[item_id])
        if is_authors:
            table.asort_map[item_id] = author_to_author_sort(val)
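The val.replace(',', '|') in both change_case copies reflects how the authors table stores names: commas are written as '|' and converted back on read (see the unserialize call in Example #7). A short hedged round trip:

    # 'Last, First' names cannot keep their comma in authors.name, so calibre
    # stores 'An, Author' as 'An| Author' and swaps the '|' back when reading.
    stored = 'An, Author'.replace(',', '|')
    print(stored)                    # An| Author
    print(stored.replace('|', ','))  # An, Author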
Example #25
    def _get_metadata(self, book, book_stats):
        '''
        Return a populated Book object with available metadata
        '''
        from calibre.ebooks.metadata import author_to_author_sort, authors_to_string, title_sort
        self._log_location(repr(book))
        format = book.rsplit('.')[1].lower()
        if format == 'mobi':
            from calibre.ebooks.metadata.mobi import get_metadata as get_mobi_metadata
            path = os.path.join(self.temp_dir, book_stats['path'])
            with open(path, 'rb') as f:
                stream = cStringIO.StringIO(f.read())
            mi = get_mobi_metadata(stream)

        elif format == 'pdf':
            from calibre.ebooks.metadata.pdf import get_metadata as get_pdf_metadata
            path = os.path.join(self.temp_dir, book_stats['path'])
            with open(path, 'rb') as f:
                stream = cStringIO.StringIO(f.read())
            mi = get_pdf_metadata(stream)

        else:
            self._log("unsupported format: '{}'".format(format))
            return Book()

        if False:
            ''' Perform a bit of voodoo to match Kindle multiple author style '''
            ks_authors = []
            for a in mi.authors:
                if "," in a:
                    # Already ln, fn
                    ks_authors.append(a)
                else:
                    ks_authors.append(author_to_author_sort(a))

        this_book = Book(mi.title, '; '.join(mi.authors))
        this_book.author_sort = '; '.join(mi.authors)
        this_book.datetime = datetime.fromtimestamp(
            int(book_stats['stats']['st_birthtime'])).timetuple()
        this_book.dateadded = int(book_stats['stats']['st_birthtime'])
        this_book.path = book
        this_book.size = int(book_stats['stats']['st_size'])
        this_book.thumbnail = self._get_kindle_thumb(mi.cover_data[1])
        if this_book.thumbnail:
            this_book.thumb_data = base64.b64encode(this_book.thumbnail)
        else:
            this_book.thumb_data = None
        this_book.title_sort = title_sort(mi.title)
        this_book.uuid = None

        return this_book
Example #26
    def _get_metadata(self, book, book_stats):
        '''
        Return a populated Book object with available metadata
        '''
        from calibre.ebooks.metadata import author_to_author_sort, authors_to_string, title_sort
        self._log_location(repr(book))
        format = book.rsplit('.')[1].lower()
        if format == 'mobi':
            from calibre.ebooks.metadata.mobi import get_metadata as get_mobi_metadata
            path = os.path.join(self.temp_dir, book_stats['path'])
            with open(path, 'rb') as f:
                stream = cStringIO.StringIO(f.read())
            mi = get_mobi_metadata(stream)

        elif format == 'pdf':
            from calibre.ebooks.metadata.pdf import get_metadata as get_pdf_metadata
            path = os.path.join(self.temp_dir, book_stats['path'])
            with open(path, 'rb') as f:
                stream = cStringIO.StringIO(f.read())
            mi = get_pdf_metadata(stream)

        else:
            self._log("unsupported format: '{}'".format(format))
            return Book()

        if False:
            ''' Perform a bit of voodoo to match Kindle multiple author style '''
            ks_authors = []
            for a in mi.authors:
                if "," in a:
                    # Already ln, fn
                    ks_authors.append(a)
                else:
                    ks_authors.append(author_to_author_sort(a))

        this_book = Book(mi.title, '; '.join(mi.authors))
        this_book.author_sort = '; '.join(mi.authors)
        this_book.datetime = datetime.fromtimestamp(int(book_stats['stats']['st_birthtime'])).timetuple()
        this_book.dateadded = int(book_stats['stats']['st_birthtime'])
        this_book.path = book
        this_book.size = int(book_stats['stats']['st_size'])
        this_book.thumbnail = self._get_kindle_thumb(mi.cover_data[1])
        if this_book.thumbnail:
            this_book.thumb_data = base64.b64encode(this_book.thumbnail)
        else:
            this_book.thumb_data = None
        this_book.title_sort = title_sort(mi.title)
        this_book.uuid = None

        return this_book
Example #27
 def do_recalc_author_sort(self):
     self.table.cellChanged.disconnect()
     for row in range(0, self.table.rowCount()):
         item_aut = self.table.item(row, 0)
         id_ = int(item_aut.data(Qt.UserRole))
         aut = unicode_type(item_aut.text()).strip()
         item_aus = self.table.item(row, 1)
         # Sometimes trailing commas are left by changing between copy algs
         aus = unicode_type(author_to_author_sort(aut)).rstrip(',')
         item_aus.setText(aus)
         self.authors[id_]['sort'] = aus
         self.set_icon(item_aus, id_)
     self.table.setFocus(Qt.OtherFocusReason)
     self.table.cellChanged.connect(self.cell_changed)
Example #28
    def __init__(self, name, table, bools_are_tristate, get_template_functions):
        self.name, self.table = name, table
        dt = self.metadata['datatype']
        self.has_text_data = dt in {'text', 'comments', 'series', 'enumeration'}
        self.table_type = self.table.table_type
        self._sort_key = (sort_key if dt in ('text', 'series', 'enumeration') else IDENTITY)

        # This will be compared to the output of sort_key() which is a
        # bytestring, therefore it is safer to have it be a bytestring.
        # Coercing an empty bytestring to unicode will never fail, but the
        # output of sort_key cannot be coerced to unicode
        self._default_sort_key = b''

        if dt in {'int', 'float', 'rating'}:
            self._default_sort_key = 0
        elif dt == 'bool':
            self._default_sort_key = None
            self._sort_key = bool_sort_key(bools_are_tristate)
        elif dt == 'datetime':
            self._default_sort_key = UNDEFINED_DATE
            if tweaks['sort_dates_using_visible_fields']:
                fmt = None
                if name in {'timestamp', 'pubdate', 'last_modified'}:
                    fmt = tweaks['gui_%s_display_format' % name]
                elif self.metadata['is_custom']:
                    fmt = self.metadata.get('display', {}).get('date_format', None)
                self._sort_key = partial(clean_date_for_sort, fmt=fmt)
        elif dt == 'comments' or name == 'identifiers':
            self._default_sort_key = ''

        if self.name == 'languages':
            self._sort_key = lambda x:sort_key(calibre_langcode_to_name(x))
        self.is_multiple = (bool(self.metadata['is_multiple']) or self.name ==
                'formats')
        self.sort_sort_key = True
        if self.is_multiple and '&' in self.metadata['is_multiple']['list_to_ui']:
            self._sort_key = lambda x: sort_key(author_to_author_sort(x))
            self.sort_sort_key = False
        self.default_value = {} if name == 'identifiers' else () if self.is_multiple else None
        self.category_formatter = unicode_type
        if dt == 'rating':
            if self.metadata['display'].get('allow_half_stars', False):
                self.category_formatter = lambda x: rating_to_stars(x, True)
            else:
                self.category_formatter = rating_to_stars
        elif name == 'languages':
            self.category_formatter = calibre_langcode_to_name
        self.writer = Writer(self)
        self.series_field = None
        self.get_template_functions = get_template_functions
Example #29
 def cell_changed(self, row, col):
     if col == 0:
         item = self.table.item(row, 0)
         aut  = unicode_type(item.text()).strip()
         aut_list = string_to_authors(aut)
         if len(aut_list) != 1:
             error_dialog(self.parent(), _('Invalid author name'),
                     _('You cannot change an author to multiple authors.')).exec_()
             aut = ' % '.join(aut_list)
             self.table.item(row, 0).setText(aut)
         c = self.table.item(row, 1)
         c.setText(author_to_author_sort(aut))
         item = c
     else:
         item  = self.table.item(row, col)
     self.table.setCurrentItem(item)
     self.table.scrollToItem(item)
Example #30
 def cell_changed(self, row, col):
     if col == 0:
         item = self.table.item(row, 0)
         aut  = unicode(item.text()).strip()
         amper = aut.find('&')
         if amper >= 0:
             error_dialog(self.parent(), _('Invalid author name'),
                     _('Author names cannot contain & characters.')).exec_()
             aut = aut.replace('&', '%')
             self.table.item(row, 0).setText(aut)
         c = self.table.item(row, 1)
         c.setText(author_to_author_sort(aut))
         item = c
     else:
         item  = self.table.item(row, col)
     self.table.setCurrentItem(item)
     self.table.scrollToItem(item)
Example #31
 def cell_changed(self, row, col):
     if col == 0:
         item = self.table.item(row, 0)
         aut  = unicode(item.text()).strip()
         aut_list = string_to_authors(aut)
         if len(aut_list) != 1:
             error_dialog(self.parent(), _('Invalid author name'),
                     _('You cannot change an author to multiple authors.')).exec_()
             aut = ' % '.join(aut_list)
             self.table.item(row, 0).setText(aut)
         c = self.table.item(row, 1)
         c.setText(author_to_author_sort(aut))
         item = c
     else:
         item  = self.table.item(row, col)
     self.table.setCurrentItem(item)
     self.table.scrollToItem(item)
Example #32
 def check_all_methods(self,
                       name,
                       invert=None,
                       comma=None,
                       nocomma=None,
                       copy=None):
     methods = ('invert', 'copy', 'comma', 'nocomma')
     if invert is None:
         invert = name
     if comma is None:
         comma = invert
     if nocomma is None:
         nocomma = comma
     if copy is None:
         copy = name
     results = (invert, copy, comma, nocomma)
     for method, result in zip(methods, results):
         self.assertEqual(author_to_author_sort(name, method), result)
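The four copy methods exercised by check_all_methods can be probed directly; a hedged sketch whose comments describe the behaviour as I understand calibre's implementation (the default tweak behaves like invert for a plain 'First Last' name, as Example #36's assertions also show):

    from calibre.ebooks.metadata import author_to_author_sort

    name = 'Kovid Goyal'
    print(author_to_author_sort(name))             # Goyal, Kovid  (default tweak)
    print(author_to_author_sort(name, 'invert'))   # Goyal, Kovid
    print(author_to_author_sort(name, 'copy'))     # Kovid Goyal   (returned unchanged)
    print(author_to_author_sort(name, 'nocomma'))  # Goyal Kovid   (no comma inserted)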
Example #33
    def __init__(self,
                 title,
                 rm_id,
                 authors=[],
                 size=0,
                 tags=[],
                 other=None,
                 datetime=time.gmtime()):
        super().__init__(title, authors=authors,
                         other=other)  # should pass title and author
        self.rm_id = rm_id
        self.datetime = datetime
        self.size = size
        self.tags = tags
        self.path = rm_id
        self.authors = authors

        self.author_sort = author_to_author_sort(self.authors)
Example #34
def get_db_id(val, db, m, table, kmap, rid_map, allow_case_change,
              case_changes, val_map, is_authors=False):
    ''' Get the db id for the value val. If val does not exist in the db it is
    inserted into the db. '''
    kval = kmap(val)
    item_id = rid_map.get(kval, None)
    if item_id is None:
        if is_authors:
            aus = author_to_author_sort(val)
            db.execute('INSERT INTO authors(name,sort) VALUES (?,?)',
                            (val.replace(',', '|'), aus))
        else:
            db.execute('INSERT INTO %s(%s) VALUES (?)'%(
                m['table'], m['column']), (val,))
        item_id = rid_map[kval] = db.last_insert_rowid()
        table.id_map[item_id] = val
        table.col_book_map[item_id] = set()
        if is_authors:
            table.asort_map[item_id] = aus
            table.alink_map[item_id] = ''
    elif allow_case_change and val != table.id_map[item_id]:
        case_changes[item_id] = val
    val_map[val] = item_id
Example #35
def get_categories(dbcache,
                   sort='name',
                   book_ids=None,
                   icon_map=None,
                   first_letter_sort=False):
    if icon_map is not None and type(icon_map) != TagsIcons:
        raise TypeError(
            'icon_map passed to get_categories must be of type TagIcons')
    if sort not in CATEGORY_SORTS:
        raise ValueError('sort ' + sort + ' not a valid value')

    fm = dbcache.field_metadata
    book_rating_map = dbcache.fields['rating'].book_value_map
    lang_map = dbcache.fields['languages'].book_value_map

    categories = {}
    book_ids = frozenset(book_ids) if book_ids else book_ids
    pm_cache = {}

    def get_metadata(book_id):
        ans = pm_cache.get(book_id)
        if ans is None:
            ans = pm_cache[book_id] = dbcache._get_proxy_metadata(book_id)
        return ans

    bids = None

    for category, is_multiple, is_composite in find_categories(fm):
        tag_class = create_tag_class(category, fm, icon_map)
        if is_composite:
            if bids is None:
                bids = dbcache._all_book_ids(
                ) if book_ids is None else book_ids
            cats = dbcache.fields[category].get_composite_categories(
                tag_class, book_rating_map, bids, is_multiple, get_metadata)
        elif category == 'news':
            cats = dbcache.fields['tags'].get_news_category(
                tag_class, book_ids)
        else:
            cat = fm[category]
            brm = book_rating_map
            if cat['datatype'] == 'rating' and category != 'rating':
                brm = dbcache.fields[category].book_value_map
            cats = dbcache.fields[category].get_categories(
                tag_class, brm, lang_map, book_ids)
            if (category != 'authors' and cat['datatype'] == 'text'
                    and cat['is_multiple']
                    and cat['display'].get('is_names', False)):
                for item in cats:
                    item.sort = author_to_author_sort(item.sort)
        sort_categories(cats, sort, first_letter_sort=first_letter_sort)
        categories[category] = cats

    # Needed for legacy databases that have multiple ratings that
    # map to n stars
    for r in categories['rating']:
        for x in tuple(categories['rating']):
            if r.name == x.name and r.id != x.id:
                r.id_set |= x.id_set
                r.count = r.count + x.count
                categories['rating'].remove(x)
                break

    # User categories
    user_categories = clean_user_categories(dbcache).copy()
    if user_categories:
        # We want to use same node in the user category as in the source
        # category. To do that, we need to find the original Tag node. There is
        # a time/space tradeoff here. By converting the tags into a map, we can
        # do the verification in the category loop much faster, at the cost of
        # temporarily duplicating the categories lists.
        taglist = {}
        for c, items in categories.iteritems():
            taglist[c] = dict(map(lambda t: (icu_lower(t.name), t), items))

        muc = dbcache.pref('grouped_search_make_user_categories', [])
        gst = dbcache.pref('grouped_search_terms', {})
        for c in gst:
            if c not in muc:
                continue
            user_categories[c] = []
            for sc in gst[c]:
                for t in categories.get(sc, ()):
                    user_categories[c].append([t.name, sc, 0])

        gst_icon = icon_map['gst'] if icon_map else None
        for user_cat in sorted(user_categories.iterkeys(), key=sort_key):
            items = []
            names_seen = {}
            for name, label, ign in user_categories[user_cat]:
                n = icu_lower(name)
                if label in taglist and n in taglist[label]:
                    if user_cat in gst:
                        # for gst items, make copy and consolidate the tags by name.
                        if n in names_seen:
                            t = names_seen[n]
                            t.id_set |= taglist[label][n].id_set
                            t.count += taglist[label][n].count
                            t.tooltip = t.tooltip.replace(
                                ')', ', ' + label + ')')
                        else:
                            t = copy.copy(taglist[label][n])
                            t.icon = gst_icon
                            names_seen[t.name] = t
                            items.append(t)
                    else:
                        items.append(taglist[label][n])
                # else: do nothing, to not include nodes w zero counts
            cat_name = '@' + user_cat  # add the '@' to avoid name collision
            # Not a problem if we accumulate entries in the icon map
            if icon_map is not None:
                icon_map[cat_name] = icon_map['user:']
            sort_categories(items, sort)
            categories[cat_name] = items

    # ### Finally, the saved searches category ####
    items = []
    icon = None
    if icon_map and 'search' in icon_map:
        icon = icon_map['search']
    queries = dbcache._search_api.saved_searches.queries
    for srch in sorted(queries, key=sort_key):
        items.append(
            Tag(srch,
                tooltip=queries[srch],
                sort=srch,
                icon=icon,
                category='search',
                is_editable=False))
    if len(items):
        categories['search'] = items

    return categories
Example #36
    def test_many_many_basic(self):  # {{{
        'Test the different code paths for writing to a many-many field'
        cl = self.cloned_library
        cache = self.init_cache(cl)
        ae, af, sf = self.assertEqual, self.assertFalse, cache.set_field

        # Tags
        ae(sf('#tags', {1:cache.field_for('tags', 1), 2:cache.field_for('tags', 2)}),
            {1, 2})
        for name in ('tags', '#tags'):
            f = cache.fields[name]
            af(sf(name, {1:('News', 'tag one')}, allow_case_change=False))
            ae(sf(name, {1:'tag one, News'}), {1, 2})
            ae(sf(name, {3:('tag two', 'sep,sep2')}), {2, 3})
            ae(len(f.table.id_map), 4)
            ae(sf(name, {1:None}), set([1]))
            cache2 = self.init_cache(cl)
            for c in (cache, cache2):
                ae(c.field_for(name, 3), ('tag two', 'sep;sep2'))
                ae(len(c.fields[name].table.id_map), 3)
                ae(len(c.fields[name].table.id_map), 3)
                ae(c.field_for(name, 1), ())
                ae(c.field_for(name, 2), ('tag two', 'tag one'))
            del cache2

        # Authors
        ae(sf('#authors', {k:cache.field_for('authors', k) for k in (1,2,3)}),
           {1,2,3})

        for name in ('authors', '#authors'):
            f = cache.fields[name]
            ae(len(f.table.id_map), 3)
            af(cache.set_field(name, {3:None if name == 'authors' else 'Unknown'}))
            ae(cache.set_field(name, {3:'Kovid Goyal & Divok Layog'}), set([3]))
            ae(cache.set_field(name, {1:'', 2:'An, Author'}), {1,2})
            cache2 = self.init_cache(cl)
            for c in (cache, cache2):
                ae(len(c.fields[name].table.id_map), 4 if name =='authors' else 3)
                ae(c.field_for(name, 3), ('Kovid Goyal', 'Divok Layog'))
                ae(c.field_for(name, 2), ('An, Author',))
                ae(c.field_for(name, 1), ('Unknown',) if name=='authors' else ())
                if name == 'authors':
                    ae(c.field_for('author_sort', 1), author_to_author_sort('Unknown'))
                    ae(c.field_for('author_sort', 2), author_to_author_sort('An, Author'))
                    ae(c.field_for('author_sort', 3), author_to_author_sort('Kovid Goyal') + ' & ' + author_to_author_sort('Divok Layog'))
            del cache2
        ae(cache.set_field('authors', {1:'KoviD GoyaL'}), {1, 3})
        ae(cache.field_for('author_sort', 1), 'GoyaL, KoviD')
        ae(cache.field_for('author_sort', 3), 'GoyaL, KoviD & Layog, Divok')

        # Languages
        f = cache.fields['languages']
        ae(f.table.id_map, {1: 'eng', 2: 'deu'})
        ae(sf('languages', {1:''}), set([1]))
        ae(cache.field_for('languages', 1), ())
        ae(sf('languages', {2:('und',)}), set([2]))
        af(f.table.id_map)
        ae(sf('languages', {1:'eng,fra,deu', 2:'es,Dutch', 3:'English'}), {1, 2, 3})
        ae(cache.field_for('languages', 1), ('eng', 'fra', 'deu'))
        ae(cache.field_for('languages', 2), ('spa', 'nld'))
        ae(cache.field_for('languages', 3), ('eng',))
        ae(sf('languages', {3:None}), set([3]))
        ae(cache.field_for('languages', 3), ())
        ae(sf('languages', {1:'deu,fra,eng'}), set([1]), 'Changing order failed')
        ae(sf('languages', {2:'deu,eng,eng'}), set([2]))
        cache2 = self.init_cache(cl)
        for c in (cache, cache2):
            ae(cache.field_for('languages', 1), ('deu', 'fra', 'eng'))
            ae(cache.field_for('languages', 2), ('deu', 'eng'))
        del cache2

        # Identifiers
        f = cache.fields['identifiers']
        ae(sf('identifiers', {3: 'one:1,two:2'}), set([3]))
        ae(sf('identifiers', {2:None}), set([2]))
        ae(sf('identifiers', {1: {'test':'1', 'two':'2'}}), set([1]))
        cache2 = self.init_cache(cl)
        for c in (cache, cache2):
            ae(c.field_for('identifiers', 3), {'one':'1', 'two':'2'})
            ae(c.field_for('identifiers', 2), {})
            ae(c.field_for('identifiers', 1), {'test':'1', 'two':'2'})
        del cache2

        # Test setting of title sort
        ae(sf('title', {1:'The Moose', 2:'Cat'}), {1, 2})
        cache2 = self.init_cache(cl)
        for c in (cache, cache2):
            ae(c.field_for('sort', 1), 'Moose, The')
            ae(c.field_for('sort', 2), 'Cat')

        # Test setting with the same value repeated
        ae(sf('tags', {3: ('a', 'b', 'a')}), {3})
        ae(sf('tags', {3: ('x', 'X')}), {3}, 'Failed when setting tag twice with different cases')
        ae(('x',), cache.field_for('tags', 3))
Example #37
def get_categories(dbcache, sort='name', book_ids=None, first_letter_sort=False):
    if sort not in CATEGORY_SORTS:
        raise ValueError('sort ' + sort + ' not a valid value')

    fm = dbcache.field_metadata
    book_rating_map = dbcache.fields['rating'].book_value_map
    lang_map = dbcache.fields['languages'].book_value_map

    categories = {}
    book_ids = frozenset(book_ids) if book_ids else book_ids
    pm_cache = {}

    def get_metadata(book_id):
        ans = pm_cache.get(book_id)
        if ans is None:
            ans = pm_cache[book_id] = dbcache._get_proxy_metadata(book_id)
        return ans

    bids = None
    first_letter_sort = bool(first_letter_sort)

    for category, is_multiple, is_composite in find_categories(fm):
        tag_class = create_tag_class(category, fm)
        sort_on, reverse = sort, False
        if is_composite:
            if bids is None:
                bids = dbcache._all_book_ids() if book_ids is None else book_ids
            cats = dbcache.fields[category].get_composite_categories(
                tag_class, book_rating_map, bids, is_multiple, get_metadata)
        elif category == 'news':
            cats = dbcache.fields['tags'].get_news_category(tag_class, book_ids)
        else:
            cat = fm[category]
            brm = book_rating_map
            dt = cat['datatype']
            if dt == 'rating':
                if category != 'rating':
                    brm = dbcache.fields[category].book_value_map
                if sort_on == 'name':
                    sort_on, reverse = 'rating', True
            cats = dbcache.fields[category].get_categories(
                tag_class, brm, lang_map, book_ids)
            if (category != 'authors' and dt == 'text' and
                cat['is_multiple'] and cat['display'].get('is_names', False)):
                for item in cats:
                    item.sort = author_to_author_sort(item.sort)
        cats.sort(key=category_sort_keys[first_letter_sort][sort_on], reverse=reverse)
        categories[category] = cats

    # Needed for legacy databases that have multiple ratings that
    # map to n stars
    for r in categories['rating']:
        for x in tuple(categories['rating']):
            if r.name == x.name and r.id != x.id:
                r.id_set |= x.id_set
                r.count = len(r.id_set)
                categories['rating'].remove(x)
                break

    # User categories
    user_categories = clean_user_categories(dbcache).copy()

    # First add any grouped search terms to the user categories
    muc = dbcache.pref('grouped_search_make_user_categories', [])
    gst = dbcache.pref('grouped_search_terms', {})
    for c in gst:
        if c not in muc:
            continue
        user_categories[c] = []
        for sc in gst[c]:
            for t in categories.get(sc, ()):
                user_categories[c].append([t.name, sc, 0])

    if user_categories:
        # We want to use same node in the user category as in the source
        # category. To do that, we need to find the original Tag node. There is
        # a time/space tradeoff here. By converting the tags into a map, we can
        # do the verification in the category loop much faster, at the cost of
        # temporarily duplicating the categories lists.
        taglist = {}
        for c, items in iteritems(categories):
            taglist[c] = dict(map(lambda t:(icu_lower(t.name), t), items))

        # Add the category values to the user categories
        for user_cat in sorted(user_categories, key=sort_key):
            items = []
            names_seen = {}
            user_cat_is_gst = user_cat in gst
            for name, label, ign in user_categories[user_cat]:
                n = icu_lower(name)
                if label in taglist and n in taglist[label]:
                    if user_cat_is_gst:
                        # for gst items, make copy and consolidate the tags by name.
                        if n in names_seen:
                            # We must combine this node into a previous one with
                            # the same name ignoring case. As part of the process,
                            # remember the source categories and correct the
                            # average rating
                            t = names_seen[n]
                            other_tag = taglist[label][n]
                            t.id_set |= other_tag.id_set
                            t.count = len(t.id_set)
                            t.original_categories.add(other_tag.category)

                            total_rating = 0
                            count = 0
                            for id_ in t.id_set:
                                rating = book_rating_map.get(id_, 0)
                                if rating:
                                    total_rating += rating/2
                                    count += 1
                            if total_rating and count:
                                t.avg_rating = total_rating/count
                        else:
                            # Must deepcopy so we don't share the id_set between nodes
                            t = copy.deepcopy(taglist[label][n])
                            t.original_categories = {t.category}
                            names_seen[n] = t
                            items.append(t)
                    else:
                        items.append(taglist[label][n])
                # else: do nothing, to not include nodes w zero counts
            cat_name = '@' + user_cat  # add the '@' to avoid name collision
            items.sort(key=category_sort_keys[False][sort])
            categories[cat_name] = items

    # ### Finally, the saved searches category ####
    items = []
    queries = dbcache._search_api.saved_searches.queries
    for srch in sorted(queries, key=sort_key):
        items.append(Tag(srch, sort=srch, search_expression=queries[srch],
                         category='search', is_editable=False))
    if len(items):
        categories['search'] = items

    return categories
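
A hedged usage sketch for the function above: dbcache is assumed to be a calibre Cache (for example db.new_api), and the name, sort and count attributes are the ones the code itself sets on its Tag nodes.

# categories maps field lookup names (plus '@...' user categories and 'search')
# to lists of Tag nodes, already sorted as requested
cats = get_categories(dbcache, sort='name', first_letter_sort=False)
for tag in cats.get('authors', ()):
    print(tag.name, tag.sort, tag.count)
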
Example #38
0
    def test_many_many_basic(self):  # {{{
        'Test the different code paths for writing to a many-many field'
        cl = self.cloned_library
        cache = self.init_cache(cl)
        ae, af, sf = self.assertEqual, self.assertFalse, cache.set_field

        # Tags
        ae(sf('#tags', {1:cache.field_for('tags', 1), 2:cache.field_for('tags', 2)}),
            {1, 2})
        for name in ('tags', '#tags'):
            f = cache.fields[name]
            af(sf(name, {1:('News', 'tag one')}, allow_case_change=False))
            ae(sf(name, {1:'tag one, News'}), {1, 2})
            ae(sf(name, {3:('tag two', 'sep,sep2')}), {2, 3})
            ae(len(f.table.id_map), 4)
            ae(sf(name, {1:None}), set([1]))
            cache2 = self.init_cache(cl)
            for c in (cache, cache2):
                ae(c.field_for(name, 3), ('tag two', 'sep;sep2'))
                ae(len(c.fields[name].table.id_map), 3)
                ae(len(c.fields[name].table.id_map), 3)
                ae(c.field_for(name, 1), ())
                ae(c.field_for(name, 2), ('tag two', 'tag one'))
            del cache2

        # Authors
        ae(sf('#authors', {k:cache.field_for('authors', k) for k in (1,2,3)}),
           {1,2,3})

        for name in ('authors', '#authors'):
            f = cache.fields[name]
            ae(len(f.table.id_map), 3)
            af(cache.set_field(name, {3:None if name == 'authors' else 'Unknown'}))
            ae(cache.set_field(name, {3:'Kovid Goyal & Divok Layog'}), set([3]))
            ae(cache.set_field(name, {1:'', 2:'An, Author'}), {1,2})
            cache2 = self.init_cache(cl)
            for c in (cache, cache2):
                ae(len(c.fields[name].table.id_map), 4 if name =='authors' else 3)
                ae(c.field_for(name, 3), ('Kovid Goyal', 'Divok Layog'))
                ae(c.field_for(name, 2), ('An, Author',))
                ae(c.field_for(name, 1), ('Unknown',) if name=='authors' else ())
                if name == 'authors':
                    ae(c.field_for('author_sort', 1), author_to_author_sort('Unknown'))
                    ae(c.field_for('author_sort', 2), author_to_author_sort('An, Author'))
                    ae(c.field_for('author_sort', 3), author_to_author_sort('Kovid Goyal') + ' & ' + author_to_author_sort('Divok Layog'))
            del cache2
        ae(cache.set_field('authors', {1:'KoviD GoyaL'}), {1, 3})
        ae(cache.field_for('author_sort', 1), 'GoyaL, KoviD')
        ae(cache.field_for('author_sort', 3), 'GoyaL, KoviD & Layog, Divok')

        # Languages
        f = cache.fields['languages']
        ae(f.table.id_map, {1: 'eng', 2: 'deu'})
        ae(sf('languages', {1:''}), set([1]))
        ae(cache.field_for('languages', 1), ())
        ae(sf('languages', {2:('und',)}), set([2]))
        af(f.table.id_map)
        ae(sf('languages', {1:'eng,fra,deu', 2:'es,Dutch', 3:'English'}), {1, 2, 3})
        ae(cache.field_for('languages', 1), ('eng', 'fra', 'deu'))
        ae(cache.field_for('languages', 2), ('spa', 'nld'))
        ae(cache.field_for('languages', 3), ('eng',))
        ae(sf('languages', {3:None}), set([3]))
        ae(cache.field_for('languages', 3), ())
        ae(sf('languages', {1:'deu,fra,eng'}), set([1]), 'Changing order failed')
        ae(sf('languages', {2:'deu,eng,eng'}), set([2]))
        cache2 = self.init_cache(cl)
        for c in (cache, cache2):
            ae(c.field_for('languages', 1), ('deu', 'fra', 'eng'))
            ae(c.field_for('languages', 2), ('deu', 'eng'))
        del cache2

        # Identifiers
        f = cache.fields['identifiers']
        ae(sf('identifiers', {3: 'one:1,two:2'}), set([3]))
        ae(sf('identifiers', {2:None}), set([2]))
        ae(sf('identifiers', {1: {'test':'1', 'two':'2'}}), set([1]))
        cache2 = self.init_cache(cl)
        for c in (cache, cache2):
            ae(c.field_for('identifiers', 3), {'one':'1', 'two':'2'})
            ae(c.field_for('identifiers', 2), {})
            ae(c.field_for('identifiers', 1), {'test':'1', 'two':'2'})
        del cache2

        # Test setting of title sort
        ae(sf('title', {1:'The Moose', 2:'Cat'}), {1, 2})
        cache2 = self.init_cache(cl)
        for c in (cache, cache2):
            ae(c.field_for('sort', 1), 'Moose, The')
            ae(c.field_for('sort', 2), 'Cat')
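
A minimal sketch of the author-sort behaviour the assertions above rely on; the exact output is controlled by the author_sort_copy_method tweak, and the default 'invert' method is assumed here.

author_to_author_sort('KoviD GoyaL')   # -> 'GoyaL, KoviD'
' & '.join(author_to_author_sort(a) for a in ('Kovid Goyal', 'Divok Layog'))
# -> 'Goyal, Kovid & Layog, Divok'
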
Example #39
0
 def surname(self, au):
     return author_to_author_sort(au).split(',')[0]
Example #40
0
def get_categories(dbcache, sort='name', book_ids=None, icon_map=None):
    if icon_map is not None and type(icon_map) != TagsIcons:
        raise TypeError('icon_map passed to get_categories must be of type TagIcons')
    if sort not in CATEGORY_SORTS:
        raise ValueError('sort ' + sort + ' not a valid value')

    fm = dbcache.field_metadata
    book_rating_map = dbcache.fields['rating'].book_value_map
    lang_map = dbcache.fields['languages'].book_value_map

    categories = {}
    book_ids = frozenset(book_ids) if book_ids else book_ids
    get_metadata = partial(dbcache._get_metadata, get_user_categories=False)
    bids = None

    for category, is_multiple, is_composite in find_categories(fm):
        tag_class = create_tag_class(category, fm, icon_map)
        if is_composite:
            if bids is None:
                bids = dbcache._all_book_ids() if book_ids is None else book_ids
            cats = dbcache.fields[category].get_composite_categories(
                tag_class, book_rating_map, bids, is_multiple, get_metadata)
        elif category == 'news':
            cats = dbcache.fields['tags'].get_news_category(tag_class, book_ids)
        else:
            cat = fm[category]
            brm = book_rating_map
            if cat['datatype'] == 'rating' and category != 'rating':
                brm = dbcache.fields[category].book_value_map
            cats = dbcache.fields[category].get_categories(
                tag_class, brm, lang_map, book_ids)
            if (category != 'authors' and cat['datatype'] == 'text' and
                cat['is_multiple'] and cat['display'].get('is_names', False)):
                for item in cats:
                    item.sort = author_to_author_sort(item.sort)
        sort_categories(cats, sort)
        categories[category] = cats

    # Needed for legacy databases that have multiple ratings that
    # map to n stars
    for r in categories['rating']:
        for x in tuple(categories['rating']):
            if r.name == x.name and r.id != x.id:
                r.id_set |= x.id_set
                r.count = r.count + x.count
                categories['rating'].remove(x)
                break

    # User categories
    user_categories = clean_user_categories(dbcache).copy()
    if user_categories:
        # We want to use same node in the user category as in the source
        # category. To do that, we need to find the original Tag node. There is
        # a time/space tradeoff here. By converting the tags into a map, we can
        # do the verification in the category loop much faster, at the cost of
        # temporarily duplicating the categories lists.
        taglist = {}
        for c, items in categories.iteritems():
            taglist[c] = dict(map(lambda t:(icu_lower(t.name), t), items))

        muc = dbcache.pref('grouped_search_make_user_categories', [])
        gst = dbcache.pref('grouped_search_terms', {})
        for c in gst:
            if c not in muc:
                continue
            user_categories[c] = []
            for sc in gst[c]:
                if sc in categories.keys():
                    for t in categories[sc]:
                        user_categories[c].append([t.name, sc, 0])

        gst_icon = icon_map['gst'] if icon_map else None
        for user_cat in sorted(user_categories.iterkeys(), key=sort_key):
            items = []
            names_seen = {}
            for name, label, ign in user_categories[user_cat]:
                n = icu_lower(name)
                if label in taglist and n in taglist[label]:
                    if user_cat in gst:
                        # for gst items, make copy and consolidate the tags by name.
                        if n in names_seen:
                            t = names_seen[n]
                            t.id_set |= taglist[label][n].id_set
                            t.count += taglist[label][n].count
                            t.tooltip = t.tooltip.replace(')', ', ' + label + ')')
                        else:
                            t = copy.copy(taglist[label][n])
                            t.icon = gst_icon
                            names_seen[t.name] = t
                            items.append(t)
                    else:
                        items.append(taglist[label][n])
                # else: do nothing, to not include nodes w zero counts
            cat_name = '@' + user_cat  # add the '@' to avoid name collision
            # Not a problem if we accumulate entries in the icon map
            if icon_map is not None:
                icon_map[cat_name] = icon_map['user:']
            # NOTE: the example source masked the following span; restored by analogy
            # with the non-icon variant of get_categories shown later on this page.
            sort_categories(items, sort)
            categories[cat_name] = items

    # ### Finally, the saved searches category ####
    items = []
    icon = None
    if icon_map and 'search' in icon_map:
        icon = icon_map['search']
    queries = dbcache._search_api.saved_searches.queries
    for srch in sorted(queries, key=sort_key):
        items.append(Tag(srch, tooltip=queries[srch], sort=srch, icon=icon,
                         category='search', is_editable=False))
    if len(items):
        categories['search'] = items

    return categories
Example #41
0
    def itervals(self, record):
        for name, fm in self.entries:
            dt = fm["datatype"]
            val = record[fm["rec_index"]]
            if dt == "composite":
                sb = fm["display"].get("composite_sort", "text")
                if sb == "date":
                    try:
                        val = parse_date(val)
                    except:
                        val = UNDEFINED_DATE
                    dt = "datetime"
                elif sb == "number":
                    try:
                        p = 1
                        for i, candidate in enumerate(("B", "KB", "MB", "GB", "TB", "PB", "EB")):
                            if val.endswith(candidate):
                                p = 1024 ** (i)
                                val = val[: -len(candidate)].strip()
                                break
                        val = locale.atof(val) * p
                    except:
                        val = 0.0
                    dt = "float"
                elif sb == "bool":
                    val = force_to_bool(val)
                    dt = "bool"

            if dt == "datetime":
                if val is None:
                    val = UNDEFINED_DATE
                if tweaks["sort_dates_using_visible_fields"]:
                    format = None
                    if name == "timestamp":
                        format = tweaks["gui_timestamp_display_format"]
                    elif name == "pubdate":
                        format = tweaks["gui_pubdate_display_format"]
                    elif name == "last_modified":
                        format = tweaks["gui_last_modified_display_format"]
                    elif fm["is_custom"]:
                        format = fm["display"].get("date_format", None)
                    val = clean_date_for_sort(val, format)
            elif dt == "series":
                if val is None:
                    val = ("", 1)
                else:
                    if self.library_order:
                        try:
                            lang = record[self.lang_idx].partition(u",")[0]
                        except (AttributeError, ValueError, KeyError, IndexError, TypeError):
                            lang = None
                        val = title_sort(val, order="library_order", lang=lang)
                    sidx_fm = self.field_metadata[name + "_index"]
                    sidx = record[sidx_fm["rec_index"]]
                    val = (self.string_sort_key(val), sidx)

            elif dt in ("text", "comments", "composite", "enumeration"):
                if val:
                    if fm["is_multiple"]:
                        jv = fm["is_multiple"]["list_to_ui"]
                        sv = fm["is_multiple"]["cache_to_list"]
                        if "&" in jv:
                            val = jv.join([author_to_author_sort(v) for v in val.split(sv)])
                        else:
                            val = jv.join(sorted(val.split(sv), key=self.string_sort_key))
                val = self.string_sort_key(val)

            elif dt == "bool":
                if not self.db_prefs.get("bools_are_tristate"):
                    val = {True: 1, False: 2, None: 2}.get(val, 2)
                else:
                    val = {True: 1, False: 2, None: 3}.get(val, 3)

            yield val
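
A short illustration of the '&' in jv branch above, assuming the usual authors separators (cache_to_list=',' and list_to_ui=' & '): each value of a names-like field is converted to its sort form before joining.

val = 'Kovid Goyal,Divok Layog'
' & '.join(author_to_author_sort(v) for v in val.split(','))
# -> 'Goyal, Kovid & Layog, Divok' (with the default 'invert' method)
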
Example #42
0
def _author_to_author_sort(x):
    if not x:
        return ''
    return author_to_author_sort(x.replace('|', ','))
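
The helper above undoes calibre's on-disk encoding of author names, where a comma inside a name is stored as '|'. A small hedged sketch:

_author_to_author_sort('')             # -> ''
_author_to_author_sort('An| Author')   # same as author_to_author_sort('An, Author')
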
Example #43
0
    def test_many_many_basic(self):  # {{{
        "Test the different code paths for writing to a many-many field"
        cl = self.cloned_library
        cache = self.init_cache(cl)
        ae, af, sf = self.assertEqual, self.assertFalse, cache.set_field

        # Tags
        ae(sf("#tags", {1: cache.field_for("tags", 1), 2: cache.field_for("tags", 2)}), {1, 2})
        for name in ("tags", "#tags"):
            f = cache.fields[name]
            af(sf(name, {1: ("News", "tag one")}, allow_case_change=False))
            ae(sf(name, {1: "tag one, News"}), {1, 2})
            ae(sf(name, {3: ("tag two", "sep,sep2")}), {2, 3})
            ae(len(f.table.id_map), 4)
            ae(sf(name, {1: None}), set([1]))
            cache2 = self.init_cache(cl)
            for c in (cache, cache2):
                ae(c.field_for(name, 3), ("tag two", "sep;sep2"))
                ae(len(c.fields[name].table.id_map), 3)
                ae(len(c.fields[name].table.id_map), 3)
                ae(c.field_for(name, 1), ())
                ae(c.field_for(name, 2), ("tag two", "tag one"))
            del cache2

        # Authors
        ae(sf("#authors", {k: cache.field_for("authors", k) for k in (1, 2, 3)}), {1, 2, 3})

        for name in ("authors", "#authors"):
            f = cache.fields[name]
            ae(len(f.table.id_map), 3)
            af(cache.set_field(name, {3: None if name == "authors" else "Unknown"}))
            ae(cache.set_field(name, {3: "Kovid Goyal & Divok Layog"}), set([3]))
            ae(cache.set_field(name, {1: "", 2: "An, Author"}), {1, 2})
            cache2 = self.init_cache(cl)
            for c in (cache, cache2):
                ae(len(c.fields[name].table.id_map), 4 if name == "authors" else 3)
                ae(c.field_for(name, 3), ("Kovid Goyal", "Divok Layog"))
                ae(c.field_for(name, 2), ("An, Author",))
                ae(c.field_for(name, 1), ("Unknown",) if name == "authors" else ())
                if name == "authors":
                    ae(c.field_for("author_sort", 1), author_to_author_sort("Unknown"))
                    ae(c.field_for("author_sort", 2), author_to_author_sort("An, Author"))
                    ae(
                        c.field_for("author_sort", 3),
                        author_to_author_sort("Kovid Goyal") + " & " + author_to_author_sort("Divok Layog"),
                    )
            del cache2
        ae(cache.set_field("authors", {1: "KoviD GoyaL"}), {1, 3})
        ae(cache.field_for("author_sort", 1), "GoyaL, KoviD")
        ae(cache.field_for("author_sort", 3), "GoyaL, KoviD & Layog, Divok")

        # Languages
        f = cache.fields["languages"]
        ae(f.table.id_map, {1: "eng", 2: "deu"})
        ae(sf("languages", {1: ""}), set([1]))
        ae(cache.field_for("languages", 1), ())
        ae(sf("languages", {2: ("und",)}), set([2]))
        af(f.table.id_map)
        ae(sf("languages", {1: "eng,fra,deu", 2: "es,Dutch", 3: "English"}), {1, 2, 3})
        ae(cache.field_for("languages", 1), ("eng", "fra", "deu"))
        ae(cache.field_for("languages", 2), ("spa", "nld"))
        ae(cache.field_for("languages", 3), ("eng",))
        ae(sf("languages", {3: None}), set([3]))
        ae(cache.field_for("languages", 3), ())
        ae(sf("languages", {1: "deu,fra,eng"}), set([1]), "Changing order failed")
        ae(sf("languages", {2: "deu,eng,eng"}), set([2]))
        cache2 = self.init_cache(cl)
        for c in (cache, cache2):
            ae(cache.field_for("languages", 1), ("deu", "fra", "eng"))
            ae(cache.field_for("languages", 2), ("deu", "eng"))
        del cache2

        # Identifiers
        f = cache.fields["identifiers"]
        ae(sf("identifiers", {3: "one:1,two:2"}), set([3]))
        ae(sf("identifiers", {2: None}), set([2]))
        ae(sf("identifiers", {1: {"test": "1", "two": "2"}}), set([1]))
        cache2 = self.init_cache(cl)
        for c in (cache, cache2):
            ae(c.field_for("identifiers", 3), {"one": "1", "two": "2"})
            ae(c.field_for("identifiers", 2), {})
            ae(c.field_for("identifiers", 1), {"test": "1", "two": "2"})
        del cache2

        # Test setting of title sort
        ae(sf("title", {1: "The Moose", 2: "Cat"}), {1, 2})
        cache2 = self.init_cache(cl)
        for c in (cache, cache2):
            ae(c.field_for("sort", 1), "Moose, The")
            ae(c.field_for("sort", 2), "Cat")
Example #44
0
def _author_to_author_sort(x):
    if not x: return ''
    return author_to_author_sort(x.replace('|', ','))
Example #45
0
def get_categories(dbcache, sort='name', book_ids=None, first_letter_sort=False):
    if sort not in CATEGORY_SORTS:
        raise ValueError('sort ' + sort + ' not a valid value')

    fm = dbcache.field_metadata
    book_rating_map = dbcache.fields['rating'].book_value_map
    lang_map = dbcache.fields['languages'].book_value_map

    categories = {}
    book_ids = frozenset(book_ids) if book_ids else book_ids
    pm_cache = {}

    def get_metadata(book_id):
        ans = pm_cache.get(book_id)
        if ans is None:
            ans = pm_cache[book_id] = dbcache._get_proxy_metadata(book_id)
        return ans

    bids = None
    first_letter_sort = bool(first_letter_sort)

    for category, is_multiple, is_composite in find_categories(fm):
        tag_class = create_tag_class(category, fm)
        sort_on, reverse = sort, False
        if is_composite:
            if bids is None:
                bids = dbcache._all_book_ids() if book_ids is None else book_ids
            cats = dbcache.fields[category].get_composite_categories(
                tag_class, book_rating_map, bids, is_multiple, get_metadata)
        elif category == 'news':
            cats = dbcache.fields['tags'].get_news_category(tag_class, book_ids)
        else:
            cat = fm[category]
            brm = book_rating_map
            dt = cat['datatype']
            if dt == 'rating':
                if category != 'rating':
                    brm = dbcache.fields[category].book_value_map
                if sort_on == 'name':
                    sort_on, reverse = 'rating', True
            cats = dbcache.fields[category].get_categories(
                tag_class, brm, lang_map, book_ids)
            if (category != 'authors' and dt == 'text' and
                cat['is_multiple'] and cat['display'].get('is_names', False)):
                for item in cats:
                    item.sort = author_to_author_sort(item.sort)
        cats.sort(key=category_sort_keys[first_letter_sort][sort_on], reverse=reverse)
        categories[category] = cats

    # Needed for legacy databases that have multiple ratings that
    # map to n stars
    for r in categories['rating']:
        for x in tuple(categories['rating']):
            if r.name == x.name and r.id != x.id:
                r.id_set |= x.id_set
                r.count = len(r.id_set)
                categories['rating'].remove(x)
                break

    # User categories
    user_categories = clean_user_categories(dbcache).copy()

    # First add any grouped search terms to the user categories
    muc = dbcache.pref('grouped_search_make_user_categories', [])
    gst = dbcache.pref('grouped_search_terms', {})
    for c in gst:
        if c not in muc:
            continue
        user_categories[c] = []
        for sc in gst[c]:
            for t in categories.get(sc, ()):
                user_categories[c].append([t.name, sc, 0])

    if user_categories:
        # We want to use same node in the user category as in the source
        # category. To do that, we need to find the original Tag node. There is
        # a time/space tradeoff here. By converting the tags into a map, we can
        # do the verification in the category loop much faster, at the cost of
        # temporarily duplicating the categories lists.
        taglist = {}
        for c, items in categories.iteritems():
            taglist[c] = dict(map(lambda t:(icu_lower(t.name), t), items))

        # Add the category values to the user categories
        for user_cat in sorted(user_categories.iterkeys(), key=sort_key):
            items = []
            names_seen = {}
            user_cat_is_gst = user_cat in gst
            for name, label, ign in user_categories[user_cat]:
                n = icu_lower(name)
                if label in taglist and n in taglist[label]:
                    if user_cat_is_gst:
                        # for gst items, make copy and consolidate the tags by name.
                        if n in names_seen:
                            # We must combine this node into a previous one with
                            # the same name ignoring case. As part of the process,
                            # remember the source categories and correct the
                            # average rating
                            t = names_seen[n]
                            other_tag = taglist[label][n]
                            t.id_set |= other_tag.id_set
                            t.count = len(t.id_set)
                            t.original_categories.add(other_tag.category)

                            total_rating = 0
                            count = 0
                            for id_ in t.id_set:
                                rating = book_rating_map.get(id_, 0)
                                if rating:
                                    total_rating += rating/2
                                    count += 1
                            if total_rating and count:
                                t.avg_rating = total_rating/count
                        else:
                            # Must deepcopy so we don't share the id_set between nodes
                            t = copy.deepcopy(taglist[label][n])
                            t.original_categories = {t.category}
                            names_seen[n] = t
                            items.append(t)
                    else:
                        items.append(taglist[label][n])
                # else: do nothing, to not include nodes w zero counts
            cat_name = '@' + user_cat  # add the '@' to avoid name collision
            items.sort(key=category_sort_keys[False][sort])
            categories[cat_name] = items

    # ### Finally, the saved searches category ####
    items = []
    queries = dbcache._search_api.saved_searches.queries
    for srch in sorted(queries, key=sort_key):
        items.append(Tag(srch, sort=srch, search_expression=queries[srch],
                         category='search', is_editable=False))
    if len(items):
        categories['search'] = items

    return categories
Example #46
0
def get_categories(dbcache, sort="name", book_ids=None, icon_map=None, first_letter_sort=False):
    if icon_map is not None and type(icon_map) != TagsIcons:
        raise TypeError("icon_map passed to get_categories must be of type TagIcons")
    if sort not in CATEGORY_SORTS:
        raise ValueError("sort " + sort + " not a valid value")

    fm = dbcache.field_metadata
    book_rating_map = dbcache.fields["rating"].book_value_map
    lang_map = dbcache.fields["languages"].book_value_map

    categories = {}
    book_ids = frozenset(book_ids) if book_ids else book_ids
    pm_cache = {}

    def get_metadata(book_id):
        ans = pm_cache.get(book_id)
        if ans is None:
            ans = pm_cache[book_id] = dbcache._get_proxy_metadata(book_id)
        return ans

    bids = None

    for category, is_multiple, is_composite in find_categories(fm):
        tag_class = create_tag_class(category, fm, icon_map)
        if is_composite:
            if bids is None:
                bids = dbcache._all_book_ids() if book_ids is None else book_ids
            cats = dbcache.fields[category].get_composite_categories(
                tag_class, book_rating_map, bids, is_multiple, get_metadata
            )
        elif category == "news":
            cats = dbcache.fields["tags"].get_news_category(tag_class, book_ids)
        else:
            cat = fm[category]
            brm = book_rating_map
            if cat["datatype"] == "rating" and category != "rating":
                brm = dbcache.fields[category].book_value_map
            cats = dbcache.fields[category].get_categories(tag_class, brm, lang_map, book_ids)
            if (
                category != "authors"
                and cat["datatype"] == "text"
                and cat["is_multiple"]
                and cat["display"].get("is_names", False)
            ):
                for item in cats:
                    item.sort = author_to_author_sort(item.sort)
        sort_categories(cats, sort, first_letter_sort=first_letter_sort)
        categories[category] = cats

    # Needed for legacy databases that have multiple ratings that
    # map to n stars
    for r in categories["rating"]:
        for x in tuple(categories["rating"]):
            if r.name == x.name and r.id != x.id:
                r.id_set |= x.id_set
                r.count = r.count + x.count
                categories["rating"].remove(x)
                break

    # User categories
    user_categories = clean_user_categories(dbcache).copy()
    if user_categories:
        # We want to use same node in the user category as in the source
        # category. To do that, we need to find the original Tag node. There is
        # a time/space tradeoff here. By converting the tags into a map, we can
        # do the verification in the category loop much faster, at the cost of
        # temporarily duplicating the categories lists.
        taglist = {}
        for c, items in categories.iteritems():
            taglist[c] = dict(map(lambda t: (icu_lower(t.name), t), items))

        muc = dbcache.pref("grouped_search_make_user_categories", [])
        gst = dbcache.pref("grouped_search_terms", {})
        for c in gst:
            if c not in muc:
                continue
            user_categories[c] = []
            for sc in gst[c]:
                for t in categories.get(sc, ()):
                    user_categories[c].append([t.name, sc, 0])

        gst_icon = icon_map["gst"] if icon_map else None
        for user_cat in sorted(user_categories.iterkeys(), key=sort_key):
            items = []
            names_seen = {}
            for name, label, ign in user_categories[user_cat]:
                n = icu_lower(name)
                if label in taglist and n in taglist[label]:
                    if user_cat in gst:
                        # for gst items, make copy and consolidate the tags by name.
                        if n in names_seen:
                            t = names_seen[n]
                            t.id_set |= taglist[label][n].id_set
                            t.count += taglist[label][n].count
                            t.tooltip = t.tooltip.replace(")", ", " + label + ")")
                        else:
                            t = copy.copy(taglist[label][n])
                            t.icon = gst_icon
                            names_seen[t.name] = t
                            items.append(t)
                    else:
                        items.append(taglist[label][n])
                # else: do nothing, to not include nodes w zero counts
            cat_name = "@" + user_cat  # add the '@' to avoid name collision
            # Not a problem if we accumulate entries in the icon map
            if icon_map is not None:
                icon_map[cat_name] = icon_map["user:"******"search" in icon_map:
        icon = icon_map["search"]
    queries = dbcache._search_api.saved_searches.queries
    for srch in sorted(queries, key=sort_key):
        items.append(Tag(srch, tooltip=queries[srch], sort=srch, icon=icon, category="search", is_editable=False))
    if len(items):
        categories["search"] = items

    return categories
Example #47
0
 def test_invalid_method(self):
     # Invalid string defaults to invert
     name = 'Jane, Q. van Doe[ed] Jr.'
     self.assertEqual(author_to_author_sort(name, 'invert'),
                      author_to_author_sort(name, '__unknown__!(*T^U$'))
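
A hedged summary of the second argument exercised above: 'invert' appears in the test itself, while 'copy' and the commented outputs are assumptions, not values taken from this snippet.

name = 'Jane Q. Doe'
author_to_author_sort(name, 'copy')    # assumed: the name is returned unchanged
author_to_author_sort(name, 'invert')  # assumed: surname first, e.g. 'Doe, Jane Q.'
# Any unrecognised method string behaves like 'invert', as the test asserts.
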
Example #48
0
    def itervals(self, record):
        for name, fm in self.entries:
            dt = fm['datatype']
            val = record[fm['rec_index']]
            if dt == 'composite':
                sb = fm['display'].get('composite_sort', 'text')
                if sb == 'date':
                    try:
                        val = parse_date(val)
                    except:
                        val = UNDEFINED_DATE
                    dt = 'datetime'
                elif sb == 'number':
                    try:
                        p = 1
                        for i, candidate in enumerate(
                                    ('B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB')):
                            if val.endswith(candidate):
                                p = 1024**(i)
                                val = val[:-len(candidate)].strip()
                                break
                        val = locale.atof(val) * p
                    except:
                        val = 0.0
                    dt = 'float'
                elif sb == 'bool':
                    val = force_to_bool(val)
                    dt = 'bool'

            if dt == 'datetime':
                if val is None:
                    val = UNDEFINED_DATE
                if tweaks['sort_dates_using_visible_fields']:
                    format = None
                    if name == 'timestamp':
                        format = tweaks['gui_timestamp_display_format']
                    elif name == 'pubdate':
                        format = tweaks['gui_pubdate_display_format']
                    elif name == 'last_modified':
                        format = tweaks['gui_last_modified_display_format']
                    elif fm['is_custom']:
                        format = fm['display'].get('date_format', None)
                    val = clean_date_for_sort(val, format)
            elif dt == 'series':
                if val is None:
                    val = ('', 1)
                else:
                    if self.library_order:
                        try:
                            lang = record[self.lang_idx].partition(u',')[0]
                        except (AttributeError, ValueError, KeyError,
                                IndexError, TypeError):
                            lang = None
                        val = title_sort(val, order='library_order', lang=lang)
                    sidx_fm = self.field_metadata[name + '_index']
                    sidx = record[sidx_fm['rec_index']]
                    val = (self.string_sort_key(val), sidx)

            elif dt in ('text', 'comments', 'composite', 'enumeration'):
                if val:
                    if fm['is_multiple']:
                        jv = fm['is_multiple']['list_to_ui']
                        sv = fm['is_multiple']['cache_to_list']
                        if '&' in jv:
                            val = jv.join(
                                [author_to_author_sort(v) for v in val.split(sv)])
                        else:
                            val = jv.join(sorted(val.split(sv),
                                              key=self.string_sort_key))
                val = self.string_sort_key(val)

            elif dt == 'bool':
                if not self.db_prefs.get('bools_are_tristate'):
                    val = {True: 1, False: 2, None: 2}.get(val, 2)
                else:
                    val = {True: 1, False: 2, None: 3}.get(val, 3)

            yield val
Example #49
0
 def surname(self, au):
     return author_to_author_sort(au).split(',')[0]
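
A quick hedged example of the surname() helper above, assuming the default 'invert' author-sort method:

# author_to_author_sort('Kovid Goyal').split(',')[0]  ->  'Goyal'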