def move_library(oldloc, newloc, parent, callback_on_complete):
    """Move (or create) a calibre library at ``newloc``.

    If no library exists at ``newloc`` and a readable one exists at
    ``oldloc``, a background move is started via MoveLibrary/MoveMonitor
    (stored in the module-global ``_mm`` to keep it alive).  Otherwise a
    fresh library is created, or the existing one at ``newloc`` is
    validated.  ``callback_on_complete`` is invoked with the new location
    on success, or ``None`` on failure.
    """
    callback = Callback(callback_on_complete)
    try:
        if not os.path.exists(os.path.join(newloc, 'metadata.db')):
            if oldloc and os.access(os.path.join(oldloc, 'metadata.db'), os.R_OK):
                # Move old library to new location
                try:
                    db = LibraryDatabase2(oldloc)
                except:
                    # Old library is unreadable/corrupt: fall back to
                    # creating a brand-new library at the new location.
                    # NOTE(review): this passes the already-wrapped
                    # ``callback`` as callback_on_complete, so it gets
                    # wrapped in Callback twice — verify intentional.
                    return move_library(None, newloc, parent, callback)
                else:
                    rq = Queue()
                    m = MoveLibrary(oldloc, newloc,
                            len(db.get_top_level_move_items()[0]), rq)
                    # Keep a module-level reference so the monitor is not
                    # garbage collected while the move runs.
                    global _mm
                    _mm = MoveMonitor(m, rq, callback, parent)
                    return
            else:
                # Create new library at new location
                db = LibraryDatabase2(newloc)
                callback(newloc)
                return

        # Try to load existing library at new location
        try:
            LibraryDatabase2(newloc)
        except Exception as err:
            det = traceback.format_exc()
            error_dialog(parent, _('Invalid database'),
                    _('<p>An invalid library already exists at '
                      '%(loc)s, delete it before trying to move the '
                      'existing library.<br>Error: %(err)s') % dict(loc=newloc,
                          err=str(err)),
                    det, show=True)
            callback(None)
            return
        else:
            callback(newloc)
            return
    except Exception as err:
        # Catch-all so the wizard never crashes: report and signal failure.
        det = traceback.format_exc()
        error_dialog(parent, _('Could not move library'),
                unicode(err), det, show=True)
        callback(None)
def create_db(self, library_path):
    """Seed ``library_path`` from the bundled metadata.db template, then
    populate it with two covers and three formats for the test books.
    Returns the path of the created database file."""
    from calibre.library.database2 import LibraryDatabase2
    if LibraryDatabase2.exists_at(library_path):
        raise ValueError('A library already exists at %r' % library_path)
    template = os.path.join(os.path.dirname(__file__), 'metadata.db')
    db_path = os.path.join(library_path, 'metadata.db')
    shutil.copyfile(template, db_path)
    db = LibraryDatabase2(library_path)
    # Attach cover images to the first two books.
    for book_id, image in ((1, 'lt.png'), (2, 'polish.png')):
        db.set_cover(book_id, I(image, data=True))
    # Attach known format payloads so tests can verify byte-exact reads.
    for book_id, fmt, payload in ((1, 'FMT1', b'book1fmt1'),
                                  (1, 'FMT2', b'book1fmt2'),
                                  (2, 'FMT1', b'book2fmt1')):
        db.add_format(book_id, fmt, BytesIO(payload), index_is_id=True)
    db.conn.close()
    return db_path
def test_get_formats(self):  # {{{
    'Test reading ebook formats using the format() method'
    from calibre.library.database2 import LibraryDatabase2
    from calibre.db.cache import NoSuchFormat
    # Collect the per-book format sets and their raw bytes from the old backend.
    old = LibraryDatabase2(self.library_path)
    ids = old.all_ids()
    lf = {i:set(old.formats(i, index_is_id=True).split(',')) if old.formats(
        i, index_is_id=True) else set() for i in ids}
    formats = {i:{f:old.format(i, f, index_is_id=True) for f in fmts}
               for i, fmts in iteritems(lf)}
    old.conn.close()
    old = None

    # Now compare against the new backend, reading each format as bytes,
    # as a file object, and as a filesystem path.
    cache = self.init_cache(self.library_path)
    for book_id, fmts in iteritems(lf):
        self.assertEqual(fmts, set(cache.formats(book_id)),
                         'Set of formats is not the same')
        for fmt in fmts:
            old = formats[book_id][fmt]
            self.assertEqual(old, cache.format(book_id, fmt),
                             'Old and new format disagree')
            f = cache.format(book_id, fmt, as_file=True)
            self.assertEqual(old, f.read(),
                             'Failed to read format as file')
            with open(cache.format(book_id, fmt, as_path=True,
                                   preserve_filename=True), 'rb') as f:
                self.assertEqual(old, f.read(),
                                 'Failed to read format as path')
            with open(cache.format(book_id, fmt, as_path=True), 'rb') as f:
                self.assertEqual(old, f.read(),
                                 'Failed to read format as path')

    # Missing book / missing format must raise NoSuchFormat.
    buf = BytesIO()
    self.assertRaises(NoSuchFormat, cache.copy_format_to, 99999, 'X', buf,
            'copy_format_to() failed to raise an exception for non-existent book')
    self.assertRaises(NoSuchFormat, cache.copy_format_to, 1, 'X', buf,
            'copy_format_to() failed to raise an exception for non-existent format')
def test_get_metadata(self):  # {{{
    'Test get_metadata() returns the same data for both backends'
    from calibre.library.database2 import LibraryDatabase2
    old = LibraryDatabase2(self.library_path)
    # Snapshot metadata (including cover bytes) from the old backend.
    old_metadata = {i: old.get_metadata(
        i, index_is_id=True, get_cover=True, cover_as_data=True)
        for i in xrange(1, 4)}
    for mi in old_metadata.itervalues():
        # Normalize lazily-populated containers so equality comparison is fair.
        mi.format_metadata = dict(mi.format_metadata)
        if mi.formats:
            mi.formats = tuple(mi.formats)
    old.conn.close()
    old = None

    cache = self.init_cache(self.library_path)
    new_metadata = {i: cache.get_metadata(
        i, get_cover=True, cover_as_data=True) for i in xrange(1, 4)}
    cache = None
    # Both dicts were built over the same ordered key range, so zip pairs
    # up corresponding books.
    for mi2, mi1 in zip(new_metadata.values(), old_metadata.values()):
        self.compare_metadata(mi1, mi2)
def migrate(old, new):
    """Migrate a calibre 0.4-style library at ``old`` to the database2
    format at ``new``, updating the saved library_path preference."""
    from calibre.utils.config import prefs
    from calibre.library.database import LibraryDatabase
    from calibre.library.database2 import LibraryDatabase2
    from calibre.utils.terminfo import ProgressBar
    from calibre.constants import terminal_controller

    class Dummy(ProgressBar):
        # Adapts the terminal ProgressBar to the Qt-style progress API
        # that migrate_old() expects; most notifications are no-ops.
        def setLabelText(self, x):
            pass

        def setAutoReset(self, y):
            pass

        def reset(self):
            pass

        def setRange(self, min, max):
            self.min = min
            self.max = max

        def setValue(self, val):
            # Convert absolute progress to the 0..1 fraction ProgressBar wants.
            self.update(float(val) / getattr(self, 'max', 1))

    db = LibraryDatabase(old)
    db2 = LibraryDatabase2(new)
    db2.migrate_old(db, Dummy(terminal_controller(), 'Migrating database...'))
    # Point future sessions at the migrated library.
    prefs['library_path'] = os.path.abspath(new)
    print 'Database migrated to', os.path.abspath(new)
def setUp(self):
    """Create a temporary library with three books (one duplicated) for
    the database tests."""
    self.tdir = PersistentTemporaryDirectory('_calibre_dbtest')
    self.db = LibraryDatabase2(self.tdir)
    f = open(os.path.join(self.tdir, 'test.txt'), 'w+b')
    f.write('test')
    # add_books() accepts open streams; reuse the same stream for all three.
    paths = list(repeat(f, 3))
    formats = list(repeat('txt', 3))
    m1 = MetaInformation('Test Ebook 1', ['Test Author 1'])
    m1.tags = ['tag1', 'tag2']
    m1.publisher = 'Test Publisher 1'
    m1.rating = 2
    m1.series = 'Test Series 1'
    m1.series_index = 3
    m1.author_sort = 'as1'
    m1.isbn = 'isbn1'
    m1.cover_data = ('jpg', self.img)
    m2 = MetaInformation('Test Ebook 2', ['Test Author 2'])
    m2.tags = ['tag3', 'tag4']
    m2.publisher = 'Test Publisher 2'
    m2.rating = 3
    m2.series = 'Test Series 2'
    m2.series_index = 1
    m2.author_sort = 'as1'
    m2.isbn = 'isbn1'
    # m2 is added twice on purpose with add_duplicates=True, so duplicate
    # handling is exercised by the tests.
    self.db.add_books(paths, formats, [m1, m2, m2], add_duplicates=True)
    self.m1, self.m2 = m1, m2
def test_get_cover(self):  # {{{
    'Test cover() returns the same data for both backends'
    from calibre.library.database2 import LibraryDatabase2
    # Snapshot all covers from the old backend first.
    old = LibraryDatabase2(self.library_path)
    covers = {i: old.cover(i, index_is_id=True) for i in old.all_ids()}
    old.conn.close()
    old = None

    cache = self.init_cache(self.library_path)
    for book_id, cdata in covers.iteritems():
        self.assertEqual(cdata, cache.cover(book_id),
                         'Reading of cover failed')
        # as_file returns None when there is no cover, hence the guard.
        f = cache.cover(book_id, as_file=True)
        self.assertEqual(cdata, f.read() if f else f,
                         'Reading of cover as file failed')
        if cdata:
            with open(cache.cover(book_id, as_path=True), 'rb') as f:
                self.assertEqual(cdata, f.read(),
                                 'Reading of cover as path failed')
        else:
            self.assertEqual(cdata, cache.cover(book_id, as_path=True),
                             'Reading of null cover as path failed')

    # copy_cover_to() signals "nothing copied" by returning False.
    buf = BytesIO()
    self.assertFalse(cache.copy_cover_to(99999, buf),
            'copy_cover_to() did not return False for non-existent book_id')
    self.assertFalse(cache.copy_cover_to(3, buf),
            'copy_cover_to() did not return False for non-existent cover')
def doit(self):
    """Open the destination library as a secondary database, run the
    actual operation against it, then tear it down cleanly."""
    from calibre.library.database2 import LibraryDatabase2
    target = LibraryDatabase2(self.loc, is_second_db=True)
    with closing(target):
        self._doit(target)
    # Break reference cycles so the db object can be collected promptly.
    target.break_cycles()
    del target
def create_db(self, library_path):
    """Seed ``library_path`` with the bundled metadata.db template and
    return the path of the copied database file."""
    from calibre.library.database2 import LibraryDatabase2
    if LibraryDatabase2.exists_at(library_path):
        raise ValueError('A library already exists at %r'%library_path)
    template = os.path.join(os.path.dirname(__file__), 'metadata.db')
    dbpath = os.path.join(library_path, 'metadata.db')
    shutil.copyfile(template, dbpath)
    return dbpath
def create_db(self, library_path):
    """Copy the reference metadata.db into ``library_path``; refuses to
    overwrite an existing library. Returns the new database path."""
    from calibre.library.database2 import LibraryDatabase2
    if LibraryDatabase2.exists_at(library_path):
        raise ValueError('A library already exists at %r' % library_path)
    source = os.path.join(os.path.dirname(__file__), 'metadata.db')
    target = os.path.join(library_path, 'metadata.db')
    shutil.copyfile(source, target)
    return target
def get_db(dbpath, options):
    """Open and return the library database for a CLI invocation.

    An explicit --with-library path on ``options`` overrides ``dbpath``.
    Raises ValueError when no library path is available at all.
    """
    if options.library_path is not None:
        dbpath = options.library_path
    if dbpath is None:
        raise ValueError('No saved library path, either run the GUI or use the'
                         ' --with-library option')
    return LibraryDatabase2(os.path.abspath(dbpath))
def test_searching(self):  # {{{
    'Test searching returns the same data for both backends'
    from calibre.library.database2 import LibraryDatabase2
    # Run every query against the old backend first and record the id sets.
    old = LibraryDatabase2(self.library_path)
    oldvals = {query:set(old.search_getting_ids(query, '')) for query in (
        # Date tests
        'date:9/6/2011', 'date:true', 'date:false', 'pubdate:9/2011',
        '#date:true', 'date:<100daysago', 'date:>9/6/2011',
        '#date:>9/1/2011', '#date:=2011',
        # Number tests
        'rating:3', 'rating:>2', 'rating:=2', 'rating:true', 'rating:false',
        'rating:>4', 'tags:#<2', 'tags:#>7', 'cover:false', 'cover:true',
        '#float:>11', '#float:<1k', '#float:10.01', '#float:false',
        'series_index:1', 'series_index:<3',
        # Bool tests
        '#yesno:true', '#yesno:false', '#yesno:yes', '#yesno:no',
        '#yesno:empty',
        # Keypair tests
        'identifiers:true', 'identifiers:false', 'identifiers:test',
        'identifiers:test:false', 'identifiers:test:one', 'identifiers:t:n',
        'identifiers:=test:=two', 'identifiers:x:y', 'identifiers:z',
        # Text tests
        'title:="Title One"', 'title:~title', '#enum:=one', '#enum:tw',
        '#enum:false', '#enum:true', 'series:one', 'tags:one', 'tags:true',
        'tags:false', 'uuid:2', 'one', '20.02', '"publisher one"',
        '"my comments one"',
        # User categories
        '@Good Authors:One', '@Good Series.good tags:two',
        # Cover/Formats
        'cover:true', 'cover:false', 'formats:true', 'formats:false',
        'formats:#>1', 'formats:#=1', 'formats:=fmt1', 'formats:=fmt2',
        'formats:=fmt1 or formats:fmt2', '#formats:true', '#formats:false',
        '#formats:fmt1', '#formats:fmt2', '#formats:fmt1 and #formats:fmt2',
    )}
    old.conn.close()
    old = None

    # The new backend must produce identical results for every query.
    cache = self.init_cache(self.library_path)
    for query, ans in oldvals.iteritems():
        nr = cache.search(query, '')
        self.assertEqual(ans, nr,
            'Old result: %r != New result: %r for search: %s'%(ans, nr, query))

    # Test searching by id, which was introduced in the new backend
    self.assertEqual(cache.search('id:1', ''), {1})
    self.assertEqual(cache.search('id:>1', ''), {2, 3})
def command_check_library(args, dbpath):
    """CLI entry point: run the library integrity checks selected via
    command line options and print the results (optionally as CSV).
    Returns 1 on usage/option errors."""
    from calibre.library.check_library import CheckLibrary, CHECKS
    parser = check_library_option_parser()
    opts, args = parser.parse_args(args)
    if len(args) != 0:
        parser.print_help()
        return 1
    if opts.library_path is not None:
        dbpath = opts.library_path
    if isbytestring(dbpath):
        dbpath = dbpath.decode(preferred_encoding)

    # Resolve the requested checks; default is all known checks.
    if opts.report is None:
        checks = CHECKS
    else:
        checks = []
        for r in opts.report.split(','):
            found = False
            for c in CHECKS:
                # c[0] is the check's machine name, c[1] its display name.
                if c[0] == r:
                    checks.append(c)
                    found = True
                    break
            if not found:
                print _('Unknown report check'), r
                return 1

    # Optional name/extension filters, comma separated.
    if opts.names is None:
        names = []
    else:
        names = [f.strip() for f in opts.names.split(',') if f.strip()]
    if opts.exts is None:
        exts = []
    else:
        exts = [f.strip() for f in opts.exts.split(',') if f.strip()]

    def print_one(checker, check):
        # Print the result list for a single check, if the checker ran it.
        attr = check[0]
        list = getattr(checker, attr, None)
        if list is None:
            return
        if opts.csv:
            for i in list:
                print check[1] + ',' + i[0] + ',' + i[1]
        else:
            print check[1]
            for i in list:
                print ' %-40.40s - %-40.40s'%(i[0], i[1])

    db = LibraryDatabase2(dbpath)
    checker = CheckLibrary(dbpath, db)
    checker.scan_library(names, exts)
    for check in checks:
        print_one(checker, check)
def accept(self):
    """Validate the edited location and commit the rename before closing
    the dialog; shows an error and stays open if no library is there."""
    candidate = unicode(self.loc.text())
    if LibraryDatabase2.exists_at(candidate):
        self.stats.rename(self.location, candidate)
        self.newloc = candidate
        QDialog.accept(self)
        return
    error_dialog(self, _('No library found'),
            _('No existing calibre library found at %s')%candidate,
            show=True)
def accept(self):
    """Accept the dialog only when the entered path holds an existing
    calibre library; otherwise report the problem and keep it open."""
    loc = unicode(self.loc.text())
    if not LibraryDatabase2.exists_at(loc):
        error_dialog(self, _('No library found'),
                     _('No existing calibre library found at %s') % loc,
                     show=True)
        return
    self.stats.rename(self.location, loc)
    self.newloc = loc
    QDialog.accept(self)
def create_db(self, library_path):
    """Build a small test library (covers + formats) from the bundled
    metadata.db template and return the database file path."""
    from calibre.library.database2 import LibraryDatabase2
    if LibraryDatabase2.exists_at(library_path):
        raise ValueError('A library already exists at %r'%library_path)
    template = os.path.join(os.path.dirname(__file__), 'metadata.db')
    db_file = os.path.join(library_path, 'metadata.db')
    shutil.copyfile(template, db_file)
    db = LibraryDatabase2(library_path)
    db.set_cover(1, I('lt.png', data=True))
    db.set_cover(2, I('polish.png', data=True))
    # Known payloads let tests verify byte-exact format reads later.
    for book_id, fmt, payload in ((1, 'FMT1', b'book1fmt1'),
                                  (1, 'FMT2', b'book1fmt2'),
                                  (2, 'FMT1', b'book2fmt1')):
        db.add_format(book_id, fmt, BytesIO(payload), index_is_id=True)
    db.conn.close()
    return db_file
def command_backup_metadata(args, dbpath):
    """CLI entry point: dump metadata.opf backup files for the library.

    With --all, every book is dumped; otherwise only books whose backup
    is out of date (book_ids=None). Returns 1 on usage errors.
    """
    parser = backup_metadata_option_parser()
    opts, args = parser.parse_args(args)
    if args:
        parser.print_help()
        return 1
    if opts.library_path is not None:
        dbpath = opts.library_path
    if isbytestring(dbpath):
        dbpath = dbpath.decode(preferred_encoding)
    db = LibraryDatabase2(dbpath)
    book_ids = db.all_ids() if opts.all else None
    db.dump_metadata(book_ids=book_ids, callback=BackupProgress())
def create_db(self, library_path):
    """Create the populated test library used by these tests and return
    the path of its database file."""
    from calibre.library.database2 import LibraryDatabase2
    if LibraryDatabase2.exists_at(library_path):
        raise ValueError("A library already exists at %r" % library_path)
    src_db = os.path.join(os.path.dirname(__file__), "metadata.db")
    new_db = os.path.join(library_path, "metadata.db")
    shutil.copyfile(src_db, new_db)
    db = LibraryDatabase2(library_path)
    # Covers for the first two books.
    for book_id, image in ((1, "lt.png"), (2, "polish.png")):
        db.set_cover(book_id, I(image, data=True))
    db.add_format(1, "FMT1", BytesIO(b"book1fmt1"), index_is_id=True)
    db.add_format(1, "FMT2", BytesIO(b"book1fmt2"), index_is_id=True)
    db.add_format(2, "FMT1", BytesIO(b"book2fmt1"), index_is_id=True)
    db.conn.close()
    return new_db
def test_get_metadata(self):  # {{{
    'Test get_metadata() returns the same data for both backends'
    from calibre.library.database2 import LibraryDatabase2
    old = LibraryDatabase2(self.library_path)
    # Add two extra formats per book so format metadata is non-trivial.
    for i in xrange(1, 3):
        old.add_format(i, 'txt%d'%i, StringIO(b'random%d'%i),
                       index_is_id=True)
        old.add_format(i, 'text%d'%i, StringIO(b'random%d'%i),
                       index_is_id=True)
    old_metadata = {i:old.get_metadata(i, index_is_id=True)
                    for i in xrange(1, 4)}
    # NOTE(review): unlike sibling tests, old.conn is not closed here
    # before dropping the reference — verify this is intentional.
    old = None

    cache = self.init_cache(self.library_path)
    new_metadata = {i:cache.get_metadata(i) for i in xrange(1, 4)}
    cache = None
    # Dicts built over the same ordered key range, so zip pairs books up.
    for mi2, mi1 in zip(new_metadata.values(), old_metadata.values()):
        self.compare_metadata(mi1, mi2)
def test_get_categories(self):  # {{{
    'Check that get_categories() returns the same data for both backends'
    from calibre.library.database2 import LibraryDatabase2
    old = LibraryDatabase2(self.library_path)
    old_categories = old.get_categories()
    old.conn.close()
    cache = self.init_cache(self.library_path)
    new_categories = cache.get_categories()
    self.assertEqual(set(old_categories), set(new_categories),
        'The set of old categories is not the same as the set of new categories')

    def compare_category(category, old, new):
        # Compare each Tag attribute, skipping combinations known to be
        # wrong/different in the old backend.
        for attr in ('name', 'original_name', 'id', 'count',
                     'is_hierarchical', 'is_editable', 'is_searchable',
                     'id_set', 'avg_rating', 'sort', 'use_sort_as_name',
                     'tooltip', 'icon', 'category'):
            oval, nval = getattr(old, attr), getattr(new, attr)
            if ((category in {'rating', '#rating'} and attr in {'id_set', 'sort'}) or
                    (category == 'series' and attr == 'sort') or  # Sorting is wrong in old
                    (category == 'identifiers' and attr == 'id_set') or
                    # NOTE(review): this clause has no attr condition, so it
                    # skips every attribute for '@Good Series' — confirm
                    # that is the intent.
                    (category == '@Good Series') or  # Sorting is wrong in old
                    (category == 'news' and attr in {'count', 'id_set'}) or
                    (category == 'formats' and attr == 'id_set')):
                continue
            self.assertEqual(oval, nval,
                'The attribute %s for %s in category %s does not match. Old is %r, New is %r'
                % (attr, old.name, category, oval, nval))

    for category in old_categories:
        old, new = old_categories[category], new_categories[category]
        self.assertEqual(len(old), len(new),
            'The number of items in the category %s is not the same' % category)
        for o, n in zip(old, new):
            compare_category(category, o, n)
def main(args=sys.argv):
    """Start the standalone calibre content server.

    Handles daemonization, optional PID file registration with cherrypy,
    and falls back to the saved library_path preference when no
    --with-library option is given.  Returns a process exit code.
    """
    from calibre.library.database2 import LibraryDatabase2
    parser = option_parser()
    opts, args = parser.parse_args(args)
    if opts.daemonize and not iswindows:
        daemonize()
    if opts.pidfile is not None:
        from cherrypy.process.plugins import PIDFile
        PIDFile(cherrypy.engine, opts.pidfile).subscribe()
    cherrypy.log.screen = True
    from calibre.utils.config import prefs
    if opts.with_library is None:
        # Fall back to the library path saved by the GUI.
        opts.with_library = prefs['library_path']
    if not opts.with_library:
        print('No saved library path. Use the --with-library option'
              ' to specify the path to the library you want to use.')
        return 1
    db = LibraryDatabase2(opts.with_library)
    server = LibraryServer(db, opts, show_tracebacks=opts.develop)
    server.start()
    return 0
def move_library(from_, to, notification=lambda x: x):
    """Move the library at ``from_`` to ``to``, reporting progress through
    the ``notification`` callable. Always returns True on completion."""
    # Brief pause before starting the move — presumably to let the caller
    # (GUI) settle first; TODO confirm the reason for the fixed delay.
    time.sleep(1)
    source_db = LibraryDatabase2(from_)
    source_db.move_library_to(to, notification)
    return True
def library_moved(self, newloc, copy_structure=False, call_close=True,
                  allow_rebuild=False):
    """Switch the GUI over to the library at ``newloc``.

    Opens the new database (offering an automatic rebuild when corrupted
    and ``allow_rebuild`` is True), rewires every view/action to it,
    resets search state, closes the old database, and refreshes any
    connected device.  ``copy_structure`` carries the old library's
    prefs over as defaults for the new one.
    """
    if newloc is None:
        return
    default_prefs = None
    try:
        olddb = self.library_view.model().db
        if copy_structure:
            default_prefs = olddb.prefs
    except:
        # No usable current db (e.g. during startup); proceed without one.
        olddb = None
    try:
        db = LibraryDatabase2(newloc, default_prefs=default_prefs)
    except (DatabaseException, sqlite.Error):
        if not allow_rebuild:
            raise
        import traceback
        repair = question_dialog(self, _('Corrupted database'),
                _('The library database at %s appears to be corrupted. Do '
                  'you want calibre to try and rebuild it automatically? '
                  'The rebuild may not be completely successful.')
                % force_unicode(newloc, filesystem_encoding),
                det_msg=traceback.format_exc())
        if repair:
            from calibre.gui2.dialogs.restore_library import repair_library_at
            if repair_library_at(newloc, parent=self):
                db = LibraryDatabase2(newloc, default_prefs=default_prefs)
            else:
                return
        else:
            return
    if self.content_server is not None:
        self.content_server.set_database(db)
    self.library_path = newloc
    prefs['library_path'] = self.library_path
    # Reset the on-device cache, then point all views at the new db.
    self.book_on_device(None, reset=True)
    db.set_book_on_device_func(self.book_on_device)
    self.library_view.set_database(db)
    self.tags_view.set_database(db, self.alter_tb)
    self.library_view.model().set_book_on_device_func(self.book_on_device)
    self.status_bar.clear_message()
    self.search.clear()
    self.saved_search.clear()
    self.book_details.reset_info()
    #self.library_view.model().count_changed()
    db = self.library_view.model().db
    self.iactions['Choose Library'].count_changed(db.count())
    self.set_window_title()
    self.apply_named_search_restriction('')  # reset restriction to null
    self.saved_searches_changed(recount=False)  # reload the search restrictions combo box
    if db.prefs['virtual_lib_on_startup']:
        self.apply_virtual_library(db.prefs['virtual_lib_on_startup'])
    for action in self.iactions.values():
        action.library_changed(db)
    # Close the previous database, best-effort.
    if olddb is not None:
        try:
            if call_close:
                olddb.conn.close()
        except:
            import traceback
            traceback.print_exc()
        olddb.break_cycles()
    if self.device_connected:
        self.set_books_in_library(self.booklists(), reset=True)
        self.refresh_ondevice()
        self.memory_view.reset()
        self.card_a_view.reset()
        self.card_b_view.reset()
    self.device_manager.set_current_library_uuid(db.library_id)
    self.library_view.set_current_row(0)
    # Run a garbage collection now so that it does not freeze the
    # interface later
    gc.collect()
def is_library_dir_suitable(self, x):
    """Return True if ``x`` can be used as a library folder: it either
    already contains a calibre library or is an empty directory.

    Any error while probing (missing path, permission denied, not a
    directory) is treated as "not suitable" rather than propagated.
    """
    try:
        return LibraryDatabase2.exists_at(x) or not os.listdir(x)
    except Exception:
        # Was a bare ``except:``, which also swallowed SystemExit and
        # KeyboardInterrupt; Exception keeps the best-effort behavior
        # while letting those propagate.
        return False
def init_old(self, library_path=None):
    """Open and return a legacy LibraryDatabase2 for ``library_path``,
    defaulting to this test case's library path."""
    from calibre.library.database2 import LibraryDatabase2
    target = library_path or self.library_path
    return LibraryDatabase2(target)
def test_searching(self):  # {{{
    'Test searching returns the same data for both backends'
    from calibre.library.database2 import LibraryDatabase2
    # Run every query against the old backend first and record the id sets.
    old = LibraryDatabase2(self.library_path)
    oldvals = {query: set(old.search_getting_ids(query, '')) for query in (
        # Date tests
        'date:9/6/2011', 'date:true', 'date:false', 'pubdate:1/9/2011',
        '#date:true', 'date:<100_daysago', 'date:>9/6/2011',
        '#date:>9/1/2011', '#date:=2011',
        # Number tests
        'rating:3', 'rating:>2', 'rating:=2', 'rating:true', 'rating:false',
        'rating:>4', 'tags:#<2', 'tags:#>7', 'cover:false', 'cover:true',
        '#float:>11', '#float:<1k', '#float:10.01', '#float:false',
        'series_index:1', 'series_index:<3',
        # Bool tests
        '#yesno:true', '#yesno:false', '#yesno:_yes', '#yesno:_no',
        '#yesno:_empty',
        # Keypair tests
        'identifiers:true', 'identifiers:false', 'identifiers:test',
        'identifiers:test:false', 'identifiers:test:one', 'identifiers:t:n',
        'identifiers:=test:=two', 'identifiers:x:y', 'identifiers:z',
        # Text tests
        'title:="Title One"', 'title:~title', '#enum:=one', '#enum:tw',
        '#enum:false', '#enum:true', 'series:one', 'tags:one', 'tags:true',
        'tags:false', 'uuid:2', 'one', '20.02', '"publisher one"',
        '"my comments one"', 'series_sort:one',
        # User categories
        '@Good Authors:One', '@Good Series.good tags:two',
        # Cover/Formats
        'cover:true', 'cover:false', 'formats:true', 'formats:false',
        'formats:#>1', 'formats:#=1', 'formats:=fmt1', 'formats:=fmt2',
        'formats:=fmt1 or formats:fmt2', '#formats:true', '#formats:false',
        '#formats:fmt1', '#formats:fmt2', '#formats:fmt1 and #formats:fmt2',
    )}
    old.conn.close()
    old = None

    # The new backend (on a cloned library) must match on every query.
    cache = self.init_cache(self.cloned_library)
    for query, ans in iteritems(oldvals):
        nr = cache.search(query, '')
        self.assertEqual(ans, nr,
            'Old result: %r != New result: %r for search: %s' % (ans, nr, query))

    # Test searching by id, which was introduced in the new backend
    self.assertEqual(cache.search('id:1', ''), {1})
    self.assertEqual(cache.search('id:>1', ''), {2, 3})

    # Numeric/rating searches with relops in the old db were incorrect, so
    # test them specifically here
    cache.set_field('rating', {1: 4, 2: 2, 3: 5})
    self.assertEqual(cache.search('rating:>2'), set())
    self.assertEqual(cache.search('rating:>=2'), {1, 3})
    self.assertEqual(cache.search('rating:<2'), {2})
    self.assertEqual(cache.search('rating:<=2'), {1, 2, 3})
    self.assertEqual(cache.search('rating:<=2'), {1, 2, 3})
    self.assertEqual(cache.search('rating:=2'), {1, 3})
    self.assertEqual(cache.search('rating:2'), {1, 3})
    self.assertEqual(cache.search('rating:!=2'), {2})

    cache.field_metadata.all_metadata()['#rating']['display']['allow_half_stars'] = True
    cache.set_field('#rating', {1: 3, 2: 4, 3: 9})
    self.assertEqual(cache.search('#rating:1'), set())
    self.assertEqual(cache.search('#rating:1.5'), {1})
    self.assertEqual(cache.search('#rating:>4'), {3})
    self.assertEqual(cache.search('#rating:2'), {2})

    # template searches
    # Test text search
    self.assertEqual(cache.search('template:"{#formats}#@#:t:fmt1"'), {1, 2})
    self.assertEqual(cache.search('template:"{authors}#@#:t:=Author One"'), {2})
    cache.set_field('pubdate', {
        1: p('2001-02-06'), 2: p('2001-10-06'), 3: p('2001-06-06')})
    cache.set_field('timestamp', {
        1: p('2002-02-06'), 2: p('2000-10-06'), 3: p('2001-06-06')})
    # Test numeric compare search
    self.assertEqual(
        cache.search("template:\"program: "
                     "floor(days_between(field(\'pubdate\'), "
                     "field(\'timestamp\')))#@#:n:>0\""), {2, 3})
    # Test date search
    # NOTE(review): this query string has a trailing quote but no opening
    # quote around the template — looks garbled; confirm against the
    # template-search syntax used by the other assertions above.
    self.assertEqual(cache.search('template:{pubdate}#@#:d:<2001-09-01"'), {1, 3})
    # Test boolean search
    self.assertEqual(cache.search('template:{series}#@#:b:true'), {1, 2})
    self.assertEqual(cache.search('template:{series}#@#:b:false'), {3})
    # test primary search
    cache.set_field('title', {1: "Gravity’s Raiñbow"})
    self.assertEqual(cache.search('title:"Gravity\'s Rainbow"'), {1})
def __init__(self):
    # Open the database at the user's configured library_path preference.
    LibraryDatabase2.__init__(self, prefs['library_path'])
def db(path=None, read_only=False):
    """Open and return a LibraryDatabase2, defaulting to the library path
    saved in the user's preferences when ``path`` is falsy."""
    from calibre.library.database2 import LibraryDatabase2
    from calibre.utils.config import prefs
    location = path if path else prefs['library_path']
    return LibraryDatabase2(location, read_only=read_only)
def test_searching(self):  # {{{
    'Test searching returns the same data for both backends'
    from calibre.library.database2 import LibraryDatabase2
    # Run every query against the old backend first and record the id sets.
    old = LibraryDatabase2(self.library_path)
    oldvals = {query: set(old.search_getting_ids(query, '')) for query in (
        # Date tests
        'date:9/6/2011', 'date:true', 'date:false', 'pubdate:1/9/2011',
        '#date:true', 'date:<100daysago', 'date:>9/6/2011',
        '#date:>9/1/2011', '#date:=2011',
        # Number tests
        'rating:3', 'rating:>2', 'rating:=2', 'rating:true', 'rating:false',
        'rating:>4', 'tags:#<2', 'tags:#>7', 'cover:false', 'cover:true',
        '#float:>11', '#float:<1k', '#float:10.01', '#float:false',
        'series_index:1', 'series_index:<3',
        # Bool tests
        '#yesno:true', '#yesno:false', '#yesno:yes', '#yesno:no',
        '#yesno:empty',
        # Keypair tests
        'identifiers:true', 'identifiers:false', 'identifiers:test',
        'identifiers:test:false', 'identifiers:test:one', 'identifiers:t:n',
        'identifiers:=test:=two', 'identifiers:x:y', 'identifiers:z',
        # Text tests
        'title:="Title One"', 'title:~title', '#enum:=one', '#enum:tw',
        '#enum:false', '#enum:true', 'series:one', 'tags:one', 'tags:true',
        'tags:false', 'uuid:2', 'one', '20.02', '"publisher one"',
        '"my comments one"', 'series_sort:one',
        # User categories
        '@Good Authors:One', '@Good Series.good tags:two',
        # Cover/Formats
        'cover:true', 'cover:false', 'formats:true', 'formats:false',
        'formats:#>1', 'formats:#=1', 'formats:=fmt1', 'formats:=fmt2',
        'formats:=fmt1 or formats:fmt2', '#formats:true', '#formats:false',
        '#formats:fmt1', '#formats:fmt2', '#formats:fmt1 and #formats:fmt2',
    )}
    old.conn.close()
    old = None

    # The new backend (on a cloned library) must match on every query.
    cache = self.init_cache(self.cloned_library)
    for query, ans in oldvals.iteritems():
        nr = cache.search(query, '')
        self.assertEqual(ans, nr,
            'Old result: %r != New result: %r for search: %s' % (ans, nr, query))

    # Test searching by id, which was introduced in the new backend
    self.assertEqual(cache.search('id:1', ''), {1})
    self.assertEqual(cache.search('id:>1', ''), {2, 3})

    # Numeric/rating searches with relops in the old db were incorrect, so
    # test them specifically here
    cache.set_field('rating', {1: 4, 2: 2, 3: 5})
    self.assertEqual(cache.search('rating:>2'), set())
    self.assertEqual(cache.search('rating:>=2'), {1, 3})
    self.assertEqual(cache.search('rating:<2'), {2})
    self.assertEqual(cache.search('rating:<=2'), {1, 2, 3})
    self.assertEqual(cache.search('rating:<=2'), {1, 2, 3})
    self.assertEqual(cache.search('rating:=2'), {1, 3})
    self.assertEqual(cache.search('rating:2'), {1, 3})
    self.assertEqual(cache.search('rating:!=2'), {2})

    cache.field_metadata.all_metadata()['#rating']['display']['allow_half_stars'] = True
    cache.set_field('#rating', {1: 3, 2: 4, 3: 9})
    self.assertEqual(cache.search('#rating:1'), set())
    self.assertEqual(cache.search('#rating:1.5'), {1})
    self.assertEqual(cache.search('#rating:>4'), {3})
    self.assertEqual(cache.search('#rating:2'), {2})
def command_list_categories(args, dbpath):
    """CLI entry point: list category/tag data from the library, either as
    a column-aligned table or as CSV.  Returns 1 on usage errors."""
    parser = list_categories_option_parser()
    opts, args = parser.parse_args(args)
    if len(args) != 0:
        parser.print_help()
        return 1
    if opts.library_path is not None:
        dbpath = opts.library_path
    if isbytestring(dbpath):
        dbpath = dbpath.decode(preferred_encoding)
    db = LibraryDatabase2(dbpath)
    category_data = db.get_categories()
    data = []
    # Restrict to the categories requested via --report (if any), skipping
    # user and saved-search categories.
    report_on = [c.strip() for c in opts.report.split(',') if c.strip()]
    categories = [k for k in category_data.keys()
                  if db.metadata_for_field(k)['kind'] not in ['user', 'search'] and
                  (not report_on or k in report_on)]
    # Sort ignoring a leading '#' so custom columns interleave with builtins.
    categories.sort(cmp=lambda x,y: cmp(x if x[0] != '#' else x[1:],
                                        y if y[0] != '#' else y[1:]))
    if not opts.item_count:
        # One row per tag, with count and average rating.
        for category in categories:
            is_rating = db.metadata_for_field(category)['datatype'] == 'rating'
            for tag in category_data[category]:
                if is_rating:
                    # Rating tag names are strings of stars; show the count.
                    tag.name = unicode(len(tag.name))
                data.append({'category':category, 'tag_name':tag.name,
                             'count':unicode(tag.count),
                             'rating':unicode(tag.avg_rating)})
    else:
        # One summary row per category with just the item count.
        for category in categories:
            data.append({'category':category,
                         'tag_name':_('CATEGORY ITEMS'),
                         'count': len(category_data[category]),
                         'rating': 0.0})
    fields = ['category', 'tag_name', 'count', 'rating']

    def do_list():
        # Render the rows as a wrapped, column-aligned table sized to the
        # terminal width (or --width).
        from calibre.constants import terminal_controller as tc
        terminal_controller = tc()
        separator = ' '
        widths = list(map(lambda x : 0, fields))
        for i in data:
            for j, field in enumerate(fields):
                widths[j] = max(widths[j], max(len(field), len(unicode(i[field]))))
        screen_width = terminal_controller.COLS if opts.width < 0 else opts.width
        if not screen_width:
            screen_width = 80
        field_width = screen_width//len(fields)
        base_widths = map(lambda x: min(x+1, field_width), widths)
        # Grow columns one at a time until the screen width is used up.
        while sum(base_widths) < screen_width:
            adjusted = False
            for i in range(len(widths)):
                if base_widths[i] < widths[i]:
                    base_widths[i] += min(screen_width-sum(base_widths),
                                          widths[i]-base_widths[i])
                    adjusted = True
                    break
            if not adjusted:
                break
        widths = list(base_widths)
        titles = map(lambda x, y: '%-*s%s'%(x-len(separator), y, separator),
                     widths, fields)
        print terminal_controller.GREEN + ''.join(titles)+terminal_controller.NORMAL
        wrappers = map(lambda x: TextWrapper(x-1), widths)
        o = cStringIO.StringIO()
        for record in data:
            text = [wrappers[i].wrap(unicode(record[field]).encode('utf-8'))
                    for i, field in enumerate(fields)]
            lines = max(map(len, text))
            for l in range(lines):
                for i, field in enumerate(text):
                    ft = text[i][l] if l < len(text[i]) else ''
                    filler = '%*s'%(widths[i]-len(ft)-1, '')
                    o.write(ft)
                    o.write(filler+separator)
                print >>o
        print o.getvalue()

    def do_csv():
        # Emit one line per row using the configured separator/quote chars.
        lf = '{category},"{tag_name}",{count},{rating}'
        lf = lf.replace(',', opts.separator).replace(r'\t','\t').replace(r'\n','\n')
        lf = lf.replace('"', opts.quote)
        for d in data:
            print lf.format(**d)

    if opts.csv:
        do_csv()
    else:
        do_list()