Example #1
def add_news(cache, path, arg):
    from calibre.ebooks.metadata.meta import get_metadata
    from calibre.utils.date import utcnow

    fmt = os.path.splitext(getattr(path, 'name', path))[1][1:].lower()
    stream = path if hasattr(path, 'read') else lopen(path, 'rb')
    stream.seek(0)
    mi = get_metadata(stream, fmt, use_libprs_metadata=False,
            force_read_metadata=True)
    # Force the author to calibre as the auto delete of old news checks for
    # both the author==calibre and the tag News
    mi.authors = ['calibre']
    stream.seek(0)
    with cache.write_lock:
        if mi.series_index is None:
            mi.series_index = cache._get_next_series_num_for(mi.series)
        mi.tags = [_('News')]
        if arg['add_title_tag']:
            mi.tags += [arg['title']]
        if arg['custom_tags']:
            mi.tags += arg['custom_tags']
        if mi.pubdate is None:
            mi.pubdate = utcnow()
        if mi.timestamp is None:
            mi.timestamp = utcnow()

        db_id = cache._create_book_entry(mi, apply_import_tags=False)
    cache.add_format(db_id, fmt, stream)  # Can't keep write lock since post-import hooks might run

    if not hasattr(path, 'read'):
        stream.close()
    return db_id
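All of these examples use utcnow() from calibre.utils.date, which returns the current time as a timezone-aware datetime pinned to UTC, so the value can be compared against other stored dates, serialized with isoformat(), or converted with timetuple(). A minimal stand-in using only the standard library (a sketch for orientation, not calibre's actual implementation):

from datetime import datetime, timezone

def utcnow():
    # Rough equivalent of calibre.utils.date.utcnow(): the current moment as an aware UTC datetime
    return datetime.now(timezone.utc)

print(utcnow().isoformat())  # e.g. '2013-02-01T09:30:00.123456+00:00'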
Example #2
def add_news(cache, path, arg, dbapi=None):
    from calibre.ebooks.metadata.meta import get_metadata
    from calibre.utils.date import utcnow

    fmt = os.path.splitext(getattr(path, 'name', path))[1][1:].lower()
    stream = path if hasattr(path, 'read') else lopen(path, 'rb')
    stream.seek(0)
    mi = get_metadata(stream, fmt, use_libprs_metadata=False,
            force_read_metadata=True)
    # Force the author to calibre as the auto delete of old news checks for
    # both the author==calibre and the tag News
    mi.authors = ['calibre']
    stream.seek(0)
    with cache.write_lock:
        if mi.series_index is None:
            mi.series_index = cache._get_next_series_num_for(mi.series)
        mi.tags = [_('News')]
        if arg['add_title_tag']:
            mi.tags += [arg['title']]
        if arg['custom_tags']:
            mi.tags += arg['custom_tags']
        if mi.pubdate is None:
            mi.pubdate = utcnow()
        if mi.timestamp is None:
            mi.timestamp = utcnow()

        db_id = cache._create_book_entry(mi, apply_import_tags=False)
    cache.add_format(db_id, fmt, stream, dbapi=dbapi)  # Can't keep write lock since post-import hooks might run

    if not hasattr(path, 'read'):
        stream.close()
    return db_id
Example #3
    def test_dates(self):  # {{{
        from calibre.utils.date import utcnow

        def rl(root):
            p, r = read_prefixes(root), read_refines(root)
            return read_pubdate(root, p, r), read_timestamp(root, p, r)

        def st(root, pd, ts):
            p, r = read_prefixes(root), read_refines(root)
            set_pubdate(root, p, r, pd)
            set_timestamp(root, p, r, ts)
            return rl(root)

        def ae(root, y1=None, y2=None):
            x1, x2 = rl(root)
            for x, y in ((x1, y1), (x2, y2)):
                if y is None:
                    self.assertIsNone(x)
                else:
                    self.ae(y, getattr(x, 'year', None))
        root = self.get_opf('''<dc:date>1999-3-2</dc:date><meta property="calibre:timestamp" scheme="dcterms:W3CDTF">2001</meta>''')
        ae(root, 1999, 2001)
        n = utcnow()
        q = n.replace(microsecond=0)
        self.ae(st(root, n, n), (n, q))
        root = self.get_opf('''<dc:date>1999-3-2</dc:date><meta name="calibre:timestamp" content="2001-1-1"/>''')
        ae(root, 1999, 2001)
        root = self.get_opf('''<meta property="dcterms:modified">2003</meta>''')
        self.ae(read_last_modified(root, read_prefixes(root), read_refines(root)).year, 2003)
Example #4
def add_catalog(cache, path, title):
    from calibre.ebooks.metadata.book.base import Metadata
    from calibre.ebooks.metadata.meta import get_metadata
    from calibre.utils.date import utcnow

    fmt = os.path.splitext(path)[1][1:].lower()
    with lopen(path, 'rb') as stream, cache.write_lock:
        matches = cache._search('title:="%s" and tags:="%s"' % (title.replace('"', '\\"'), _('Catalog')), None)
        db_id = None
        if matches:
            db_id = list(matches)[0]
        try:
            mi = get_metadata(stream, fmt)
            mi.authors = ['calibre']
        except:
            mi = Metadata(title, ['calibre'])
        mi.title, mi.authors = title, ['calibre']
        mi.tags = [_('Catalog')]
        mi.pubdate = mi.timestamp = utcnow()
        if fmt == 'mobi':
            mi.cover, mi.cover_data = None, (None, None)
        if db_id is None:
            db_id = cache._create_book_entry(mi, apply_import_tags=False)
        else:
            cache._set_metadata(db_id, mi)
        cache._add_format(db_id, fmt, stream)

    return db_id
Example #5
 def __init__(self,
              stream,
              page_size,
              compress=False,
              mark_links=False,
              debug=print):
     self.stream = HashingStream(stream)
     self.compress = compress
     self.write_line(PDFVER)
     self.write_line(u'%íì¦"'.encode('utf-8'))
     creator = ('%s %s [https://calibre-ebook.com]' %
                (__appname__, __version__))
     self.write_line('%% Created by %s' % creator)
     self.objects = IndirectObjects()
     self.objects.add(PageTree(page_size))
     self.objects.add(Catalog(self.page_tree))
     self.current_page = Page(self.page_tree, compress=self.compress)
     self.info = Dictionary({
         'Creator': String(creator),
         'Producer': String(creator),
         'CreationDate': utcnow(),
     })
     self.stroke_opacities, self.fill_opacities = {}, {}
     self.font_manager = FontManager(self.objects, self.compress)
     self.image_cache = {}
     self.pattern_cache, self.shader_cache = {}, {}
     self.debug = debug
     self.links = Links(self, mark_links, page_size)
     i = QImage(1, 1, QImage.Format_ARGB32)
     i.fill(qRgba(0, 0, 0, 255))
     self.alpha_bit = i.constBits().asstring(4).find(b'\xff')
Example #6
    def create_new_bookmark(self, pos_data):
        base_default_title = _('Bookmark')
        all_titles = {bm['title'] for bm in self.get_bookmarks()}
        c = 0
        while True:
            c += 1
            default_title = '{} #{}'.format(base_default_title, c)
            if default_title not in all_titles:
                break

        title, ok = QInputDialog.getText(self,
                                         _('Add bookmark'),
                                         _('Enter title for bookmark:'),
                                         text=pos_data.get('selected_text')
                                         or default_title)
        title = unicode_type(title).strip()
        if not ok or not title:
            return
        title = self.uniqify_bookmark_title(title)
        cfi = (pos_data.get('selection_bounds')
               or {}).get('start') or pos_data['cfi']
        bm = {
            'title': title,
            'pos_type': 'epubcfi',
            'pos': cfi,
            'timestamp': utcnow().isoformat(),
        }
        bookmarks = self.get_bookmarks()
        bookmarks.append(bm)
        self.set_bookmarks(bookmarks)
        self.set_current_bookmark(bm)
        self.edited.emit(bookmarks)
Example #7
def add_catalog(cache, path, title, dbapi=None):
    from calibre.ebooks.metadata.book.base import Metadata
    from calibre.ebooks.metadata.meta import get_metadata
    from calibre.utils.date import utcnow

    fmt = os.path.splitext(path)[1][1:].lower()
    new_book_added = False
    with lopen(path, 'rb') as stream:
        with cache.write_lock:
            matches = cache._search('title:="%s" and tags:="%s"' % (title.replace('"', '\\"'), _('Catalog')), None)
            db_id = None
            if matches:
                db_id = list(matches)[0]
            try:
                mi = get_metadata(stream, fmt)
                mi.authors = ['calibre']
            except:
                mi = Metadata(title, ['calibre'])
            mi.title, mi.authors = title, ['calibre']
            mi.author_sort = 'calibre'  # The MOBI/AZW3 format sets author sort to date
            mi.tags = [_('Catalog')]
            mi.pubdate = mi.timestamp = utcnow()
            if fmt == 'mobi':
                mi.cover, mi.cover_data = None, (None, None)
            if db_id is None:
                db_id = cache._create_book_entry(mi, apply_import_tags=False)
                new_book_added = True
            else:
                cache._set_metadata(db_id, mi)
        cache.add_format(db_id, fmt, stream, dbapi=dbapi)  # Can't keep write lock since post-import hooks might run

    return db_id, new_book_added
Example #8
 def __init__(self, stream, page_size, compress=False, mark_links=False,
              debug=print):
     self.stream = HashingStream(stream)
     self.compress = compress
     self.write_line(PDFVER)
     self.write_line(b'%íì¦"')
     creator = ('%s %s [http://calibre-ebook.com]'%(__appname__,
                                 __version__))
     self.write_line('%% Created by %s'%creator)
     self.objects = IndirectObjects()
     self.objects.add(PageTree(page_size))
     self.objects.add(Catalog(self.page_tree))
     self.current_page = Page(self.page_tree, compress=self.compress)
     self.info = Dictionary({
         'Creator':String(creator),
         'Producer':String(creator),
         'CreationDate': utcnow(),
                             })
     self.stroke_opacities, self.fill_opacities = {}, {}
     self.font_manager = FontManager(self.objects, self.compress)
     self.image_cache = {}
     self.pattern_cache, self.shader_cache = {}, {}
     self.debug = debug
     self.links = Links(self, mark_links, page_size)
     i = QImage(1, 1, QImage.Format_ARGB32)
     i.fill(qRgba(0, 0, 0, 255))
     self.alpha_bit = i.constBits().asstring(4).find(b'\xff')
Example #9
 def __init__(self, storage_id, lpath, other=None):
     Metadata.__init__(self, _('Unknown'), other=other)
     self.storage_id, self.lpath = storage_id, lpath
     self.lpath = self.path = self.lpath.replace(os.sep, '/')
     self.mtp_relpath = tuple([icu_lower(x) for x in self.lpath.split('/')])
     self.datetime = utcnow().timetuple()
     self.thumbail = None
Example #10
def add_catalog(cache, path, title):
    from calibre.ebooks.metadata.book.base import Metadata
    from calibre.ebooks.metadata.meta import get_metadata
    from calibre.utils.date import utcnow

    fmt = os.path.splitext(path)[1][1:].lower()
    with lopen(path, 'rb') as stream:
        with cache.write_lock:
            matches = cache._search(
                'title:="%s" and tags:="%s"' %
                (title.replace('"', '\\"'), _('Catalog')), None)
            db_id = None
            if matches:
                db_id = list(matches)[0]
            try:
                mi = get_metadata(stream, fmt)
                mi.authors = ['calibre']
            except:
                mi = Metadata(title, ['calibre'])
            mi.title, mi.authors = title, ['calibre']
            mi.tags = [_('Catalog')]
            mi.pubdate = mi.timestamp = utcnow()
            if fmt == 'mobi':
                mi.cover, mi.cover_data = None, (None, None)
            if db_id is None:
                db_id = cache._create_book_entry(mi, apply_import_tags=False)
            else:
                cache._set_metadata(db_id, mi)
        cache.add_format(db_id, fmt, stream)  # Can't keep write lock since post-import hooks might run

    return db_id
Example #11
    def populate_from_preparsed_feed(self, title, articles, oldest_article=7,
                           max_articles_per_feed=100):
        self.title      = unicode(title if title else _('Unknown feed'))
        self.description = ''
        self.image_url  = None
        self.articles   = []
        self.added_articles = []

        self.oldest_article = oldest_article
        self.id_counter = 0

        for item in articles:
            if len(self.articles) >= max_articles_per_feed:
                break
            id = item.get('id', 'internal id#'+str(self.id_counter))
            if id in self.added_articles:
                return
            self.added_articles.append(id)
            self.id_counter += 1
            published   = time.gmtime(item.get('timestamp', time.time()))
            title       = item.get('title', _('Untitled article'))
            link        = item.get('url', None)
            description = item.get('description', '')
            content     = item.get('content', '')
            author      = item.get('author', '')
            article = Article(id, title, link, author, description, published, content)
            delta = utcnow() - article.utctime
            if delta.days*24*3600 + delta.seconds <= 24*3600*self.oldest_article:
                self.articles.append(article)
            else:
                t = strftime(u'%a, %d %b, %Y %H:%M', article.localtime.timetuple())
                self.logger.debug(u'Skipping article %s (%s) from feed %s as it is too old.'%
                        (title, t, self.title))
            d = item.get('date', '')
            article.formatted_date = d
Example #12
 def func(dbref, book_id, cache):
     try:
         return cache[field]
     except KeyError:
         db = dbref()
         cache[field] = ret = db.field_for(field, book_id, default_value=utcnow())
         return ret
Example #13
    def __init__(self,
                 date_read,
                 is_read_only=False,
                 default_to_today=False,
                 fmt=None):
        #         debug_print("DateTableWidgetItem::__init__ - date_read=", date_read)

        if (date_read == UNDEFINED_DATE) and default_to_today:
            date_read = utcnow()
        if is_read_only:
            self.date_read = date_read
            if not date_read or date_read == UNDEFINED_DATE or date_read == '':
                date_read = ''
            else:
                date_read = format_date(date_read, fmt)

            QTableWidgetItem.__init__(self, date_read,
                                      QtGui.QTableWidgetItem.UserType)
            self.setFlags(Qt.ItemIsSelectable | Qt.ItemIsEnabled)
        else:
            QTableWidgetItem.__init__(self, '',
                                      QtGui.QTableWidgetItem.UserType)
            dt = UNDEFINED_QDATETIME if date_read is None else QDateTime(
                date_read)
            #             debug_print("DateTableWidgetItem::__init__ - dt=", dt)
            dt.setTimeSpec(Qt.UTC)
            dt = dt.toLocalTime()
            #             debug_print("DateTableWidgetItem::__init__ - dt=", dt)
            self.setData(Qt.DisplayRole, dt)
            self.date_read = dt
Example #14
 def check_if_modified(self):
     if self.last_modified() > self.last_update_check:
         self.backend.reopen()
         self.new_api.reload_from_db()
         self.data.refresh(
             clear_caches=False
         )  # caches are already cleared by reload_from_db()
     self.last_update_check = utcnow()
Example #15
 def test_msgpack(self):
     from calibre.utils.serialize import msgpack_dumps, msgpack_loads
     from calibre.utils.date import utcnow
     for obj in ({1:1}, utcnow()):
         s = msgpack_dumps(obj)
         self.assertEqual(obj, msgpack_loads(s))
     self.assertEqual(type(msgpack_loads(msgpack_dumps(b'b'))), bytes)
     self.assertEqual(type(msgpack_loads(msgpack_dumps(u'b'))), type(u''))
Example #16
 def test_msgpack(self):
     from calibre.utils.serialize import msgpack_dumps, msgpack_loads
     from calibre.utils.date import utcnow
     for obj in ({1: 1}, utcnow()):
         s = msgpack_dumps(obj)
         self.assertEqual(obj, msgpack_loads(s))
     self.assertEqual(type(msgpack_loads(msgpack_dumps(b'b'))), bytes)
     self.assertEqual(type(msgpack_loads(msgpack_dumps(u'b'))), type(u''))
Example #17
    def parse_article(self, item):
        self.id_counter += 1
        id = item.get('id', None)
        if not id:
            id = 'internal id#%s'%self.id_counter
        if id in self.added_articles:
            return
        published = None
        for date_field in ('date_parsed', 'published_parsed',
                           'updated_parsed'):
            published = item.get(date_field, None)
            if published is not None:
                break
        if not published:
            from dateutil.parser import parse
            for date_field in ('date', 'published', 'updated'):
                try:
                    published = parse(item[date_field]).timetuple()
                except Exception:
                    continue
                break
        if not published:
            published = time.gmtime()
        self.added_articles.append(id)

        title = item.get('title', _('Untitled article'))
        if title.startswith('<'):
            title = re.sub(r'<.+?>', '', title)
        try:
            link  = self.get_article_url(item)
        except:
            self.logger.warning('Failed to get link for %s'%title)
            self.logger.debug(traceback.format_exc())
            link = None

        description = item.get('summary', None)
        author = item.get('author', None)

        content = [i.value for i in item.get('content', []) if i.value]
        content = [i if isinstance(i, str) else i.decode('utf-8', 'replace')
                for i in content]
        content = '\n'.join(content)
        if not content.strip():
            content = None
        if not link and not content:
            return
        article = Article(id, title, link, author, description, published, content)
        delta = utcnow() - article.utctime
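        # article age in seconds must not exceed oldest_article (given in days) for it to be kept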
        if delta.days*24*3600 + delta.seconds <= 24*3600*self.oldest_article:
            self.articles.append(article)
        else:
            try:
                self.logger.debug('Skipping article %s (%s) from feed %s as it is too old.'%
                                  (title, article.localtime.strftime('%a, %d %b, %Y %H:%M'), self.title))
            except UnicodeDecodeError:
                if not isinstance(title, str):
                    title = title.decode('utf-8', 'replace')
                self.logger.debug('Skipping article %s as it is too old'%title)
Example #18
    def recipe_needs_to_be_downloaded(self, recipe):
        try:
            typ, sch, ld = self.un_serialize_schedule(recipe)
        except:
            return False

        def is_time(now, hour, minute):
            return now.hour > hour or \
                    (now.hour == hour and now.minute >= minute)

        def is_weekday(day, now):
            return day < 0 or day > 6 or \
                    day == calendar.weekday(now.year, now.month, now.day)

        def was_downloaded_already_today(ld_local, now):
            return ld_local.date() == now.date()

        if typ == 'interval':
            return utcnow() - ld > timedelta(sch)
        elif typ == 'day/time':
            now = nowf()
            try:
                ld_local = ld.astimezone(local_tz)
            except Exception:
                return False
            day, hour, minute = sch
            return is_weekday(day, now) and \
                    not was_downloaded_already_today(ld_local, now) and \
                    is_time(now, hour, minute)
        elif typ == 'days_of_week':
            now = nowf()
            try:
                ld_local = ld.astimezone(local_tz)
            except Exception:
                return False
            days, hour, minute = sch
            have_day = False
            for day in days:
                if is_weekday(day, now):
                    have_day = True
                    break
            return have_day and \
                    not was_downloaded_already_today(ld_local, now) and \
                    is_time(now, hour, minute)
        elif typ == 'days_of_month':
            now = nowf()
            try:
                ld_local = ld.astimezone(local_tz)
            except Exception:
                return False
            days, hour, minute = sch
            have_day = now.day in days
            return have_day and \
                    not was_downloaded_already_today(ld_local, now) and \
                    is_time(now, hour, minute)

        return False
Example #19
 def notes_edited(self, uuid, notes):
     for h in self.current_book_data['annotations_map']['highlight']:
         if h.get('uuid') == uuid:
             h['notes'] = notes
             h['timestamp'] = utcnow().isoformat()
             break
     else:
         return
     self.save_annotations()
Example #20
 def test_msgpack(self):
     from calibre.utils.serialize import msgpack_dumps, msgpack_loads
     from calibre.utils.date import utcnow
     for obj in ({1:1}, utcnow()):
         s = msgpack_dumps(obj)
         self.assertEqual(obj, msgpack_loads(s))
     self.assertEqual(type(msgpack_loads(msgpack_dumps(b'b'))), bytes)
     self.assertEqual(type(msgpack_loads(msgpack_dumps(u'b'))), unicode_type)
     large = b'x' * (100 * 1024 * 1024)
     msgpack_loads(msgpack_dumps(large))
Example #21
 def search_cache(self, search):
     old = self._search_cache.pop(search, None)
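     # a cache entry is a (time-it-was-computed, results) pair; recompute when the library was modified since then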
     if old is None or old[0] <= self.db.last_modified():
         matches = self.search_for_books(search) or []
         self._search_cache[search] = (utcnow(), frozenset(matches))
         if len(self._search_cache) > 50:
             self._search_cache.popitem(last=False)
     else:
         self._search_cache[search] = old
     return self._search_cache[search][1]
Example #22
 def test_msgpack(self):
     from calibre.utils.serialize import msgpack_dumps, msgpack_loads
     from calibre.utils.date import utcnow
     for obj in ({1:1}, utcnow()):
         s = msgpack_dumps(obj)
         self.assertEqual(obj, msgpack_loads(s))
     self.assertEqual(type(msgpack_loads(msgpack_dumps(b'b'))), bytes)
     self.assertEqual(type(msgpack_loads(msgpack_dumps('b'))), unicode_type)
     large = b'x' * (100 * 1024 * 1024)
     msgpack_loads(msgpack_dumps(large))
Example #23
 def delete_bookmark(self):
     item = self.bookmarks_list.currentItem()
     if item is not None:
         bm = item.data(Qt.UserRole)
         bm['removed'] = True
         bm['timestamp'] = utcnow().isoformat()
         self.bookmarks_list.blockSignals(True)
         item.setData(Qt.UserRole, bm)
         self.bookmarks_list.blockSignals(False)
         item.setHidden(True)
         self.edited.emit(self.get_bookmarks())
Example #24
 def item_changed(self, item):
     self.bookmarks_list.blockSignals(True)
     title = unicode_type(item.data(Qt.DisplayRole)) or _('Unknown')
     title = self.uniqify_bookmark_title(title)
     item.setData(Qt.DisplayRole, title)
     bm = item.data(Qt.UserRole)
     bm['title'] = title
     bm['timestamp'] = utcnow().isoformat()
     item.setData(Qt.UserRole, bm)
     self.bookmarks_list.blockSignals(False)
     self.edited.emit(self.get_bookmarks())
Example #25
 def convert_metadata(self, mi):
     E = ElementMaker(namespace=namespaces['cp'], nsmap={x:namespaces[x] for x in 'cp dc dcterms xsi'.split()})
     cp = E.coreProperties(E.revision("1"), E.lastModifiedBy('calibre'))
     ts = utcnow().isoformat(str('T')).rpartition('.')[0] + 'Z'
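     # yields e.g. '2013-02-01T09:30:00Z': the aware UTC timestamp in W3CDTF form with the sub-second part dropped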
     for x in 'created modified'.split():
         x = cp.makeelement('{%s}%s' % (namespaces['dcterms'], x), **{'{%s}type' % namespaces['xsi']:'dcterms:W3CDTF'})
         x.text = ts
         cp.append(x)
     self.mi = mi
     update_doc_props(cp, self.mi)
     return xml2str(cp)
Example #26
 def cfi_changed(self, cfi):
     if not self.current_book_data:
         return
     self.current_book_data['annotations_map']['last-read'] = [{
         'pos': cfi,
         'pos_type': 'epubcfi',
         'timestamp': utcnow()
     }]
Example #27
    def open(self, device, library_uuid):
        self.current_library_uuid = library_uuid
        self.location_paths = None
        self.driveinfo = {}
        BASE.open(self, device, library_uuid)
        h = self.prefs['history']
        if self.current_serial_num:
            h[self.current_serial_num] = (self.current_friendly_name,
                    isoformat(utcnow()))
            self.prefs['history'] = h

        self.current_device_defaults = self.device_defaults(device, self)
Example #28
 def search_cache(self, search):
     old = self._search_cache.pop(search, None)
     if old is None or old[0] <= self.db.last_modified():
         matches = self.db.data.search_getting_ids(search, self.search_restriction)
         if not matches:
             matches = []
         self._search_cache[search] = (utcnow(), frozenset(matches))
         if len(self._search_cache) > 50:
             self._search_cache.popitem(last=False)
     else:
         self._search_cache[search] = old
     return self._search_cache[search][1]
Example #29
 def get_date(self, entry, verbose):
     try:
         d = date(entry)
         if d:
             default = utcnow().replace(day=15)
             d = parse_date(d[0].text, assume_utc=True, default=default)
         else:
             d = None
     except:
         report(verbose)
         d = None
     return d
Example #30
def download_module(full_name,
                    timeout=default_timeout,
                    strategy=Strategy.download_now):
    if strategy is Strategy.download_now:
        return load_module_from_data(
            full_name, latest_data_for_module(full_name, timeout=timeout))
    cached_etag, cached_data, date = read_from_cache(full_name)
    if date is None or (utcnow() - date) > old_interval:
        return load_module_from_data(
            full_name, latest_data_for_module(full_name, timeout=timeout))
    if cached_data is not None:
        return load_module_from_data(full_name, cached_data)
Example #31
 def search_cache(self, search):
     old = self._search_cache.pop(search, None)
     if old is None or old[0] <= self.db.last_modified():
         matches = self.db.data.search_getting_ids(search,
                                                   self.search_restriction)
         if not matches:
             matches = []
         self._search_cache[search] = (utcnow(), frozenset(matches))
         if len(self._search_cache) > 50:
             self._search_cache.popitem(last=False)
     else:
         self._search_cache[search] = old
     return self._search_cache[search][1]
Example #32
 def metadata(self):
     self.report.theme.title = self.title.text().strip()  # Needed for report.name to work
     return {
         'title': self.title.text().strip(),
         'author': self.author.text().strip(),
         'version': self.version.value(),
         'description': self.description.toPlainText().strip(),
         'number': len(self.report.name_map) - len(self.report.extra),
         'date': utcnow().date().isoformat(),
         'name': self.report.name,
         'license': self.license.text().strip() or 'Unknown',
         'url': self.url.text().strip() or None,
     }
Example #33
 def convert_metadata(self, oeb):
     E = ElementMaker(namespace=namespaces['cp'], nsmap={x:namespaces[x] for x in 'cp dc dcterms xsi'.split()})
     cp = E.coreProperties(E.revision("1"), E.lastModifiedBy('calibre'))
     ts = utcnow().isoformat(str('T')).rpartition('.')[0] + 'Z'
     for x in 'created modified'.split():
         x = cp.makeelement('{%s}%s' % (namespaces['dcterms'], x), **{'{%s}type' % namespaces['xsi']:'dcterms:W3CDTF'})
         x.text = ts
         cp.append(x)
     package = etree.Element(OPF('package'), attrib={'version': '2.0'}, nsmap={None: OPF2_NS})
     oeb.metadata.to_opf2(package)
     self.mi = ReadOPF(BytesIO(xml2str(package)), populate_spine=False, try_to_guess_cover=False).to_book_metadata()
     update_doc_props(cp, self.mi)
     return xml2str(cp)
Example #34
 def get_categories(self, data, db, restrict_to_ids=None):
     if restrict_to_ids is None:
         restrict_to_ids = self.allowed_book_ids(data, db)
     with self.lock:
         cache = self.library_broker.category_caches[db.server_library_id]
         old = cache.pop(restrict_to_ids, None)
         if old is None or old[0] <= db.last_modified():
             categories = db.get_categories(book_ids=restrict_to_ids)
             cache[restrict_to_ids] = old = (utcnow(), categories)
             if len(cache) > self.CATEGORY_CACHE_SIZE:
                 cache.popitem(last=False)
         else:
             cache[restrict_to_ids] = old
         return old[1]
Example #35
 def get_categories(self, request_data, db, sort='name', first_letter_sort=True, vl=''):
     restrict_to_ids = self.get_effective_book_ids(db, request_data, vl)
     key = restrict_to_ids, sort, first_letter_sort
     with self.lock:
         cache = self.library_broker.category_caches[db.server_library_id]
         old = cache.pop(key, None)
         if old is None or old[0] <= db.last_modified():
             categories = db.get_categories(book_ids=restrict_to_ids, sort=sort, first_letter_sort=first_letter_sort)
             cache[key] = old = (utcnow(), categories)
             if len(cache) > self.CATEGORY_CACHE_SIZE:
                 cache.popitem(last=False)
         else:
             cache[key] = old
         return old[1]
Example #36
    def open(self, device, library_uuid):
        from calibre.utils.date import isoformat, utcnow
        self.current_library_uuid = library_uuid
        self.location_paths = None
        self.driveinfo = {}
        BASE.open(self, device, library_uuid)
        h = self.prefs['history']
        if self.current_serial_num:
            h[self.current_serial_num] = (self.current_friendly_name,
                    isoformat(utcnow()))
            self.prefs['history'] = h

        self.current_device_defaults = self.device_defaults(device, self)
        self.calibre_file_paths = self.current_device_defaults.get(
            'calibre_file_paths', {'metadata':self.METADATA_CACHE, 'driveinfo':self.DRIVEINFO})
Example #37
 def delete_bookmark(self):
     item = self.bookmarks_list.current_non_removed_item
     if item is not None:
         bm = item.data(Qt.ItemDataRole.UserRole)
         if confirm(
             _('Are you sure you want to delete the bookmark: {0}?').format(bm['title']),
             'delete-bookmark-from-viewer', parent=self, config_set=vprefs
         ):
             bm['removed'] = True
             bm['timestamp'] = utcnow().isoformat()
             self.bookmarks_list.blockSignals(True)
             item.setData(Qt.ItemDataRole.UserRole, bm)
             self.bookmarks_list.blockSignals(False)
             item.setHidden(True)
             self.edited.emit(self.get_bookmarks())
Example #38
def set_last_modified(root, prefixes, refines, val=None):
    pq = '%s:modified' % reserved_prefixes['dcterms']
    val = w3cdtf(val or utcnow())
    for meta in XPath('./opf:metadata/opf:meta[@property]')(root):
        prop = expand_prefix(meta.get('property'), prefixes)
        if prop.lower() == pq:
            iid = meta.get('id')
            if not iid or not refines[iid]:
                break
    else:
        ensure_prefix(root, prefixes, 'dcterms')
        m = XPath('./opf:metadata')(root)[0]
        meta = m.makeelement(OPF('meta'), attrib={'property':'dcterms:modified', 'scheme':'dcterms:W3CDTF'})
        m.append(meta)
    meta.text = val
Example #39
 def categories_cache(self, restrict_to=frozenset([])):
     base_restriction = self.search_cache('')
     if restrict_to:
         restrict_to = frozenset(restrict_to).intersection(base_restriction)
     else:
         restrict_to = base_restriction
     old = self._category_cache.pop(frozenset(restrict_to), None)
     if old is None or old[0] <= self.db.last_modified():
         categories = self.db.get_categories(ids=restrict_to)
         self._category_cache[restrict_to] = (utcnow(), categories)
         if len(self._category_cache) > 20:
             self._category_cache.popitem(last=False)
     else:
         self._category_cache[frozenset(restrict_to)] = old
     return self._category_cache[restrict_to][1]
Example #40
 def get_tag_browser(self, data, db, opts, render, restrict_to_ids=None):
     if restrict_to_ids is None:
         restrict_to_ids = self.allowed_book_ids(data, db)
     key = (restrict_to_ids, opts)
     with self.lock:
         cache = self.library_broker.category_caches[db.server_library_id]
         old = cache.pop(key, None)
         if old is None or old[0] <= db.last_modified():
             categories = db.get_categories(book_ids=restrict_to_ids, sort=opts.sort_by, first_letter_sort=opts.collapse_model == 'first letter')
             cache[key] = old = (utcnow(), render(categories))
             if len(cache) > self.CATEGORY_CACHE_SIZE:
                 cache.popitem(last=False)
         else:
             cache[key] = old
         return old[1]
Example #41
    def parse_article(self, item):
        self.id_counter += 1
        id = item.get('id', None)
        if not id:
            id = 'internal id#%s'%self.id_counter
        if id in self.added_articles:
            return
        published = None
        for date_field in ('date_parsed', 'published_parsed',
                           'updated_parsed'):
            published = item.get(date_field, None)
            if published is not None:
                break
        if not published:
            published = time.gmtime()
        self.added_articles.append(id)

        title = item.get('title', _('Untitled article'))
        try:
            link  = self.get_article_url(item)
        except:
            self.logger.warning('Failed to get link for %s'%title)
            self.logger.debug(traceback.format_exc())
            link = None

        description = item.get('summary', None)
        author = item.get('author', None)

        content = [i.value for i in item.get('content', []) if i.value]
        content = [i if isinstance(i, unicode) else i.decode('utf-8', 'replace')
                for i in content]
        content = u'\n'.join(content)
        if not content.strip():
            content = None
        if not link and not content:
            return
        article = Article(id, title, link, author, description, published, content)
        delta = utcnow() - article.utctime
        if delta.days*24*3600 + delta.seconds <= 24*3600*self.oldest_article:
            self.articles.append(article)
        else:
            try:
                self.logger.debug('Skipping article %s (%s) from feed %s as it is too old.'%
                                  (title, article.localtime.strftime('%a, %d %b, %Y %H:%M'), self.title))
            except UnicodeDecodeError:
                if not isinstance(title, unicode):
                    title = title.decode('utf-8', 'replace')
                self.logger.debug('Skipping article %s as it is too old'%title)
Example #42
    def sample_results(self):
        m1 = Metadata("The Great Gatsby", ["Francis Scott Fitzgerald"])
        m2 = Metadata("The Great Gatsby - An extra long title to test resizing", ["F. Scott Fitzgerald"])
        m1.has_cached_cover_url = True
        m2.has_cached_cover_url = False
        m1.comments = "Some comments " * 10
        m1.tags = ["tag%d" % i for i in range(20)]
        m1.rating = 4.4
        m1.language = "en"
        m2.language = "fr"
        m1.pubdate = utcnow()
        m2.pubdate = fromordinal(1000000)
        m1.publisher = "Publisher 1"
        m2.publisher = "Publisher 2"

        return [m1, m2]
Example #43
    def sample_results(self):
        m1 = Metadata('The Great Gatsby', ['Francis Scott Fitzgerald'])
        m2 = Metadata('The Great Gatsby', ['F. Scott Fitzgerald'])
        m1.has_cached_cover_url = True
        m2.has_cached_cover_url = False
        m1.comments  = 'Some comments '*10
        m1.tags = ['tag%d'%i for i in range(20)]
        m1.rating = 4.4
        m1.language = 'en'
        m2.language = 'fr'
        m1.pubdate = utcnow()
        m2.pubdate = fromordinal(1000000)
        m1.publisher = 'Publisher 1'
        m2.publisher = 'Publisher 2'

        return [m1, m2]
Example #44
 def update_last_downloaded(self, recipe_id):
     with self.lock:
         now = utcnow()
         for x in self.iter_recipes():
             if x.get('id', False) == recipe_id:
                 typ, sch, last_downloaded = self.un_serialize_schedule(x)
                 if typ == 'interval':
                     # Prevent downloads more frequent than once an hour
                     actual_interval = now - last_downloaded
                     nominal_interval = timedelta(days=sch)
                     if abs(actual_interval - nominal_interval) < \
                             timedelta(hours=1):
                         now = last_downloaded + nominal_interval
                 x.set('last_downloaded', isoformat(now))
                 break
         self.write_scheduler_file()
Example #45
 def get_tag_browser(self, request_data, db, opts, render, vl=''):
     restrict_to_ids = self.get_effective_book_ids(db, request_data, vl)
     key = restrict_to_ids, opts
     with self.lock:
         cache = self.library_broker.category_caches[db.server_library_id]
         old = cache.pop(key, None)
         if old is None or old[0] <= db.last_modified():
             categories = db.get_categories(book_ids=restrict_to_ids, sort=opts.sort_by, first_letter_sort=opts.collapse_model == 'first letter')
             data = json.dumps(render(db, categories), ensure_ascii=False)
             if isinstance(data, type('')):
                 data = data.encode('utf-8')
             cache[key] = old = (utcnow(), data)
             if len(cache) > self.CATEGORY_CACHE_SIZE:
                 cache.popitem(last=False)
         else:
             cache[key] = old
         return old[1]