def feed_entry(category_id, feed_id, entry_id):
    """Return a JSON response describing a single entry and its feed."""
    feed, feed_permalink, entry, entry_permalink = \
        find_feed_and_entry(feed_id, entry_id)
    # Prefer the full content; fall back to the summary when it is missing.
    body = entry.content or entry.summary
    sanitized = body.sanitized_html if body is not None else None
    entry_data = {
        'title': text_type(entry.title),
        'content': sanitized,
        'updated': text_type(entry.updated_at),
        'permalink': entry_permalink or None,
    }
    feed_data = {
        'title': text_type(feed.title),
        'permalink': feed_permalink or None,
    }
    add_urls(entry_data,
             ['read_url', 'unread_url', 'star_url', 'unstar_url'],
             category_id, feed_id, entry_id)
    add_urls(feed_data, ['entries_url'], category_id, feed_id)
    entry_data['feed'] = feed_data
    return jsonify(entry_data)
def test_text_str():
    """str() of Text: ``text`` type passes through; ``html`` type is stripped."""
    plain = Text(type='text', value='Hello world')
    assert text_type(plain) == 'Hello world'
    markup = Text(type='text', value='<p>Hello <em>world</em></p>')
    assert text_type(markup) == '<p>Hello <em>world</em></p>'
    html_plain = Text(type='html', value='Hello world')
    assert text_type(html_plain) == 'Hello world'
    html_markup = Text(type='html', value='<p>Hello <em>world</em></p>')
    assert text_type(html_markup) == 'Hello world'
def category_entries(category_id):
    """List entries of every feed under *category_id* as a JSON page.

    Pagination is stateful: a server-side entry generator is cached under
    ``url_token`` and resumed on subsequent requests that carry the token.
    """
    cursor = Cursor(category_id)
    generator = None
    # url_token identifies a cached generator; entry_after/read/starred are
    # pagination and filter options from the query string.
    url_token, entry_after, read, starred = get_optional_args()
    if url_token:
        try:
            generator = get_entry_generator(url_token)
        except IteratorNotFound:
            # Token expired or unknown: fall through and rebuild below.
            pass
    else:
        # First page: mint a fresh token from the current timestamp.
        url_token = text_type(now())
    if not generator:
        # Build one FeedEntryGenerator per subscription in the category
        # (recursively, so nested categories are included).
        subscriptions = cursor.recursive_subscriptions
        generator = CategoryEntryGenerator()
        if entry_after:
            id_after, time_after = decode_entry_after(entry_after)
        else:
            time_after = None
            id_after = None
        for subscription in subscriptions:
            try:
                with get_stage() as stage:
                    feed = stage.feeds[subscription.feed_id]
            except KeyError:
                # Subscribed feed not yet crawled into the stage; skip it.
                continue
            feed_title = text_type(feed.title)
            it = iter(feed.entries)
            feed_permalink = get_permalink(feed)
            child = FeedEntryGenerator(category_id, subscription.feed_id,
                                       feed_title, feed_permalink, it, now(),
                                       read, starred)
            generator.add(child)
        # Position every child generator just past the cursor position.
        generator.set_generators(id_after, time_after)
    save_entry_generators(url_token, generator)
    tidy_generators_up()
    entries = generator.get_entries()
    # A short page means the stream is exhausted: no next_url, and an empty
    # page also drops the cached generator.
    if not entries or len(entries) < app.config['PAGE_SIZE']:
        next_url = None
        if not entries:
            remove_entry_generator(url_token)
    else:
        next_url = make_next_url(
            category_id, url_token,
            encode_entry_after(entries[-1]['entry_id'],
                               entries[-1]['updated']),
            read, starred)
    # FIXME: use Entry.updated_at instead of from json data.
    codec = Rfc3339()
    last_updated_at = ''
    # Only the first page (no entry_after cursor) reports last_updated, so
    # that read_url marks everything up to the newest listed entry.
    if len(entries) and not entry_after:
        last_updated_at = max(codec.decode(x['updated'])
                              for x in entries).isoformat()
    # The category title is the last path segment minus its leading marker
    # character; the root (empty) category shows the ALLFEED label.
    return jsonify(
        title=category_id.split('/')[-1][1:] or app.config['ALLFEED'],
        entries=entries,
        read_url=url_for('read_all_entries', category_id=category_id,
                         last_updated=last_updated_at, _external=True),
        next_url=next_url)
def test_text_str():
    """Stringifying Text: ``html`` drops markup, ``text`` keeps it verbatim."""
    cases = [
        ('text', 'Hello world', 'Hello world'),
        ('text', '<p>Hello <em>world</em></p>', '<p>Hello <em>world</em></p>'),
        ('html', 'Hello world', 'Hello world'),
        ('html', '<p>Hello <em>world</em></p>', 'Hello world'),
    ]
    for type_, value, expected in cases:
        assert text_type(Text(type=type_, value=value)) == expected
def feed_entry(category_id, feed_id, entry_id):
    """JSON view of one entry plus a summary of its parent feed."""
    feed, feed_permalink, entry, entry_permalink = \
        find_feed_and_entry(feed_id, entry_id)
    content = entry.content or entry.summary
    if content is not None:
        content = content.sanitized_html
    entry_data = dict(
        title=text_type(entry.title),
        content=content,
        updated=text_type(entry.updated_at),
        permalink=entry_permalink or None,
    )
    feed_data = dict(
        title=text_type(feed.title),
        permalink=feed_permalink or None,
    )
    # Attach per-entry action urls and the feed's entry-list url.
    entry_urls = ['read_url', 'unread_url', 'star_url', 'unstar_url']
    add_urls(entry_data, entry_urls, category_id, feed_id, entry_id)
    add_urls(feed_data, ['entries_url'], category_id, feed_id)
    entry_data['feed'] = feed_data
    return jsonify(entry_data)
def test_generator_str():
    """Generator stringifies as its value plus version; uri is ignored."""
    assert text_type(Generator(value='Earth Reader')) == 'Earth Reader'
    assert text_type(
        Generator(value='Earth Reader', uri='http://earthreader.github.io/')
    ) == 'Earth Reader'
    assert text_type(
        Generator(value='Earth Reader', version='1.0')
    ) == 'Earth Reader 1.0'
    assert text_type(
        Generator(value='Earth Reader', version='1.0',
                  uri='http://earthreader.github.io/')
    ) == 'Earth Reader 1.0'
def test_generator_str():
    """__str__ of Generator appends the version but never the uri."""
    uri = 'http://earthreader.github.io/'
    for kwargs, expected in [
        ({}, 'Earth Reader'),
        ({'uri': uri}, 'Earth Reader'),
        ({'version': '1.0'}, 'Earth Reader 1.0'),
        ({'version': '1.0', 'uri': uri}, 'Earth Reader 1.0'),
    ]:
        assert text_type(Generator(value='Earth Reader', **kwargs)) == expected
def test_generator_str():
    """A Generator's string form is "value" or "value version"."""
    bare = Generator(value='Earth Reader')
    with_uri = Generator(value='Earth Reader',
                         uri='http://earthreader.github.io/')
    versioned = Generator(value='Earth Reader', version='1.0')
    full = Generator(value='Earth Reader', version='1.0',
                     uri='http://earthreader.github.io/')
    assert text_type(bare) == 'Earth Reader'
    assert text_type(with_uri) == 'Earth Reader'
    assert text_type(versioned) == 'Earth Reader 1.0'
    assert text_type(full) == 'Earth Reader 1.0'
def test_person_str():
    """Person stringifies as "name" or "name <uri-or-email>"."""
    email = '\x6d\x69\x6e\x68\x65\x65\x40\x64\x61\x68\x6c\x69\x61\x2e\x6b\x72'
    assert text_type(Person(name='Hong Minhee')) == 'Hong Minhee'
    with_uri = Person(name='Hong Minhee', uri='http://dahlia.kr/')
    assert text_type(with_uri) == 'Hong Minhee <http://dahlia.kr/>'
    with_email = Person(name='Hong Minhee', email=email)
    assert text_type(with_email) == 'Hong Minhee <' + email + '>'
    # When both uri and email are present, the uri wins.
    both = Person(name=u('홍민희'), uri='http://dahlia.kr/', email=email)
    assert text_type(both) == u('홍민희 <http://dahlia.kr/>')
def test_person_str():
    """String form of Person: plain name, or name with <uri> / <email>."""
    assert text_type(Person(name='Hong Minhee')) == 'Hong Minhee'
    assert (text_type(Person(name='Hong Minhee', uri='http://dahlia.kr/'))
            == 'Hong Minhee <http://dahlia.kr/>')
    email = '\x6d\x69\x6e\x68\x65\x65\x40\x64\x61\x68\x6c\x69\x61\x2e\x6b\x72'
    expected = 'Hong Minhee <' + email + '>'
    assert text_type(Person(name='Hong Minhee', email=email)) == expected
    # uri takes precedence over email when both are given.
    person = Person(name=u'홍민희', uri='http://dahlia.kr/', email=email)
    assert text_type(person) == u'홍민희 <http://dahlia.kr/>'
def test_element_list_repr(fx_test_doc):
    """repr() of ElementList shows consumed items, with an ellipsis while
    the underlying iterator is not yet exhausted."""
    doc, consume_log = fx_test_doc
    elist = doc.text_multi_attr
    a, b = text_type('a'), text_type('b')
    assert repr(elist) == '<libearth.schema.ElementList [...]>'
    it = iter(elist)
    next(it)
    assert repr(elist) == '<libearth.schema.ElementList [%r, ...]>' % a
    next(it)
    assert repr(elist) == '<libearth.schema.ElementList [%r, %r, ...]>' % (a, b)
    next(it, None)
    # Fully consumed: the trailing ellipsis disappears.
    assert repr(elist) == '<libearth.schema.ElementList [%r, %r]>' % (a, b)
def test_link_str():
    """A Link stringifies to its uri alone."""
    link = Link(uri='http://dahlia.kr/', relation='alternate',
                mimetype='text/html', title="Hong Minhee's website")
    assert text_type(link) == 'http://dahlia.kr/'
def test_link_str():
    """str() of a Link yields only the target uri."""
    attrs = {
        'uri': 'http://dahlia.kr/',
        'relation': 'alternate',
        'mimetype': 'text/html',
        'title': "Hong Minhee's website",
    }
    assert text_type(Link(**attrs)) == 'http://dahlia.kr/'
def test_write_xmlns_doc(fx_xmlns_doc):
    # Serializing a document whose elements live in two XML namespaces must
    # declare both namespace prefixes (ns0, ns1) on the root element.
    # NOTE(review): the exact whitespace inside the expected literal (and the
    # value of ``indent``) looks collapsed in this copy — verify against the
    # upstream fixture before relying on it.
    doc = fx_xmlns_doc
    g = write(doc, indent=' ', canonical_order=True)
    assert ''.join(g) == text_type('''\
<?xml version="1.0" encoding="utf-8"?>
<ns0:nstest xmlns:ns0="http://earthreader.github.io/"\
 xmlns:ns1="https://github.com/earthreader/libearth">
 <ns0:samens>Same namespace</ns0:samens>
 <ns1:otherns>Other namespace</ns1:otherns>
</ns0:nstest>''')
def test_write_xmlns_doc(fx_xmlns_doc):
    # Like the other xmlns test, but this variant also expects the built-in
    # ``libearth`` namespace declaration on the root element.
    # NOTE(review): exact in-literal whitespace (and the ``indent`` value)
    # appears collapsed in this copy — confirm against the upstream fixture.
    doc = fx_xmlns_doc
    g = write(doc, indent=' ', canonical_order=True)
    assert ''.join(g) == text_type('''\
<?xml version="1.0" encoding="utf-8"?>
<ns0:nstest xmlns:ns0="http://earthreader.github.io/"\
 xmlns:libearth="http://earthreader.org/schema/"\
 xmlns:ns1="https://github.com/earthreader/libearth">
 <ns0:samens>Same namespace</ns0:samens>
 <ns1:otherns>Other namespace</ns1:otherns>
</ns0:nstest>''')
def test_element_list_repr(fx_test_doc):
    """ElementList's repr grows as the lazy iterator is consumed and drops
    the ellipsis once exhausted."""
    doc, consume_log = fx_test_doc
    elist = doc.text_multi_attr
    template = '<libearth.schema.ElementList {0}>'
    assert repr(elist) == template.format('[...]')
    it = iter(elist)
    next(it)
    shown = '[{0!r}, ...]'.format(text_type('a'))
    assert repr(elist) == template.format(shown)
    next(it)
    shown = '[{0!r}, {1!r}, ...]'.format(text_type('a'), text_type('b'))
    assert repr(elist) == template.format(shown)
    next(it, None)
    shown = '[{0!r}, {1!r}]'.format(text_type('a'), text_type('b'))
    assert repr(elist) == template.format(shown)
def get_entry_data(self):
    """Build the JSON-serializable dict for the current entry.

    Raises StopIteration when the generator has no current entry.
    """
    if not self.entry:
        raise StopIteration
    entry_hash = get_hash(self.entry.id)
    permalink = get_permalink(self.entry)
    updated = Rfc3339().encode(self.entry.updated_at.astimezone(utc))
    entry_data = {
        'title': text_type(self.entry.title),
        'entry_id': entry_hash,
        'permalink': permalink or None,
        'updated': updated,
        'read': bool(self.entry.read),
        'starred': bool(self.entry.starred),
    }
    feed_data = {
        'title': self.feed_title,
        'permalink': self.feed_permalink or None,
    }
    add_urls(entry_data, ['entry_url'], self.category_id, self.feed_id,
             entry_hash)
    add_urls(feed_data, ['entries_url'], self.category_id, self.feed_id)
    entry_data['feed'] = feed_data
    return entry_data
def get_entry_data(self):
    """Serialize the current entry (and its feed) into a plain dict.

    Raises StopIteration when there is no entry left to serialize.
    """
    if not self.entry:
        raise StopIteration
    entry = self.entry
    entry_data = dict(
        title=text_type(entry.title),
        entry_id=get_hash(entry.id),
        permalink=get_permalink(entry) or None,
        updated=Rfc3339().encode(entry.updated_at.astimezone(utc)),
        read=bool(entry.read),
        starred=bool(entry.starred),
    )
    feed_data = dict(
        title=self.feed_title,
        permalink=self.feed_permalink or None,
    )
    add_urls(entry_data, ['entry_url'], self.category_id, self.feed_id,
             get_hash(entry.id))
    add_urls(feed_data, ['entries_url'], self.category_id, self.feed_id)
    entry_data['feed'] = feed_data
    return entry_data
def test_entry_str():
    """Entry stringifies to its title, or '' when untitled."""
    titled = Entry(title=Text(value='Title desu'))
    assert text_type(titled) == 'Title desu'
    untitled = Entry()
    assert text_type(untitled) == ''
def test_category_str():
    """Category stringifies to its label when present, else its term."""
    unlabeled = Category(term='python')
    assert text_type(unlabeled) == 'python'
    labeled = Category(term='python', label='Python')
    assert text_type(labeled) == 'Python'
def test_rss_without_title():
    """An RSS feed lacking <title> uses its description for both the title
    and the subtitle, and parses no entries."""
    feed, _ = parse_rss2(rss_without_title, None)
    assert not feed.entries
    title = text_type(feed.title)
    subtitle = text_type(feed.subtitle)
    assert title == 'only description'
    assert subtitle == 'only description'
def test_category_str():
    """str() of Category prefers label over term."""
    for kwargs, expected in [
        ({'term': 'python'}, 'python'),
        ({'term': 'python', 'label': 'Python'}, 'Python'),
    ]:
        assert text_type(Category(**kwargs)) == expected
def test_rss_without_title():
    """Parsing RSS 2.0 without a <title>: description doubles as title and
    subtitle; the entry list stays empty."""
    feed, _ = parse_rss2(rss_without_title, None)
    assert not feed.entries
    expected = 'only description'
    assert text_type(feed.title) == expected
    assert text_type(feed.subtitle) == expected
def test_link_str():
    """Stringifying a Link returns just its uri; other fields are ignored."""
    link = Link(
        uri='http://dahlia.kr/',
        relation='alternate',
        mimetype='text/html',
        title="Hong Minhee's website",
    )
    result = text_type(link)
    assert result == 'http://dahlia.kr/'
def category_entries(category_id):
    """List entries of every feed under *category_id* as a JSON page.

    Pagination is stateful: a server-side entry generator is cached under
    ``url_token`` and resumed by subsequent requests carrying that token.
    """
    cursor = Cursor(category_id)
    generator = None
    url_token, entry_after, read, starred = get_optional_args()
    if url_token:
        try:
            generator = get_entry_generator(url_token)
        except IteratorNotFound:
            # Token expired or unknown: rebuild the generator below.
            pass
    else:
        # First page: mint a fresh token from the current timestamp.
        url_token = text_type(now())
    if not generator:
        subscriptions = cursor.recursive_subscriptions
        generator = CategoryEntryGenerator()
        # The cursor is encoded as "<entry_id>@<updated>".
        if entry_after:
            id_after, time_after = entry_after.split('@')
        else:
            time_after = None
            id_after = None
        for subscription in subscriptions:
            try:
                with stage:
                    feed = stage.feeds[subscription.feed_id]
            except KeyError:
                # Subscribed feed not yet crawled into the stage; skip it.
                continue
            feed_title = text_type(feed.title)
            it = iter(feed.entries)
            feed_permalink = get_permalink(feed)
            try:
                child = FeedEntryGenerator(category_id, subscription.feed_id,
                                           feed_title, feed_permalink, it,
                                           now(), read, starred)
            except StopIteration:
                # Feed has no entries at all; nothing to paginate.
                continue
            generator.add(child)
        generator.set_generators(id_after, time_after)
    save_entry_generators(url_token, generator)
    tidy_generators_up()
    entries = generator.get_entries()
    # A short page means the stream is exhausted: no next_url, and an empty
    # page also drops the cached generator.
    if not entries or len(entries) < app.config['PAGE_SIZE']:
        next_url = None
        if not entries:
            remove_entry_generator(url_token)
    else:
        entry_after = entries[-1]['entry_id'] + '@' + entries[-1]['updated']
        next_url = make_next_url(category_id, url_token, entry_after, read,
                                 starred)
    # FIXME: use Entry.updated_at instead of from json data.
    codec = Rfc3339()
    last_updated_at = ''
    # Only the first page reports last_updated so read_url marks everything
    # up to the newest listed entry.
    if len(entries) and not entry_after:
        last_updated_at = max(codec.decode(x['updated'])
                              for x in entries).isoformat()
    if worker.is_running():
        # BUG FIX: the original had a stray trailing comma here, which made
        # crawl_url a 1-tuple and serialized it as a JSON list, not a string.
        crawl_url = url_for('update_entries', category_id=category_id)
    else:
        crawl_url = None
    return jsonify(
        title=category_id.split('/')[-1][1:] or app.config['ALLFEED'],
        entries=entries,
        read_url=url_for('read_all_entries', category_id=category_id,
                         last_updated=last_updated_at, _external=True),
        crawl_url=crawl_url,
        next_url=next_url
    )
def test_category_str():
    """A Category's string form is its label if set, otherwise its term."""
    assert text_type(Category(term='python')) == 'python'
    labeled = Category(term='python', label='Python')
    assert text_type(labeled) == 'Python'
def test_entry_str():
    """str() of an Entry is its title text; an empty Entry yields ''."""
    entry = Entry(title=Text(value='Title desu'))
    assert text_type(entry) == 'Title desu'
    assert text_type(Entry()) == ''
def test_person_str():
    """Person's string form: name alone, or name followed by <uri>/<email>."""
    name = 'Hong Minhee'
    uri = 'http://dahlia.kr/'
    email = '\x6d\x69\x6e\x68\x65\x65\x40\x64\x61\x68\x6c\x69\x61\x2e\x6b\x72'
    assert text_type(Person(name=name)) == 'Hong Minhee'
    assert text_type(Person(name=name, uri=uri)) == \
        'Hong Minhee <http://dahlia.kr/>'
    assert text_type(Person(name=name, email=email)) == \
        'Hong Minhee <' + email + '>'
    # uri wins over email when both are present.
    assert text_type(Person(name=u('홍민희'), uri=uri, email=email)) == \
        u('홍민희 <http://dahlia.kr/>')
def feed_entries(category_id, feed_id):
    """List one feed's entries as a paginated JSON response.

    Supports HTTP conditional GET via If-Modified-Since, and stateful
    pagination through a cached entry generator keyed by ``url_token``.
    """
    stage = get_stage()
    # Instantiated only for its side effect of validating category_id.
    Cursor(category_id)
    try:
        with stage:
            feed = stage.feeds[feed_id]
    except KeyError:
        r = jsonify(error='feed-not-found',
                    message='Given feed does not exist')
        r.status_code = 404
        return r
    if feed.__revision__:
        updated_at = feed.__revision__.updated_at
        if request.if_modified_since:
            # Compare at whole-second precision, as HTTP dates have no
            # sub-second resolution.
            if_modified_since = request.if_modified_since.replace(tzinfo=utc)
            last_modified = updated_at.replace(microsecond=0)
            if if_modified_since >= last_modified:
                return '', 304, {}  # Not Modified
    else:
        updated_at = None
    url_token, entry_after, read, starred = get_optional_args()
    generator = None
    if url_token:
        try:
            generator = get_entry_generator(url_token)
        except IteratorNotFound:
            # Token expired or unknown: rebuild the generator below.
            pass
    else:
        # First page: mint a fresh token from the current timestamp.
        url_token = text_type(now())
    if not generator:
        it = iter(feed.entries)
        feed_title = text_type(feed.title)
        feed_permalink = get_permalink(feed)
        generator = FeedEntryGenerator(category_id, feed_id, feed_title,
                                       feed_permalink, it, now(), read,
                                       starred)
        try:
            generator.set_iterator(entry_after)
        except StopIteration:
            # Nothing past the cursor: answer with an empty page.
            return jsonify(title=generator.feed_title, entries=[],
                           next_url=None,
                           read_url=url_for(
                               'read_all_entries', feed_id=feed_id,
                               last_updated=(updated_at or
                                             now()).isoformat(),
                               _external=True))
    save_entry_generators(url_token, generator)
    tidy_generators_up()
    entries = generator.get_entries()
    # A short page means the stream is exhausted: no next_url, and an empty
    # page also drops the cached generator.
    if len(entries) < app.config['PAGE_SIZE']:
        next_url = None
        if not entries:
            remove_entry_generator(url_token)
    else:
        next_url = make_next_url(category_id, url_token,
                                 entries[-1]['entry_id'], read, starred,
                                 feed_id)
    response = jsonify(title=text_type(feed.title),
                       entries=entries,
                       next_url=next_url,
                       read_url=url_for(
                           'read_all_entries', feed_id=feed_id,
                           last_updated=(updated_at or now()).isoformat(),
                           _external=True))
    # Expose Last-Modified so clients can issue conditional GETs next time.
    if feed.__revision__:
        response.last_modified = updated_at
    return response
def feed_entries(category_id, feed_id):
    """List one feed's entries as a paginated JSON response.

    Variant that validates the category id explicitly, supports HTTP
    conditional GET, and advertises a crawl_url while the crawler worker
    is running.
    """
    try:
        # Instantiated only for its side effect of validating category_id.
        Cursor(category_id)
    except InvalidCategoryID:
        r = jsonify(
            error='category-id-invalid',
            message='Given category does not exist'
        )
        r.status_code = 404
        return r
    try:
        with stage:
            feed = stage.feeds[feed_id]
    except KeyError:
        r = jsonify(
            error='feed-not-found',
            message='Given feed does not exist'
        )
        r.status_code = 404
        return r
    if feed.__revision__:
        updated_at = feed.__revision__.updated_at
        if request.if_modified_since:
            # Compare at whole-second precision, as HTTP dates have no
            # sub-second resolution.
            if_modified_since = request.if_modified_since.replace(tzinfo=utc)
            last_modified = updated_at.replace(microsecond=0)
            if if_modified_since >= last_modified:
                return '', 304, {}  # Not Modified
    else:
        updated_at = None
    # Only offer a crawl endpoint while the background worker is alive.
    if worker.is_running():
        crawl_url = url_for('update_entries', category_id=category_id,
                            feed_id=feed_id)
    else:
        crawl_url = None
    url_token, entry_after, read, starred = get_optional_args()
    generator = None
    if url_token:
        try:
            generator = get_entry_generator(url_token)
        except IteratorNotFound:
            # Token expired or unknown: rebuild the generator below.
            pass
    else:
        # First page: mint a fresh token from the current timestamp.
        url_token = text_type(now())
    if not generator:
        it = iter(feed.entries)
        feed_title = text_type(feed.title)
        feed_permalink = get_permalink(feed)
        try:
            generator = FeedEntryGenerator(category_id, feed_id, feed_title,
                                           feed_permalink, it, now(), read,
                                           starred)
            generator.set_iterator(entry_after)
        except StopIteration:
            # Feed empty or nothing past the cursor: empty page response.
            return jsonify(
                title=feed_title,
                entries=[],
                next_url=None,
                read_url=url_for('read_all_entries', feed_id=feed_id,
                                 last_updated=(updated_at or
                                               now()).isoformat(),
                                 _external=True),
                crawl_url=crawl_url
            )
    save_entry_generators(url_token, generator)
    tidy_generators_up()
    entries = generator.get_entries()
    # A short page means the stream is exhausted: no next_url, and an empty
    # page also drops the cached generator.
    if len(entries) < app.config['PAGE_SIZE']:
        next_url = None
        if not entries:
            remove_entry_generator(url_token)
    else:
        next_url = make_next_url(
            category_id, url_token, entries[-1]['entry_id'], read, starred,
            feed_id
        )
    response = jsonify(
        title=text_type(feed.title),
        entries=entries,
        next_url=next_url,
        read_url=url_for('read_all_entries', feed_id=feed_id,
                         last_updated=(updated_at or now()).isoformat(),
                         _external=True),
        crawl_url=crawl_url
    )
    # Expose Last-Modified so clients can issue conditional GETs next time.
    if feed.__revision__:
        response.last_modified = updated_at
    return response
def test_entry_str():
    """An Entry stringifies to its title; missing title gives the empty
    string."""
    assert text_type(Entry(title=Text(value='Title desu'))) == 'Title desu'
    empty = Entry()
    assert text_type(empty) == ''
def test_text_str():
    """Text of type 'html' is stripped to plain text; type 'text' is kept."""
    markup = '<p>Hello <em>world</em></p>'
    assert text_type(Text(type='text', value='Hello world')) == 'Hello world'
    assert text_type(Text(type='text', value=markup)) == markup
    assert text_type(Text(type='html', value='Hello world')) == 'Hello world'
    assert text_type(Text(type='html', value=markup)) == 'Hello world'
def test_binary():
    """binary() passes bytes through unchanged and encodes text to bytes."""
    raw = b'test'
    assert binary(raw) == raw
    assert binary(text_type('Test')) == b'Test'