def test_story_readability():
    """
    readability + lxml 4.5.0 has an issue:
    readability.readability.Unparseable: IO_ENCODER
    """
    html = _read_text('test_sample.html')
    story_readability(html)
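# For reference, a minimal sketch of what a story_readability helper might
# look like on top of buriy/python-readability (the library referenced in
# the test docstring above); the name and the html_partial choice are
# assumptions, not the project's actual code.
from readability import Document

def _story_readability_sketch(html):
    """Extract the main article fragment from raw page HTML."""
    doc = Document(html)
    return doc.summary(html_partial=True) or ''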
def _clean_story_html(text, *, readability=False):
    text = story_html_clean(text)
    if readability:
        text = story_readability(text)
    url = 'https://rss.anyant.com/'
    text = process_story_links(text, url)
    return text
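# A hedged sketch of the link-processing step: resolve relative URLs in the
# story HTML against the story's own URL. Assumes lxml; the project's real
# process_story_links may do more (e.g. rewriting image links).
import lxml.html

def _process_story_links_sketch(content, url):
    if not content:
        return content
    dom = lxml.html.fragment_fromstring(content, create_parent='div')
    dom.make_links_absolute(url)
    return lxml.html.tostring(dom, encoding='unicode')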
def do_process_story_webpage(
    ctx: ActorContext,
    story_id: T.int,
    url: T.url,
    text: T.str.maxlen(5 * 1024 * 1024),
):
    # Candidate content-extraction libraries:
    # https://github.com/dragnet-org/dragnet
    # https://github.com/misja/python-boilerpipe
    # https://github.com/dalab/web2text
    # https://github.com/grangier/python-goose
    # https://github.com/buriy/python-readability
    # https://github.com/codelucas/newspaper
    text = text.strip()
    if not text:
        return
    content = story_readability(text)
    content = process_story_links(content, url)
    summary = shorten(story_html_to_text(content), width=300)
    if not summary:
        return
    ctx.hope('harbor_rss.update_story', dict(
        story_id=story_id,
        content=content,
        summary=summary,
        url=url,
    ))
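# An illustrative story_html_to_text, assuming lxml's text_content(); the
# project's real helper may normalize whitespace differently.
import lxml.html

def _story_html_to_text_sketch(content):
    if not content:
        return ''
    dom = lxml.html.fragment_fromstring(content, create_parent='div')
    return dom.text_content().strip()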
def do_process_story_webpage(
    ctx: ActorContext,
    feed_id: T.int,
    offset: T.int,
    url: T.url,
    text: T.str.maxlen(_MAX_STORY_HTML_LENGTH),
    num_sub_sentences: T.int.optional,
) -> SCHEMA_FETCH_STORY_RESULT:
    # Candidate content-extraction libraries:
    # https://github.com/dragnet-org/dragnet
    # https://github.com/misja/python-boilerpipe
    # https://github.com/dalab/web2text
    # https://github.com/grangier/python-goose
    # https://github.com/buriy/python-readability
    # https://github.com/codelucas/newspaper
    DEFAULT_RESULT = dict(feed_id=feed_id, offset=offset, url=url)
    text = text.strip()
    if not text:
        return DEFAULT_RESULT
    text = story_html_clean(text)
    content = story_readability(text)
    content = process_story_links(content, url)
    content_info = StoryContentInfo(content)
    text_content = shorten(content_info.text, width=_MAX_STORY_CONTENT_LENGTH)
    num_sentences = len(split_sentences(text_content))
    if len(content) > _MAX_STORY_CONTENT_LENGTH:
        msg = 'too large story#%s,%s size=%s url=%r, will only save plain text'
        LOG.warning(msg, feed_id, offset, len(content), url)
        content = text_content
    # If the fetched content is shorter than the RSS content,
    # it is not the correct full text.
    if num_sub_sentences is not None:
        if not is_fulltext_content(content_info):
            if num_sentences <= num_sub_sentences:
                msg = 'fetched story#%s,%s url=%s num_sentences=%s less than num_sub_sentences=%s'
                LOG.info(msg, feed_id, offset, url, num_sentences, num_sub_sentences)
                return DEFAULT_RESULT
    summary = shorten(text_content, width=_MAX_STORY_SUMMARY_LENGTH)
    if not summary:
        return DEFAULT_RESULT
    result = dict(
        **DEFAULT_RESULT,
        content=content,
        summary=summary,
        sentence_count=num_sentences,
    )
    if not ctx.message.is_ask:
        ctx.hope('harbor_rss.update_story', result)
    return result
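# An illustrative split_sentences for the num_sentences heuristic above,
# assuming a simple punctuation rule covering ASCII and CJK sentence
# terminators; the project's real implementation is likely more careful.
import re

_SENTENCE_RE = re.compile(r'[^.!?。!?]+[.!?。!?]?')

def _split_sentences_sketch(text):
    return [s.strip() for s in _SENTENCE_RE.findall(text) if s.strip()]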
def do_process_story_webpage(
    ctx: ActorContext,
    story_id: T.int,
    url: T.url,
    text: T.str.maxlen(_MAX_STORY_HTML_LENGTH),
    num_sub_sentences: T.int.optional,
):
    # Candidate content-extraction libraries:
    # https://github.com/dragnet-org/dragnet
    # https://github.com/misja/python-boilerpipe
    # https://github.com/dalab/web2text
    # https://github.com/grangier/python-goose
    # https://github.com/buriy/python-readability
    # https://github.com/codelucas/newspaper
    text = text.strip()
    if not text:
        return
    text = story_html_clean(text)
    content = story_readability(text)
    content = process_story_links(content, url)
    if len(content) > _MAX_STORY_CONTENT_LENGTH:
        msg = 'too large story#%s size=%s url=%r, will only save plain text'
        LOG.warning(msg, story_id, len(content), url)
        content = shorten(story_html_to_text(content), width=_MAX_STORY_CONTENT_LENGTH)
    # If the fetched content is shorter than the RSS content,
    # it is not the correct full text.
    if num_sub_sentences is not None:
        if not is_fulltext_content(content):
            num_sentences = len(split_sentences(story_html_to_text(content)))
            if num_sentences <= num_sub_sentences:
                msg = 'fetched story#%s url=%s num_sentences=%s less than num_sub_sentences=%s'
                LOG.info(msg, story_id, url, num_sentences, num_sub_sentences)
                return
    summary = shorten(story_html_to_text(content), width=_MAX_STORY_SUMMARY_LENGTH)
    if not summary:
        return
    ctx.hope('harbor_rss.update_story', dict(
        story_id=story_id,
        content=content,
        summary=summary,
        url=url,
    ))
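# If shorten behaves like the stdlib helper, the summary/truncation steps
# are roughly equivalent to the sketch below; this is an assumption, as the
# project's shorten may use different truncation semantics.
import textwrap

def _shorten_sketch(text, width):
    # textwrap.shorten collapses whitespace, then drops trailing words and
    # appends the placeholder if the text exceeds width.
    return textwrap.shorten(text, width=width, placeholder='...')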