def test_split_short_sentences():
    text = ('这是一句话。'
            '短句\n'
            'short.'
            'I like read RSS!'
            '。。。')
    sentences = split_sentences(text)
    assert len(sentences) == 2, sentences
    assert sentences[0] == '这是一句话'
    assert sentences[1] == 'I like read RSS'
    assert split_sentences('hello') == []
    assert split_sentences('你好,世界') == []
def test_split_link_sentences():
    text = ('这是一个 Google 的链接https://www.google.com和一个蚁阅的链接'
            'https://rss.anyant.com/feed?id=1234。\n'
            'I like read RSS!')
    sentences = split_sentences(text)
    assert len(sentences) == 3, sentences
    assert sentences[0] == '这是一个 Google 的链接'
    assert sentences[1] == '和一个蚁阅的链接'
    assert sentences[2] == 'I like read RSS'
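# For reference, a minimal sketch of a sentence splitter that is consistent
# with the tests above. This is NOT the project's actual split_sentences
# implementation: the URL pattern, the number pattern, the separator set, and
# the "real sentence" thresholds below are all assumptions for illustration.
import re

_URL_RE = re.compile(r'https?://[^\s\u4e00-\u9fff]+')  # assumed URL pattern
_NUM_RE = re.compile(r'[0-9][0-9./%]*')  # assumed number pattern
_SEP_RE = re.compile(r'[。!?!?.,,;;\n]+')  # assumed sentence separators
_CJK_RE = re.compile(r'[\u4e00-\u9fff]')


def split_sentences_sketch(text: str) -> list:
    # URLs and numbers act as sentence boundaries and are dropped
    text = _URL_RE.sub('\n', text)
    text = _NUM_RE.sub('\n', text)
    result = []
    for part in _SEP_RE.split(text):
        part = part.strip()
        num_cjk = len(_CJK_RE.findall(part))
        num_words = len(part.split())
        # assumed heuristic: a fragment counts as a sentence if it has
        # enough CJK characters, or enough words when it is pure ASCII
        if num_cjk >= 5 or (num_cjk == 0 and num_words >= 3):
            result.append(part)
    return result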
def do_sync_story_fulltext(
    ctx: ActorContext,
    feed_id: T.int,
    offset: T.int,
) -> T.dict(
    feed_id=T.int,
    offset=T.int.min(0),
    use_proxy=T.bool,
    url=T.url,
    response_status=T.int,
    accept=T_ACCEPT,
):
    with log_django_context_metric('harbor_rss.sync_story_fulltext:read'):
        feed = Feed.get_by_pk(feed_id, detail='+use_proxy')
        story = STORY_SERVICE.get_by_offset(feed_id, offset, detail=True)
        assert story, f'story#{feed_id},{offset} not found'
        story_content_info = StoryContentInfo(story.content)
        num_sub_sentences = len(split_sentences(story_content_info.text))
    ret = dict(
        feed_id=feed_id,
        offset=offset,
        url=story.link,
        use_proxy=feed.use_proxy,
        accept=FulltextAcceptStrategy.REJECT.value,
    )
    try:
        result = ctx.ask('worker_rss.fetch_story', dict(
            url=story.link,
            use_proxy=feed.use_proxy,
            feed_id=feed_id,
            offset=offset,
            num_sub_sentences=num_sub_sentences,
        ))
    except _TIMEOUT_ERRORS as ex:
        LOG.error(f'ask worker_rss.fetch_story timeout: {ex}')
        ret.update(response_status=FeedResponseStatus.CONNECTION_TIMEOUT)
        return ret
    else:
        ret.update(
            response_status=result['response_status'],
            use_proxy=result['use_proxy'],
        )
        if not result['content']:
            return ret
    with log_django_context_metric('harbor_rss.sync_story_fulltext:write'):
        accept = _update_story(
            story=story,
            story_content_info=story_content_info,
            content=result['content'],
            summary=None,  # no need to update summary
            url=result['url'],
            sentence_count=result['sentence_count'],
        )
    ret.update(accept=accept.value)
    return ret
def do_process_story_webpage(
    ctx: ActorContext,
    feed_id: T.int,
    offset: T.int,
    url: T.url,
    text: T.str.maxlen(_MAX_STORY_HTML_LENGTH),
    num_sub_sentences: T.int.optional,
) -> SCHEMA_FETCH_STORY_RESULT:
    # Candidate content-extraction libraries:
    # https://github.com/dragnet-org/dragnet
    # https://github.com/misja/python-boilerpipe
    # https://github.com/dalab/web2text
    # https://github.com/grangier/python-goose
    # https://github.com/buriy/python-readability
    # https://github.com/codelucas/newspaper
    DEFAULT_RESULT = dict(feed_id=feed_id, offset=offset, url=url)
    text = text.strip()
    if not text:
        return DEFAULT_RESULT
    text = story_html_clean(text)
    content = story_readability(text)
    content = process_story_links(content, url)
    content_info = StoryContentInfo(content)
    text_content = shorten(content_info.text, width=_MAX_STORY_CONTENT_LENGTH)
    num_sentences = len(split_sentences(text_content))
    if len(content) > _MAX_STORY_CONTENT_LENGTH:
        msg = 'too large story#%s,%s size=%s url=%r, will only save plain text'
        LOG.warning(msg, feed_id, offset, len(content), url)
        content = text_content
    # If the fetched content is shorter than the RSS content,
    # it is not the correct full text.
    if num_sub_sentences is not None:
        if not is_fulltext_content(content_info):
            if num_sentences <= num_sub_sentences:
                msg = 'fetched story#%s,%s url=%s num_sentences=%s less than num_sub_sentences=%s'
                LOG.info(msg, feed_id, offset, url, num_sentences, num_sub_sentences)
                return DEFAULT_RESULT
    summary = shorten(text_content, width=_MAX_STORY_SUMMARY_LENGTH)
    if not summary:
        return DEFAULT_RESULT
    result = dict(
        **DEFAULT_RESULT,
        content=content,
        summary=summary,
        sentence_count=num_sentences,
    )
    if not ctx.message.is_ask:
        ctx.hope('harbor_rss.update_story', result)
    return result
def do_process_story_webpage(
    ctx: ActorContext,
    story_id: T.int,
    url: T.url,
    text: T.str.maxlen(_MAX_STORY_HTML_LENGTH),
    num_sub_sentences: T.int.optional,
):
    # Candidate content-extraction libraries:
    # https://github.com/dragnet-org/dragnet
    # https://github.com/misja/python-boilerpipe
    # https://github.com/dalab/web2text
    # https://github.com/grangier/python-goose
    # https://github.com/buriy/python-readability
    # https://github.com/codelucas/newspaper
    text = text.strip()
    if not text:
        return
    text = story_html_clean(text)
    content = story_readability(text)
    content = process_story_links(content, url)
    if len(content) > _MAX_STORY_CONTENT_LENGTH:
        msg = 'too large story#%s size=%s url=%r, will only save plain text'
        LOG.warning(msg, story_id, len(content), url)
        content = shorten(story_html_to_text(content), width=_MAX_STORY_CONTENT_LENGTH)
    # If the fetched content is shorter than the RSS content,
    # it is not the correct full text.
    if num_sub_sentences is not None:
        if not is_fulltext_content(content):
            num_sentences = len(split_sentences(story_html_to_text(content)))
            if num_sentences <= num_sub_sentences:
                msg = 'fetched story#%s url=%s num_sentences=%s less than num_sub_sentences=%s'
                LOG.info(msg, story_id, url, num_sentences, num_sub_sentences)
                return
    summary = shorten(story_html_to_text(content), width=_MAX_STORY_SUMMARY_LENGTH)
    if not summary:
        return
    ctx.hope('harbor_rss.update_story', dict(
        story_id=story_id,
        content=content,
        summary=summary,
        url=url,
    ))
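# Sketch of the calling side: how a fetcher might hand downloaded HTML to the
# actor above. The actor name 'worker_rss.process_story_webpage' is inferred
# from the function name, and fetch_html is a hypothetical helper; both are
# assumptions for illustration, not the project's actual code.
def fetch_and_process_sketch(ctx: ActorContext, story_id, url, num_sub_sentences):
    html = fetch_html(url)  # hypothetical HTTP fetch helper
    # truncate to the declared schema limit before handing off
    ctx.tell('worker_rss.process_story_webpage', dict(
        story_id=story_id,
        url=url,
        text=html[:_MAX_STORY_HTML_LENGTH],
        num_sub_sentences=num_sub_sentences,
    ))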
def test_split_number_sentences():
    text = '中国移动的号码是10086,短信价格是0.1元/条'
    sentences = split_sentences(text)
    assert len(sentences) == 2, sentences
    assert sentences[0] == '中国移动的号码是'
    assert sentences[1] == '短信价格是'
def test_split_sentences():
    sentences = split_sentences(sentence_text)
    assert len(sentences) == 17, sentences
def _compute_sentence_count(content: str) -> int:
    return len(split_sentences(story_html_to_text(content)))
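# Usage sketch: how a sentence count can gate full-text acceptance, mirroring
# the num_sentences <= num_sub_sentences check in do_process_story_webpage.
# The helper name and threshold logic are assumptions for illustration.
def _is_better_fulltext_sketch(fetched_html: str, num_sub_sentences: int) -> bool:
    # the fetched page should contain strictly more sentences than the
    # RSS-provided content, otherwise it is not a better "full text"
    return _compute_sentence_count(fetched_html) > num_sub_sentences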
def do_update_feed(
    ctx: ActorContext,
    feed_id: T.int,
    feed: FeedSchema,
    is_refresh: T.bool.default(False),
):
    with transaction.atomic():
        feed_dict = feed
        storys = feed_dict.pop('storys')
        feed = Feed.get_by_pk(feed_id)
        is_feed_url_changed = feed.url != feed_dict['url']
        if is_feed_url_changed:
            target_feed = Feed.get_first_by_url(feed_dict['url'])
            # FIXME: feed merge cannot correctly handle feed redirects.
            # For now, keep the old feed; fix this thoroughly later.
            # if target_feed:
            #     LOG.info(f'merge feed#{feed.id} url={feed.url} into '
            #              f'feed#{target_feed.id} url={target_feed.url}')
            #     target_feed.merge(feed)
            #     return
            if target_feed:
                LOG.warning(
                    f'FIXME: redirect feed#{feed.id} url={feed.url!r} into '
                    f'feed#{target_feed.id} url={target_feed.url!r}')
                feed_dict.pop('url')
        # only update dt_updated when storys or feed fields changed
        is_feed_updated = bool(storys)
        for k, v in feed_dict.items():
            if k == 'dt_updated':
                continue
            if (v != '' and v is not None) or k in {'warnings'}:
                old_v = getattr(feed, k, None)
                if v != old_v:
                    is_feed_updated = True
                    setattr(feed, k, v)
        now = timezone.now()
        now_sub_30d = now - timezone.timedelta(days=30)
        if is_feed_updated:
            # set dt_updated to now, do not trust the RSS date
            feed.dt_updated = now
        feed.dt_checked = feed.dt_synced = now
        feed.reverse_url = reverse_url(feed.url)
        feed.status = FeedStatus.READY
        feed.save()
    # save storys; bulk_save_by_feed has a standalone transaction
    for s in storys:
        if not s['dt_updated']:
            s['dt_updated'] = now
        if not s['dt_published']:
            # set dt_published to now - 30d to avoid these storys
            # taking over the mushroom page, i.e. Story.query_recent_by_user
            s['dt_published'] = now_sub_30d
    modified_storys = STORY_SERVICE.bulk_save_by_feed(
        feed.id, storys, is_refresh=is_refresh)
    LOG.info('feed#%s save storys total=%s num_modified=%s',
             feed.id, len(storys), len(modified_storys))
    feed = Feed.get_by_pk(feed_id)
    is_freezed = feed.freeze_level is None or feed.freeze_level > 1
    if modified_storys and is_freezed:
        Feed.unfreeze_by_id(feed_id)
    need_fetch_story = _is_feed_need_fetch_storys(feed, modified_storys)
    for story in modified_storys:
        if not story.link:
            continue
        if need_fetch_story and (not _is_fulltext_story(story)):
            text = processor.story_html_to_text(story.content)
            num_sub_sentences = len(split_sentences(text))
            ctx.tell('worker_rss.fetch_story', dict(
                url=story.link,
                use_proxy=feed.use_proxy,
                feed_id=story.feed_id,
                offset=story.offset,
                num_sub_sentences=num_sub_sentences,
            ))
def do_update_feed(
    ctx: ActorContext,
    feed_id: T.int,
    feed: FeedSchema,
    is_refresh: T.bool.default(False),
):
    with transaction.atomic():
        feed_dict = feed
        storys = feed_dict.pop('storys')
        feed = Feed.get_by_pk(feed_id)
        is_feed_url_changed = feed.url != feed_dict['url']
        if is_feed_url_changed:
            target_feed = Feed.get_first_by_url(feed_dict['url'])
            if target_feed:
                LOG.info(f'merge feed#{feed.id} url={feed.url} into '
                         f'feed#{target_feed.id} url={target_feed.url}')
                target_feed.merge(feed)
                return
        # only update dt_updated when storys or feed fields changed
        is_feed_updated = bool(storys)
        for k, v in feed_dict.items():
            if k == 'dt_updated':
                continue
            if v != '' and v is not None:
                old_v = getattr(feed, k, None)
                if v != old_v:
                    is_feed_updated = True
                    setattr(feed, k, v)
        now = timezone.now()
        now_sub_30d = now - timezone.timedelta(days=30)
        if is_feed_updated:
            # set dt_updated to now, do not trust the RSS date
            feed.dt_updated = now
        feed.dt_checked = feed.dt_synced = now
        feed.status = FeedStatus.READY
        feed.save()
        for s in storys:
            if not s['dt_updated']:
                s['dt_updated'] = now
            if not s['dt_published']:
                # set dt_published to now - 30d to avoid these storys
                # taking over the mushroom page, i.e. Story.query_recent_by_user
                s['dt_published'] = now_sub_30d
        modified_storys = Story.bulk_save_by_feed(
            feed.id, storys, is_refresh=is_refresh)
        LOG.info('feed#%s save storys total=%s num_modified=%s',
                 feed.id, len(storys), len(modified_storys))
    feed.refresh_from_db()
    if modified_storys:
        feed.unfreeze()
    need_fetch_story = _is_feed_need_fetch_storys(feed, modified_storys)
    for story in modified_storys:
        if not story.link:
            continue
        if need_fetch_story and (not is_fulltext_story(story)):
            text = processor.story_html_to_text(story.content)
            num_sub_sentences = len(split_sentences(text))
            ctx.tell('worker_rss.fetch_story', dict(
                url=story.link,
                use_proxy=feed.use_proxy,
                story_id=str(story.id),
                num_sub_sentences=num_sub_sentences,
            ))
        else:
            _detect_story_images(ctx, story)