Example #1
def do_process_story_webpage(
        ctx: ActorContext,
        story_id: T.int,
        url: T.url,
        text: T.str.maxlen(5 * 1024 * 1024),
):
    # https://github.com/dragnet-org/dragnet
    # https://github.com/misja/python-boilerpipe
    # https://github.com/dalab/web2text
    # https://github.com/grangier/python-goose
    # https://github.com/buriy/python-readability
    # https://github.com/codelucas/newspaper
    text = text.strip()
    if not text:
        return
    content = story_readability(text)
    content = process_story_links(content, url)
    summary = shorten(story_html_to_text(content), width=300)
    if not summary:
        return
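    # hope() appears to be a fire-and-forget actor message; Example #4 checks
    # ctx.message.is_ask to also support the request/reply variant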
    ctx.hope(
        'harbor_rss.update_story',
        dict(
            story_id=story_id,
            content=content,
            summary=summary,
            url=url,
        ))
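process_story_links rewrites the story's links against its page URL. A standalone sketch of that kind of relative-link resolution using only the stdlib, as an illustration rather than the project's actual implementation:

from urllib.parse import urljoin

base = 'https://blog.example.com/feed/item.html'
print(urljoin(base, '/post/1'))        # https://blog.example.com/post/1
print(urljoin(base, 'img/cover.png'))  # https://blog.example.com/feed/img/cover.png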
Example #2
def _clean_story_html(text, *, readability=False):
    text = story_html_clean(text)
    if readability:
        text = story_readability(text)
    url = 'https://rss.anyant.com/'
    text = process_story_links(text, url)
    return text
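story_html_clean is not shown in these examples; given that it runs before story_readability, it sanitizes the raw HTML first. Note the fixed base URL 'https://rss.anyant.com/' here: relative links in feed-level HTML get resolved against the site root. A minimal stand-in built on lxml's Cleaner, which is an assumption about the real helper:

from lxml.html.clean import Cleaner

_cleaner = Cleaner(scripts=True, javascript=True, style=True, comments=True)

def story_html_clean(text):
    # strip <script>, inline JS, <style> and HTML comments; clean_html()
    # accepts and returns a string
    return _cleaner.clean_html(text)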
Example #3
def process_story_links(storys=None):
    story_ids = _get_story_ids(storys)
    LOG.info('total %s storys', len(story_ids))
    for story_id in tqdm.tqdm(story_ids, ncols=80, ascii=True):
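        # one transaction per story; .only() loads just the needed columns,
        # and _version presumably drives an optimistic-locking save()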
        with transaction.atomic():
            story = Story.objects.only('id', 'content', '_version').get(pk=story_id)
            content = processor.process_story_links(story.content, story.link)
            if story.content != content:
                story.content = content
                story.save()
Example #4
def do_process_story_webpage(
    ctx: ActorContext,
    feed_id: T.int,
    offset: T.int,
    url: T.url,
    text: T.str.maxlen(_MAX_STORY_HTML_LENGTH),
    num_sub_sentences: T.int.optional,
) -> SCHEMA_FETCH_STORY_RESULT:
    # https://github.com/dragnet-org/dragnet
    # https://github.com/misja/python-boilerpipe
    # https://github.com/dalab/web2text
    # https://github.com/grangier/python-goose
    # https://github.com/buriy/python-readability
    # https://github.com/codelucas/newspaper
    DEFAULT_RESULT = dict(feed_id=feed_id, offset=offset, url=url)
    text = text.strip()
    if not text:
        return DEFAULT_RESULT
    text = story_html_clean(text)
    content = story_readability(text)
    content = process_story_links(content, url)
    content_info = StoryContentInfo(content)
    text_content = shorten(content_info.text, width=_MAX_STORY_CONTENT_LENGTH)
    num_sentences = len(split_sentences(text_content))
    if len(content) > _MAX_STORY_CONTENT_LENGTH:
        msg = 'too large story#%s,%s size=%s url=%r, will only save plain text'
        LOG.warning(msg, feed_id, offset, len(content), url)
        content = text_content
    # if the fetched content is shorter than the RSS content, it is not the real full text
    if num_sub_sentences is not None:
        if not is_fulltext_content(content_info):
            if num_sentences <= num_sub_sentences:
                msg = 'fetched story#%s,%s url=%s num_sentences=%s less than num_sub_sentences=%s'
                LOG.info(msg, feed_id, offset, url, num_sentences,
                         num_sub_sentences)
                return DEFAULT_RESULT
    summary = shorten(text_content, width=_MAX_STORY_SUMMARY_LENGTH)
    if not summary:
        return DEFAULT_RESULT
    result = dict(
        **DEFAULT_RESULT,
        content=content,
        summary=summary,
        sentence_count=num_sentences,
    )
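    # ask-style calls get the result returned directly; otherwise push it
    # to the harbor actor as a fire-and-forget message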
    if not ctx.message.is_ask:
        ctx.hope('harbor_rss.update_story', result)
    return result
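Every example leans on a shorten() helper that truncates text to a given width. Its implementation is not shown; a hypothetical stand-in (it may also simply wrap textwrap.shorten, which collapses whitespace and cuts at word boundaries):

def shorten(text, width):
    # hypothetical stand-in: hard-truncate to at most `width` characters
    text = (text or '').strip()
    return text if len(text) <= width else text[:width]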
Example #5
def _get_storys(entries: list):
    storys = deque(maxlen=300)  # limit num storys
    while entries:
        data = entries.pop()
        story = {}
        content = ''
        if data["content"]:
            # both content and summary may appear in the content list; pick the longest
            for x in data["content"]:
                value = x["value"]
                if value and len(value) > len(content):
                    content = value
        if not content:
            content = data["description"]
        if not content:
            content = data["summary"]
        story['has_mathjax'] = story_has_mathjax(content)
        link = normlize_url(data["link"])
        valid_link = ''
        if link:
            try:
                valid_link = validate_url(link)
            except Invalid:
                LOG.warning(f'invalid story link {link!r}')
        story['link'] = valid_link
        content = story_html_clean(content)
        if len(content) >= 1024 * 1024:
            msg = 'too large story link=%r content length=%s, will only save plain text!'
            LOG.warning(msg, link, len(content))
            content = story_html_to_text(content)
        content = process_story_links(content, valid_link)
        story['content'] = content
        summary = data["summary"]
        if not summary:
            summary = content
        summary = shorten(story_html_to_text(summary), width=300)
        story['summary'] = summary
        title = shorten(data["title"] or link or summary, 200)
        unique_id = shorten(data['id'] or link or title, 200)
        content_hash_base64 = compute_hash_base64(content, summary, title)
        story['title'] = title
        story['content_hash_base64'] = content_hash_base64
        story['unique_id'] = unique_id
        story['author'] = shorten(data["author"], 200)
        story['dt_published'] = _get_dt_published(data)
        story['dt_updated'] = _get_dt_updated(data)
        storys.append(story)
    return list(storys)
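compute_hash_base64 hashes content, summary and title together so a changed story can be detected by comparing one short key. A plausible stand-in using hashlib and base64, an assumption rather than the project's code:

import base64
import hashlib

def compute_hash_base64(*parts):
    h = hashlib.sha1()
    for part in parts:
        h.update((part or '').encode('utf-8'))
    return base64.urlsafe_b64encode(h.digest()).decode()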
Example #6
def do_process_story_webpage(
    ctx: ActorContext,
    story_id: T.int,
    url: T.url,
    text: T.str.maxlen(_MAX_STORY_HTML_LENGTH),
    num_sub_sentences: T.int.optional,
):
    # https://github.com/dragnet-org/dragnet
    # https://github.com/misja/python-boilerpipe
    # https://github.com/dalab/web2text
    # https://github.com/grangier/python-goose
    # https://github.com/buriy/python-readability
    # https://github.com/codelucas/newspaper
    text = text.strip()
    if not text:
        return
    text = story_html_clean(text)
    content = story_readability(text)
    content = process_story_links(content, url)
    if len(content) > _MAX_STORY_CONTENT_LENGTH:
        msg = 'too large story#%s size=%s url=%r, will only save plain text'
        LOG.warning(msg, story_id, len(content), url)
        content = shorten(story_html_to_text(content),
                          width=_MAX_STORY_CONTENT_LENGTH)
    # if the fetched content is shorter than the RSS content, it is not the real full text
    if num_sub_sentences is not None:
        if not is_fulltext_content(content):
            num_sentences = len(split_sentences(story_html_to_text(content)))
            if num_sentences <= num_sub_sentences:
                msg = 'fetched story#%s url=%s num_sentences=%s less than num_sub_sentences=%s'
                LOG.info(msg, story_id, url, num_sentences, num_sub_sentences)
                return
    summary = shorten(story_html_to_text(content),
                      width=_MAX_STORY_SUMMARY_LENGTH)
    if not summary:
        return
    ctx.hope(
        'harbor_rss.update_story',
        dict(
            story_id=story_id,
            content=content,
            summary=summary,
            url=url,
        ))
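The fulltext check above counts sentences via split_sentences. A naive stand-in that splits on ASCII and CJK sentence punctuation; the real helper is surely more careful:

import re

def split_sentences(text):
    return [s for s in re.split(r'[.!?。!?]+', text) if s.strip()]

print(len(split_sentences('One. Two! 三句话。')))  # 3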
Example #7
def _get_storys(entries: list):
    storys = deque(maxlen=300)  # limit num storys
    while entries:
        data = entries.pop()
        story = {}
        story['unique_id'] = shorten(_get_story_unique_id(data), 200)
        content = ''
        if data["content"]:
            # both content and summary may appear in the content list; pick the longest
            for x in data["content"]:
                value = x["value"]
                if value and len(value) > len(content):
                    content = value
        if not content:
            content = data["description"]
        if not content:
            content = data["summary"]
        story['has_mathjax'] = story_has_mathjax(content)
        content = story_html_clean(content)
        content = process_story_links(content, data["link"])
        story['content'] = content
        summary = data["summary"]
        if not summary:
            summary = content
        # TODO: performance
        summary = shorten(story_html_to_text(summary), width=300)
        story['summary'] = summary
        story['link'] = data["link"]
        title = shorten(data["title"] or story['link'] or story['unique_id'], 200)
        content_hash_base64 = compute_hash_base64(content, summary, title)
        story['title'] = title
        story['content_hash_base64'] = content_hash_base64
        story['author'] = shorten(data["author"], 200)
        story['dt_published'] = _get_dt_published(data)
        story['dt_updated'] = _get_dt_updated(data)
        storys.append(story)
    return list(storys)
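Both _get_storys variants cap memory with deque(maxlen=300): once the deque is full, each append silently discards the oldest entry. A quick demonstration:

from collections import deque

d = deque(maxlen=3)
for i in range(5):
    d.append(i)
print(list(d))  # [2, 3, 4] -- the two oldest entries were dropped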