Code example #1
def process_blogpost(doc):
    """
    Retrieve post info, check whether the content is philosophical, and classify it.
    """
    debug(1, "processing new blog post from %s", doc.source_name)
    try:
        blogpostparser.parse(doc)
    except Exception as e:
        debug(1, "parser error %s", e)
        remove_from_db(doc)
        return 0
    if len(doc.content) < 500:
        debug(1, "content too short")
        remove_from_db(doc)
        return 0

    # estimate whether post is on philosophy:
    blogspamfilter = classifier.get_classifier('blogspam')
    try:
        doc.is_philosophy = 100 - int(blogspamfilter.classify(doc) * 100)
    except UntrainedClassifierException:
        doc.is_philosophy = 90
    if doc.is_philosophy < 25:
        debug(1, "spam: blogspam score %s > 75", 100 - doc.is_philosophy)
        remove_from_db(doc)
        return 0
        
    # flag for manual approval if dubious relevance:
    if doc.is_philosophy < 60:
        debug(1, "flagging for manual approval")
        doc.hidden = True

    # categorize:
    if doc.numwords > 700:
        for (cat_id, cat) in categories():
            clf = classifier.get_classifier(cat)
            try:
                strength = int(clf.classify(doc) * 100)
                debug(3, "%s score %s", cat, strength)
            except UntrainedClassifierException:
                continue
            doc.assign_category(cat_id, strength)

    doc.found_date = datetime.now()
    doc.status = 1
    doc.update_db()
    return 1
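
The scoring above assumes that classifier.get_classifier(name) returns an object whose classify(doc) yields a probability between 0 and 1 and which raises UntrainedClassifierException until it has been trained. A minimal sketch of such an interface, with illustrative class and attribute names that are not part of the actual codebase:

class UntrainedClassifierException(Exception):
    """Raised when classify() is called before the classifier has been trained."""

class ProbabilisticClassifier:
    """Illustrative stand-in for the objects returned by classifier.get_classifier()."""

    def __init__(self, name):
        self.name = name
        self.model = None  # set by train(); None means "not trained yet"

    def train(self, docs, labels):
        # placeholder: a real implementation would fit e.g. a naive Bayes model here
        self.model = (docs, labels)

    def classify(self, doc):
        """Return the estimated probability (0..1) that doc belongs to the class self.name."""
        if self.model is None:
            raise UntrainedClassifierException(self.name)
        return 0.5  # placeholder score; a real classifier would score doc.content

With this interface, process_blogpost turns the returned probability into an integer score via int(p * 100) and falls back to a default of 90 whenever a classifier has not been trained yet.
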
Code example #2
def process_link(li, force_reprocess=False, redir_url=None, keep_tempfiles=False,
                 recurse=0):
    """
    Fetch url, check for http errors and steppingstones, filter spam,
    parse candidate papers, check for duplicates, check if published
    before last year.

    Links often lead to intermediate pages (e.g. on repositories) with
    another link to the actual paper. In this case, we only store the
    original link in the 'links' table, so the 'doc' entry has a url
    that doesn't match any link. To process the new link, process_link
    is called again, with redir_url set to the new url and recurse +=
    1.

    If force_reprocess is False and the link has already been checked
    at some point, if_modified_since and etag headers are sent.
    """

    # ignore links to old and published papers:
    li.context = li.html_context()
    debug(2, "link context: %s", li.context)
    if context_suggests_published(li.context):
        return li.update_db(status=1, doc_id=None)
    
    # fetch url and handle errors, redirects, etc.:
    url = redir_url or li.url
    r = li.fetch(url=url, only_if_modified=not force_reprocess)
    # note: li.fetch() updates the link entry in case of errors
    if not r:
        return 0
        
    if r.url != url: # redirected
        url = util.normalize_url(r.url)
        # now we treat li as if it directly led to the redirected document

    if r.filetype not in ('html', 'pdf', 'doc', 'rtf'):
        li.update_db(status=error.code['unsupported filetype'])
        return debug(1, "unsupported filetype: %s", r.filetype)

    doc = Doc(url=url, r=r, link=li, source=li.source)
    
    if doc.load_from_db() and not force_reprocess:
        li.update_db(status=1, doc_id=doc.doc_id)
        return debug(1, "%s is already in docs table", url)
    
    if r.filetype == 'html':
        r.encoding = 'utf-8'
        try:
            doc.page = Webpage(url, html=r.text)
        except UnparsableHTMLException:
            li.update_db(status=error.code['unsupported filetype'])
            return debug(1, "unparsable html")

        debug(6, "\n====== %s ======\n%s\n======\n", url, r.text)

        # check for steppingstone pages with link to a paper:
        target_url = check_steppingstone(doc.page)
        if target_url and recurse < 3:
            debug(1, "steppingstone to %s", target_url)
            return process_link(li, redir_url=target_url,
                                force_reprocess=force_reprocess,
                                keep_tempfiles=keep_tempfiles, recurse=recurse+1)

        # Genuine papers are almost never in HTML format, and almost
        # every HTML page is not a paper. The few exceptions (such as
        # entries on SEP) tend to require special parsing. Hence the
        # following special treatment. If people start posting
        # articles on Medium or in plain HTML, we might return to the
        # old procedure of converting the page to pdf and treating it
        # like any candidate paper.
        from .docparser import webpageparser as htmlparser
        if not htmlparser.parse(doc):
            debug(1, "page ignored")
            li.update_db(status=1)
            return 0

    else:
        try:
            doc.tempfile = save_local(r)
        except Exception:
            return li.update_db(status=error.code['cannot save local file'])
        try:
            process_file(doc, keep_tempfiles=keep_tempfiles)
        except Exception as e:
            debug(1, 'could not process %s: %s', doc.tempfile, e)
            return li.update_db(status=error.code.get(str(e), 10))
            
    # estimate whether doc is a handout, cv etc.:
    from .doctyper import paperfilter
    paperprob = paperfilter.evaluate(doc)
    doc.is_paper = int(paperprob * 100)
    if doc.is_paper < 25:
        li.update_db(status=1)
        debug(1, "spam: paper score %s < 50", doc.is_paper)
        return 0
        
    # estimate whether doc is on philosophy:
    from .doctyper import classifier
    philosophyfilter = classifier.get_classifier('philosophy')
    try:
        doc.is_philosophy = int(philosophyfilter.classify(doc) * 100)
    except UntrainedClassifierException:
        doc.is_philosophy = 90
    if doc.is_philosophy < 25:
        li.update_db(status=1)
        debug(1, "spam: philosophy score %s < 50", doc.is_philosophy)
        return 0
        
    if li.doc_id:
        # check for revisions:
        olddoc = Doc(doc_id=li.doc_id)
        olddoc.load_from_db()
        if doc.content != olddoc.content:
            sm = SequenceMatcher(None, doc.content, olddoc.content)
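            # SequenceMatcher.ratio() returns a similarity score between 0 (no
            # overlap) and 1 (identical); below 0.8 counts as a substantive revision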
            match_ratio = sm.ratio()
            if match_ratio < 0.8:
                debug(1, "substantive revisions, ratio %s", match_ratio)
                doc.earlier_id = olddoc.doc_id
        if not doc.earlier_id:
            li.update_db(status=1)
            debug(1, "no substantive revisions")
            return 0
    
    else:
        # check for duplicates:
        dupe = get_duplicate(doc)
        if dupe:
            debug(1, "duplicate of document %s", dupe.doc_id)
            li.update_db(status=1, doc_id=dupe.doc_id)
            return 0
    
        # ignore old and published paper:
        if paper_is_old(doc):
            li.update_db(status=1, doc_id=None)
            debug(1, "ignoring already published paper")
            return 0

        # flag for manual approval if confidence low or dubious relevance:
        if doc.is_paper < 60 or doc.is_philosophy < 60 or doc.meta_confidence < 60:
            debug(1, "flagging for manual approval")
            doc.hidden = True

        # don't show papers (incl HTML pages) from newly added source
        # pages in news feed:
        if doc.source.status == 0:
            debug(2, "new source page: setting found_date to 1970")
            doc.found_date = datetime(1970, 1, 1)
        
    doc.update_db()
    li.update_db(status=1, doc_id=doc.doc_id)

    # categorize, but only if doc has more than 700 words --
    # otherwise categorization is pretty random:
    if doc.numwords > 700:
        for (cat_id, cat) in categories():
            clf = classifier.get_classifier(cat)
            try:
                strength = int(clf.classify(doc) * 100)
                debug(3, "%s score %s", cat, strength)
            except UntrainedClassifierException:
                continue
            doc.assign_category(cat_id, strength)

    return 1
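
The docstring notes that, unless force_reprocess is set, li.fetch() sends if_modified_since and etag headers for links that have already been checked. That fetch routine is not shown here; the sketch below illustrates the general technique with the requests library, assuming hypothetical last_checked and etag attributes on the link object (the real implementation may store these validators differently):

import requests

def conditional_fetch(link, only_if_modified=True, timeout=30):
    """Fetch link.url, letting the server skip the body if nothing has changed.

    Returns the requests.Response, or None on a 304 Not Modified answer.
    """
    headers = {}
    if only_if_modified:
        if getattr(link, 'last_checked', None):
            # assumed datetime attribute, stored in UTC
            headers['If-Modified-Since'] = link.last_checked.strftime(
                '%a, %d %b %Y %H:%M:%S GMT')
        if getattr(link, 'etag', None):
            # assumed string attribute holding the ETag from the previous fetch
            headers['If-None-Match'] = link.etag
    r = requests.get(link.url, headers=headers, timeout=timeout)
    if r.status_code == 304:
        return None  # unchanged since the last visit; nothing to process
    link.etag = r.headers.get('ETag')  # remember the validator for next time
    return r

A falsy return value is exactly what process_link relies on: if li.fetch() reports an error or an unmodified page, process_link returns 0 right away.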