Example #1
def check_steppingstone(page):
    """
    checks whether Webpage <page> is a cover page for a single paper,
    as repositories, journals etc. often have; if yes, returns the url
    of the actual paper.
    """

    debug(3, "checking: intermediate page leading to article?")

    # steppingstone pages from known repositories:
    redir_patterns = [
        # arxiv.org, springer.com, researchgate, etc.:
        (re.compile('<meta name="citation_pdf_url" content="(.+?)"'),
        (lambda m: page.make_absolute(requests.utils.unquote(m.group(1))))),
        # philpapers.org:
        (re.compile('class=\'outLink\' href="http://philpapers.org/go.pl[^"]+u=(http.+?)"'),
        (lambda m: page.make_absolute(requests.utils.unquote(m.group(1))))),
        # philsci-archive.pitt.edu:
        (re.compile('<meta name="eprints.document_url" content="(.+?)"'),
        (lambda m: page.make_absolute(requests.utils.unquote(m.group(1))))),
        # sciencedirect.com:
        (re.compile('pdfurl="(.+?)"'),
        (lambda m: page.make_absolute(requests.utils.unquote(m.group(1))))),
        # PLOSOne:
        (re.compile('(http://www.plosone.org/article/.+?representation=PDF)" id="downloadPdf"'),
        (lambda m: page.make_absolute(requests.utils.unquote(m.group(1))))),
        # Google Drive:
        (re.compile('content="https://drive.google.com/file/d/(.+?)/'),
        (lambda m: 'https://googledrive.com/host/{}'.format(requests.utils.unquote(m.group(1)))))
    ]
    for (pattern, retr_target) in redir_patterns:
        m = pattern.search(page.html)
        if m:
            target = util.normalize_url(retr_target(m))
            if target == page.url:
                return None
            debug(3, "yes: repository page for %s", target)
            return target
    
    # other steppingstone pages must have link(s) to a single pdf file:
    targets = set(u for u in page.xpath('//a/@href') if re.search(r'\.pdf$', u, re.I))
    if len(targets) != 1:
        debug(3, "no: %s links to pdf files", len(targets))
        return None
    target = targets.pop()
    debug(3, "yes: single link to pdf file %s", target)
    target = util.normalize_url(page.make_absolute(target))
    return target
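
A minimal usage sketch for check_steppingstone (the resolve_paper_url wrapper and the requests.get fetch are illustrative assumptions; the Webpage constructor mirrors its use in process_link below):

import requests

def resolve_paper_url(url):
    # hypothetical helper: fetch a landing page and follow it to the actual paper
    r = requests.get(url, timeout=30)
    page = Webpage(url, html=r.text)
    target = check_steppingstone(page)  # pdf url, or None if not a cover page
    return target or url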
Example #2
def find_new_pages(self, name):
    """searches for papers pages matching author name, returns urls of new pages"""
    logger.info("\nsearching papers page(s) for %s", name)
    pages = set()
    search_terms = [
        # careful with google.com: don't block sites.google.com...
        '-site:academia.edu',
        '-site:wikipedia.org',
        '-site:philpapers.org',
        '-filetype:pdf',
        '~philosophy',
        '(publications OR articles OR papers OR "in progress" OR forthcoming)',
    ]
    # search full name first, then last name only:
    search_phrase = '"{}" '.format(name) + ' '.join(search_terms)
    searchresults = set(googlesearch.search(search_phrase))
    search_phrase = '"{}" '.format(name.split()[-1]) + ' '.join(search_terms)
    searchresults |= set(googlesearch.search(search_phrase))
    for url in searchresults:
        logger.debug("\n")
        url = util.normalize_url(url)
        if self.bad_url(url):
            logger.info("bad url: %s", url)
            continue
        # check if url already known:
        cur = db.cursor()
        cur.execute("SELECT 1 FROM sources WHERE url = %s", (url,))
        rows = cur.fetchall()
        if rows:
            logger.info("%s already known", url)
            continue
        try:
            status, r = util.request_url(url)
            if status != 200:
                raise Exception('status {}'.format(status))
        except Exception:
            logger.info("cannot retrieve %s", url)
        else:
            score = self.evaluate(r, name)
            if score < 0.7:
                logger.info("%s doesn't look like a papers page", url)
                continue
            dupe = self.is_duplicate(url)
            if dupe:
                logger.info("%s is a duplicate of already known %s", url, dupe)
                continue
            logger.info("new papers page for %s: %s", name, url)
            pages.add(url)
    if not pages:
        logger.info("no pages found")
    self.update_author(name)
    return pages
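
A sketch of how a caller might persist the urls returned by find_new_pages. Only the url column of the sources table is attested by the query above; the scan_authors wrapper, the extra columns, and db.commit() are assumptions for illustration:

def scan_authors(scraper, names):
    # hypothetical driver: search for each author and store any new page urls
    cur = db.cursor()
    for name in names:
        for url in scraper.find_new_pages(name):
            # sourcetype and default_author are assumed columns
            cur.execute("INSERT INTO sources (url, sourcetype, default_author) "
                        "VALUES (%s, %s, %s)", (url, 'personal', name))
    db.commit()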
Example #3
def process_link(li, force_reprocess=False, redir_url=None, keep_tempfiles=False,
                 recurse=0):
    """
    Fetch url, check for http errors and steppingstones, filter spam,
    parse candidate papers, check for duplicates, check if published
    before last year.

    Links often lead to intermediate pages (e.g. on repositories) with
    another link to the actual paper. In this case, we only store the
    original link in the 'links' table, so the 'doc' entry has a url
    that doesn't match any link. To process the new link, process_link
    is called again, with redir_url set to the new url and recurse
    incremented by 1.

    If force_reprocess is False and the link has already been checked
    at some point, if_modified_since and etag headers are sent.
    """

    # ignore links to old and published papers:
    li.context = li.html_context()
    debug(2, "link context: %s", li.context)
    if context_suggests_published(li.context):
        return li.update_db(status=1, doc_id=None)
    
    # fetch url and handle errors, redirects, etc.:
    url = redir_url or li.url
    r = li.fetch(url=url, only_if_modified=not force_reprocess)
    # note: li.fetch() updates the link entry in case of errors
    if not r:
        return 0
        
    if r.url != url: # redirected
        url = util.normalize_url(r.url)
        # now we treat li as if it directly led to the redirected document

    if r.filetype not in ('html', 'pdf', 'doc', 'rtf'):
        li.update_db(status=error.code['unsupported filetype'])
        return debug(1, "unsupported filetype: %s", r.filetype)

    doc = Doc(url=url, r=r, link=li, source=li.source)
    
    if doc.load_from_db() and not force_reprocess:
        li.update_db(status=1, doc_id=doc.doc_id)
        return debug(1, "%s is already in docs table", url)
    
    if r.filetype == 'html':
        r.encoding = 'utf-8'
        try:
            doc.page = Webpage(url, html=r.text)
        except UnparsableHTMLException:
            li.update_db(status=error.code['unsupported filetype'])
            return debug(1, "unparsable html")

        debug(6, "\n====== %s ======\n%s\n======\n", url, r.text)

        # check for steppingstone pages with link to a paper:
        target_url = check_steppingstone(doc.page)
        if target_url and recurse < 3:
            debug(1, "steppingstone to %s", target_url)
            return process_link(li, redir_url=target_url, 
                                force_reprocess=force_reprocess, recurse=recurse+1)

        # Genuine papers are almost never in HTML format, and almost
        # every HTML page is not a paper. The few exceptions (such as
        # entries on SEP) tend to require special parsing. Hence the
        # following special treatment. If people start posting
        # articles on medium or in plain HTML, we might return to the
        # old procedure of converting the page to pdf and treating it
        # like any candidate paper.
        from .docparser import webpageparser as htmlparser
        if not htmlparser.parse(doc):
            debug(1, "page ignored")
            li.update_db(status=1)
            return 0

    else:
        try:
            doc.tempfile = save_local(r)
        except Exception:
            return li.update_db(status=error.code['cannot save local file'])
        try:
            process_file(doc, keep_tempfiles=keep_tempfiles)
        except Exception as e:
            debug(1, 'could not process %s: %s', doc.tempfile, e)
            return li.update_db(status=error.code.get(str(e), 10))
            
    # estimate whether doc is a handout, cv etc.:
    from .doctyper import paperfilter
    paperprob = paperfilter.evaluate(doc)
    doc.is_paper = int(paperprob * 100)
    if doc.is_paper < 25:
        li.update_db(status=1)
        debug(1, "spam: paper score %s < 50", doc.is_paper)
        return 0
        
    # estimate whether doc is on philosophy:
    from .doctyper import classifier
    philosophyfilter = classifier.get_classifier('philosophy')
    try:
        doc.is_philosophy = int(philosophyfilter.classify(doc) * 100)
    except UntrainedClassifierException as e:
        doc.is_philosophy = 90
    if doc.is_philosophy < 25:
        li.update_db(status=1)
        debug(1, "spam: philosophy score %s < 50", doc.is_philosophy)
        return 0
        
    if li.doc_id:
        # check for revisions:
        olddoc = Doc(doc_id=li.doc_id)
        olddoc.load_from_db()
        if doc.content != olddoc.content:
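            # ratio() gives the similarity of the two texts (0 to 1);
            # below 0.8 we treat the new version as a substantive revision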
            sm = SequenceMatcher(None, doc.content, olddoc.content)
            match_ratio = sm.ratio()
            if match_ratio < 0.8:
                debug(1, "substantive revisions, ratio %s", match_ratio)
                doc.earlier_id = olddoc.doc_id
        if not doc.earlier_id:
            li.update_db(status=1)
            debug(1, "no substantive revisions")
            return 0
    
    else:
        # check for duplicates:
        dupe = get_duplicate(doc)
        if dupe:
            debug(1, "duplicate of document %s", dupe.doc_id)
            li.update_db(status=1, doc_id=dupe.doc_id)
            return 0
    
        # ignore old and published paper:
        if paper_is_old(doc):
            li.update_db(status=1, doc_id=None)
            debug(1, "ignoring already published paper")
            return 0

        # flag for manual approval if confidence low or dubious relevance:
        if doc.is_paper < 60 or doc.is_philosophy < 60 or doc.meta_confidence < 60:
            debug(1, "flagging for manual approval")
            doc.hidden = True

        # don't show papers (incl HTML pages) from newly added source
        # pages in news feed:
        if doc.source.status == 0:
            debug(2, "new source page: setting found_date to 1970")
            doc.found_date = datetime(1970, 1, 1)
        
    doc.update_db()
    li.update_db(status=1, doc_id=doc.doc_id)

    # categorize, but only if doc has more than 700 words --
    # otherwise categorization is pretty random:
    if doc.numwords > 700:
        for (cat_id, cat) in categories():
            clf = classifier.get_classifier(cat)
            try:
                strength = int(clf.classify(doc) * 100)
                debug(3, "%s score %s", cat, strength)
            except UntrainedClassifierException as e:
                continue 
            doc.assign_category(cat_id, strength)

    return 1
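
A sketch of a driver loop around process_link (the pending_links iterable and the process_pending_links wrapper are assumptions; debug() and li.url are used as in the code above):

def process_pending_links(pending_links, force_reprocess=False):
    # hypothetical driver: run process_link over a batch of link objects
    for li in pending_links:
        debug(1, "processing %s", li.url)
        try:
            process_link(li, force_reprocess=force_reprocess)
        except Exception as e:
            debug(1, "error processing %s: %s", li.url, e)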