Example #1
    def receive(self):
        if USE_TIMEOUT:
            import timeout
            # wrap the blocking readline in a 10-second TimeoutFunction
            stdout_read = timeout.TimeoutFunction(self.client.stdout.readline, 10)
            try:
                return stdout_read().strip()
            except timeout.TimeoutFunctionException:
                logging.error("Client taking too long to respond (%s)" % self.name)
                raise ClientErrorException(loser=self.name, message="Client taking too long to respond (%s)" % self.name)
        else:
            return self.client.stdout.readline().strip()
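
All five examples wrap a blocking or long-running call in timeout.TimeoutFunction and catch timeout.TimeoutFunctionException. The timeout module itself is not shown; a minimal sketch of the wrapper these examples appear to assume, modeled on the classic SIGALRM recipe (so Unix-only and usable only from the main thread), could look like this:

import signal

class TimeoutFunctionException(Exception):
    """Raised when the wrapped call exceeds its time budget."""
    pass

class TimeoutFunction:
    def __init__(self, function, timeout):
        # timeout is in whole seconds, since signal.alarm() takes an int
        self.function = function
        self.timeout = timeout

    def handle_timeout(self, signum, frame):
        raise TimeoutFunctionException()

    def __call__(self, *args, **kwargs):
        old_handler = signal.signal(signal.SIGALRM, self.handle_timeout)
        signal.alarm(self.timeout)  # schedule SIGALRM in `timeout` seconds
        try:
            result = self.function(*args, **kwargs)
        finally:
            signal.alarm(0)  # cancel any pending alarm
            signal.signal(signal.SIGALRM, old_handler)
        return result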
Example #2
def fetchStory(url):
    siteDB = 'peepbuzz'
    infoModule.info.page['url'] = url
    log.plog("fetching " + url, 2)
    request_obj = urllib2.Request(url)
    request_obj.add_header('Referer', 'http://www.google.com/')
    request_obj.add_header(
        'User-agent',
        'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30)'
    )
    try:
        websock = urllib2.urlopen(request_obj)
    except IOError:
        log.plog('could not open ' + url, 4)
        return failOn('could not open ' + url)
    responseCode = websock.getcode()
    headerInfo = websock.info()
    pprint.pprint(headerInfo)
    log.plog('urllib2 response code: ' + str(responseCode), 2)
    if responseCode not in (200, 301, 302, 303):
        log.plog('got failure response code from server', 4)
        return failOn('got failure response code from server')
    contentType = headerInfo.gettype()
    if contentType not in ('text/html', 'text/html, text/html'):
        log.plog('content type: ' + contentType + '. not fetching', 4)
        return failOn('content type: ' + contentType + '. not fetching')

    readWithTimeout = timeout.TimeoutFunction(websock.read, 5)
    #infoModule.info.page['rawHTML'] = websock.read()
    try:
        infoModule.info.page['rawHTML'] = readWithTimeout()
    except timeout.TimeoutFunctionException:
        log.plog("timeout while trying to fetch " + url, 101)
        return failOn('read timeout ' + url)
    redirURL = websock.geturl()
    if redirURL != url:
        log.plog('redirected to ' + redirURL, 2)
        url = redirURL
        #redirected urls need to be blocked too

    if len(infoModule.info.page['rawHTML']) > 500000:
        log.plog("article length exceeds 500k, probably not html", 2)
        return failOn('article length exceeds 500k, probably not html')

    windows_trouble_list = [u'\x93', u'\x92', u'\x91', u'\x96', u'\x94']
    cd = chardet.detect(infoModule.info.page['rawHTML'])
    if cd['encoding'] != 'ascii':
        log.plog('Server encoding: ' + cd['encoding'], 2)
        oldHTML = infoModule.info.page['rawHTML']
        infoModule.info.page['rawHTML'] = infoModule.info.page[
            'rawHTML'].decode(cd['encoding'])
        windows_chars_in_html = [
            trouble for trouble in windows_trouble_list
            if infoModule.info.page['rawHTML'].find(trouble) >= 0
        ]
        if len(windows_chars_in_html) > 0:
            #windows = infoModule.info.page['rawHTML'].find(u'\x93')
            log.plog('this is actually windows-1252', 3)
            infoModule.info.page['rawHTML'] = oldHTML.decode('windows-1252')

    # some configuration options
    infoModule.info.page['meta_description'] = ''
    meta_search = re.search(
        'meta name="description" content="(.*?\s+.*?\s+.*?\s+.*?\s+).*?"',
        infoModule.info.page['rawHTML'], re.I | re.S)
    if meta_search != None:
        infoModule.info.page['meta_description'] = meta_search.group(1)
        log.plog(
            "meta_description: " + infoModule.info.page['meta_description'], 2)

    log.plog(
        '======================================= TITLE ================================',
        2)
    # get title
    #set HTMLTitle first

    HTMLTitle = re.search('<title>(.*?)<\/title>',
                          infoModule.info.page['rawHTML'], re.S | re.I)
    if HTMLTitle != None:
        infoModule.info.page['HTMLTitle'] = HTMLTitle.group(1)
        log.plog('html title found: ' + infoModule.info.page['HTMLTitle'], 2)
    else:
        infoModule.info.page['HTMLTitle'] = ""
    title = find_title.findTitle()
    if title != False:
        infoModule.info.page['title'] = title
        log.plog('title from regex', 2)
    if 'potential_title' in infoModule.info.page and len(
            infoModule.info.page['potential_title']) > 0:
        infoModule.info.page['title'] = strip_html.clearHTML(
            infoModule.info.page['potential_title'])
        log.plog('title from potential_title', 2)
    else:
        infoModule.info.page['title'] = real_title2.realTitle()
        if infoModule.info.page['title'] == False:
            infoModule.info.page['title'] = infoModule.info.page['HTMLTitle']
            log.plog('using html title', 2)
        else:
            log.plog('title from realTitle', 2)

    if infoModule.info.page['title'] == '':
        log.plog('could not find title for page. Setting to HTML Title', 4)
        infoModule.info.page['title'] = infoModule.info.page['HTMLTitle']

    #clear html from title
    infoModule.info.page['title'] = strip_html.clearHTML(
        infoModule.info.page['title'])
    #also titleCase the title
    #infoModule.info.page['title'] = infoModule.info.page['title'].title()
    log.plog('final title: ' + infoModule.info.page['title'], 2)

    #cd = chardet.detect(infoModule.info.page['title'])
    #if cd['encoding'] != 'ascii':
    #    log.plog('title encoding: ' + cd['encoding'], 2)
    #    oldTitle = infoModule.info.page['title']
    #    infoModule.info.page['title'] = infoModule.info.page['title'].decode(cd['encoding'])
    #    windows_chars_in_html = [trouble for trouble in windows_trouble_list if infoModule.info.page['title'].find(trouble) >= 0]
    #    if len(windows_chars_in_html) > 0:
    #        #windows = infoModule.info.page['rawHTML'].find(u'\x93')
    #        log.plog('title is actually windows-1252', 3)
    #        infoModule.info.page['title'] = oldTitle.decode('windows-1252')

    log.plog(
        '======================================= OUTLINE ================================',
        2)
    ## fetch outline
    #remove special case elements from the html.  These are lines or blocks of code that cause
    #problems if left in
    infoModule.info.page['plainText'] = strip_html.removeSpecialCases(
        infoModule.info.page['rawHTML'])
    infoModule.info.page['plainText'] = strip_html.clearHTML(
        infoModule.info.page['plainText'])
    #clearHTML can't take out title, because title gets passed to clearHTML, but it should be removed here
    infoModule.info.page['plainText'] = re.sub(
        '<title.*?</title.*?>', '', infoModule.info.page['plainText'], 0,
        re.I | re.S | re.M)
    outline = False
    #this toggle allows for ignoring regex in favor of body_extractor

    log.plog('searching for body using body extractor', 2)
    infoModule.info.site['body_extractor_no_date'] = True
    outline = body_extractor.extract(infoModule.info.page['plainText'],
                                     doAsciiConvert=False)
    infoModule.info.page['imageHTML'] = infoModule.info.page['rawHTML']
    if outline != None:
        abbreviatedHTML = html_body_extractor.html_body_extractor(
            infoModule.info.page['rawHTML'], outline)
        if abbreviatedHTML != None:
            infoModule.info.page['rawHTML'] = abbreviatedHTML
        infoModule.info.page['outline'] = outline
        #use largestBlock to strip leading dom elements off that seem extraneous
        infoModule.info.page['outline'] = largestBlock.removePreceedingBlocks(
            infoModule.info.page['imageHTML'], infoModule.info.page['outline'])
    else:
        log.plog('could not create an outline for this story!', 5)
        infoModule.info.page['outline'] = ''

        #return failOn('could not create an outline for this story!')

    # outline must be at least minOutlineLen
    minOutlineLen = 255
    if len(infoModule.info.page['outline']) > 0 and len(
            infoModule.info.page['outline']) < minOutlineLen:
        log.plog('outline too short, assuming failure', 3)
        infoModule.info.page['outline'] = ''

    log.plog(
        '======================================= IMAGES ================================',
        2)
    #find images
    imageArray = find_all_images.findImages(infoModule.info.page['imageHTML'],
                                            url)
    if imageArray == None:
        log.plog('could not find image', 3)
        imageArray = ''

    log.plog(
        '======================================= VIDEOS ================================',
        2)
    ###look for videos
    allVideosJSON = find_all_videos.find_all_videos(
        infoModule.info.page['imageHTML'])

    allVideos = json.loads(allVideosJSON)
    if len(allVideos) > 0:
        log.plog('found video embed', 2)
        print allVideosJSON

    #if no outline and no images over x by y and no videos, then no story
    if infoModule.info.page['outline'] == '' and (
            imageArray == '' or imageArray == []) and allVideos == []:
        # allVideos is a list (from json.loads), so compare to [],
        # and return the failure result like the other exit paths
        return failOn('nothing found')

    #largest image if no outline must be at least 450 x 450 to make it an image page
    largestImageDimensions = 0
    largestImage = []
    for image in imageArray:
        if image['width'] * image['height'] > largestImageDimensions:
            largestImage = image
            largestImageDimensions = image['width'] * image['height']

    print largestImage
    minImageSize = 400
    if infoModule.info.page['outline'] == '' and allVideos == [] and (
            largestImage == [] or largestImage['width'] < minImageSize
            or largestImage['height'] < minImageSize):
        return failOn(
            'no story or video found, and largest image less than min size')

    status = 'OK'
    storyObj = {}
    storyObj['title'] = infoModule.info.page['title']

    storyObj['outline'] = unicodeMapper.clearCurlies(
        infoModule.info.page['outline'])
    storyObj['url'] = url
    storyObj['images'] = imageArray
    storyObj['videos'] = allVideos
    returnVal = {"status": status, "story": storyObj}
    output = json.dumps(returnVal)
    return output
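
Note that fetchStory guards only the read() with TimeoutFunction; the connect inside urllib2.urlopen can still hang. A common complement (a sketch, not part of the code above) is a socket-level default timeout, which makes urllib2 raise urllib2.URLError wrapping socket.timeout on slow connects and socket.timeout on slow reads:

import socket
socket.setdefaulttimeout(10)  # seconds; applies to urllib2.urlopen and websock.read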
Example #3
def scanPage():
    siteDB = infoModule.info.site['database']

    if 'url' not in infoModule.info.page:
        log.plog('scan page called without url', 4)
        os._exit(0)

    urlBlockerQ = mysql_tools.mysqlQuery(
        "select * from " + siteDB + ".urlBlocker",
        infoModule.info.site['dblink'])
    while True:
        urlBlocker = urlBlockerQ.fetch_row(1, 1)
        if urlBlocker == ():
            break
        blockTest = re.search(urlBlocker[0]['regex'],
                              infoModule.info.page['url'])
        if blockTest != None:
            log.plog(
                'url ' + infoModule.info.page['url'] + " matches urlBlocker " +
                urlBlocker[0]['regex'], 2)
            os._exit(0)

    log.plog("fetching " + infoModule.info.page['url'], 2)
    try:
        socket = urllib.urlopen(infoModule.info.page['url'])
    except IOError:
        log.plog('could not open ' + infoModule.info.page['url'], 4)
        return False
    responseCode = socket.getcode()
    log.plog('urllib response code: ' + str(responseCode), 2)
    if responseCode not in (200, 301, 302, 303):
        log.plog('got failure response code from server', 4)
        return False
    headerInfo = socket.info()
    contentType = headerInfo.gettype()
    if contentType not in ('text/html', 'text/html, text/html'):
        log.plog('content type: ' + contentType + '. not fetching', 4)
        return False
    # put in to account for WSJ -dpg
    if re.search("wsj\.com", infoModule.info.page['url'], re.S | re.M | re.I):
        infoModule.info.page['rawHTML'] = wsjAuthHack(
            infoModule.info.page['url'])
    elif re.search("nytimes\.com", infoModule.info.page['url'],
                   re.S | re.M | re.I):
        infoModule.info.page['rawHTML'] = nytAuthHack(
            infoModule.info.page['url'])
    else:
        infoModule.info.page['rawHTML'] = socket.read()
    redirURL = socket.geturl()
    if redirURL != infoModule.info.page['url']:
        log.plog('redirected to ' + redirURL, 2)
        infoModule.info.page['url'] = redirURL
        #redirected urls need to be blocked too
        urlBlockerQ = mysql_tools.mysqlQuery(
            "select * from " + siteDB + ".urlBlocker",
            infoModule.info.site['dblink'])
        while True:
            urlBlocker = urlBlockerQ.fetch_row(1, 1)
            if urlBlocker == ():
                break
            blockTest = re.search(urlBlocker[0]['regex'],
                                  infoModule.info.page['url'])
            if blockTest != None:
                log.plog(
                    'url ' + infoModule.info.page['url'] +
                    " matches urlBlocker " + urlBlocker[0]['regex'], 2)
                os._exit(0)

        ### and short url needs to be blocked
        #do not read links that have only one string in them
        linkParts = urlparse.urlparse(infoModule.info.page['url'])
        shortPath = re.search('^/\w+/*$', linkParts[2])
        if shortPath != None:
            log.plog(
                "link excluded because it only has a short path of characters: %s"
                % linkParts[2], 2)
            os._exit(0)

    ## anything in htmlBlacklist?
    htmlBlacklistQ = mysql_tools.mysqlQuery(
        "select regex from " + siteDB + ".htmlBlacklist",
        infoModule.info.site['dblink'])
    while True:
        htmlBlacklist = htmlBlacklistQ.fetch_row(1, 1)
        if htmlBlacklist == ():
            break
        badSeedHTML = re.search(htmlBlacklist[0]['regex'],
                                infoModule.info.page['rawHTML'])
        if badSeedHTML != None:
            log.plog(
                'html matches htmlBlocker regex: ' + htmlBlacklist[0]['regex'],
                3)
            os._exit(0)

    ###################################
    #special case for feedburner sources
    #ernst does not like special cases
    ###################################
    infoModule.info.page['url'] = re.sub('\?.*utm_source.*$', '',
                                         infoModule.info.page['url'])

    #check AGAIN to see if url is already in system
    escURL = infoModule.info.page['url'].replace("'", "\\'")
    urlCheckQ = mysql_tools.mysqlQuery(
        "select sub_id from " + siteDB + ".newsroom where url='" + escURL +
        "'", infoModule.info.site['dblink'])
    #don't exit, return false so that a new story can be tried
    if urlCheckQ.num_rows() > 0:
        log.plog(
            "scanpage-url already in newsroom: %s" %
            infoModule.info.page['url'], 2)
        log.plog("newsroom_id: " + str(urlCheckQ.fetch_row(1, 1)))
        return False
    urlCheckQ = mysql_tools.mysqlQuery(
        "select sub_id from " + siteDB + ".subs where url='" + escURL + "'",
        infoModule.info.site['dblink'])
    if urlCheckQ.num_rows() > 0:

        log.plog(
            "scanpage-url already in subs: %s" % infoModule.info.page['url'],
            2)
        log.plog("sub_id: " + str(urlCheckQ.fetch_row(1, 1)))
        return False

    ## if source is '0', try to find source
    if infoModule.info.source['source_id'] == '0':
        sourceRegexQ = mysql_tools.mysqlQuery(
            "select * from " + siteDB + ".sources where url_regex != ''",
            infoModule.info.site['dblink'])
        while True:
            sourceRegex = sourceRegexQ.fetch_row(1, 1)
            if sourceRegex == ():
                break
            urlTest = re.search(sourceRegex[0]['url_regex'],
                                infoModule.info.page['url'])
            if urlTest != None:
                log.plog('found source via regex: ' + sourceRegex[0]['title'],
                         2)
                infoModule.info.source = sourceRegex[0]
                for i in infoModule.info.source.keys():
                    ## hack-y: the DB layer returns None for SQL NULL, so normalize to ''
                    if infoModule.info.source[i] == None:
                        infoModule.info.source[i] = ''

                break

    ## maybe check last modified header and don't get stories older than 7 days?
    '''possibleAgeInDays = dateGuesser.urlDateGuesser(infoModule.info.page['url'])
    if possibleAgeInDays != None:
        log.plog("age of story might be: " + str(possibleAgeInDays) + " based on " + infoModule.info.page['url'], 2)
        if int(possibleAgeInDays) > 5:
            log.plog("story is " + str(possibleAgeInDays) + " days old.  Not reading", 2)
            return False
'''
    if len(infoModule.info.page['rawHTML']) > 500000:
        log.plog("article length exceeds 500k, probably not html", 2)
        os._exit(0)

    #add meta description into the mix
    infoModule.info.page['meta_description'] = ''
    meta_search = re.search(
        'meta name="description" content="(.*?\s+.*?\s+.*?\s+.*?\s+).*?"',
        infoModule.info.page['rawHTML'], re.I | re.S)
    if meta_search != None:
        infoModule.info.page['meta_description'] = meta_search.group(1).decode(
            'utf-8')
        log.plog(
            "meta_description: " + infoModule.info.page['meta_description'], 2)

    log.plog(
        '======================================= TITLE ================================',
        2)
    # get title
    #set HTMLTitle first
    HTMLTitle = re.search('<title>(.*?)<\/title>',
                          infoModule.info.page['rawHTML'], re.S | re.I)
    if HTMLTitle != None:
        infoModule.info.page['HTMLTitle'] = HTMLTitle.group(1)
        log.plog('html title found: ' + infoModule.info.page['HTMLTitle'], 2)
    else:
        infoModule.info.page['HTMLTitle'] = ""
    title = find_title.findTitle()
    if title != False:
        infoModule.info.page['title'] = title
        log.plog('title from regex', 2)
    if 'potential_title' in infoModule.info.page and len(
            infoModule.info.page['potential_title']) > 0:
        infoModule.info.page['title'] = strip_html.clearHTML(
            infoModule.info.page['potential_title'])
        log.plog('title from potential_title', 2)
    else:
        infoModule.info.page['title'] = real_title2.realTitle()
        if infoModule.info.page['title'] == False:
            infoModule.info.page['title'] = infoModule.info.page['HTMLTitle']
            log.plog('using html title', 2)
        else:
            log.plog('title from realTitle', 2)

    if infoModule.info.page['title'] == '':
        log.plog('could not find title for page. Setting to HTML Title', 4)
        infoModule.info.page['title'] = infoModule.info.page['HTMLTitle']

    #clear html from title
    infoModule.info.page['title'] = strip_html.clearHTML(
        infoModule.info.page['title'])
    #also titleCase the title
    #infoModule.info.page['title'] = infoModule.info.page['title'].title()
    log.plog('final title: ' + infoModule.info.page['title'], 2)

    log.plog(
        '======================================= OUTLINE ================================',
        2)
    ## fetch outline
    if 'featured_source' in infoModule.info.source and infoModule.info.source[
            'featured_source'] == '1':
        infoModule.info.page['plainText'] = strip_html.clearHTMLFeatures(
            infoModule.info.page['rawHTML'])
    else:
        infoModule.info.page['plainText'] = strip_html.clearHTML(
            infoModule.info.page['rawHTML'])

    outline = False
    #this toggle allows for ignoring regex in favor of body_extractor
    if infoModule.info.site['skipBodyRegex'] == False:
        storySearch = timeout.TimeoutFunction(find_story.findStoryViaRegex, 2)
        try:
            outline = storySearch()
            #set html block used for imaage, author and links to be what outline returns
            if outline != False:
                infoModule.info.page['imageHTML'] = infoModule.info.page[
                    'rawHTML']
                infoModule.info.page['rawHTML'] = outline
        except timeout.TimeoutFunctionException:
            outline = False
            log.plog(
                "ERROR regex timed out for %s" %
                infoModule.info.source['story_start_marker'], 5)

    #outline = find_story.findStoryViaRegex()
    if outline != False:
        if infoModule.info.page['promoter'] == '0' and infoModule.info.source[
                'source_id'] != '0' and 'source_format' in infoModule.info.source and len(
                    infoModule.info.source['source_format']) > 0:
            #link scoring only happens on rss feeds
            ## parse links in page only in regex block if we have regex
            log.plog(
                '======================================= LINK SCORING ================================',
                2)
            links.linkScoring(outline, 'subs')
            links.linkScoring(outline, 'newsroom')
            log.plog(
                '======================================= OUTBOUND LINKS ================================',
                2)
            #don't go more than one level deep on blind stories
            links.outboundLinks(outline)

        if 'featured_source' in infoModule.info.source and infoModule.info.source[
                'featured_source'] == '1':
            infoModule.info.page['outline'] = strip_html.clearHTMLFeatures(
                outline)
        else:
            infoModule.info.page['outline'] = strip_html.clearHTML(outline)
    else:
        log.plog('searching for body using body extractor', 2)
        outline = body_extractor.extract(infoModule.info.page['plainText'])
        if outline != False:
            infoModule.info.page['imageHTML'] = infoModule.info.page['rawHTML']
            abbreviatedHTML = html_body_extractor.html_body_extractor(
                infoModule.info.page['rawHTML'], outline)
            if abbreviatedHTML != None:
                infoModule.info.page['rawHTML'] = abbreviatedHTML
            infoModule.info.page['outline'] = outline
        else:
            log.plog('could not create an outline for this story!', 5)
            os._exit(0)
        ## parse links in page - no regex, so look in rawHTML for links
        ## if there are widgetBlockers, first clear them from the html
        linkHTML = infoModule.info.page['rawHTML']
        widgetBlacklistQ = mysql_tools.mysqlQuery(
            "select * from " + siteDB + ".widgetBlacklist",
            infoModule.info.site['dblink'])
        while True:
            widgetBlacklist = widgetBlacklistQ.fetch_row(1, 1)
            if widgetBlacklist == ():
                break
            if not isinstance(linkHTML, str):
                log.plog('linkHTML is not string', 5)
                os._exit(0)
            wblMatch = re.search(
                widgetBlacklist[0]['start_text'] + '.*?' +
                widgetBlacklist[0]['end_text'], linkHTML, re.S | re.I)
            if wblMatch != None:
                log.plog(
                    "found widget blacklist for " +
                    widgetBlacklist[0]['start_text'] + '.*?' +
                    widgetBlacklist[0]['end_text'], 2)
                linkHTML = linkHTML.replace(wblMatch.group(0), '')
                mysql_tools.mysqlQuery(
                    "update " + siteDB +
                    ".widgetBlacklist set hits=hits+1 where widget_id=" +
                    widgetBlacklist[0]['widget_id'],
                    infoModule.info.site['dblink'])

        if infoModule.info.page['promoter'] == '0' and infoModule.info.source[
                'source_id'] != '0' and 'source_format' in infoModule.info.source and len(
                    infoModule.info.source['source_format']) > 0:
            #link scoring only happens on rss feeds
            log.plog(
                '======================================= LINK SCORING ================================',
                2)
            links.linkScoring(linkHTML, 'subs')
            links.linkScoring(linkHTML, 'newsroom')
            log.plog(
                '======================================= OUTBOUND LINKS ================================',
                2)
            #don't go more than one level deep on blind stories
            links.outboundLinks(linkHTML)

    log.plog(
        '======================================= IMAGES ================================',
        2)
    #find images
    if 'image_start_marker' in infoModule.info.source:
        image_start_marker = infoModule.info.source['image_start_marker']
    else:
        image_start_marker = ''

    if 'image_end_marker' in infoModule.info.source:
        image_end_marker = infoModule.info.source['image_end_marker']
    else:
        image_end_marker = ''
    imageArray = find_images.findImages(infoModule.info.page['imageHTML'],
                                        image_start_marker, image_end_marker)
    if imageArray == None:
        log.plog('could not find image', 3)
    else:
        x = imageArray[0]
        y = imageArray[1]
        imageURL = imageArray[2]

        if imageURL == '':
            log.plog('could not find image', 3)
        else:
            log.plog('image found: ' + imageURL, 2)
            infoModule.info.page['largestImage'] = imageURL
            infoModule.info.page['maxSize'] = x * y

    log.plog(
        '======================================= IMAGE CREDIT ================================',
        2)
    ## image credit if any
    infoModule.info.page['imageSource'] = ''
    if 'image_source_start_marker' in infoModule.info.source and 'image_source_end_marker' in infoModule.info.source:
        imageSource = find_credit.findCredit(
            infoModule.info.page['rawHTML'],
            infoModule.info.source['image_source_start_marker'],
            infoModule.info.source['image_source_end_marker'])
        if imageSource != False:
            infoModule.info.page['imageSource'] = imageSource

    log.plog(
        '======================================= VIDEOS ================================',
        2)
    ###look for videos
    videoLink = find_video.findVideoEmbed(infoModule.info.page['rawHTML'])

    if videoLink == False:
        infoModule.info.page['vlink'] = ''
    else:
        log.plog('found video embed', 2)
        infoModule.info.page['vlink'] = videoLink
        vthumb = find_video.findVideoThumb(videoLink)
        if vthumb == False:
            infoModule.info.page['vthumb'] = ''
        else:
            log.plog('found video thumb', 2)
            infoModule.info.page['vthumb'] = vthumb

    log.plog(
        '======================================= AUTHOR ================================',
        2)
    ##author in story?
    if 'author_start_marker' in infoModule.info.source and 'author_end_marker' in infoModule.info.source:
        author = find_author.findAuthor()
        if author != False:
            author = strip_html.clearHTML(author)
            infoModule.info.page['author'] = author
        else:
            infoModule.info.page['author'] = ''
    else:
        infoModule.info.page['author'] = ''

    log.plog(
        '======================================= ENTITIES ================================',
        2)
    #### find entities
    entities.entityFinder(
        infoModule.info.page['title'] + ' ' + infoModule.info.page['outline'],
        True)
    nickname = False
    while nickname is False:
        try:
            entities.nicknameFinder(
                infoModule.info.page['title'] + ' ' +
                infoModule.info.page['outline'], True)
            nickname = True
        except:
            # swallow and retry; note this loops forever if nicknameFinder always raises
            pass
    ## test cityToTeam
    #cityToTeam.getRelevantEntity()

    entities.setPrimo()

    #### chop outline to 500 chars unless featured
    if 'featured_source' not in infoModule.info.source or infoModule.info.source[
            'featured_source'] == '0':
        infoModule.info.page[
            'outline'] = infoModule.info.page['outline'][0:500] + '...'

    if len(infoModule.info.entityList) < 1:
        log.plog("no entities found in story!", 5)
        os._exit(0)

    log.plog(
        '======================================= UNKNOWN ENTITIES ================================',
        2)
    ## any unknown entities?
    entityFixedString = infoModule.info.page[
        'title'] + ' ' + infoModule.info.page['outline']
    entityFixedString = entityFixedString.replace("'s", "")
    entityFixedString = re.sub('\W+', ' ', entityFixedString)

    find_new_entities.find_new_entities(entityFixedString)
    ## page must have at least one non-hidden entity
    invisibleTypesQuery = mysql_tools.mysqlQuery(
        "select mptype_id from db_topics.mptypes where visibility='invisible'",
        infoModule.info.site['dblink'])
    invisibleTypes = ''
    sep = ''
    while True:
        oneType = invisibleTypesQuery.fetch_row(1, 1)
        if oneType == ():
            break
        invisibleTypes = invisibleTypes + sep + oneType[0]['mptype_id']
        sep = ','

    sep = ''
    cclist = ''
    for eKey in infoModule.info.entityList.keys():
        cclist = cclist + sep + str(eKey)
        sep = ','

    sql = "select celeb_id from db_topics.celebs where celeb_id in (" + cclist + ") and mptype_id not in (" + invisibleTypes + ")"
    nonHiddenEntitiesQ = mysql_tools.mysqlQuery(sql,
                                                infoModule.info.site['dblink'])
    if nonHiddenEntitiesQ.num_rows() == 0:
        log.plog('no non-hidden entities found in story!', 4)
        os._exit(0)

    newSubId = addStory.addStory()
    if newSubId == False:
        log.plog('addStory failed', 5)

    else:
        log.plog("Story added.  New sub_id: " + str(newSubId), 2)

    os._exit(0)
Example #4
def scanPage(step):
    if 'url' not in infoModule.info.page:
        log.plog('scan page called without url', 4)
        sys.exit()

    log.plog("fetching " + infoModule.info.page['url'], 2)
    socket = urllib.urlopen(infoModule.info.page['url'])
    infoModule.info.page['rawHTML'] = socket.read()
    redirURL = socket.geturl()
    if redirURL != infoModule.info.page['url']:
        log.plog('redirected to ' + redirURL, 2)
        infoModule.info.page['url'] = redirURL

    ## maybe check last modified header and don't get stories older than 7 days?

    if len(infoModule.info.page['rawHTML']) > 500000:
        log.plog("article length exceeds 500k, probably not html", 2)
        sys.exit()

    print infoModule.info.page['url']

    ## fetch outline
    if 'featured_source' in infoModule.info.source and infoModule.info.source[
            'featured_source'] == '1':
        infoModule.info.page['plainText'] = strip_html.clearHTMLFeatures(
            infoModule.info.page['rawHTML'])
    else:
        infoModule.info.page['plainText'] = strip_html.clearHTML(
            infoModule.info.page['rawHTML'])

    hit = False
    outline = False
    originalStep = step
    # siteDB is never defined in this example; assuming the same lookup as in Example #3
    siteDB = infoModule.info.site['database']
    while hit == False:
        #pick out most popular regex
        sql = "select count(*) as common, story_start_marker, story_end_marker from " + siteDB + ".sources where story_start_marker != '' group by story_start_marker order by count(*) desc limit %d,1" % step
        regexQ = mysql_tools.mysqlQuery(sql, infoModule.info.site['dblink'])
        if regexQ == False:
            break
        if regexQ.num_rows() == 0:
            break
        regex = regexQ.fetch_row(1, 1)
        infoModule.info.source['story_start_marker'] = regex[0][
            'story_start_marker']
        infoModule.info.source['story_end_marker'] = regex[0][
            'story_end_marker']
        infoModule.info.source['story_end_marker'] = infoModule.info.source[
            'story_end_marker'].replace('\/', '/')
        infoModule.info.source['story_start_marker'] = infoModule.info.source[
            'story_start_marker'].replace('\/', '/')
        storySearch = timeout.TimeoutFunction(find_story.findStoryViaRegex, 2)
        try:
            outline = storySearch()
        except timeout.TimeoutFunctionException:
            # narrowed from a bare except: the wrapped call raises this on timeout
            outline = False

        if outline != False:
            hit = True
        step += 1
    if outline != False:
        startMarker = infoModule.info.source['story_start_marker'].replace(
            '<', '&lt;')
        endMarker = infoModule.info.source['story_end_marker'].replace(
            '<', '&lt;')
        if 'featured_source' in infoModule.info.source and infoModule.info.source[
                'featured_source'] == '1':
            infoModule.info.page['outline'] = strip_html.clearHTMLFeatures(
                outline)
        else:
            infoModule.info.page['outline'] = strip_html.clearHTML(outline)
        infoModule.info.page['outline'] = infoModule.info.page[
            'outline'].decode('utf-8')
        infoModule.info.page['outline'] = infoModule.info.page[
            'outline'].encode('ascii', 'xmlcharrefreplace')
        print str(step)
        print startMarker
        print endMarker
        print infoModule.info.page['outline']

    else:
        print "no match"
Example #5
    def pdf_gen_list(self,
                     flist,
                     cssfile=CSSFILE,
                     timeoutSecs=None,
                     outdir_option=None,
                     data_root=None,
                     force=False,
                     nohtml=False,
                     savehtml=False,
                     debug=False,
                     htmlonly=False):
        if outdir_option is not None:
            if 'parallel' in outdir_option.lower() and not data_root:
                logging.error(
                    "To use parallel output dir in list mode, you must specify the data_root (--data_root=)"
                )
                raise OptionError
        completed = []
        timeouts = []
        errs = []
        skipped = []
        logger = logging.getLogger('OAC')
        self.starttime = datetime.datetime.now()
        if timeoutSecs and (int(timeoutSecs) != 0):
            convert_func = timeout.TimeoutFunction(self.xml_to_pdf,
                                                   timeoutSecs)
        else:
            logging.warn("RUNNING WITH NO TIMEOUT")
            convert_func = self.xml_to_pdf

        cssfile_orig = cssfile
        for fname in flist:
            cssfile = cssfile_orig
            if OAC_EADtoPDFGenerator.isNot_DC_or_METS_XML(fname):
                #If pdf exists and is newer than xml, do nothing?
                # may want to make an opt for this behavior
                tStart = datetime.datetime.now()
                (dirname, name) = os.path.split(fname)
                (name, ext) = os.path.splitext(name)
                outputdir = self.outpath(outdir_option, data_root, dirname)
                outfilename = name + '.pdf'
                outfile_path = os.path.join(outputdir, outfilename)
                logger.info("[FILE: %s%s started at %s outputdir:%s]" %
                            (name, ext, tStart.strftime(STRFTIME), outputdir))
                # where i need to check file stamps if not force
                # need function outputdir to become outputfpath
                # and check filestamp of input xml file against output pdf file
                if force or self.input_is_newer(fname, outfile_path):
                    self.numfileattempt += 1
                    msg = ''
                    status = 'WORKING'
                    #Check for DejaVu font compatibility

                    dejavu_compat = test_file_against_font_coverage(
                        fname, 'dejavu')
                    if not dejavu_compat:
                        cssfile = u''.join(
                            (os.path.splitext(cssfile)[0], "-unifont",
                             os.path.splitext(cssfile)[1]))
                        logger.info("Using unifont for {0} -- css:{1}.".format(
                            fname, cssfile))
                    try:
                        html, result_post = convert_func(fname,
                                                         outputdir,
                                                         cssfile=cssfile,
                                                         nohtml=nohtml,
                                                         savehtml=savehtml,
                                                         htmlonly=htmlonly,
                                                         debug=debug)
                        self.numfilecomplete += 1
                        completed.append((fname, outfile_path))
                        status = 'SUCCESS'
                        if (debug or savehtml or nohtml):
                            #Save html string as file
                            htmlfilepath = os.path.join(
                                outputdir, (os.path.split(fname)[1] + '.html'))
                            logger.info("Saving html to file: " + htmlfilepath)
                            f = open(htmlfilepath, 'w')
                            try:
                                f.write(html)
                            finally:
                                f.close()
                    except timeout.TimeoutFunctionException:
                        status = 'TIMEOUT'
                        tFinish = datetime.datetime.now()
                        tDiff = tFinish - tStart
                        msg = "%s:: +++++TIMEOUT CONVERT FUNCTION TIMED OUT!----- Elapsed time:%s +++++ %s" % (
                            status, tDiff, fname)
                        logger.log(TIMEOUT_LOG_LEVEL, msg)
                        self.numtimeouts += 1
                        timeouts.append(fname)
                    except:
                        self.numerrs += 1
                        errs.append(fname)
                        status = 'ERROR'
                        msg = "%s:: Unknown Exception caught for file- +++++ %s\n\n" % (
                            status, fname)
                        logger.exception(msg)
                    # remove the pdf if its size is 0
                    if os.path.exists(outfile_path):
                        out_stat = os.stat(outfile_path)
                        if out_stat.st_size == 0:
                            os.remove(outfile_path)
                else:
                    status = 'SKIPPED'
                    skipped.append((fname, outfile_path))
                tFinish = datetime.datetime.now()
                tDiff = tFinish - tStart
                logger.info(
                    "[%s: IN FILE: %s -- OUTDIR:%s -- Elapsed time = %s]\n\n" %
                    (status, fname, outputdir, tDiff))
                self.timer = self.timer + tDiff
                if self.timer.seconds > 0:
                    self.timer = datetime.timedelta(0)
                    if len(flist) > 1:
                        logger.info("CURRENT RUNNING TIME FOR LIST:%s" %
                                    (tFinish - self.starttime))
                if len(flist) > 1:
                    logger.info(
                        "ATTEMPTS: %d; SUCCESS:%d; ERR:%d; TIMEOUTS:%d" %
                        (self.numfileattempt, self.numfilecomplete,
                         self.numerrs, self.numtimeouts))
        return completed, timeouts, errs, skipped
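
A hypothetical driver for Example #5, assuming OAC_EADtoPDFGenerator can be constructed without arguments (the class definition is not shown above; file paths are made up for illustration):

gen = OAC_EADtoPDFGenerator()
completed, timeouts, errs, skipped = gen.pdf_gen_list(
    ['ead/sample1.xml', 'ead/sample2.xml'],
    timeoutSecs=300,
    force=True)
print "OK:%d TIMEOUT:%d ERR:%d SKIP:%d" % (
    len(completed), len(timeouts), len(errs), len(skipped))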