def combine_children(websites, texts, classes, child_links, child_text):
    combined_sites = {'websites': [], 'texts': [], 'classes': []}
    sites_classes = zip(websites, classes)

    for site, cls in sites_classes:
        group = filter(lambda s: class_order(
            conversions.page_relation(s[0], False, site),
            'False') > 0, zip(websites, classes, texts)
        )

        try:
            min_tuple = min(group, key=lambda g: class_ranks[g[1]])
        except:
            import pdb
            pdb.set_trace()

        group_class = min_tuple[1] + 'Combined'
        group_site = min_tuple[0]
        group_children_text = []
        child_site_texts = zip(child_links, child_text)

        for child, ctext in child_site_texts:
            if conversions.child_or_equal_page(group_site, child, True):
                group_children_text.append(ctext)
                child_idx = child_links.index(child)
                child_links.pop(child_idx)
                child_text.pop(child_idx)
                child_site_texts.pop(child_idx)

        combined_sites['websites'].append(group_site)
        combined_sites['classes'].append(group_class)

        group_text = ' '.join(group_children_text)

        for g in group:
            site_idx = websites.index(g[0])
            websites.pop(site_idx)
            classes.pop(site_idx)
            sites_classes.pop(site_idx)
            texts.pop(site_idx)
        group_text += ' ' + ' '.join(map(lambda g: g[2], group))
        combined_sites['texts'].append(group_text)

    return (combined_sites['websites'],
            combined_sites['texts'],
            combined_sites['classes'])
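
The variants of combine_children in this listing all implement the same grouping idea: for each search result, collect every other result that is its parent, child, or duplicate, keep the best-ranked member as the group's representative, and concatenate the group's text. A self-contained sketch of that idea, using hypothetical stand-ins (toy_ranks, toy_related) rather than the module's class_ranks, class_order, and conversions helpers:

# Minimal sketch of the grouping idea; the rank table and relation test are
# hypothetical stand-ins, not the module's class_ranks / conversions helpers.
toy_ranks = {'True': 0, 'Child': 1, 'False': 2}

def toy_related(a, b):
    # Treat two links as related if one is a prefix of the other.
    return a.startswith(b) or b.startswith(a)

def toy_combine(websites, texts, classes):
    combined = {'websites': [], 'texts': [], 'classes': []}
    remaining = list(zip(websites, classes, texts))
    while remaining:
        site = remaining[0][0]
        group = [t for t in remaining if toy_related(t[0], site)]
        best = min(group, key=lambda g: toy_ranks[g[1]])
        combined['websites'].append(best[0])
        combined['classes'].append(best[1] + 'Combined')
        combined['texts'].append(' '.join(g[2] for g in group))
        remaining = [t for t in remaining if t not in group]
    return combined['websites'], combined['texts'], combined['classes']

print(toy_combine(['http://a.com', 'http://a.com/about', 'http://b.com'],
                  ['root text', 'about text', 'other text'],
                  ['True', 'Child', 'False']))
# (['http://a.com', 'http://b.com'],
#  ['root text about text', 'other text'],
#  ['TrueCombined', 'FalseCombined'])
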
def getlinks(candidate, webpage, state, district_type, district_name):
    district_type = district_type.replace('_',' ').strip()
    state = state_map[state.strip()]
    candidate, last, first = conversions.clean_name(candidate)
    candidate = '+'.join(candidate.split(' '))
    print candidate
    state = '+'.join(state.split(' '))
    district_type = '+'.join(district_type.split(' '))
    district_name = '+'.join(district_name.strip().split(' '))
    search_urls = []
    extra_children_searches = []
    precise_searches = []
    search_urls.append(u'https://www.googleapis.com/customsearch/v1?cx=011743744063680272768:cp4-iesopjm&key=AIzaSyCdHlGJuMzGBH9hNsEMObffDIkzJ44EQhA&hl=en&q={name}+{state}'.format(name=candidate, state=state))
    extra_children_searches.append(u'https://www.googleapis.com/customsearch/v1?cx=011743744063680272768:cp4-iesopjm&key=AIzaSyCdHlGJuMzGBH9hNsEMObffDIkzJ44EQhA&hl=en&q={name}+{state}+info'.format(name=candidate, state=state))
    extra_children_searches.append(u'https://www.googleapis.com/customsearch/v1?cx=011743744063680272768:cp4-iesopjm&key=AIzaSyCdHlGJuMzGBH9hNsEMObffDIkzJ44EQhA&hl=en&q={name}+{state}+sk=info'.format(name=candidate, state=state))
    precise_searches.append(u'https://www.googleapis.com/customsearch/v1?cx=011743744063680272768:cp4-iesopjm&key=AIzaSyCdHlGJuMzGBH9hNsEMObffDIkzJ44EQhA&hl=en&q={name}+{state}+campaign'.format(name=candidate, state=state))
    precise_searches.append(u'https://www.googleapis.com/customsearch/v1?cx=011743744063680272768:cp4-iesopjm&key=AIzaSyCdHlGJuMzGBH9hNsEMObffDIkzJ44EQhA&hl=en&q={name}+{state}+elect'.format(name=candidate, state=state))
    search_urls = [s.encode(chardet.detect(s.encode('utf-8'))['encoding']) for s in search_urls]
    extra_children_searches = [s.encode(chardet.detect(s.encode('utf-8'))['encoding']) for s in extra_children_searches]
    precise_searches = [s.encode(chardet.detect(s.encode('utf-8'))['encoding']) for s in precise_searches]
    old_webpage = webpage
    if webpage != 'www.gernensamples.com':
        webpage = conversions.get_redirect(webpage)
    #if webpage == '404' or webpage == 'ERROR':
        #raise Exception
    websites = []
    webpage_stripped = re.match(r'(?:https?://)?(?:www\.)?(?P<content>.+)',webpage).groupdict()['content'].rstrip('/')
    old_webpage_stripped = re.match(r'(?:https?://)?(?:www\.)?(?P<content>.+)',old_webpage).groupdict()['content'].rstrip('/')
    #TODO strip queries
    webpage_no_queries = ul.urlparse.urlparse(webpage)
    webpage_no_queries = re.match(r'(?:www\.)?(?P<content>.+)',webpage_no_queries.netloc + webpage_no_queries.path).groupdict()['content'].rstrip('/')
    old_webpage_no_queries = ul.urlparse.urlparse(old_webpage)
    old_webpage_no_queries = re.match(r'(?:www\.)?(?P<content>.+)',old_webpage_no_queries.netloc + old_webpage_no_queries.path).groupdict()['content'].rstrip('/')
    patt = re.compile(r'^https?://(?:www.)?{webpage}/?$'.format(webpage=webpage_stripped.lower()))
    old_patt = re.compile(r'^https?://(?:www.)?{webpage}/?$'.format(webpage=old_webpage_stripped.lower()))
    child_patt = re.compile(r'^https?://(?:www\.)?{webpage}.+'.format(webpage=webpage_no_queries.lower()))
    old_child_patt = re.compile(r'^https?://(?:www\.)?{webpage}.+'.format(webpage=old_webpage_no_queries.lower()))
    n = 4
    while True:
        results = map(lambda x: json.loads(requests.get(x).text),search_urls)
        if any(map(lambda r: r.has_key('error') and (r['error']['code'] == 403 or r['error']['code'] == 503),results)):
            print 'sleeping'
            time.sleep(n + random.randint(1,1000)/1000.)
            n = n*2
        elif any(map(lambda r: r.has_key('error'), results)):
            raise Exception(', '.join(map(lambda r: r['error']['message'], filter(lambda r: r.has_key('error'),results))))
        else:
            break
    n = 4
    while True:
        child_results = map(lambda x: json.loads(requests.get(x).text),extra_children_searches)
        if any(map(lambda r: r.has_key('error') and (r['error']['code'] == 403 or r['error']['code'] == 503),child_results)):
            print 'sleeping'
            time.sleep(n + random.randint(1,1000)/1000.)
            n = n*2
        elif any(map(lambda r: r.has_key('error'), child_results)):
            raise Exception(', '.join(map(lambda r: r['error']['message'], filter(lambda r: r.has_key('error'),child_results))))
        else:
            break
    n = 4
    while True:
        precise_results = map(lambda x: json.loads(requests.get(x).text),precise_searches)
        if any(map(lambda r: r.has_key('error') and (r['error']['code'] == 403 or r['error']['code'] == 503),precise_results)):
            print 'sleeping'
            time.sleep(n + random.randint(1,1000)/1000.)
            n = n*2
        elif any(map(lambda r: r.has_key('error'), precise_results)):
            raise Exception(', '.join(map(lambda r: r['error']['message'], filter(lambda r: r.has_key('error'),precise_results))))
        else:
            break

    if type(results) != list:
        print type(results)
        results = [results]
    real_results = [(r if r.has_key('items') else {'items':[]}) for r in results]
    results = real_results
    search_links = [[i['link'].lower() for i in r['items']] for r in results]
    search_text = [[u'{title} {link} {pagemap} {snippet}'.format(**convert_pagemap_dict(i)).lower().encode('utf-8') for i in r['items']] for r in results]
    for ri in range(len(search_links)):
        for si in range(len(search_links[ri])):
            for r in precise_results:
                if r.has_key('items'):
                    for i in r['items']:
                        if conversions.child_or_equal_page(search_links[ri][si], i['link'].lower(), True):
                            search_text[ri][si] += ' bipspecialappearsinprecise'
    child_links = [i['link'].lower() for r in child_results if r.has_key('items') for i in r['items']]
    child_text = [u'{title} {link} {pagemap} {snippet}'.format(**convert_pagemap_dict(i)).lower().encode('utf-8') for r in child_results if r.has_key('items') for i in r['items']]
    #search_text = [[u'{title} {link} {pagemap} {snippet}'.format(**i).lower().encode('utf-8') for i in r['items']] for r in results]
    search_class = [map(lambda s: conversions.page_relation(s, True, webpage,old_webpage),sl) for sl in search_links]
    #search_class = [map(lambda s: 'True' if patt.match(s) != None or old_patt.match(s) != None else ('Child' if child_patt.match(s) != None or old_child_patt.match(s) != None else 'False'),sl) for sl in search_links]
    #print search_text
    #TODO Clean up ssv code
    ssv = [any(map(patt.match,sl)) or any(map(old_patt.match,sl)) for sl in search_links]
    non_websites = [[i['link'] for i in r['items'] if webpage not in i['link']] for r in results]
    cs,ct,cc = zip(*[combine_children(search_links[i],search_text[i],search_class[i], child_links, child_text) for i in range(len(search_links))])
    print 'got there',len(results[0]['items'])
    return non_websites, ssv, webpage_stripped, search_links, search_text, [r['items'] for r in results], search_class, cs, ct, cc,child_links,child_text
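
The three retry loops in getlinks above repeat one pattern: fetch each search URL, back off exponentially (with a little random jitter) while the API answers 403 or 503, and raise on any other error. A sketch of a shared helper for that pattern, assuming the error payload shape ({'error': {'code': ..., 'message': ...}}) shown above:

# Sketch of a consolidated retry helper for the three search loops above.
import json
import random
import time

import requests

def fetch_with_backoff(urls, base_delay=4):
    delay = base_delay
    while True:
        results = [json.loads(requests.get(u).text) for u in urls]
        # 403/503 mean quota exceeded / temporarily unavailable: back off and retry.
        if any('error' in r and r['error']['code'] in (403, 503) for r in results):
            print('sleeping')
            time.sleep(delay + random.randint(1, 1000) / 1000.)
            delay *= 2
            continue
        # Any other API error is fatal.
        errors = [r['error']['message'] for r in results if 'error' in r]
        if errors:
            raise Exception(', '.join(errors))
        return results

# results = fetch_with_backoff(search_urls)
# child_results = fetch_with_backoff(extra_children_searches)
# precise_results = fetch_with_backoff(precise_searches)
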
def combine_children(websites, texts, classes, child_links, child_text):
    combined_sites = {'websites':[],'texts':[],'classes':[]}
    root_sites = []
    temp_root_sites = {}
    sites_classes = zip(websites, classes)
    for site,cls in sites_classes:
        group = filter(lambda s: class_order(conversions.page_relation(s[0],True,site),'False') > 0,zip(websites,classes,texts))
        try:
            min_tuple = min(group,key=lambda g:class_ranks[g[1]])
        except:
            import pdb;pdb.set_trace()
        group_class = min_tuple[1]+'Combined'
        group_site = min_tuple[0]
        group_children_text = []
        child_site_texts = zip(child_links, child_text)
        for child,ctext in child_site_texts:
            if conversions.child_or_equal_page(group_site, child, True):
                group_children_text.append(ctext)
                child_idx=child_links.index(child)
                child_links.pop(child_idx)
                child_text.pop(child_idx)
                child_site_texts.pop(child_idx)
        combined_sites['websites'].append(group_site)
        combined_sites['classes'].append(group_class)
        group_text = ' '.join(group_children_text)
        for g in group:
            site_idx = websites.index(g[0])
            websites.pop(site_idx)
            classes.pop(site_idx)
            sites_classes.pop(site_idx)
            texts.pop(site_idx)
        group_text += ' ' + ' '.join(map(lambda g:g[2],group))
        combined_sites['texts'].append(group_text)

    """
        actual_root = conversion.strip_queries(webpage)
        #webpage_no_queries = ul.urlparse.urlparse(webpage)
        #actual_root = webpage_no_queries.scheme + '://' + webpage_no_queries.netloc + webpage_no_queries.path
        webpage_no_queries = re.match(r'(?:www\.)?(?P<content>.+)',webpage_no_queries.netloc + webpage_no_queries.path).groupdict()['content'].rstrip('/')
        child_patt = re.compile(r'^https?://(?:www\.)?{webpage}.+'.format(webpage=webpage_no_queries.lower()))
        if not temp_root_site.has_key(actual_root) or class_order(cls,temp_root_site[actual_root]['class']) > 0:
            temp_root_sites[actual_root] = {'actual_root':actual_root,'child_patt':child_patt,'class':cls+'Combined'})
    for site_dict in temp_root_sites:
        if any(map(lambda rs['child_patt'].match(site_dict['actual_root']),temp_root_sites)):
            continue
        root_sites.append(site_dict)
        combined_sites[site_dict['actual_root']] = {'text':'','class':site_dict['class']}
        for child,ctext in zip(child_links, child_text):
            if site_dict['child_patt'].match(child):


    site_dict = defaultdict(lambda: {'children':set(),'has_parent':False,'text':''})
    for child,ctext in zip(child_links, child_text):
        site_dict[child]['text'] = ctext
    for site,text in zip(websites, texts):
        webpage_no_queries = ul.urlparse.urlparse(webpage)
        webpage_no_queries = re.match(r'(?:www\.)?(?P<content>.+)',webpage_no_queries.netloc + webpage_no_queries.path).groupdict()['content'].rstrip('/')
        child_patt = re.compile(r'^https?://(?:www\.)?{webpage}.+'.format(webpage=webpage_no_queries.lower()))
        site_dict[site]['text'] = text
        for s in websites:
            if s != site and child_patt.match(s):
                site_dict[site]['children'].add(s)
                site_dict[s]['has_parent'] = True
        for c in child_links:
            if c != site and child_patt.match(c):
                site_dict[site]['children'].add(c)
                site_dict[c]['has_parent'] = True
    combined_sites = {'websites':[],'texts':[],'classes':[]}
    def combine_text(site):
        if len(site_dict[site]['children']) > 0:
            ret_text = site_dict[site]['text']
            for c in site_dict[site]['children']:
                ret_text += ' ' + combine_text(c)
            return ret_text
        else:
            return site_dict[site]['text']
    for site,cl in zip(websites, classes):
        if not site_dict[site]['has_parent']:
            combined_sites['websites'].append(site)
            combined_sites['texts'].append(combine_text(site))
            combined_sites['classes'].append(cl+'Combined')
    """
    return combined_sites['websites'], combined_sites['texts'],combined_sites['classes']
Example #4
def combine_children(websites, texts, classes, child_links, child_text):
    """
    Combines related search results into groups under their parent site and
    returns a tuple of three lists: the parent sites, the combined text of
    each group (including matching child pages), and the parent classes.
    """
    combined_sites = {'websites': [], 'texts': [], 'classes': []}
    # root_sites = []
    # temp_root_sites = {}

    # Create tuples of each search link with its class
    # (i.e. parent, child, identical, no-match)
    sites_classes = zip(websites, classes)

    # Loop over each site and combine
    for site, cls in sites_classes:

        # Build the group: a list of (link, class, text) tuples for every
        # search result that is a child, parent, or duplicate of the
        # current site
        group = filter(
            lambda s: class_order(
                conversions.page_relation(s[0], True, site), 'False'
            ) > 0,
            zip(websites, classes, texts)
        )

        # Get the lowest-ranked tuple in the group, which at this point
        # should be the parent
        try:
            min_tuple = min(group, key=lambda g: class_ranks[g[1]])
        except:
            import pdb
            pdb.set_trace()

        # Set up the '{class}Combined' group
        group_class = min_tuple[1]+'Combined'
        group_site = min_tuple[0]
        group_children_text = []

        # Create a tuple of child pages with their text
        child_site_texts = zip(child_links, child_text)

        # Loop over each child site and determine if it is
        # a child of the group_site website.  If so, append its info
        # to the group_children_text list and remove it from the list
        # of child sites
        for child, ctext in child_site_texts:
            if conversions.child_or_equal_page(group_site, child, True):
                group_children_text.append(ctext)
                child_idx = child_links.index(child)
                child_links.pop(child_idx)
                child_text.pop(child_idx)
                child_site_texts.pop(child_idx)

        # Add this site to the combined_sites dict
        combined_sites['websites'].append(group_site)
        combined_sites['classes'].append(group_class)
        group_text = ' '.join(group_children_text)

        # Remove each site in this group from the various lists
        for g in group:
            site_idx = websites.index(g[0])
            websites.pop(site_idx)
            classes.pop(site_idx)
            sites_classes.pop(site_idx)
            texts.pop(site_idx)

        # Add the group's text to the combined_sites dict
        group_text += ' ' + ' '.join(map(lambda g: g[2], group))
        combined_sites['texts'].append(group_text)

    return (combined_sites['websites'], combined_sites['texts'],
            combined_sites['classes'])
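
One thing to keep in mind when reading the grouping loop above: it pops from the same lists it is iterating over (sites_classes, child_site_texts), and in Python that causes later elements to be skipped. A minimal demonstration of the effect:

# Popping from a list while iterating over it skips elements:
items = ['a', 'b', 'c', 'd']
for x in items:
    items.pop(items.index(x))
print(items)  # ['b', 'd'] -- 'b' and 'd' were never visited by the loop
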
Example #5
def getlinks(candidate, webpage, state, district_type, district_name):
    """
    Gets all the Facebook links found via the Google Custom Search API
    """

    # ### Clean up input variables

    # District
    district_type = district_type.replace('_', ' ').strip()
    district_type = '+'.join(district_type.split(' '))
    district_name = '+'.join(district_name.strip().split(' '))

    # State
    state = state_map[state.strip()]
    state = '+'.join(state.split(' '))

    # Candidate name
    candidate, last, first = conversions.clean_name(candidate)
    candidate = '+'.join(candidate.split(' '))
    #print 'CANDIDATE: {}'.format(candidate)

    # Setup search urls
    search_urls = []
    extra_children_searches = []
    precise_searches = []

    # Common values
    url = "https://www.googleapis.com/customsearch/v1"
    cx = "011743744063680272768:cp4-iesopjm"
    key = "AIzaSyCdHlGJuMzGBH9hNsEMObffDIkzJ44EQhA"

    search_urls.append(
        u'{url}?cx={cx}&key={key}&hl=en&q={name}+{state}'.format(
            url=url, cx=cx, key=key, name=candidate, state=state
        )
    )

    # Just searches for general about pages
    extra_children_searches.append(
        u'{url}?cx={cx}&key={key}&hl=en&q={name}+{state}+info'.format(
            url=url, cx=cx, key=key, name=candidate, state=state
        )
    )

    # sk=info specifies Facebook's about page
    extra_children_searches.append(
        u'{url}?cx={cx}&key={key}&hl=en&q={name}+{state}+sk=info'.format(
            url=url, cx=cx, key=key, name=candidate, state=state
        )
    )

    precise_searches.append(
        u'{url}?cx={cx}&key={key}&hl=en&q={name}+{state}+campaign'.format(
            url=url, cx=cx, key=key, name=candidate, state=state
        )
    )

    precise_searches.append(
        u'{url}?cx={cx}&key={key}&hl=en&q={name}+{state}+elect'.format(
            url=url, cx=cx, key=key, name=candidate, state=state
        )
    )

    # Clean up the encoding of the URLs
    search_urls = [
        s.encode(
            chardet.detect(s.encode('utf-8'))['encoding']
        ) for s in search_urls
    ]

    extra_children_searches = [
        s.encode(
            chardet.detect(s.encode('utf-8'))['encoding']
        ) for s in extra_children_searches
    ]

    #print 'SEARCH_URLS: {}'.format(search_urls)

    precise_searches = [
        s.encode(
            chardet.detect(s.encode('utf-8'))['encoding']
        ) for s in precise_searches
    ]

    # This looks like a guard for a dummy website used in testing.
    # get_redirect follows redirects and returns the final page that gives a 200
    old_webpage = webpage
    if webpage != 'www.gernensamples.com':
        webpage = conversions.get_redirect(webpage)

    #print 'WBBPAGES: {}'.format(webpage)

    has_webpage = True
    #    raise Exception  # why do we need this exception??
    # print 'ok?'
    # Clean up web pages by removing the protocol, subdomain, and trailing '/'

    if has_webpage:
        #print has_webpage
        webpage_stripped = re.match(
            r'(?:https?://)?(?:www\.)?(?P<content>.+)', webpage
        ).groupdict()['content'].rstrip('/')

        old_webpage_stripped = re.match(
            r'(?:https?://)?(?:www\.)?(?P<content>.+)', old_webpage
        ).groupdict()['content'].rstrip('/')

        # TODO strip queries
        webpage_no_queries = ul.urlparse.urlparse(webpage)
        webpage_no_queries = re.match(
            r'(?:www\.)?(?P<content>.+)',
            webpage_no_queries.netloc + webpage_no_queries.path
        ).groupdict()['content'].rstrip('/')

        old_webpage_no_queries = ul.urlparse.urlparse(old_webpage)
        
        #print 'NO:{}'.format(old_webpage_no_queries)
        if old_webpage_no_queries is not None:
            old_webpage_no_queries = re.match(
                r'(?:www\.)?(?P<content>.+)',
                old_webpage_no_queries.netloc + old_webpage_no_queries.path
            ).groupdict()['content'].rstrip('/')

        patt = re.compile(
            r'^https?://(?:www.)?{webpage}/?$'.format(
                webpage=webpage_stripped.lower()
            )
        )
        old_patt = re.compile(
            r'^https?://(?:www.)?{webpage}/?$'.format(
                webpage=old_webpage_stripped.lower()
            )
        )

        child_patt = re.compile(
            r'^https?://(?:www\.)?{webpage}.+'.format(
                webpage=webpage_no_queries.lower()
            )
        )

        old_child_patt = re.compile(
            r'^https?://(?:www\.)?{webpage}.+'.format(
                webpage=old_webpage_no_queries.lower()
            )
        )

    print 'starting'
    n = 4
    while True:
        results = map(lambda x: json.loads(requests.get(x).text), search_urls)
        #for r in results:
        #    print 'error' in r
        if any(map(
                lambda r: ('error' in r and (
                    r['error']['code'] == 403 or r['error']['code'] == 503)
                ), results)):
            print 'sleeping'
            time.sleep(n + random.randint(1, 1000)/1000.)
            n = n*2
        elif any(map(lambda r: 'error' in r, results)):
            raise Exception(', '.join(
                map(
                    lambda r: r['error']['message'],
                    filter(lambda r: 'error' in r, results)
                )
            ))
        else:
            break

    n = 4
    while True:
        child_results = map(
            lambda x: json.loads(requests.get(x).text),
            extra_children_searches
        )
        if any(map(
                lambda r: 'error' in r and (
                    r['error']['code'] == 403 or r['error']['code'] == 503
                ), child_results)):
            print 'sleeping'
            time.sleep(n + random.randint(1, 1000) / 1000.)
            n = n * 2
        elif any(map(
            lambda r: 'error' in r, child_results
        )):
            raise Exception(', '.join(
                map(
                    lambda r: r['error']['message'],
                    filter(lambda r: 'error' in r, child_results)
                )
            ))
        else:
            break

    n = 4
    while True:
        precise_results = map(
            lambda x: json.loads(requests.get(x).text), precise_searches
        )
        if any(map(
            lambda r: 'error' in r and (
                r['error']['code'] == 403 or r['error']['code'] == 503
                ), precise_results)):
            print 'sleeping'
            time.sleep(n + random.randint(1, 1000) / 1000.)
            n = n * 2
        elif any(map(lambda r: 'error' in r, precise_results)):
            raise Exception(', '.join(
                map(
                    lambda r: r['error']['message'],
                    filter(lambda r: 'error' in r, precise_results)
                )
            ))
        else:
            break

  
    if type(results) != list:
        results = [results]

    # Get results from the "items" key and store it in the results variable
    real_results = [
        (r if 'items' in r else {'items': []}) for r in results
    ]
    results = real_results

    # print 'RESULTS:{}'.format(results)
    # Get the result URLs and extract searchable text from the pagemap
    search_links = [[i['link'].lower() for i in r['items']] for r in results]
    search_text = [
        [u'{title} {link} {pagemap} {snippet}'.format(
            **convert_pagemap_dict(i)
        ).lower().encode('utf-8') for i in r['items']] for r in results
    ]

    # first loop may be unnecessary
    for ri in range(len(search_links)):  # for 1 to number of result objects
        for si in range(len(search_links[ri])):  # for 1 to number of links
            # For each "precise result" (name+state+'elect'),
            # see if the link is equivalent
            # or a sub page of the main results (name+state)
            for r in precise_results:
                if 'items' in r:
                    for i in r['items']:
                        if conversions.child_or_equal_page(
                            search_links[ri][si], i['link'].lower(), True
                        ):
                            search_text[ri][si] += ' bipspecialappearsinprecise'  # noqa

    # Get the child-result URLs and extract searchable text from the pagemap
    child_links = [
        i['link'].lower() for r in child_results if 'items' in r
        for i in r['items']
    ]

    child_text = [
        u'{title} {link} {pagemap} {snippet}'.format(
            **convert_pagemap_dict(i)
        ).lower().encode('utf-8') for r in child_results if 'items' in r
        for i in r['items']
    ]

    # Classify each search link based on its relationship
    # to the provided web page, either PARENT, CHILD, TRUE (identity),
    # or FALSE (no match)
    search_class = [
        map(lambda s: conversions.page_relation(
            s, True, webpage, old_webpage
        ), sl) for sl in search_links
    ]

    # TODO Clean up ssv code

    # Seems to match each search link result against the webpage domain
    ssv = [
        any(map(patt.match, sl)) or any(map(old_patt.match, sl))
        for sl in search_links
    ]

    non_websites = [
        [i['link'] for i in r['items'] if webpage not in i['link']]
        for r in results
    ]

    cs, ct, cc = zip(
        *[combine_children(
            search_links[i], search_text[i], search_class[i],
            child_links, child_text
        ) for i in range(len(search_links))]
    )

    print 'got there', len(results[0]['items'])

    return (non_websites, ssv, webpage_stripped, search_links, search_text,
            [r['items'] for r in results], search_class, cs, ct, cc,
            child_links, child_text)
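
convert_pagemap_dict is defined elsewhere in this module; the '{title} {link} {pagemap} {snippet}' format strings above need it to return those four keys even when a Custom Search item omits some fields. A tolerant stand-in, offered only as an assumption about its contract, not the original implementation:

# Hypothetical stand-in for convert_pagemap_dict (the real implementation
# lives elsewhere in this module): flatten one Custom Search result item into
# the four keys the format string expects, tolerating missing fields.
def convert_pagemap_dict(item):
    pagemap = item.get('pagemap', {})
    flat = u' '.join(
        unicode(value)
        for entries in pagemap.values()  # e.g. 'metatags', 'cse_thumbnail'
        for entry in entries             # each value is a list of dicts
        for value in entry.values())
    return {
        'title': item.get('title', u''),
        'link': item.get('link', u''),
        'pagemap': flat,
        'snippet': item.get('snippet', u''),
    }
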
def getlinks(candidate, webpage, state, district_type, district_name):

    # District
    district_type = district_type.replace('_', ' ').strip()
    district_type = '+'.join(district_type.split(' '))
    district_name = '+'.join(district_name.strip().split(' '))

    # State
    state = state_map[state.strip()]
    state = '+'.join(state.split(' '))

    # Candidate Name
    candidate, last, first = conversions.clean_name(candidate)
    candidate = '+'.join(candidate.split(' '))
    #print candidate

    # Search URLs
    search_urls = []
    precise_searches = []


    url = 'https://www.googleapis.com/customsearch/v1'
    cx = '011743744063680272768:xcugk1a_1t0'
    #cx = '009761440872559920339:eqjjlrdgzma'
    key = 'AIzaSyCdHlGJuMzGBH9hNsEMObffDIkzJ44EQhA'

    # Create search URLs
    search_urls.append(
        u'{url}?cx={cx}&key={key}&hl=en&q={name}+{state}'.format(
            url=url, cx=cx, key=key, name=candidate, state=state
        )
    )

    precise_searches.append(
        u'{url}?cx={cx}&key={key}&hl=en&q={name}+{state}+campaign'.format(
            url=url, cx=cx, key=key, name=candidate, state=state
        )
    )

    precise_searches.append(
        u'{url}?cx={cx}&key={key}&hl=en&q={name}+{state}+elect'.format(
            url=url, cx=cx, key=key, name=candidate, state=state
        )
    )

    # URL Encoding Cleanup
    search_urls = [
        s.encode(
            chardet.detect(s.encode('utf-8'))['encoding']
        ) for s in search_urls
    ]

    precise_searches = [
        s.encode(
            chardet.detect(s.encode('utf-8'))['encoding']
        ) for s in precise_searches
    ]

    # Convert a Twitter handle to its web URL before resolving redirects
    webpage = conversions.twitter_handle_to_web(webpage)
    print webpage
    old_webpage = webpage


    if webpage != 'www.gernensamples.com':
        webpage = conversions.get_redirect(webpage)

    # if webpage == '404' or webpage == 'ERROR':
        # raise Exception

    #print search_urls
    #print precise_searches
    webpage_stripped = re.match(
        r'(?:https?://)?(?:www\.)?(?P<content>.+)', webpage
    ).groupdict()['content'].rstrip('/')

    old_webpage_stripped = re.match(
        r'(?:https?://)?(?:www\.)?(?P<content>.+)', old_webpage
    ).groupdict()['content'].rstrip('/')

    # TODO strip queries
    webpage_no_queries = ul.urlparse.urlparse(webpage)
    webpage_no_queries = re.match(
        r'(?:www\.)?(?P<content>.+)',
        webpage_no_queries.netloc + webpage_no_queries.path
    ).groupdict()['content'].rstrip('/')

    old_webpage_no_queries = ul.urlparse.urlparse(old_webpage)
    old_webpage_no_queries = re.match(
        r'(?:www\.)?(?P<content>.+)',
        old_webpage_no_queries.netloc + old_webpage_no_queries.path
    ).groupdict()['content'].rstrip('/')

    patt = re.compile(
        r'^https?://(?:www.)?{webpage}/?$'.format(
            webpage=webpage_stripped.lower()
        )
    )

    old_patt = re.compile(
        r'^https?://(?:www.)?{webpage}/?$'.format(
            webpage=old_webpage_stripped.lower()
        )
    )

    print 'searching'
    # Timeout work
    n = 4
    while True:
        results = map(lambda x: json.loads(requests.get(x).text), search_urls)
        if any(map(
                lambda r: 'error' in r and (
                    r['error']['code'] == 403 or r['error']['code'] == 503
                ), results)):
            print 'sleeping'
            time.sleep(n + random.randint(1, 1000)/1000.)
            n = n*2
        elif any(map(
                lambda r: 'error' in r, results)):
            raise Exception(', '.join(
                map(lambda r: r['error']['message'],
                    filter(lambda r: 'error' in r, results))
            ))
        else:
            break

    n = 4
    while True:
        precise_results = map(
            lambda x: json.loads(requests.get(x).text), precise_searches
        )
        if any(map(
                lambda r: 'error' in r and (
                    r['error']['code'] == 403 or r['error']['code'] == 503
                ), precise_results)):
            print 'sleeping'
            time.sleep(n + random.randint(1, 1000)/1000.)
            n = n*2
        elif any(map(lambda r: 'error' in r, precise_results)):
            raise Exception(', '.join(
                map(lambda r: r['error']['message'],
                    filter(lambda r: 'error' in r, precise_results))
            ))
        else:
            break
    print 'done searching'

    if type(results) != list:
        results = [results]

    # Get results
    real_results = [(r if 'items' in r else {'items': []}) for r in results]
    results = real_results

    search_links = [
        [conversions.clean_twitter(i['link'].lower()) for i in r['items']]
        for r in results
    ]

    search_text = [
        [u'{title} {link} {pagemap} {snippet}'.format(
            **convert_pagemap_dict(i)
        ).lower().encode('utf-8') for i in r['items']] for r in results
    ]

    for ri in range(len(search_links)):
        for si in range(len(search_links[ri])):
            for r in precise_results:
                if 'items' in r:
                    for i in r['items']:
                        if conversions.child_or_equal_page(
                                search_links[ri][si],
                                conversions.clean_twitter(i['link'].lower()),
                                True):
                            search_text[ri][si] += ' bipspecialappearsinprecise'

    child_links = []
    child_text = []
    search_class = [map(lambda s: conversions.page_relation(
        s, False, webpage, old_webpage), sl) for sl in search_links
    ]

    # TODO Clean up ssv code
    ssv = [
        any(map(patt.match, sl)) or any(map(old_patt.match, sl))
        for sl in search_links
    ]

    non_websites = [
        [
            i['link'] for i in r['items'] if webpage not in i['link']
        ] for r in results
    ]

    cs, ct, cc = zip(*[combine_children(
        search_links[i],
        search_text[i],
        search_class[i],
        child_links,
        child_text) for i in range(len(search_links))
        ]
    )

    print 'got there', len(results[0]['items'])
    return (non_websites, ssv, webpage_stripped, search_links, search_text,
            [r['items'] for r in results], search_class,
            cs, ct, cc, child_links, child_text)
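
The cs, ct, cc = zip(*[...]) step above transposes the per-query (websites, texts, classes) tuples returned by combine_children into three parallel tuples, one per field. With hypothetical values:

# How the zip(*...) transposition unpacks combine_children's per-query results:
per_query = [(['a.com'], ['text about a'], ['TrueCombined']),
             (['b.com'], ['text about b'], ['FalseCombined'])]
cs, ct, cc = zip(*per_query)
print(cs)  # (['a.com'], ['b.com'])
print(ct)  # (['text about a'], ['text about b'])
print(cc)  # (['TrueCombined'], ['FalseCombined'])
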
Example #7
def combine_children(websites, texts, classes, child_links, child_text):
    combined_sites = {'websites': [], 'texts': [], 'classes': []}
    root_sites = []
    temp_root_sites = {}
    sites_classes = zip(websites, classes)
    for site, cls in sites_classes:
        group = filter(
            lambda s: class_order(conversions.page_relation(s[0], True, site),
                                  'False') > 0, zip(websites, classes, texts))
        try:
            min_tuple = min(group, key=lambda g: class_ranks[g[1]])
        except:
            import pdb
            pdb.set_trace()
        group_class = min_tuple[1] + 'Combined'
        group_site = min_tuple[0]
        group_children_text = []
        child_site_texts = zip(child_links, child_text)
        for child, ctext in child_site_texts:
            if conversions.child_or_equal_page(group_site, child, True):
                group_children_text.append(ctext)
                child_idx = child_links.index(child)
                child_links.pop(child_idx)
                child_text.pop(child_idx)
                child_site_texts.pop(child_idx)
        combined_sites['websites'].append(group_site)
        combined_sites['classes'].append(group_class)
        group_text = ' '.join(group_children_text)
        for g in group:
            site_idx = websites.index(g[0])
            websites.pop(site_idx)
            classes.pop(site_idx)
            sites_classes.pop(site_idx)
            texts.pop(site_idx)
        group_text += ' ' + ' '.join(map(lambda g: g[2], group))
        combined_sites['texts'].append(group_text)
    """
        actual_root = conversion.strip_queries(webpage)
        #webpage_no_queries = ul.urlparse.urlparse(webpage)
        #actual_root = webpage_no_queries.scheme + '://' + webpage_no_queries.netloc + webpage_no_queries.path
        webpage_no_queries = re.match(r'(?:www\.)?(?P<content>.+)',webpage_no_queries.netloc + webpage_no_queries.path).groupdict()['content'].rstrip('/')
        child_patt = re.compile(r'^https?://(?:www\.)?{webpage}.+'.format(webpage=webpage_no_queries.lower()))
        if not temp_root_site.has_key(actual_root) or class_order(cls,temp_root_site[actual_root]['class']) > 0:
            temp_root_sites[actual_root] = {'actual_root':actual_root,'child_patt':child_patt,'class':cls+'Combined'})
    for site_dict in temp_root_sites:
        if any(map(lambda rs['child_patt'].match(site_dict['actual_root']),temp_root_sites)):
            continue
        root_sites.append(site_dict)
        combined_sites[site_dict['actual_root']] = {'text':'','class':site_dict['class']}
        for child,ctext in zip(child_links, child_text):
            if site_dict['child_patt'].match(child):


    site_dict = defaultdict(lambda: {'children':set(),'has_parent':False,'text':''})
    for child,ctext in zip(child_links, child_text):
        site_dict[child]['text'] = ctext
    for site,text in zip(websites, texts):
        webpage_no_queries = ul.urlparse.urlparse(webpage)
        webpage_no_queries = re.match(r'(?:www\.)?(?P<content>.+)',webpage_no_queries.netloc + webpage_no_queries.path).groupdict()['content'].rstrip('/')
        child_patt = re.compile(r'^https?://(?:www\.)?{webpage}.+'.format(webpage=webpage_no_queries.lower()))
        site_dict[site]['text'] = text
        for s in websites:
            if s != site and child_patt.match(s):
                site_dict[site]['children'].add(s)
                site_dict[s]['has_parent'] = True
        for c in child_links:
            if c != site and child_patt.match(c):
                site_dict[site]['children'].add(c)
                site_dict[c]['has_parent'] = True
    combined_sites = {'websites':[],'texts':[],'classes':[]}
    def combine_text(site):
        if len(site_dict[site]['children']) > 0:
            ret_text = site_dict[site]['text']
            for c in site_dict[site]['children']:
                ret_text += ' ' + combine_text(c)
            return ret_text
        else:
            return site_dict[site]['text']
    for site,cl in zip(websites, classes):
        if not site_dict[site]['has_parent']:
            combined_sites['websites'].append(site)
            combined_sites['texts'].append(combine_text(site))
            combined_sites['classes'].append(cl+'Combined')
    """
    return combined_sites['websites'], combined_sites['texts'], combined_sites[
        'classes']
Example #8
def getlinks(candidate, webpage, state, district_type, district_name):
    district_type = district_type.replace('_', ' ').strip()
    state = state_map[state.strip()]
    candidate, last, first = conversions.clean_name(candidate)
    candidate = '+'.join(candidate.split(' '))
    print candidate
    state = '+'.join(state.split(' '))
    district_type = '+'.join(district_type.split(' '))
    district_name = '+'.join(district_name.strip().split(' '))
    search_urls = []
    extra_children_searches = []
    precise_searches = []
    search_urls.append(
        u'https://www.googleapis.com/customsearch/v1?cx=011743744063680272768:cp4-iesopjm&key=AIzaSyCdHlGJuMzGBH9hNsEMObffDIkzJ44EQhA&hl=en&q={name}+{state}'
        .format(name=candidate, state=state))
    extra_children_searches.append(
        u'https://www.googleapis.com/customsearch/v1?cx=011743744063680272768:cp4-iesopjm&key=AIzaSyCdHlGJuMzGBH9hNsEMObffDIkzJ44EQhA&hl=en&q={name}+{state}+info'
        .format(name=candidate, state=state))
    extra_children_searches.append(
        u'https://www.googleapis.com/customsearch/v1?cx=011743744063680272768:cp4-iesopjm&key=AIzaSyCdHlGJuMzGBH9hNsEMObffDIkzJ44EQhA&hl=en&q={name}+{state}+sk=info'
        .format(name=candidate, state=state))
    precise_searches.append(
        u'https://www.googleapis.com/customsearch/v1?cx=011743744063680272768:cp4-iesopjm&key=AIzaSyCdHlGJuMzGBH9hNsEMObffDIkzJ44EQhA&hl=en&q={name}+{state}+campaign'
        .format(name=candidate, state=state))
    precise_searches.append(
        u'https://www.googleapis.com/customsearch/v1?cx=011743744063680272768:cp4-iesopjm&key=AIzaSyCdHlGJuMzGBH9hNsEMObffDIkzJ44EQhA&hl=en&q={name}+{state}+elect'
        .format(name=candidate, state=state))
    search_urls = [
        s.encode(chardet.detect(s.encode('utf-8'))['encoding'])
        for s in search_urls
    ]
    extra_children_searches = [
        s.encode(chardet.detect(s.encode('utf-8'))['encoding'])
        for s in extra_children_searches
    ]
    precise_searches = [
        s.encode(chardet.detect(s.encode('utf-8'))['encoding'])
        for s in precise_searches
    ]
    old_webpage = webpage
    if webpage != 'www.gernensamples.com':
        webpage = conversions.get_redirect(webpage)
    #if webpage == '404' or webpage == 'ERROR':
    #raise Exception
    websites = []
    webpage_stripped = re.match(r'(?:https?://)?(?:www\.)?(?P<content>.+)',
                                webpage).groupdict()['content'].rstrip('/')
    old_webpage_stripped = re.match(
        r'(?:https?://)?(?:www\.)?(?P<content>.+)',
        old_webpage).groupdict()['content'].rstrip('/')
    #TODO strip queries
    webpage_no_queries = ul.urlparse.urlparse(webpage)
    webpage_no_queries = re.match(
        r'(?:www\.)?(?P<content>.+)', webpage_no_queries.netloc +
        webpage_no_queries.path).groupdict()['content'].rstrip('/')
    old_webpage_no_queries = ul.urlparse.urlparse(old_webpage)
    old_webpage_no_queries = re.match(
        r'(?:www\.)?(?P<content>.+)', old_webpage_no_queries.netloc +
        old_webpage_no_queries.path).groupdict()['content'].rstrip('/')
    patt = re.compile(r'^https?://(?:www.)?{webpage}/?$'.format(
        webpage=webpage_stripped.lower()))
    old_patt = re.compile(r'^https?://(?:www.)?{webpage}/?$'.format(
        webpage=old_webpage_stripped.lower()))
    child_patt = re.compile(r'^https?://(?:www\.)?{webpage}.+'.format(
        webpage=webpage_no_queries.lower()))
    old_child_patt = re.compile(r'^https?://(?:www\.)?{webpage}.+'.format(
        webpage=old_webpage_no_queries.lower()))
    n = 4
    while True:
        results = map(lambda x: json.loads(requests.get(x).text), search_urls)
        if any(
                map(
                    lambda r: r.has_key('error') and
                    (r['error']['code'] == 403 or r['error']['code'] == 503),
                    results)):
            print 'sleeping'
            time.sleep(n + random.randint(1, 1000) / 1000.)
            n = n * 2
        elif any(map(lambda r: r.has_key('error'), results)):
            raise Exception(', '.join(
                map(lambda r: r['error']['message'],
                    filter(lambda r: r.has_key('error'), results))))
        else:
            break
    n = 4
    while True:
        child_results = map(lambda x: json.loads(requests.get(x).text),
                            extra_children_searches)
        if any(
                map(
                    lambda r: r.has_key('error') and
                    (r['error']['code'] == 403 or r['error']['code'] == 503),
                    child_results)):
            print 'sleeping'
            time.sleep(n + random.randint(1, 1000) / 1000.)
            n = n * 2
        elif any(map(lambda r: r.has_key('error'), child_results)):
            raise Exception(', '.join(
                map(lambda r: r['error']['message'],
                    filter(lambda r: r.has_key('error'), child_results))))
        else:
            break
    n = 4
    while True:
        precise_results = map(lambda x: json.loads(requests.get(x).text),
                              precise_searches)
        if any(
                map(
                    lambda r: r.has_key('error') and
                    (r['error']['code'] == 403 or r['error']['code'] == 503),
                    precise_results)):
            print 'sleeping'
            time.sleep(n + random.randint(1, 1000) / 1000.)
            n = n * 2
        elif any(map(lambda r: r.has_key('error'), precise_results)):
            raise Exception(', '.join(
                map(lambda r: r['error']['message'],
                    filter(lambda r: r.has_key('error'), precise_results))))
        else:
            break

    if type(results) != list:
        print type(results)
        results = [results]
    real_results = [(r if r.has_key('items') else {
        'items': []
    }) for r in results]
    results = real_results
    search_links = [[i['link'].lower() for i in r['items']] for r in results]
    search_text = [[
        u'{title} {link} {pagemap} {snippet}'.format(
            **convert_pagemap_dict(i)).lower().encode('utf-8')
        for i in r['items']
    ] for r in results]
    for ri in range(len(search_links)):
        for si in range(len(search_links[ri])):
            for r in precise_results:
                if r.has_key('items'):
                    for i in r['items']:
                        if conversions.child_or_equal_page(
                                search_links[ri][si], i['link'].lower(), True):
                            search_text[ri][
                                si] += ' bipspecialappearsinprecise'
    child_links = [
        i['link'].lower() for r in child_results if r.has_key('items')
        for i in r['items']
    ]
    child_text = [
        u'{title} {link} {pagemap} {snippet}'.format(
            **convert_pagemap_dict(i)).lower().encode('utf-8')
        for r in child_results if r.has_key('items') for i in r['items']
    ]
    #search_text = [[u'{title} {link} {pagemap} {snippet}'.format(**i).lower().encode('utf-8') for i in r['items']] for r in results]
    search_class = [
        map(lambda s: conversions.page_relation(s, True, webpage, old_webpage),
            sl) for sl in search_links
    ]
    #search_class = [map(lambda s: 'True' if patt.match(s) != None or old_patt.match(s) != None else ('Child' if child_patt.match(s) != None or old_child_patt.match(s) != None else 'False'),sl) for sl in search_links]
    #print search_text
    #TODO Clean up ssv code
    ssv = [
        any(map(patt.match, sl)) or any(map(old_patt.match, sl))
        for sl in search_links
    ]
    non_websites = [[
        i['link'] for i in r['items'] if webpage not in i['link']
    ] for r in results]
    cs, ct, cc = zip(*[
        combine_children(search_links[i], search_text[i], search_class[i],
                         child_links, child_text)
        for i in range(len(search_links))
    ])
    print 'got there', len(results[0]['items'])
    return non_websites, ssv, webpage_stripped, search_links, search_text, [
        r['items'] for r in results
    ], search_class, cs, ct, cc, child_links, child_text
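
The patt/old_patt (identity) and child_patt/old_child_patt (sub-page) regexes built in getlinks distinguish a result that is the candidate's site itself from one that is a page under it. An illustration on hypothetical URLs, taking the stripped webpage value to be 'example.com':

# Illustration of the identity vs. child patterns, using a hypothetical
# stripped webpage value of 'example.com'.
import re

patt = re.compile(r'^https?://(?:www\.)?example\.com/?$')       # identity
child_patt = re.compile(r'^https?://(?:www\.)?example\.com.+')  # sub-page

print(bool(patt.match('http://www.example.com/')))          # True: the site itself
print(bool(patt.match('https://example.com/about')))        # False: not the root
print(bool(child_patt.match('https://example.com/about')))  # True: a child page
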