Code Example #1
    def save_top_urls_to_mongo(cls, webpages):
        for webpage in webpages:
            # Build a dictionary of sanitized webpage properties.
            webpage_dict = cls.__get_url_properties_and_sanitize(webpage)
            MongoDB.save_modify_url(**webpage_dict)

        return cls
Code Example #2
File: web_scrape_oldr.py Project: wangk1/research
    def scrape_links(self, pos):
        # Pull the document at the current queue position.
        doc_object = MongoDB.get(URLQueue, 'document', number=pos)

        while doc_object is not None:
            # Scrape the page and its sampled child links, then advance the queue.
            self.scrape_link_and_child(doc_object['url'])
            pos = MongoDB.increment_url_counter()

            doc_object = MongoDB.get(URLQueue, 'document', number=pos)
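
The loop above advances a shared cursor through MongoDB.increment_url_counter, whose implementation is not shown on this page. A minimal sketch of how such an atomic counter could be built with pymongo (the `metadata` collection and `position` field names are assumptions, not taken from the project):

    from pymongo import MongoClient, ReturnDocument

    client = MongoClient('localhost', 27017)
    db = client['research']

    def increment_url_counter(step=1):
        # Atomically bump the shared queue position and return the new value.
        # The collection and field names here are illustrative assumptions.
        doc = db.metadata.find_one_and_update(
            {'type': 'queue'},
            {'$inc': {'position': step}},
            return_document=ReturnDocument.AFTER,
            upsert=True,
        )
        return doc['position']

Using find_one_and_update with $inc keeps the read-and-increment step atomic, so several worker processes can share one queue position safely.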
Code Example #3
File: web_scrape_oldr.py Project: wangk1/research
    def scrape_links_from_position(self, pos):
        # Each worker process opens its own MongoDB connection.
        MongoDB.connect(settings.HOST_NAME, settings.PORT)
        links = self.__get_next_urls(pos)

        Logger.info(links)
        for link in links:
            self.scrape_link_and_child(link)

        Logger.debug('Process job completed')
        return 0
Code Example #4
    def scrape_urls(cls):
        # Resume from the stored queue position and scrape sequentially.
        position = MongoDB.get(MetaData, 'position', type='queue')

        WebScraper().scrape_links(position)

        return cls
Code Example #5
    def create_url_queue(cls):
        sorted_coll = URLToGenre.objects.order_by('_id')
        counter = 391417  # 96461
        total = 391417

        try:
            while counter < total:
                MongoDB.push_to_queue(counter, sorted_coll[counter])
                if counter % 10000 == 0:
                    print('{} done'.format(counter))
                counter += 1
        except Exception as ex:
            # Record why the push stopped instead of silently swallowing it.
            print(ex)

        print('stopped at {}'.format(counter))
        print('last url: {}'.format(sorted_coll[counter]['url']))

        return cls
Code Example #6
    def scrape_urls_multiproc(cls):
        # Current position in the URL queue.
        pos = MongoDB.get(MetaData, 'position', type='queue')
        # Upper bound of the slice handed to the next worker.
        cap = pos

        process_queue = queue.Queue(maxsize=settings.NUM_PROCESSES)

        # Create the initial pool of worker processes, one queue slice each.
        for p_num in range(0, settings.NUM_PROCESSES):
            p = mp.Process(target=WebScraper().scrape_links_from_position, args=(cap,))
            process_queue.put(p)

            cap += settings.NUM_URLS_PER_PROCESS

            # Now start the worker.
            p.start()

        head = process_queue.get()
        # Wait on the oldest worker and spawn replacements as needed.
        while pos < MongoDB.count(URLQueue):
            head.join()

            if head.exitcode != 0:
                Logger.error('Error with Process, terminating')
                return

            # Update the shared queue counter.
            MongoDB.increment_url_counter(settings.NUM_URLS_PER_PROCESS)

            p = mp.Process(target=WebScraper().scrape_links_from_position, args=(cap,))
            process_queue.put(p)
            p.start()

            # Advance both the cap and the current position.
            cap += settings.NUM_URLS_PER_PROCESS
            pos += settings.NUM_URLS_PER_PROCESS
            head = process_queue.get()

        print(p.exitcode)

        return cls
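
Example #6 keeps a fixed window of settings.NUM_PROCESSES workers alive, each assigned a contiguous slice of the URL queue, and replaces the oldest worker as it finishes. A self-contained sketch of the same fan-out-with-replacement pattern (the worker body and the two constants are placeholders, not the project's API):

    import multiprocessing as mp
    from collections import deque

    NUM_PROCESSES = 4        # stand-in for settings.NUM_PROCESSES
    URLS_PER_PROCESS = 100   # stand-in for settings.NUM_URLS_PER_PROCESS

    def work(start):
        # Placeholder for WebScraper().scrape_links_from_position(start).
        print('scraping slice starting at', start)

    def run(total_urls):
        window = deque()
        cap = 0
        # Fill the initial window with one worker per slice.
        for _ in range(NUM_PROCESSES):
            p = mp.Process(target=work, args=(cap,))
            p.start()
            window.append(p)
            cap += URLS_PER_PROCESS

        pos = 0
        # As the oldest worker finishes, start a replacement on the next slice.
        while pos < total_urls:
            head = window.popleft()
            head.join()
            if head.exitcode != 0:
                raise RuntimeError('worker failed')
            p = mp.Process(target=work, args=(cap,))
            p.start()
            window.append(p)
            cap += URLS_PER_PROCESS
            pos += URLS_PER_PROCESS

        # Drain the workers that are still running.
        for p in window:
            p.join()

    if __name__ == '__main__':
        run(total_urls=1000)

Like the original, this joins workers in FIFO order, so the window never holds more than NUM_PROCESSES live processes at once.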
Code Example #7
File: dmoz_scraper.py Project: wangk1/research
    def scrape(self):
        home = self.http.get(dmoz_home)

        home_page_links = self._scrapeHomeAndGetLinks(home.data)

        # Visit random links from the homepage and dig down.
        i = 0
        while i < settings.NUM_RANDOM_WEBPAGE:
            result = self._scrapPage(home_page_links[random.randint(0, len(home_page_links) - 1)])

            # Only count pages we have not already stored.
            if result is not None and MongoDB.get_url_object(result['url']) is None:
                i += 1
                try:
                    page = utf_8_safe_decode(self.http.get(result['url']).data)

                    MongoDB.save_modify_url(page=page, **result)

                    Logger.info('Completed: ' + result['url'])
                except Exception as ex:
                    Logger.error(ex)
Code Example #8
File: web_scrape_oldr.py Project: wangk1/research
    def scrape_link_and_child(self, parent_url):
        parent_url = base_util.replace_dot_url(parent_url)
        webpage_body, parent_url = self.scrape(base_util.unreplace_dot_url(parent_url), None)

        # Exit if we failed to scrape the website.
        if webpage_body is None:
            return

        Logger.debug('Saving Parent')
        MongoDB.save_page(url=parent_url, page=webpage_body)
        Logger.info('Completed page: ' + parent_url)

        # Now grab the children of this webpage.
        all_ahref = [base_util.combine_parent_rel_link(parent_url, a.attrs['href'])
                     for a in BeautifulSoup(webpage_body, 'html.parser', from_encoding='utf-8').find_all('a')
                     if 'href' in a.attrs]

        child_urls = random.sample(all_ahref, settings.GET_X_CHILD) if len(all_ahref) >= settings.GET_X_CHILD else all_ahref

        # Get rid of bad normalization.
        if not re.match('^www[.].*$', parent_url):
            Logger.info('Updating bad url for {}'.format(parent_url))
            MongoDB.update_url(base_util.normalize_url(parent_url), parent_url)

        if len(child_urls) > 0:
            # Get the children; child_urls is a subset of all_ahref.
            for child_url in child_urls:
                Logger.debug('Get Child {}'.format(child_url))
                child_page = self.scrape(child_url, parent_url)

                if child_page is None:
                    # Fall back to the remaining, unexplored links.
                    exploredset = set()
                    tries = 0
                    for url in set(all_ahref) - exploredset:
                        if tries == settings.MAX_RETRIES:
                            Logger.info('Max retry number exceeded')
                            break

                        Logger.info('trying new url: ' + url)

                        child_page = self.scrape(url, parent_url)

                        if child_page is not None:
                            break
                        exploredset.add(url)

                        tries += 1

                if child_page is not None:
                    Logger.debug('Saving Child {}'.format(child_url))
                    MongoDB.save_modify_url(url=base_util.replace_dot_url(child_url),
                                            parent=[MongoDB.get_url_object(parent_url)],
                                            genre=[], page=child_page)
                    Logger.info('Completed page: ' + child_url)
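
When a sampled child fails, Example #8 falls back to the other anchors collected from the parent page, bounded by settings.MAX_RETRIES. The same fallback idea in isolation (fetch is a placeholder for self.scrape(url, parent_url); sample_size stands in for settings.GET_X_CHILD):

    import random

    def fetch_first_available(links, fetch, sample_size=3, max_retries=5):
        # Prefer a small random sample of the links, as with GET_X_CHILD above.
        candidates = random.sample(links, sample_size) if len(links) >= sample_size else list(links)
        for url in candidates:
            page = fetch(url)
            if page is not None:
                return url, page
        # Fall back to the remaining links, trying at most max_retries of them.
        remaining = [url for url in links if url not in set(candidates)]
        for tries, url in enumerate(remaining):
            if tries == max_retries:
                break
            page = fetch(url)
            if page is not None:
                return url, page
        return None, None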
Code Example #9
    def start(cls):
        # Open the MongoDB connection before any scraping begins.
        MongoDB.connect(settings.HOST_NAME, settings.PORT)

        return cls
Code Example #10
File: web_scrape_oldr.py Project: wangk1/research
    def __get_next_urls(self, curr_pos):
        # Fetch the next NUM_URLS_PER_PROCESS queued documents in one query.
        doc_objs = MongoDB.get_m(URLQueue, 'document',
                                 number__in=list(range(curr_pos, curr_pos + settings.NUM_URLS_PER_PROCESS)))

        return [doc_obj['url'] for doc_obj in doc_objs]
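
A pymongo equivalent of this batched lookup, assuming a url_queue collection keyed by a number field (the database name, collection name, and document layout are assumptions):

    from pymongo import MongoClient

    client = MongoClient('localhost', 27017)
    db = client['research']

    def get_next_urls(curr_pos, batch_size=100):
        # Fetch the slice of queued documents whose queue number is in range.
        cursor = db.url_queue.find(
            {'number': {'$in': list(range(curr_pos, curr_pos + batch_size))}}
        )
        return [doc['document']['url'] for doc in cursor]

A range query such as {'number': {'$gte': curr_pos, '$lt': curr_pos + batch_size}} would match the same documents without materializing the list of numbers.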