Example #1
 def generate_volume_links(self):
     if self.JournalObj.volume_links_got:
         return
     #Already saved in the database; read directly instead of regenerating
     for volume_area_link in ElsevierAllItemsPageParser(
             html_source=request_with_random_ua(
                 self.url).text).volume_area_links:
         #First split volumes into year ranges (one decade each)
         #print('Elsevier Volume Area link:%s'%volume_area_link)
         area_volume_links = ElsevierAllItemsPageParser(
             html_source=request_with_random_ua(
                 volume_area_link).text).volume_links
         area_volume_links.append(volume_area_link)
         #Collect the page_url for every year in this range
         self.volume_links.extend(area_volume_links)
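
Nearly every example in this listing calls a shared helper, request_with_random_ua, whose definition is not shown. Judging from the call sites, it wraps requests.get, accepts url (positionally or as a keyword) plus an optional timeout, and returns the requests.Response that callers read .text from. A minimal sketch under those assumptions; the project's actual helper and User-Agent pool may differ:

 import random
 import requests

 #Hypothetical User-Agent pool; the project's actual list is not shown
 USER_AGENTS = [
     'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36',
     'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36',
     'Mozilla/5.0 (X11; Linux x86_64; rv:60.0) Gecko/20100101 Firefox/60.0',
 ]

 def request_with_random_ua(url, timeout=20):
     #Send each request with a randomly chosen User-Agent to look less like a bot
     headers = {'User-Agent': random.choice(USER_AGENTS)}
     return requests.get(url, headers=headers, timeout=timeout)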
Example #2
 def crawl_volume_page(self,
                       volume_item,
                       AllItemsPageParser,
                       JournalArticle,
                       use_tor=False,
                       check_pdf_url=True):
     volume_link = self.handle_volume_link_for_multi_results(volume_item[0])
     volume_db_id = volume_item[1]
     if use_tor:
         #Only used when the pdf_url can never be fetched directly; go through a remote proxy server instead
         html_source = request_with_proxy(volume_link).text
     else:
         html_source = request_with_random_ua(volume_link).text
     parser = AllItemsPageParser(html_source)
     try:
         sections = parser.sections
     except Exception as e:
         print('[Error] JournalSpider: Page Invalid {}\n'
               'error_url {}'.format(str(e), volume_link))
         return False
     try:
         volume_year = parser.volume_year
         print('Volume_year:{}'.format(volume_year))
     except AttributeError:
         volume_year = None
     print('\nPage Url: %s ' % volume_link)
     if not self.crawl_articles(sections, volume_year, volume_db_id,
                                JournalArticle, check_pdf_url):
         return False
     if sections and not self.debug:
         self.mark_volume_ok(volume_db_id)
         return True
     return False
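
The use_tor branch above relies on a second unshown helper, request_with_proxy, which the comment says exists for pages whose pdf_url can never be fetched directly. A minimal sketch assuming a Tor SOCKS endpoint on the default local port 9050 (requires requests[socks]); the actual proxy address and transport in the project are assumptions:

 import requests

 #Hypothetical proxy settings; socks5h resolves DNS through the proxy as well
 TOR_PROXIES = {
     'http': 'socks5h://127.0.0.1:9050',
     'https': 'socks5h://127.0.0.1:9050',
 }

 def request_with_proxy(url, timeout=30):
     #Route the request through the SOCKS proxy instead of connecting directly
     return requests.get(url, proxies=TOR_PROXIES, timeout=timeout)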
Example #3
 def generate_volume_links(self):
     if self.JournalObj.volume_links_got:
         return
     #Already saved in the database; read directly instead of regenerating
     self.volume_links = IEEE_AllItemsPageParser(
         html_source=request_with_random_ua(self.url).text
     ).volume_links
Example #4
 def page_amount(self):
     if self.keyword:
         self.result_amount = SearchPageParser(
             html_source=request_with_random_ua(
                 self.url).text).result_amount
     else:
         self.result_amount = RankPageParser(self.url).result_amount
     #50 results per page
     return int(self.result_amount / 50) + 1
Example #5
 def generate_volume_links(self):
     if self.JournalObj.volume_links_got:
         return
     self.volumes_page_url = 'http://onlinelibrary.wiley.com' + BeautifulSoup(
         request_with_random_ua(self.url).text, 'lxml').find(
             'a', text='See all')['href']
     #self.volumes_page_url = 'http://onlinelibrary.wiley.com/journal/10.1002/(ISSN)1096-987X/issues'
     soup = BeautifulSoup(
         request_with_random_ua(self.volumes_page_url).text, 'lxml')
     years = [
         int(a['href'].split('=')[-1])
         for a in soup.find_all('a', id=re.compile('year_[0-9]+_link'))
     ]
     pool = ThreadPool(16)
     pool.map(self.get_volume_links_by_year, years)
     pool.close()
     pool.join()
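
Example #5 (and later Example #14) fans the per-year fetches out over a ThreadPool. Assuming this is the thread-backed pool from the standard library's multiprocessing.pool, the pattern in isolation looks like this; fetch_year is a placeholder standing in for self.get_volume_links_by_year:

 from multiprocessing.pool import ThreadPool

 def fetch_year(year):
     #Placeholder worker; in the example this is self.get_volume_links_by_year
     print('fetching volume links for', year)

 pool = ThreadPool(16)
 pool.map(fetch_year, [2015, 2016, 2017])  #blocks until every year is done
 pool.close()
 pool.join()

Each worker appends results to a shared list via list.extend, which is atomic for built-in lists under CPython, so the examples get away without an explicit lock.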
Example #6
 def generate_abstract(self):
     #Fetch abstract_url and parse the abstract out of the page directly
     try:
         resp = request_with_random_ua(self.abstract_url)
         self.abstract = BeautifulSoup(
             resp.text, 'lxml').select_one('.paraText').text.strip()
     except Exception:
         #Leave self.abstract unset when the request fails or the page layout differs
         pass
Example #7
 def generate_volume_links(self):
     journal_id = self.url.split('id=')[-1]
     volumes_page_link = 'http://www.emeraldinsight.com/loi/{}'.format(journal_id)
     #Retry: the page sometimes renders without the issue links
     for i in range(10):
         resp = request_with_random_ua(url=volumes_page_link, timeout=10)
         soup = BS(resp.text, 'lxml')
         self.volume_links = [a['href'] for a in soup.select('.tocIssueLink')]
         if self.volume_links:
             break
Example #8
 def get_volume_link_by_year(self, year):
     volumes_page_url = '{}&year={}'.format(self.url, year)
     print(volumes_page_url)
     soup = BS(request_with_random_ua(volumes_page_url).text, 'lxml')
     volume_page_links = [
         'http://journals.lww.com{}'.format(a['href'])
         for a in soup.select_one('#ej-past-issues-detail-list').find_all(
             'a', text=re.compile('Volume'))
     ]
     self.volume_links.extend(volume_page_links)
Example #9
 def generate_volume_links(self):
     if self.JournalObj.volume_links_got:
         return
     #Already saved in the database; read directly instead of regenerating
     pages_amount = SpringParser(
         html_source=request_with_random_ua(self.url).text).pages_amount
     for page_num in range(1, pages_amount + 1):
         page_url = 'http://link.springer.com/search/page/{}?facet-journal-id={}&showAll=true&facet-content-type=Article&sortOrder=newestFirst'\
             .format(page_num, self.spring_journal_id)
         self.volume_links.append(page_url)
Example #10
 def generate_volume_links(self):
     #http://pubs.acs.org/journal/ancac3
     if self.JournalObj.volume_links_got:
         return
     index = self.url.split('/')[-1]
     volumes_page_url = 'http://pubs.acs.org/loi/{}'.format(index)
     #print(volumes_page_url)
     #Retry: the page sometimes renders without the volume list
     for i in range(20):
         self.volume_links = [
             a['href'] for a in BeautifulSoup(
                 request_with_random_ua(volumes_page_url).text,
                 'lxml').select('.publicationTitle')
         ]
         print(self.volume_links)
         if self.volume_links:
             print('length:', len(self.volume_links))
             break
Example #11
 def __init__(self, url, area_id=None, category_id=None, driver=None):
     self.url = url
     if category_id and area_id:
         self.url += '?area={}&category={}&order=tr&type=j'.format(
             area_id, category_id)
     if driver:
         driver.get(self.url)
         time.sleep(2)
         self.soup = BeautifulSoup(driver.page_source, 'lxml')
     else:
         resp = request_with_random_ua(self.url, timeout=20)
         self.soup = BeautifulSoup(resp.text, 'lxml')
Example #12
 def generate_volume_links(self):
     if self.JournalObj.volume_links_got:
         return
     name = self.url[:-1].split('/')[-1]
     items_first_page_url = 'http://{}.biomedcentral.com/articles?searchType=journalSearch&sort=PubDate&page=1'.format(
         name)
     print(items_first_page_url)
     pages_num = BioMedParser(html_source=request_with_random_ua(
         url=items_first_page_url, timeout=3).text).pages_amount
     for i in range(1, pages_num + 1):
         page_url = 'http://{}.biomedcentral.com/articles?searchType=journalSearch&sort=PubDate&page={}'\
             .format(name, i)
         self.volume_links.append(page_url)
Example #13
 def __init__(self, journal_sjr_id):
     url = 'http://www.scimagojr.com/journalsearch.php?q={}&tip=sid&clean=0'.format(
         journal_sjr_id)
     print(url)
     resp = request_with_random_ua(url)
     self.journal_id = journal_sjr_id
     self.soup = BeautifulSoup(resp.text, 'lxml')
     self.trs = self.soup.find('tbody').find_all('tr')
     self.info_dict = {}
     for tr in self.trs:
         tds = tr.find_all('td')
         self.info_dict[tds[0].text] = tds[1]
     #print(self.info_dict.keys())
     self.cur = DB_CONNS_POOL.new_db_cursor()
Example #14
 def generate_volume_links(self):
     if self.JournalObj.volume_links_got:
         return
     self.url = (self.url + '?mobile=0&desktopMode=true')\
         .replace('default', 'issuelist')
     soup = BS(request_with_random_ua(self.url).text, 'lxml')
     years = [
         int(option.text) for option in
         soup.select_one('#ej-article-action-toolbar-select').select('option')
     ]
     pool = ThreadPool(8)
     pool.map(self.get_volume_link_by_year, years)
     pool.close()
     pool.join()
Example #15
 def get_volume_links_by_year(self, year):
     ajax_url = '{}/fragment?activeYear={}&SKIP_DECORATION=true'\
         .format(self.volumes_page_url,year)
     for i in range(5):
         a_list = BeautifulSoup(
             request_with_random_ua(ajax_url).text,
             'lxml').select('.issue > a')
         if a_list:
             break
     volume_links_by_year = [
         'http://onlinelibrary.wiley.com' + a['href'] for a in a_list
     ]
     if not volume_links_by_year:
         print('error ajax_url:{}'.format(ajax_url))
     self.volume_links.extend(volume_links_by_year)
Example #16
 def crawl_per_page_result(self, url):
     print(url)
     if self.crawl_all:
         if self.driver_pool_size > 0:
             driverObj = self.drivers.get_one_free_driver()
             driver = driverObj.engine
         else:
             driver = None
         try:
             parser = RankPageParser(url, driver)
         except Exception as e:
             print(str(e))
         try:
             driverObj.status = 'free'
         except NameError:
             #driverObj is never created when the driver pool is disabled
             pass
         JournalItem = RankJournal
     else:
         html_source = request_with_random_ua(url).text
         parser = SearchPageParser(html_source)
         JournalItem = PublisherJournal
     print(len(parser.sections))
     for sec in parser.sections:
         JournalItem(sec).save_to_db()
Example #17
 def handle_sciencedirect_url(self):
     #print(self.url)
     if 'sciencedirect' in self.url:
         return
     resp = request_with_random_ua(self.url)
     soup = BeautifulSoup(resp.text, 'lxml')
     print('---------------------')
     if soup.find_all(text='404'):
         raise Exception('404')
     temp_url = soup.select_one('.cta-primary')['href']
     temp_url2 = soup.find_all("a", href=re.compile("guide-for-authors"))
     if temp_url2:
         temp_url2 = temp_url2[0]['href']
     print('temp_url:{}'.format(temp_url))
     print('temp_url2:{}'.format(temp_url2))
     for url in [temp_url2, temp_url]:
         try:
             journal_index = url.split('/')[0].split('-')
             self.url = 'http://www.sciencedirect.com/science/journal/{}'\
                 .format(journal_index[0]+journal_index[1])
             return
         except Exception as e:
             print(str(e))
             continue
Example #18
 def generate_volume_links(self):
     if self.JournalObj.volume_links_got:
         return
     self.volume_links = TaylorFrancisParser(
         html_source=request_with_random_ua(self.url).text).volume_links