def normal_item_solver(self, item, task, response):
    doc = self.get_doc(response)

    title = ''
    t = ''
    t_stamp = 0
    category = ''
    author = ''
    content = ''
    if instant_pattern.match(task.url):
        title = util.get_filtered_title(doc, {'title'}, ur' - 信報網站 hkej.com')
        t = util.get_time_string_from_selectors(doc, {'span.date'})
        time_part = min_sec_pattern.findall(t)[0]
        t_stamp = util.get_timestamp_from_string(time_part) + time.localtime().tm_sec
        category = doc('span.cate').text()
        content = util.get_paragraphs_from_selector(doc, '#article-content p')
    elif daily_pattern.match(task.url) or headline_article_pattern.match(task.url):
        title = util.get_filtered_title(doc, {'title'}, ur' - .+')
        t = util.get_time_string_from_selectors(doc, {'#date'})
        # The daily pages carry a date only, so approximate with the current
        # time of day.
        t_stamp = (util.get_timestamp_from_string(t)
                   + time.localtime().tm_hour * 3600
                   + time.localtime().tm_min * 60
                   + time.localtime().tm_sec)
        category = doc('#hkej_navSubMenu_2014 .on').text()
        content = util.get_paragraphs_from_selector(doc, '#article-content p')
    if content == '':
        content = util.get_paragraphs_from_selector(doc, '#article-detail-wrapper')
    # Drop the trailing "節錄" (excerpt) block. Note: re.sub takes count before
    # flags, so the flags must be passed by keyword.
    content = re.sub(ur'(節錄)(.|\n|\t|\r)*', u'', content,
                     flags=re.M | re.I | re.U)

    item.raw = doc.text()
    item.title = title
    item.t = t
    item.t_stamp = t_stamp
    item.fetched_at = task.fetched_at
    item.category = category
    item.author = author
    item.content = content
    item.url = task.url
    item.source = 'HKEJ'
    item.task_no = self.BATCH_NUMBER

    for img in doc('#article-detail-wrapper p img, '
                   '#article-detail-wrapper .hkej_detail_thumb_2014 img').items():
        if img.parent('a').attr('href') != '':
            des = ''
            if img.parent('a') and img.parent('a').attr('title'):
                des = img.parent('a').attr('title')
            media = self.NewsItem.MediaItem(media_url=img.parent('a').attr('href'),
                                            type='image',
                                            description=des,
                                            created_at=item.fetched_at)
            item.media_list.append(media)

    for a in doc('iframe').items():
        if a.attr('src') and re.match(r'.*youtube\.com.*', a.attr('src')):
            media_u = a.attr('src')
            # Protocol-relative embeds need an explicit scheme.
            if re.match(r'//.+', media_u):
                media_u = 'http:' + media_u
            media = self.NewsItem.MediaItem(media_url=media_u,
                                            type='youtube',
                                            description='youtube',
                                            created_at=item.fetched_at)
            item.media_list.append(media)
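# Note on the timestamp arithmetic used throughout these solvers: when a page
# exposes only a date (no clock time), the solver adds the current time of day
# so that items scraped on the same day still sort in fetch order. A
# hypothetical helper capturing that recurring idiom (a sketch, not part of
# util):
def _approximate_day_timestamp(day_stamp):
    # day_stamp: seconds since epoch at local midnight of the article's date.
    now = time.localtime()
    return day_stamp + now.tm_hour * 3600 + now.tm_min * 60 + now.tm_sec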
def normal_item_solver(self, item, task, response):
    doc = self.get_doc(response)

    title = util.get_filtered_title(doc, {'h1.entry-title'})
    t = doc('meta[property="article:published_time"]').attr('content')
    t_stamp = 0
    if t:
        t_stamp = util.get_timestamp_from_string(t)
    category = ''
    cat_find_res = cat_pattern.findall(task.url)
    if cat_find_res:
        category = cat_find_res[0]
    category = u'科技/' + category
    author = ''
    content = util.get_paragraphs_from_selector(doc,
                                                '#content div.entry p:not(.meta)')
    # Strip trailing source/tag lines; both the fullwidth and ASCII colon
    # variants of "來源" (source) appear on the site.
    content = re.sub(ur'(來源:|來源:|Tags:).+', u'', content)

    item.raw = doc.text()
    item.title = title
    item.t = t
    item.t_stamp = t_stamp
    item.fetched_at = task.fetched_at
    item.category = category
    item.author = author
    item.content = content
    item.url = task.url
    item.source = 'Unwire'
    item.task_no = self.BATCH_NUMBER
def normal_item_solver(self, item, task, response):
    doc = self.get_doc(response)

    title = doc('h1').text()
    meta_txt = doc('.metaStuff').text()
    t = re.findall(ur'[^\s]+月.+', meta_txt)[0]
    t_stamp = (util.get_timestamp_from_string(t)
               + time.localtime().tm_hour * 3600
               + time.localtime().tm_min * 60
               + time.localtime().tm_sec)
    category = doc('#crumbs a').text().split(' ')[-1]
    author = meta_txt.split(' ')[0]
    content = util.get_paragraphs_from_selector(doc, '.entry p')
    if content == '':
        content = util.get_paragraphs_from_selector(doc, '.entry div')

    item.raw = doc.text()
    item.title = title
    item.t = t
    item.t_stamp = t_stamp
    item.fetched_at = task.fetched_at
    item.category = category
    item.author = author
    item.content = content
    item.url = task.url
    item.source = 'Savantas'
    item.task_no = self.BATCH_NUMBER
def normal_item_solver(self, item, task, response):
    doc = self.get_doc(response)

    title = util.get_filtered_title(doc, {'h1 a'})
    t = util.get_time_string_from_selectors(doc, {'span.postdate'})
    t_stamp = util.get_timestamp_from_string(t) + int(time.localtime().tm_sec)
    category = doc('span.postcat a').text()
    author = doc('span.postauthor a').text()
    content = util.get_paragraphs_from_selector(doc, 'div p')
    # Strip submission footers ("投稿" = submission), comment counters
    # ("則留言" = comments) and the site's recurring《禮運.大同》epigraph.
    # Note: re.sub takes count before flags, so the flags must be passed
    # by keyword.
    content = re.sub(ur'投稿:[.\n\r\t]*.*', u'', content,
                     flags=re.M | re.U | re.I)
    content = re.sub(ur'則留言[.\n\r\t]*', u'', content,
                     flags=re.M | re.U | re.I)
    content = re.sub(ur'大道之行也,天下為公,選賢與能,講信修睦。---《禮運.大同》[.\n\r\t]*',
                     u'', content, flags=re.M | re.U | re.I)

    item.raw = doc.text()
    item.title = title
    item.t = t
    item.t_stamp = t_stamp
    item.fetched_at = task.fetched_at
    item.category = category
    item.author = author
    item.content = content
    item.url = task.url
    item.source = 'VJMedia'
    item.task_no = self.BATCH_NUMBER

    for img in doc('#container img.size-full, #container img.size-large').items():
        if img.attr('src') != '':
            des = ''
            if img.attr('alt'):
                des = img.attr('alt')
            elif img.siblings('p'):
                des = img.siblings('p').text()
            media = self.NewsItem.MediaItem(media_url=img.attr('src'),
                                            type='image',
                                            description=des,
                                            created_at=item.fetched_at)
            item.media_list.append(media)

    for iframe in doc('iframe').items():
        if iframe.attr('src') and re.match(r'.*youtube\.com.+', iframe.attr('src')):
            media = self.NewsItem.MediaItem(media_url=iframe.attr('src'),
                                            type='youtube',
                                            description='youtube',
                                            created_at=item.fetched_at)
            item.media_list.append(media)

    # Fetch Facebook comments only inside the crawler's active window.
    if util.within_active_interval(6, 1200):
        _comments = util.get_filtered_facebook_comments_data(
            '214585295294555',
            doc('div.fb-comments').attr('data-href'),
            task.url)
        if _comments:
            for _comment in _comments:
                item.media_list.append(
                    self.NewsItem.MediaItem(media_url=_comment['json_string'],
                                            type='comments',
                                            description='comments',
                                            created_at=item.fetched_at))
def normal_item_solver(self, item, task, response):
    doc = self.get_doc(response)

    title = util.get_filtered_title(doc, {'title'}, ur' – 都市日報')
    author = ''
    category = doc('.mobile-page-name span').text()
    # tags = doc('meta[name=keywords]').attr('content')
    content = util.get_paragraphs_from_selector(doc, '.main-content .content p')
    # Normalise the date string: drop the parenthesised part, then map the
    # Chinese meridiem markers (上午 = AM, 下午 = PM) so it parses.
    t = doc('.main-content .date p').text()
    t = re.sub(ur'\(.+?\)', '', t)
    t = re.sub(ur'上午', 'AM', t)
    t = re.sub(ur'下午', 'PM', t)
    t_stamp = util.get_timestamp_from_string(t)

    item.raw = doc.text()
    item.title = title
    item.t = t
    item.t_stamp = t_stamp
    item.fetched_at = task.fetched_at
    item.category = category
    item.author = author
    item.content = content
    item.url = task.url
    item.source = 'MetroHK'
    item.task_no = self.BATCH_NUMBER
def normal_item_solver(self, item, task, response):
    doc = self.get_doc(response)

    title = util.get_filtered_title(doc, {'.PlaylistRow kanhanpass'})
    t = util.get_time_string_from_selectors(doc, {'.PlaylistRow td'},
                                            date_patterns={date_pattern})
    t_stamp = util.get_timestamp_from_string(t, '%Y/%m/%d %I:%M %p') + int(
        time.localtime().tm_sec)
    category = doc('#topMenu a.on').text()
    author = ''

    # Pull the agenda time markers for this meeting from the webcast API and
    # join them into one "time - item" line per agenda entry.
    c_id = re.findall(url_id_pattern, task.url)[0]
    r = requests.get('http://webcast.legco.gov.hk/Public_uat_embedded/'
                     'Service1.asmx/GetTimeMarker?meetingID=' + c_id + '&lang=tc')
    j_obj = json.loads(r.text)
    content = ''
    for agenda in j_obj['TimeMarkerItems']:
        a_time = agenda['AgendaTime']
        a_item = agenda['AgendaItem']
        content = content + a_time + u' - ' + a_item + u'\n'

    item.raw = doc.text()
    item.title = title
    item.t = t
    item.t_stamp = t_stamp
    item.fetched_at = task.fetched_at
    item.category = category
    item.author = author
    item.content = content
    item.url = task.url
    item.source = 'LegislativeCouncil'
    item.task_no = self.BATCH_NUMBER
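# For reference, GetTimeMarker is expected to return JSON of roughly this
# shape (field names taken from the parsing loop above; the values are
# illustrative only):
#
#   {
#       "TimeMarkerItems": [
#           {"AgendaTime": "09:05", "AgendaItem": "..."},
#           {"AgendaTime": "10:30", "AgendaItem": "..."}
#       ]
#   }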
def normal_item_solver(self, item, task, response):
    doc = self.get_doc(response)

    title = util.get_filtered_title(doc, {'h1'})
    t = doc('meta[property="article:published_time"]').attr('content')
    t_stamp = 0
    if t:
        t_stamp = util.get_timestamp_from_string(t)
    category = ''
    author = ''
    content = util.get_paragraphs_from_selector(doc, '#main-content .entry-content p')
    # Cut everything from the "繼續閱讀" (continue reading) marker onwards.
    content = re.sub(ur'繼續閱讀[\n\s\S.]*', '', content)

    item.raw = doc.text()
    item.title = title
    item.t = t
    item.t_stamp = t_stamp
    item.fetched_at = task.fetched_at
    item.category = category
    item.author = author
    item.content = content
    item.url = task.url
    item.source = 'UBeat'
    item.task_no = self.BATCH_NUMBER

    for img in doc('figure.entry-thumbnail img').items():
        if img.attr('src') != '':
            media_u = img.attr('src')
            des = ''
            if img.attr('alt'):
                des = img.attr('alt')
            media = self.NewsItem.MediaItem(media_url=media_u,
                                            type='image',
                                            description=des,
                                            created_at=item.fetched_at)
            item.media_list.append(media)
def normal_item_solver(self, item, task, response):
    response.encoding = 'utf-8'
    doc = self.get_doc(response)

    title = doc('#page-h1').text()
    t = util.get_time_string_from_selectors(doc, {'h5 small'}, {date_pattern})
    t_stamp = (util.get_timestamp_from_string(t)
               + time.localtime().tm_hour * 3600
               + time.localtime().tm_min * 60
               + time.localtime().tm_sec)
    category = doc('h5 small a').text()
    author = ''
    content = util.get_paragraphs_from_selector(doc, '.content-show p')

    item.raw = doc.text()
    item.title = title
    item.t = t
    item.t_stamp = t_stamp
    item.fetched_at = task.fetched_at
    item.category = category
    item.author = author
    item.content = content
    item.url = task.url
    item.source = 'Bauhinia'
    item.task_no = self.BATCH_NUMBER
def normal_item_solver(self, item, task, response):
    response.encoding = 'big5'
    doc = self.get_doc(response)

    title = doc('p.bigheading').text().split(' ')[-1]
    with self.url_time_dict_lock:
        t = self.url_time_dict[task.url]
    t_stamp = (util.get_timestamp_from_string(t)
               + time.localtime().tm_hour * 3600
               + time.localtime().tm_min * 60)
    category = ''
    author = ''
    content = util.get_paragraphs_from_selector(doc, 'p:not(.bigheading)')

    item.raw = doc.text()
    item.title = title
    item.t = t
    item.t_stamp = t_stamp
    item.fetched_at = task.fetched_at
    item.category = category
    item.author = author
    item.content = content
    item.url = task.url
    item.source = 'Y28'
    item.task_no = self.BATCH_NUMBER
def normal_item_solver(self, item, task, response):
    doc = self.get_doc(response)

    title = util.get_filtered_title(doc, {'title'}, u'881903.com 商業電台 - ')
    t = util.get_time_string_from_selectors(
        doc, {'#divnewsTextDate', '#part6808_ctl00_lblDetailDate'})
    t_stamp = util.get_timestamp_from_string(t, '%d.%m.%Y %H:%M') + int(
        time.localtime().tm_sec)
    category = doc('#part8425_ctl00_divtitle').text()
    author = ''
    content = util.get_paragraphs_from_selector(doc, '#divnewsTextContent p')
    if content == '':
        content = util.get_paragraphs_from_selector(doc, '#tdContent p')
    if content == '':
        content = util.get_paragraphs_from_selector(doc, '.newsTextContent2')

    item.raw = doc.text()
    item.title = title
    item.t = t
    item.t_stamp = t_stamp
    item.fetched_at = task.fetched_at
    item.category = category
    item.author = author
    item.content = content
    item.url = task.url
    item.source = 'CommercialRadio'
    item.task_no = self.BATCH_NUMBER
def normal_item_solver(self, item, task, response):
    doc = self.get_doc(response)

    title = util.get_filtered_title(doc, {'h2'})
    t = util.get_time_string_from_selectors(doc, {'div.dateforarticle'})
    t_stamp = 0
    if relative_time_pattern.match(t):
        t_stamp = self._get_timestamp_from_relative_time_str(t)
    elif absolute_time_pattern.match(t):
        t_stamp = util.get_timestamp_from_string(t) + int(time.localtime().tm_sec)
    category = u'新聞'
    author = ''
    content = util.get_paragraphs_from_selector(doc, '#mymain')

    item.raw = doc.text()
    item.title = title
    item.t = t
    item.t_stamp = t_stamp
    item.fetched_at = task.fetched_at
    item.category = category
    item.author = author
    item.content = content
    item.url = task.url
    item.source = 'AM730'
    item.task_no = self.BATCH_NUMBER
def normal_item_solver(self, item, task, response):
    doc = self.get_doc(response)

    title = util.get_filtered_title(doc, {'h1.conttit'})
    t = util.get_time_string_from_selectors(doc, {'div.pubtime'})
    t_stamp = util.get_timestamp_from_string(t)
    category = 'hk'
    author = ''
    content = util.get_paragraphs_from_selector(doc, 'div.contentbox p')
    content = re.sub(r'READMORE\: .+\n', '', content)

    item.raw = doc.text()
    item.title = title
    item.t = t
    item.t_stamp = t_stamp
    item.fetched_at = task.fetched_at
    item.category = category
    item.author = author
    item.content = content
    item.url = task.url
    item.source = 'ChinaDaily'
    item.task_no = self.BATCH_NUMBER

    for img in doc('div.contentbox img').items():
        if img.attr('src') != '':
            # Rebase the image URL onto the site host, keeping the path from
            # "attachement" onwards.
            media_u = 'http://www.chinadailyasia.com/' + re.sub(
                r'.+(?=attachement)', '', img.attr('src'))
            des = ''
            if img.attr('alt'):
                des = img.attr('alt')
            elif img.siblings('p'):
                des = img.siblings('p').text()
            media = self.NewsItem.MediaItem(media_url=media_u,
                                            type='image',
                                            description=des,
                                            created_at=item.fetched_at)
            item.media_list.append(media)
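# Illustration of the image-URL rewrite above (the source path is hypothetical;
# only the "attachement" path segment is taken from the code):
#
#   src:     http://cdn.example.net/2016/attachement/jpg/site1/photo.jpg
#   becomes: http://www.chinadailyasia.com/attachement/jpg/site1/photo.jpg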
def normal_item_solver(self, item, task, response):
    doc = self.get_doc(response)

    title = doc('h1').text()
    t = ''
    category = ''
    with self.time_cat_dict_lock:
        if task.url in self.time_cat_dict:
            t = self.time_cat_dict[task.url][0]
            category = self.time_cat_dict[task.url][1]
    t_stamp = (util.get_timestamp_from_string(t)
               + time.localtime().tm_hour * 3600
               + time.localtime().tm_min * 60
               + time.localtime().tm_sec)
    author = ''
    content = util.get_paragraphs_from_selector(doc, '#content p')

    item.raw = doc.text()
    item.title = title
    item.t = t
    item.t_stamp = t_stamp
    item.fetched_at = task.fetched_at
    item.category = category
    item.author = author
    item.content = content
    item.url = task.url
    item.source = 'HKEDB'
    item.task_no = self.BATCH_NUMBER
def normal_item_solver(self, item, task, response):
    doc = self.get_doc(response)

    title = util.get_filtered_title(doc, {'.article-title'})
    t = ''
    with self.url_time_dict_lock:
        t = self.url_time_dict[task.url]
    print t
    t_stamp = (util.get_timestamp_from_string(t)
               + time.localtime().tm_hour * 3600
               + time.localtime().tm_min * 60
               + time.localtime().tm_sec)
    category = u'新聞發佈'
    author = ''
    content = util.get_paragraphs_from_selector(doc, '.article-content')

    item.raw = doc.text()
    item.title = title
    item.t = t
    item.t_stamp = t_stamp
    item.fetched_at = task.fetched_at
    item.category = category
    item.author = author
    item.content = content
    item.url = task.url
    item.source = 'NewCenturyForum'
    item.task_no = self.BATCH_NUMBER
def normal_item_solver(self, item, task, response):
    doc = self.get_doc(response)

    title = doc('h4').text()
    t = doc('#ContentPlaceHolder1_IndividualNewsList_lblTime_0').text()
    t_stamp = util.get_timestamp_from_string(t, time_format='%d/%m/%Y %H:%M')
    category = ''
    cat = doc('title').text()
    if cat_pattern.findall(cat):
        category = cat_pattern.findall(cat)[0]
    author = ''
    content = util.get_paragraphs_from_selector(
        doc, '#ContentPlaceHolder1_IndividualNewsList_lblContent_0')

    item.raw = doc.text()
    item.title = title
    item.t = t
    item.t_stamp = t_stamp
    item.fetched_at = task.fetched_at
    item.category = category
    item.author = author
    item.content = content
    item.url = task.url
    item.source = 'MetroFinance'
    item.task_no = self.BATCH_NUMBER

    item.id = ''
    if news_id_pattern.findall(task.url):
        # The [2:] slice drops the two-character prefix of the matched id.
        item.id = (news_id_pattern.findall(task.url)[0])[2:]
def normal_item_solver(self, item, task, response):
    doc = self.get_doc(response)

    title = util.get_filtered_title(doc, {'h1'})
    # The post date sits in an unlabelled sibling <div>; pick it out by its
    # grey text colour.
    t_divs = doc('h1').siblings('div').items()
    t = ''
    t_stamp = 0
    for _div in t_divs:
        if _div.css('color') == 'rgb(128, 128, 128)':
            t = _div.text()
            t_stamp = util.get_timestamp_from_string(t) + int(
                time.localtime().tm_sec)
            break
    category = u'电子'
    author = ''
    content = util.get_paragraphs_from_selector(doc, '.href_txt_blog2')

    item.raw = doc.text()
    item.title = title
    item.t = t
    item.t_stamp = t_stamp
    item.fetched_at = task.fetched_at
    item.category = category
    item.author = author
    item.content = content
    item.url = task.url
    item.source = 'HKGolden'
    item.task_no = self.BATCH_NUMBER
def task_filter(self, doc, url, doc_url):
    for reg_pattern in self.reg_patterns:
        if reg_pattern.match(url):
            if year_date_pattern.findall(url):
                t = year_date_pattern.findall(url)[0]
                t_stamp = util.get_timestamp_from_string(t)
            else:
                if doc('time') or doc('span.date'):
                    t = util.get_time_string_from_selectors(doc,
                                                            {'time', 'span.date'})
                    t_stamp = util.get_timestamp_from_string(t)
                else:
                    return True
            if t_stamp >= util.get_day_stamp(self.OFFSET):
                return True
            return False
    return False
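# year_date_pattern is defined elsewhere in the module; any pattern whose
# first capture is a date string parseable by util.get_timestamp_from_string
# would satisfy the filter above, e.g. (hypothetical):
#
#   year_date_pattern = re.compile(r'/(\d{4}-\d{2}-\d{2})/')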
def normal_item_solver(self, item, task, response):
    response.encoding = 'utf-8'
    doc = self.get_doc(response)

    # Strip the site name ("香港金融管理局" = Hong Kong Monetary Authority)
    # from the title, and the "修訂日期" (revision date) label from the date.
    title = re.sub(u'香港金融管理局 - ', '',
                   re.findall(title_pattern, response.text)[0])
    t = re.sub(u'修訂日期: ', '', doc('#lastUpdate').text())
    t_stamp = (util.get_timestamp_from_string(t)
               + time.localtime().tm_hour * 3600
               + time.localtime().tm_min * 60
               + time.localtime().tm_sec)
    category = doc('#content').text().split(' ')[0]
    author = ''
    content = doc('.item').text()

    item.raw = doc.text()
    item.title = title
    item.t = t
    item.t_stamp = t_stamp
    item.fetched_at = task.fetched_at
    item.category = category
    item.author = author
    item.content = content
    item.url = task.url
    item.source = 'HKMA'
    item.task_no = self.BATCH_NUMBER
def normal_item_solver(self, item, task, response):
    doc = self.get_doc(response)

    title = doc('#News_Body_Title').text()
    t = ''
    if doc('#News_Body_Time'):
        t = date_pattern.findall(str(doc('#News_Body_Time')))[0]
    t_stamp = (util.get_timestamp_from_string(t)
               + time.localtime().tm_hour * 3600
               + time.localtime().tm_min * 60
               + time.localtime().tm_sec)
    category = doc('.Top_Index_A a:last-child').text()
    author = ''
    content = util.get_paragraphs_from_selector(doc, '#News_Body_Txt_A p')

    item.raw = doc.text()
    item.title = title
    item.t = t
    item.t_stamp = t_stamp
    item.fetched_at = task.fetched_at
    item.category = category
    item.author = author
    item.content = content
    item.url = task.url
    item.source = 'FMCOPRC'
    item.task_no = self.BATCH_NUMBER
def normal_item_solver(self, item, task, response):
    response.encoding = 'utf-8'
    doc = self.get_doc(response)

    title = util.get_filtered_title(doc, {'.article-header h1'})
    t = doc('.date').text()
    if date_pattern.findall(t):
        t = date_pattern.findall(t)[0]
    t_stamp = util.get_timestamp_from_string(t)
    category = doc('.now-here').text()
    author = ''
    content = util.get_paragraphs_from_selector(doc, '.article p')

    item.raw = doc.text()
    item.title = title
    item.t = t
    item.t_stamp = t_stamp
    item.fetched_at = task.fetched_at
    item.category = category
    item.author = author
    item.content = content
    item.url = task.url
    item.source = 'JD Online'
    item.task_no = self.BATCH_NUMBER
def task_filter(self, doc, url, doc_url):
    wanted = False
    for reg_pattern in self.reg_patterns:
        if reg_pattern.match(url):
            t = full_article_pattern.findall(url)[0]
            if (util.get_timestamp_from_string(t, '%m-%d-%Y')
                    >= util.get_day_stamp(offset=self.OFFSET)):
                wanted = True
    return wanted
def page_filter(self, doc, url):
    for reg_pattern in self.reg_patterns:
        if reg_pattern.match(url):
            t = util.get_time_string_from_selectors(doc, {'span.time'})
            t_stamp = util.get_timestamp_from_string(t)
            if t_stamp >= util.get_day_stamp(self.OFFSET):
                return True
            return False
    return False
def page_filter(self, doc, url):
    for reg_pattern in self.reg_patterns:
        if reg_pattern.match(url):
            t = doc('div[itemprop="dateCreated"]').attr('datetime')
            # Shift by 8 hours, presumably converting the UTC datetime
            # attribute to Hong Kong time.
            t_stamp = util.get_timestamp_from_string(t) + 8 * 3600
            if t_stamp >= util.get_day_stamp(self.OFFSET):
                return True
            return False
    return False
def page_filter(self, doc, url):
    for reg_pattern in self.reg_patterns:
        if reg_pattern.match(url):
            t = doc('.updated').text()
            t_stamp = util.get_timestamp_from_string(t)
            if t_stamp >= util.get_day_stamp(self.OFFSET):
                return True
            return False
    return False
def page_filter(self, doc, url):
    for reg_pattern in self.reg_patterns:
        if reg_pattern.match(url):
            meta_txt = doc('.metaStuff').text()
            t = re.findall(ur'[^\s]+月.+', meta_txt)[0]
            t_stamp = util.get_timestamp_from_string(t)
            if t_stamp >= util.get_day_stamp(self.OFFSET):
                return True
            return False
    return False
def page_filter(self, doc, url):
    for reg_pattern in self.reg_patterns:
        if reg_pattern.match(url):
            t = util.get_time_string_from_selectors(
                doc, {'#divnewsTextDate', '#part6808_ctl00_lblDetailDate'})
            t_stamp = util.get_timestamp_from_string(t, '%d.%m.%Y %H:%M')
            if t_stamp >= util.get_day_stamp(self.OFFSET):
                return True
            return False
    return False
def page_filter(self, doc, url):
    wanted = False
    for reg_pattern in self.reg_patterns:
        if reg_pattern.match(url):
            if doc('div.post_time'):
                t = util.get_time_string_from_selectors(doc, {'div.post_time'})
                t_stamp = util.get_timestamp_from_string(t)
                if t_stamp >= util.get_day_stamp():
                    return True
    return wanted
def normal_item_solver(self, item, task, response):
    doc = self.get_doc(response)

    title = util.get_filtered_title(doc, {'title'}, u' - BBC 中文网| BBC Zhongwen')
    # The published time lives in one of several layouts; try each in turn.
    t = ''
    t_stamp = 0
    if doc('.story-body .mini-info-list .date').attr('data-datetime'):
        t = doc('.story-body .mini-info-list .date').attr('data-datetime')
        t_stamp = util.get_timestamp_from_string(t) + time.localtime().tm_sec
    elif doc('.timeline-status h3') and date_pattern.findall(
            doc('.timeline-status h3').text()):
        t = date_pattern.findall(doc('.timeline-status h3').text())[0]
        t_stamp = util.get_timestamp_from_string(t) + time.localtime().tm_sec
    elif doc('.story-body .date strong') and date_pattern.findall(
            doc('.story-body .date strong').text()):
        t = date_pattern.findall(doc('.story-body .date strong').text())[0]
        t_stamp = util.get_timestamp_from_string(t) + time.localtime().tm_sec
    category = doc('meta[property="article:section"]').attr('content')
    author = doc('span.byline__name').text()
    content = util.get_paragraphs_from_selector(doc,
                                                'div[property="articleBody"] p')
    if content == '':
        content = util.get_paragraphs_from_selector(doc, '.article-wrapper p')
    if content == '':
        content = util.get_paragraphs_from_selector(doc, '.map-body p')

    item.raw = doc.text()
    item.title = title
    item.t = t
    item.t_stamp = t_stamp
    item.fetched_at = task.fetched_at
    item.category = category
    item.author = author
    item.content = content
    item.url = task.url
    item.source = 'BBC Chinese'
    item.task_no = self.BATCH_NUMBER
def task_filter(self, doc, url, doc_url):
    for reg_pattern in self.reg_patterns:
        if reg_pattern.match(url):
            # Links discovered on non-article pages are accepted without a
            # date check.
            if not reg_pattern.match(doc_url):
                return True
            t = util.get_time_string_from_selectors(doc, {'#article_date'})
            t_stamp = util.get_timestamp_from_string(t)
            if t_stamp >= util.get_day_stamp(self.OFFSET):
                return True
    return False
def page_filter(self, doc, url):
    for reg_pattern in self.reg_patterns:
        if reg_pattern.match(url):
            t = doc('meta[property="article:published_time"]').attr('content')
            t_stamp = util.get_timestamp_from_string(t)
            if t_stamp >= util.get_day_stamp(self.OFFSET):
                return True
            return False
    return False
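# All of the page_filter/task_filter variants above share one skeleton: match
# the URL against self.reg_patterns, extract a publish time, and keep pages no
# older than the crawler's day window. A generic sketch of that structure
# (illustrative only; extract_time_string is a hypothetical parameter, the
# util helpers are the ones used above):
def _generic_page_filter(self, doc, url, extract_time_string):
    for reg_pattern in self.reg_patterns:
        if reg_pattern.match(url):
            t = extract_time_string(doc)  # e.g. lambda d: d('.updated').text()
            t_stamp = util.get_timestamp_from_string(t)
            return t_stamp >= util.get_day_stamp(self.OFFSET)
    return False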