def test_css_scraper_reject_type(self):
    scraper = CSSScraper()
    request = Request('http://example.com/styles.css')
    response = Response(200, 'OK')
    response.body = Body()

    with wpull.util.reset_file_offset(response.body):
        html_file_path = os.path.join(ROOT_PATH,
                                      'testing', 'samples', 'styles.css')
        with open(html_file_path, 'rb') as in_file:
            shutil.copyfileobj(in_file, response.body)

    scrape_result = scraper.scrape(request, response,
                                   link_type=LinkType.html)
    self.assertFalse(scrape_result)
def test_html_not_quite_charset(self):
    element_walker = ElementWalker(
        css_scraper=CSSScraper(), javascript_scraper=JavaScriptScraper())
    scraper = HTMLScraper(self.get_html_parser(), element_walker)
    request = Request('http://example.com/')
    response = Response(200, '')
    response.body = Body()

    with wpull.util.reset_file_offset(response.body):
        html_file_path = os.path.join(ROOT_PATH,
                                      'testing', 'samples',
                                      'videogame_top.htm')
        with open(html_file_path, 'rb') as in_file:
            shutil.copyfileobj(in_file, response.body)

    scrape_result = scraper.scrape(request, response)
    inline_urls = scrape_result.inline_links
    linked_urls = scrape_result.linked_links

    self.assertIn(
        'http://example.com/copyright_2001_2006_rtype.gif',
        inline_urls
    )
    self.assertIn(
        'http://www.geocities.jp/gamehouse_grindcrusher/',
        linked_urls
    )
def test_xhtml_converter(self):
    url_table = URLTable()
    image_filename = os.path.join(self.temp_dir.name, 'image.png')
    tubes_filename = os.path.join(self.temp_dir.name, 'tubes.html')
    ferret_filename = os.path.join(self.temp_dir.name, 'ferret.jpg')

    url_table.add_many([
        AddURLInfo('http://example.com/styles.css', None, None),
        AddURLInfo('http://example.com/image.png', None, None),
        AddURLInfo('http://example.com/cat.jpg', None, None),
        AddURLInfo('http://example.com/fox.jpg', None, None),
        AddURLInfo('http://example.com/ferret.jpg', None, None),
        AddURLInfo('http://example.com/tubes.html', None, None),
    ])
    url_table.update_one(
        'http://example.com/styles.css',
        status=Status.done.value, link_type='css')
    url_table.update_one(
        'http://example.com/image.png',
        status=Status.done.value,
        filename=os.path.relpath(image_filename, self.temp_dir.name))
    url_table.update_one(
        'http://example.com/tubes.html',
        status=Status.done.value,
        filename=os.path.relpath(tubes_filename, self.temp_dir.name))
    url_table.update_one(
        'http://example.com/ferret.jpg',
        status=Status.done.value,
        filename=os.path.relpath(ferret_filename, self.temp_dir.name))

    html_filename = os.path.join(self.temp_dir.name, 'index.html')
    new_html_filename = os.path.join(self.temp_dir.name, 'index.html-new')

    with open(html_filename, 'w') as out_file:
        out_file.write(XHTML_TEXT)

    for filename in [image_filename, tubes_filename, ferret_filename]:
        with open(filename, 'wb'):
            pass

    element_walker = ElementWalker(css_scraper=CSSScraper())
    converter = HTMLConverter(
        self.get_html_parser(), element_walker, url_table)
    converter.convert(
        html_filename, new_html_filename,
        base_url='http://example.com/index.html')

    with open(new_html_filename, 'r') as in_file:
        converted_text = in_file.read()

    self.assertIn("url('image.png')", converted_text)
    self.assertIn("url('http://example.com/cat.jpg')", converted_text)
    self.assertIn('"tubes.html"', converted_text)
    self.assertIn('"http://example.com/lol.html"', converted_text)
    self.assertIn("url('http://example.com/fox.jpg')", converted_text)
    self.assertIn("url('ferret.jpg')", converted_text)
    self.assertIn("hello world!!", converted_text)
    self.assertIn("<hr/>", converted_text)
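# The assertions above encode the converter's core rule: URLs marked done in
# the URL table with a recorded local filename are rewritten to relative
# paths ('image.png', 'tubes.html', 'ferret.jpg'), while URLs that were never
# fetched ('cat.jpg', 'fox.jpg', 'lol.html') keep their absolute form.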
def test_html_soup(self):
    element_walker = ElementWalker(
        css_scraper=CSSScraper(), javascript_scraper=JavaScriptScraper())
    scraper = HTMLScraper(HTMLParser(), element_walker)
    request = Request('http://example.com/')
    response = Response(200, '')
    response.body = Body()
    response.fields['Refresh'] = 'yes'

    with wpull.util.reset_file_offset(response.body):
        html_file_path = os.path.join(ROOT_PATH,
                                      'testing', 'samples', 'soup.html')
        with open(html_file_path, 'rb') as in_file:
            shutil.copyfileobj(in_file, response.body)

    scrape_result = scraper.scrape(request, response)
    inline_urls = scrape_result.inline_links
    linked_urls = scrape_result.linked_links

    self.assertEqual({'http://example.com/ABOUTM~1.JPG'}, inline_urls)
    self.assertEqual(
        {
            'http://example.com/BLOG',
            'http://example.com/web ring/Join.htm',
        },
        linked_urls)
def test_html_scraper_links_base_href(self):
    element_walker = ElementWalker(
        css_scraper=CSSScraper(), javascript_scraper=JavaScriptScraper())
    scraper = HTMLScraper(HTMLParser(), element_walker)
    request = Request('http://example.com/')
    response = Response(200, 'OK')
    response.body = Body()

    with wpull.util.reset_file_offset(response.body):
        html_file_path = os.path.join(ROOT_PATH,
                                      'testing', 'samples', 'basehref.html')
        with open(html_file_path, 'rb') as in_file:
            shutil.copyfileobj(in_file, response.body)

    scrape_result = scraper.scrape(request, response)
    inline_urls = scrape_result.inline_links
    linked_urls = scrape_result.linked_links

    self.assertEqual('utf-8', scrape_result.encoding)
    self.assertEqual(
        {
            'http://cdn.example.com/stylesheet1.css',
            'http://www.example.com/stylesheet2.css',
            'http://example.com/a/stylesheet3.css',
            'http://example.com/a/dir/image1.png',
            'http://example.com/dir/image2.png',
            'http://example.net/image3.png',
            'http://example.com/dir/image4.png',
        },
        inline_urls)
    self.assertEqual({'http://example.com/a/'}, linked_urls)
def test_scrape_import_urls(self):
    text = '''
    @import url("fineprint.css") print;
    @import url("bluish.css") projection, tv;
    @import 'custom.css';
    @import url("chrome://communicator/skin/");
    @import "common.css" screen, projection;
    @import url('landscape.css') screen and (orientation:landscape);
    @import url(cool.css);
    @import warm.css;
    ''' + '@import url("' + ('a' * 1000) + '");'

    urls = set(CSSScraper().scrape_links(text))

    self.assertEqual(
        {
            'fineprint.css',
            'bluish.css',
            'chrome://communicator/skin/',
            'landscape.css',
            'cool.css',
            'custom.css',
            'common.css',
            'warm.css',
        },
        urls)
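# The sample text above covers the @import forms CSS allows: url() with
# double quotes, single quotes, or no quotes, and a bare quoted string, each
# optionally followed by a media query. '@import warm.css' (unquoted, without
# url()) is not valid CSS but is still expected to be scraped. The appended
# 1000-character URL is deliberately absent from the expected set: presumably
# the scraper caps candidate URL length and must ignore it.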
def test_xhtml_invalid(self):
    element_walker = ElementWalker(
        css_scraper=CSSScraper(), javascript_scraper=JavaScriptScraper())
    scraper = HTMLScraper(self.get_html_parser(), element_walker)
    request = Request('http://example.com/')
    response = Response(200, '')
    response.body = Body()

    with wpull.util.reset_file_offset(response.body):
        html_file_path = os.path.join(ROOT_PATH,
                                      'testing', 'samples',
                                      'xhtml_invalid.html')
        with open(html_file_path, 'rb') as in_file:
            shutil.copyfileobj(in_file, response.body)

    scrape_result = scraper.scrape(request, response)
    inline_urls = scrape_result.inline_links
    linked_urls = scrape_result.linked_links

    self.assertEqual(
        {
            'http://example.com/image.png',
            'http://example.com/script.js',
        },
        inline_urls
    )
    self.assertEqual(
        {
            'http://example.com/link'
        },
        linked_urls
    )
def test_html_krokozyabry(self):
    element_walker = ElementWalker(
        css_scraper=CSSScraper(), javascript_scraper=JavaScriptScraper())
    scraper = HTMLScraper(self.get_html_parser(), element_walker)
    request = Request('http://example.com/')
    response = Response(200, '')
    response.body = Body()
    response.fields['content-type'] = 'text/html; charset=KOI8-R'

    with wpull.util.reset_file_offset(response.body):
        html_file_path = os.path.join(ROOT_PATH,
                                      'testing', 'samples',
                                      'krokozyabry.html')
        with open(html_file_path, 'rb') as in_file:
            shutil.copyfileobj(in_file, response.body)

    scrape_result = scraper.scrape(request, response)
    inline_urls = scrape_result.inline_links
    linked_urls = scrape_result.linked_links

    self.assertEqual('koi8-r', scrape_result.encoding)
    self.assertEqual(
        set(),
        inline_urls
    )
    self.assertEqual(
        {'http://example.com/Кракозябры'},
        linked_urls
    )
def test_rss_as_html(self):
    element_walker = ElementWalker(
        css_scraper=CSSScraper(), javascript_scraper=JavaScriptScraper())
    scraper = HTMLScraper(HTMLParser(), element_walker)
    request = Request('http://example.com/')
    response = Response(200, '')
    response.body = Body()
    response.fields['content-type'] = 'application/rss+xml'

    with wpull.util.reset_file_offset(response.body):
        html_file_path = os.path.join(ROOT_PATH,
                                      'testing', 'samples', 'rss.xml')
        with open(html_file_path, 'rb') as in_file:
            shutil.copyfileobj(in_file, response.body)

    scrape_result = scraper.scrape(request, response)
    self.assertTrue(scrape_result)

    inline_urls = scrape_result.inline_links
    linked_urls = scrape_result.linked_links

    self.assertFalse(inline_urls)
    self.assertEqual(
        {
            'http://www.someexamplerssdomain.com/main.html',
            'http://www.wikipedia.org/'
        },
        linked_urls)
def test_css_scraper_mojibake(self):
    scraper = CSSScraper()
    request = Request('http://example.com/styles.css')
    response = Response(200, 'OK')
    response.body = Body()

    with wpull.util.reset_file_offset(response.body):
        html_file_path = os.path.join(ROOT_PATH,
                                      'testing', 'samples', 'mojibake.css')
        with open(html_file_path, 'rb') as in_file:
            shutil.copyfileobj(in_file, response.body)

    scrape_result = scraper.scrape(request, response)
    inline_urls = scrape_result.inline_links
    linked_urls = scrape_result.linked_links

    self.assertEqual(
        {'http://example.com/文字化け.png'},
        inline_urls)
    self.assertFalse(linked_urls)
def test_bad_xml(self):
    element_walker = ElementWalker(
        css_scraper=CSSScraper(), javascript_scraper=JavaScriptScraper())
    scraper = HTMLScraper(HTMLParser(), element_walker)
    request = Request('http://example.com/')
    response = Response(200, 'OK')
    response.body = Body()

    with wpull.util.reset_file_offset(response.body):
        html_file_path = os.path.join(ROOT_PATH,
                                      'testing', 'samples',
                                      'foxstripcomics_bad_xml.html')
        with open(html_file_path, 'rb') as in_file:
            shutil.copyfileobj(in_file, response.body)

    # No crash
    scraper.scrape(request, response, link_type=LinkType.html)
def convert_by_record(self, url_record):
    '''Convert using given URL Record.'''
    filename = url_record.filename

    if not os.path.exists(filename):
        return

    if url_record.link_type:
        if url_record.link_type not in ('css', 'html'):
            return
        else:
            link_type = url_record.link_type
    else:
        with open(filename, 'rb') as in_file:
            if HTMLScraper.is_supported(
                    file=in_file, url_info=url_record.url_info):
                link_type = 'html'
            elif CSSScraper.is_supported(
                    file=in_file, url_info=url_record.url_info):
                link_type = 'css'
            else:
                link_type = None

    _logger.info(__(
        _('Converting links in file ‘{filename}’ (type={type}).'),
        filename=filename, type=link_type
    ))

    if self._backup_enabled:
        shutil.copy2(filename, filename + '.orig')

    temp_filename = filename + '-new'

    if link_type == 'css':
        self._css_converter.convert(
            filename, temp_filename, base_url=url_record.url)
    elif link_type == 'html':
        self._html_converter.convert(
            filename, temp_filename, base_url=url_record.url)
    else:
        raise Exception('Unknown link type.')

    os.remove(filename)
    os.rename(temp_filename, filename)
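# A minimal usage sketch for convert_by_record above, assuming it lives on
# wpull's batch converter class, which supplies self._css_converter,
# self._html_converter and self._backup_enabled (the constructor signature
# and the url_table iterator below are assumptions):
#
#     converter = BatchDocumentConverter(
#         html_parser, element_walker, url_table, backup=True)
#     for url_record in url_table.get_all():
#         converter.convert_by_record(url_record)
#
# Conversion is effectively in place: links are rewritten into a '-new'
# temporary file, the original is optionally saved as '.orig', and the
# temporary file then replaces the original.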
def test_html_encoding_lxml_name_mismatch(self):
    '''It should accept encoding names with underscore.'''
    element_walker = ElementWalker(
        css_scraper=CSSScraper(), javascript_scraper=JavaScriptScraper())
    scraper = HTMLScraper(HTMLParser(), element_walker)
    request = Request('http://example.com/')
    response = Response(200, '')
    response.body = Body()
    response.fields['content-type'] = 'text/html; charset=EUC_KR'

    with wpull.util.reset_file_offset(response.body):
        response.body.write('힖'.encode('euc_kr'))

    scrape_info = scraper.scrape(request, response)

    self.assertTrue(scrape_info)
    self.assertEqual('euc_kr', scrape_info['encoding'])
def test_html_garbage(self):
    element_walker = ElementWalker(
        css_scraper=CSSScraper(), javascript_scraper=JavaScriptScraper())
    scraper = HTMLScraper(HTMLParser(), element_walker)
    request = Request('http://example.com/')
    response = Response(200, '')
    response.body = Body()
    response.fields['content-type'] = 'text/html'

    with wpull.util.reset_file_offset(response.body):
        response.body.write(
            b'\x01\x00\x01\x00l~Z\xff\x0f`y\x80\x00p<\x7f'
            b'\xffndo\xff\xff-\x83{d\xec</\xfe\x80\x00\xb4Bo'
            b'\x7f\xff\xff\xffV\xc1\xff\x7f\xff7')

    scrape_info = scraper.scrape(request, response)

    self.assertTrue(scrape_info)
def test_html_scraper_reject_type(self):
    element_walker = ElementWalker(
        css_scraper=CSSScraper(), javascript_scraper=JavaScriptScraper())
    scraper = HTMLScraper(HTMLParser(), element_walker)
    request = Request('http://example.com/')
    response = Response(200, 'OK')
    response.body = Body()

    with wpull.util.reset_file_offset(response.body):
        html_file_path = os.path.join(ROOT_PATH,
                                      'testing', 'samples', 'many_urls.html')
        with open(html_file_path, 'rb') as in_file:
            shutil.copyfileobj(in_file, response.body)

    scrape_result = scraper.scrape(request, response,
                                   link_type=LinkType.css)
    self.assertFalse(scrape_result)
def test_html_wrong_charset(self):
    element_walker = ElementWalker(
        css_scraper=CSSScraper(), javascript_scraper=JavaScriptScraper())
    scraper = HTMLScraper(self.get_html_parser(), element_walker)
    request = Request('http://example.com/')
    response = Response(200, '')
    response.body = Body()

    with wpull.util.reset_file_offset(response.body):
        html_file_path = os.path.join(ROOT_PATH,
                                      'testing', 'samples', 'kcna.html')
        with open(html_file_path, 'rb') as in_file:
            shutil.copyfileobj(in_file, response.body)

    scrape_result = scraper.scrape(request, response)
    inline_urls = scrape_result.inline_links
    linked_urls = scrape_result.linked_links

    self.assertEqual('utf-16-le', scrape_result.encoding)
    self.assertEqual(
        {
            'http://example.com/utm/__utm.js',
            'http://example.com/Knewskage.gif',
            'http://example.com/Lline.gif',
            'http://example.com/Sline.gif',
            'http://example.com/korean01.gif',
            'http://example.com/korean02.gif',
            'http://example.com/english01.gif',
            'http://example.com/english02.gif',
            'http://example.com/Tongsinkage.gif',
            'http://example.com/Knewskage.gif',
        },
        inline_urls
    )
    self.assertEqual(
        {
            'http://example.com/index-k.htm',
            'http://example.com/index-e.htm',
        },
        linked_urls
    )
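# Note: per the test name, the sample's declared charset is wrong, so the
# 'utf-16-le' assertion above presumably exercises wpull's byte-level
# encoding detection overriding the declaration rather than trusting it.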
def test_html_serious_bad_encoding(self):
    element_walker = ElementWalker(
        css_scraper=CSSScraper(), javascript_scraper=JavaScriptScraper())
    scraper = HTMLScraper(HTMLParser(), element_walker,
                          encoding_override='utf8')
    request = Request('http://example.com/')
    response = Response(200, '')
    response.body = Body()
    response.fields['content-type'] = 'text/html; charset=utf8'

    with wpull.util.reset_file_offset(response.body):
        html_file_path = os.path.join(ROOT_PATH,
                                      'testing', 'samples',
                                      'xkcd_1_evil.html')
        with open(html_file_path, 'rb') as in_file:
            shutil.copyfileobj(in_file, response.body)

    scrape_info = scraper.scrape(request, response)

    self.assertTrue(scrape_info)
def test_html_scraper_links(self):
    element_walker = ElementWalker(
        css_scraper=CSSScraper(), javascript_scraper=JavaScriptScraper())
    scraper = HTMLScraper(HTMLParser(), element_walker)
    request = Request('http://example.com/')
    response = Response(200, 'OK')
    response.body = Body()
    response.fields['Refresh'] = '3; url=header_refresh.html'

    with wpull.util.reset_file_offset(response.body):
        html_file_path = os.path.join(ROOT_PATH,
                                      'testing', 'samples', 'many_urls.html')
        with open(html_file_path, 'rb') as in_file:
            shutil.copyfileobj(in_file, response.body)

    scrape_result = scraper.scrape(request, response)
    inline_urls = scrape_result.inline_links
    linked_urls = scrape_result.linked_links

    self.assertEqual('utf-8', scrape_result.encoding)

    self.assertEqual(
        frozenset({
            'http://example.com/style_import_url.css',
            'http://example.com/style_import_quote_url.css',
            'http://example.com/style_single_quote_import.css',
            'http://example.com/style_double_quote_import.css',
            'http://example.com/bg.png',
            'http://example.com/link_href.css',
            'http://example.com/script.js',
            'http://example.com/body_background.png',
            'http://example.com/images/table_background.png',
            'http://example.com/images/td_background.png',
            'http://example.com/images/th_background.png',
            'http://example.com/style_url1.png',
            'http://example.com/style_url2.png',
            'http://example.com/applet/',  # returned by lxml
            'http://example.com/applet/applet_code.class',
            'http://example.com/applet/applet_src.class',
            'http://example.com/bgsound.mid',
            'http://example.com/audio_src.wav',
            'http://example.com/audio_poster.jpeg',
            'http://example.net/source_src.wav',
            'http://example.com/video_src.webm',
            'http://example.com/video_poster.jpeg',
            'http://example.net/track_src.vtt',
            'http://example.net/source_src.webm',
            'http://example.com/embed_src.mov',
            'http://example.com/fig_src.png',
            'http://example.com/frame_src.html',
            'http://example.com/iframe_src.html',
            'http://example.com/img_href.png',
            'http://example.com/img_lowsrc.png',
            'http://example.com/img_src.png',
            'http://example.com/img_data.png',
            'http://example.com/img_srcset_1.jpeg',
            'http://example.com/img_srcset_2.jpeg',
            'http://example.com/img_srcset_3.jpeg',
            'http://example.com/input_src.png',
            'http://example.com/layer_src.png',
            'http://example.com/object/',  # returned by lxml
            'http://example.com/object/object_data.swf',
            'http://example.com/object/object_archive.dat',
            'mailto:internet',
            'object_not_url_codebase',
            'http://example.com/param_ref_value.php',
            'http://example.com/overlay_src.html',
            'http://example.com/script_variable.png',
        }),
        inline_urls)

    self.assertEqual(
        frozenset({
            'http://example.com/og_image.png',
            'http://example.com/og_url.html',
            'http://example.com/og_audio.mp3',
            'http://example.com/og_video.webm',
            'http://example.com/twitter_image.png',
            'http://example.com/twitter_image0.png',
            'http://example.com/twitter_image1.png',
            'http://example.com/twitter_image2.png',
            'http://example.com/twitter_image3.png',
            'http://example.com/twitter_player.html',
            'http://example.com/twitter_stream.mp4',
            'http://example.net/soup.html',
            'http://example.com/a_href.html',
            'http://example.com/area_href.html',
            'http://example.com/frame_src.html',
            'http://example.com/embed_href.html',
            'http://example.com/embed_src.mov',
            'http://example.com/form_action.html',
            'http://example.com/iframe_src.html',
            'http://example.com/layer_src.png',
            'http://example.com/overlay_src.html',
            'ftp://ftp.protocol.invalid/',
            'mailto:[email protected]',
            'http://a-double-slash.example',
            'http://example.com/header_refresh.html',
            'https://[2001:db8:85a3:8d3:1319:8a2e:370:7348]:8080/ipv6',
            'http://example.com/document_write.html',
            'http://example.com/http_document_write.html',
            'http://example.com/http_document_write2.html',
            'http://example.com/http document write.html',
            'http://example.com/script_variable.html',
            'http://example.com/http_script_variable.html',
            'https://example.com/https_script_variable.html',
            'ftp://example.com/ftp_script_variable.html',
            'http://example.com/end_dir_script_variable/',
            'http://example.com/start_dir_script_variable',
            'http://example.com/../relative_dir_script_variable'
            if sys.version_info < (3, 5) else
            'http://example.com/relative_dir_script_variable',
            'http://example.com/script_json.html',
            'http://example.com/http_script_json.html?a=b',
            'http://example.com/a_javascript_link.html',
            'http://example.com/a_onclick_link.html',
        }),
        linked_urls)

    for url in inline_urls | linked_urls:
        self.assertIsInstance(url, str)
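# The sys.version_info branch above exists because Python 3.5 changed
# urllib.parse.urljoin to follow RFC 3986, collapsing excess '..' segments
# that earlier versions left in the resolved URL.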