def set_url():  # Set the robots.txt URL with the set_url() method.
    rp = RobotFileParser()
    rp.set_url('http://www.bilibili.com/robots.txt')
    rp.read()
    # Use the can_fetch() method to check whether a page may be crawled.
    print(rp.can_fetch('*', 'http://www.bilibili.com/vide/BV15J411T7WQ'))
    print(
        rp.can_fetch(
            '*',
            'http://www.bilibili.com/search?q=python&page=1&type=collections'))
def parse():  # Use the parse() method to read and analyze robots.txt.
    rp = RobotFileParser()
    rp.parse(
        urlopen('http://www.bilibili.com/robots.txt').read().decode(
            'utf-8').split('\n'))
    print(rp.can_fetch('*', 'http://www.bilibili.com/vide/BV15J411T7WQ'))
    print(
        rp.can_fetch(
            '*',
            'http://www.bilibili.com/search?q=python&page=1&type=collections'))
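For reference, parse() accepts any iterable of lines, so robots.txt rules kept in memory can be checked without a network request. A minimal sketch (the example.com rules below are invented; crawl_delay() requires Python 3.6+):

from urllib.robotparser import RobotFileParser

# In-memory robots.txt used only for illustration.
ROBOTS_TXT = """\
User-agent: *
Disallow: /private/
Crawl-delay: 5
"""

rp = RobotFileParser()
rp.parse(ROBOTS_TXT.splitlines())
print(rp.can_fetch('*', 'http://example.com/index.html'))    # True
print(rp.can_fetch('*', 'http://example.com/private/data'))  # False
print(rp.crawl_delay('*'))                                   # 5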
Example #3
    def test_robots_txt(self):
        parser = RobotFileParser(self.live_server_url + '/robots.txt')
        parser.read()

        url = self.live_server_url + '/index.html'
        for robot in self.robots:
            self.assertTrue(parser.can_fetch(robot, url))

        url = self.live_server_url + '/admin/'
        for robot in self.robots:
            self.assertFalse(parser.can_fetch(robot, url))
Example #4
def filter_googlebot(entries):
    """ Given a bunch of robots.txt entries, figure out if googlebot is allowed
    but other random bots are banned. yields tuples of (entry, reppy.Robots) objects
    that match this condition"""
    for entry in entries:
        if entry.status_code != 200:
            continue

        parser = RobotFileParser(entry.url)
        parser.parse(entry.body.split("\n"))

        if parser.can_fetch("GoogleBot", "/") and not parser.can_fetch("BensCoolBot", "/"):
            yield entry, parser
Example #6
def allowed_to_crawl(url, host_url, scheme):
    '''
    url: the full url, string
    host_url: the domain url, string (eg. wikipedia.org)
    scheme: the communication protocol, string (eg. https)
    '''

    # if host URL is google, assume we are allowed to crawl
    if host_url == 'google.com':
        return True

    # if it is not a link, return False
    if host_url == '' or scheme == '':
        return False

    try:
        # get the robots.txt
        rp = RobotFileParser()
        rp.set_url(scheme + "://" + host_url + "/robots.txt")
        rp.read()

        return rp.can_fetch("*", url)

    except:
        pass

    return True
Example #7
def robotExclusion(link):
    # print("Robot exclusion.....")
    rp = RobotFileParser()
    rp.set_url(urljoin(link, '/robots.txt'))
    rp.read()
    # print(rp.can_fetch("*", link))
    return rp.can_fetch("*", link)
Example #8
File: website.py Project: rebryk/spbau-ir
class Website:
    def __init__(self, scheme: str, hostname: str):
        self.scheme = scheme
        self.hostname = hostname
        self.last_time = 0

        self._urls = set()
        self._queue = deque()

        # parse robots.txt
        self._robot_parser = RobotFileParser()
        self._robot_parser.set_url("{}://{}/robots.txt".format(
            scheme, hostname))
        self._robot_parser.read()

    def can_fetch(self, user_agent: str, url: str) -> bool:
        return self._robot_parser.can_fetch(user_agent, url)

    def add_url(self, url: str, depth: int = 0):
        if url not in self._urls:
            self._urls.add(url)
            self._queue.append((url, depth))

    def get_url(self) -> (str, int):
        return self._queue.popleft()

    def crawl_delay(self, user_agent: str) -> int:
        delay = self._robot_parser.crawl_delay(user_agent)
        return delay * 300 if delay is not None else None

    def is_empty(self) -> bool:
        return len(self._queue) == 0
Example #9
def can_fetch(url, robots_url, useragent):
    from urllib.robotparser import RobotFileParser

    parser = RobotFileParser()
    parser.set_url(robots_url)
    parser.read()
    return parser.can_fetch(useragent, url)
Example #10
File: crawler.py Project: axce1/crawler
 def http_open(self, request):
     url = request.get_full_url()
     host = urlsplit(url)[1]
     robots_url = urlunsplit(('http', host, '/robots.txt', '', ''))
     rp = RobotFileParser(robots_url)
     rp.read()
     if not rp.can_fetch(self.agentname, url):
         raise RuntimeError('Forbidden by robots.txt')
     return urllib2.HTTPHandler.http_open(self, request)
Example #11
class Crawler(Thread):
	def __init__(self, scheduler, id):
		Thread.__init__(self)
		self.scheduler = scheduler
		self.robot_parser = RobotFileParser()
		self.running = True
		self.id = id

	def run(self):
		global RUNNING
		print(self.id,"running!")
		while RUNNING and self.running:
			url = self.scheduler.next()
			while url is None:
				sleep(2)
				url = self.scheduler.next()
				
			try:
				''' Check robots.txt '''
				parsed_url = urlparse(url)
				robots_url = parsed_url.scheme + "://" + parsed_url.netloc + "/robots.txt"
				robots_response = urlopen(robots_url, timeout=2)
				robots_file = robots_response.read()
				self.robot_parser.parse(robots_file.decode('utf-8').splitlines())
				if not self.robot_parser.can_fetch("*", url):
					#print(self.id,"is not allowed to fetch",url)
					continue
			
				''' Fetch the url '''
				print(self.id,"->",url)
				response = urlopen(url, timeout=2)
				data = response.read().decode('utf-8', errors='ignore')
				data = data.split('href="')
				del data[0]
				data = sorted(data, key=len)
			except (HTTPError, TimeoutError, ConnectionResetError, UnicodeDecodeError):
				data = []
			except URLError as e:
				print(url, "->", str(e))
				data = []
			except BaseException:
				data = []
				
			for d in data:
				d = d[:d.find('"')]
				if d.endswith('/'):
					d = d[:-1]
				''' Format relative URLs '''
				if not d.startswith("http://"):
					path = parsed_url.path
					url = path[:path.rfind("/")]
					while d.startswith("../"):
						url = url[:url.rfind("/")]
						d = d[3:]
					self.scheduler.add(parsed_url.scheme + \
										"://" + parsed_url.netloc + \
										url + "/" + d)
				else:
					self.scheduler.add(d)
Example #12
 def get_robot_txt(cls, url):
     try:
         rp = RobotFileParser()
         parsed_url = urlparse(url)
         robots_url = "{0.scheme}://{0.netloc}/robot.txt".format(parsed_url)
         rp.set_url(robots_url)
         rp.read()
         return rp.can_fetch("*", url)
     except Exception as e:
         raise Exception(e.args[0])
Example #13
 def check_robots_txt(self, parsed_link):
     """
     Checks the site's robots.txt file to make sure our user agent is allowed to visit that url.
     :param parsed_link:
     :return: boolean . True if we're allowed to visit (or there's no robots.txt)
     """
     rp = RobotFileParser()
     rp.set_url(urljoin(parsed_link.geturl(), '/robots.txt'))
     rp.read()
     return rp.can_fetch(self.user_agent, parsed_link.geturl())
Example #14
class Driver:
    def __init__(self,
                 root_url: str,
                 header: str,
                 access_delay: int = 3,
                 cookies: dict = None,
                 logger=None):
        self.logger = logger
        self.root_url = root_url
        self.cookies = cookies
        self.header = header
        self.access_delay = access_delay
        self.now_content = None
        self.robots = None
        self.load_robots_txt()

    def load_robots_txt(self):
        self.robots = RobotFileParser()
        self.robots.set_url(self.root_url + '/robots.txt')
        self.robots.read()

    def get(self, path):
        try:
            sleep(self.access_delay)
            url = f'{self.root_url}/{path}'
            if self.robots.can_fetch("*", url):
                res = requests.get(url,
                                   headers=self.header,
                                   cookies=self.cookies)
                if self.logger is not None:
                    self.logger.debug(f"Access to {url}.")
                self.now_content = BeautifulSoup(res.text, 'html.parser')
            else:
                if self.logger is not None:
                    self.logger.warning(
                        f"Access to this url is prohibited by robots.txt.\n<*>[URL={url}]"
                    )
        except Exception as e:
            if self.logger is not None:
                self.logger.warning(e)

    def find_element_by_class_name(self, name):
        return self.now_content.select('.' + name)[0]

    def find_elements_by_class_name(self, name):
        return self.now_content.select('.' + name)

    def find_element_by_id(self, name):
        return self.now_content.select('#' + name)[0]

    def find_elements_by_id(self, name):
        return self.now_content.select('#' + name)

    def find_element_by_tag(self, name):
        return self.now_content.find_all(name)
Example #15
def check_robot_txt(url):
    try:
        # proxy_handler = ProxyHandler(
        #     {
        #         'http' : 'http://127.0.0.1:63231',
        #         'https' : 'http://127.0.0.1:63231'
        #     }
        # )
        #
        # opener = build_opener(proxy_handler)

        rp = RobotFileParser(url + '/robots.txt')
        rp.read()
        #  rp.parse(urlopen(url.read().decode('utf-8').split('\n')))
        print(rp.can_fetch('*', url + '/stock/'))

        # print("***************1")
        # response = opener.open('https://www.baidu.com')
        # print("***************2")
        # print(response.read().decode('utf-8'))
        # headers = {'User-Agent': 'Mozilla / 5.0(Macintosh;Intel Mac OS X) AppleWebKit / 537.36(KHTML, like Gecko)',
        #            'Host': 'httpbin.org'}
        # dict = {'name':'Tobi'}
        #
        # username= '******'
        # password= '******'
        #
        # url = 'http://localhost:5000'
        #
        # p = urllib.request.HTTPPasswordMgrWithDefaultRealm()
        # p.add_password(None,url,username,password)
        # auth_handler = urllib.request.HTTPBasicAuthHandler(p)
        # opener = urllib.request.build_opener(auth_handler)
        #
        # response = opener.open(url)
        # html = response.read().decode('utf-8')
        # print(html)
        # request = urllib.request.Request('http://httpbin.org/post', headers=headers, data=bytes(urllib.parse.urlencode(dict),encoding='utf8'), method='POST')
        # print("***************")
        # response = urllib.request.urlopen(request, context=ssl._create_unverified_context())
        # if  response.status != 200:
        #     print('status : ' + response.status)
        # print("*******result********")
        # print(response.read().decode('utf-8'))
        # print(type(response))
        # print(response.status)
        # print(response.getheaders())
        # print(response.getheader('Server'))

    except URLError as e:
        # if  isinstance(e.reason, socket.timeout):
        #     print('time out!!!')

        print(e.reason)
    return
Example #16
 def can_fetch(self, url):
     parsed_url = urlparse(url)
     # Fetching and parsing the robots.txt file can be expensive in it-self.
     # Let's cache the RobotFileParser instances, one per host, on the
     # scraper itself to reuse them for consecutive queries.
     rfp = self.robot_file_parsers.get(parsed_url.hostname)
     if rfp is None:
         rfp = RobotFileParser(self.get_robot_url(url))
         rfp.read()
         self.robot_file_parsers[parsed_url.hostname] = rfp
     return rfp.can_fetch(self.user_agent, parsed_url.path)
Example #17
def robots_get(url, *args, **kwargs):
    u = urlparse(url)
    robot_url = '{scm}://{loc}/robots.txt'.format(scm=u.scheme, loc=u.netloc)
    robot = RobotFileParser(robot_url)
    robot.read()
    ua = kwargs.get('headers', dict()).get('User-Agent', '*')
    if not robot.can_fetch(ua, url):
        return 'Not Allowed By robots.txt'
    delay = robot.crawl_delay(ua)
    if delay:
        time.sleep(delay)
    return requests.get(url, *args, **kwargs)
Example #18
def check_robots(base_url, ext_url):
    '''
    Check the robots.txt
    Prints note if base_url + ext_url is legal for crawling
    '''
    bot = RobotFileParser(base_url + '/robots.txt')
    bot.read()
    if bot.can_fetch('*', base_url + ext_url):
        print('robots.txt permits parsing')
    else:
        print('Do not parse')
    return bot
Example #19
    def is_scraping_allowed(self):
        """
        Checks that robots.txt does not forbid scraping. The check is not
        really complete, because it only examines the all-jobs URL.

        :return:
        """
        robot_parser = RobotFileParser()
        robots_url = urljoin(self.base_url, 'robots.txt')
        robot_parser.set_url(robots_url)
        robot_parser.read()
        return robot_parser.can_fetch('*', urljoin(
                self.base_url, self.all_job_url))
Example #20
 def _robot_parser(self, txt, url):
     """Parses robots.txt with user-agent="*".
     :param txt: robots.txt to parse
     :param url: URL to check
     :returns: if url is allowed in robots.txt
     :rtype: bool
     """
     parser = RobotFileParser()
     if txt:
         parser.parse(txt.decode("ascii", "replace").splitlines())
         return parser.can_fetch("*", url)
     else:
         return True
Example #21
class Exclusion(object):
    def __init__(self):
        self.robot_cache = {}
        self.rp = RobotFileParser()

    # Broken? disallows anything in the robots.txt.
    # Even if it is marked with Allow:
    def test_url(self, url):
        self.rp = RobotFileParser()
        robot_url = uu.domain_name(url) + '/robots.txt'
        self.rp.set_url(robot_url)
        self.rp.read()
        return self.rp.can_fetch('*', url)
Example #22
def check_delay(user_agent, url):
	robots_url = to_robots(url)

	# Fetch the delay for all programs, and this one in particular.
	general_delay, have_general = get_delay(GENERAL_AGENT, robots_url)
	agent_delay, have_agent = get_delay(user_agent, robots_url)

	# The next part is straightforward:
	# Check the general rules, and then check the program's rules,
	# if there are rules.
	robots_parser = RobotFileParser(robots_url)
	robots_parser.read()
	if (have_general and not robots_parser.can_fetch("*", url)):
		return None
	if (have_agent and not robots_parser.can_fetch(user_agent, url)):
		return None

	# Use the larger delay.
	if general_delay < agent_delay:
		return agent_delay
	else:
		return general_delay
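The helper get_delay() and the constant GENERAL_AGENT are not shown in this snippet. Whatever they do internally, the stdlib behaviour they presumably build on is that crawl_delay() returns None when no Crawl-delay directive applies, which would explain the separate have_general / have_agent flags next to the numeric delays. A small illustrative sketch (agent names invented):

from urllib.robotparser import RobotFileParser

rp = RobotFileParser()
rp.parse([
    "User-agent: slowbot",   # hypothetical agent name
    "Crawl-delay: 10",
    "",
    "User-agent: *",
    "Disallow:",
])
print(rp.crawl_delay("slowbot"))  # 10
print(rp.crawl_delay("*"))        # None: no delay declared for the default agent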
Example #23
File: robot.py Project: mattgu74/ic05
	def can_fetch(self, url):
		parse = urllib.parse.urlparse(url)
		hostname = parse.hostname
		try:
			robot = self.__robots[hostname]
		except Exception:
			roboturl = urllib.parse.urlunparse((parse.scheme,parse.netloc,"robots.txt","","",""))
			robot = RobotFileParser(roboturl)
			try:
				robot.read()
			except Exception:
				robot = _RobotAllowAll()
			self.__robots[hostname] = robot
		return robot.can_fetch("*", url)
Example #24
def can_fetch_url(robots_url, site_url, useragent="*"):
    """
    Using robots.txt found at robots_url, decides if useragent can fetch
    site url

    :param robots_url: robots.txt url
    :param site_url: to be fetched url
    :param useragent: useragent
    :return: True, if fetching is allowed
    """
    rfp = RobotFileParser()
    rfp.set_url(robots_url)
    rfp.read()
    return rfp.can_fetch(useragent=useragent, url=site_url)
Example #25
class RobotsTxt:
    def __init__(self, base_url):
        try:
            url = urljoin(base_url, 'robots.txt')
            self.rp = RobotFileParser()
            self.rp.set_url(url)
            self.rp.read()
        except:
            self.rp = None

    def canFetch(self, url):
        if self.rp is None:
            return True
        else:
            return self.rp.can_fetch('*', url)
Example #26
 def get_robots_text(self,url):
     """get_robots_txt
     url: site URL whose robots.txt should be checked
     """
     try:
         rp = RobotFileParser()
         # build the robots.txt URL
         parsed_url = urlparse(url)
         robots_url = "{0.scheme}://{0.netloc}/robots.txt".format(parsed_url)
         # fetch robots.txt
         rp.set_url(robots_url)
         rp.read()
         # check whether fetching is allowed
         return rp.can_fetch("*",url)
     except:
         return False
Example #27
class PythonRobotParser(RobotParser):
    def __init__(self, robotstxt_body, spider):
        from urllib.robotparser import RobotFileParser
        self.spider = spider
        robotstxt_body = decode_robotstxt(robotstxt_body, spider, to_native_str_type=True)
        self.rp = RobotFileParser()
        self.rp.parse(robotstxt_body.splitlines())

    @classmethod
    def from_crawler(cls, crawler, robotstxt_body):
        spider = None if not crawler else crawler.spider
        o = cls(robotstxt_body, spider)
        return o

    def allowed(self, url, user_agent):
        user_agent = to_unicode(user_agent)
        url = to_unicode(url)
        return self.rp.can_fetch(user_agent, url)
Example #28
File: arana.py Project: eksop/arana
def parse_robotstxt(url):
    """
    Parse robots.txt
    """

    parsed = urlsplit(url)

    if parsed.scheme not in ['http', 'https']:
        return False

    if parsed.netloc == '':
        return False

    robot = RobotFileParser()
    robot.set_url(parsed.scheme + "://" + parsed.netloc + "/robots.txt")
    robot.read()

    return dict(
        allowed=robot.can_fetch('*', url),
        rate=robot.request_rate('*'),
        delay=robot.crawl_delay('*'),
    )
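A usage note that is not part of the original project: request_rate('*') returns None or, on Python 3.6+, a RequestRate named tuple with requests and seconds attributes. A hedged sketch of how a caller might consume the dict returned above (the URL is invented):

info = parse_robotstxt("https://example.org/some/page")  # hypothetical URL
if info and info["allowed"]:
    rate = info["rate"]    # None, or RequestRate(requests=..., seconds=...)
    delay = info["delay"]  # None, or the Crawl-delay value in seconds
    # Prefer an explicit Crawl-delay, otherwise derive a pause from the request rate.
    pause = delay if delay is not None else (rate.seconds / rate.requests if rate else 0)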
Example #29
    def can_fetch(self, url):
        parsed = urlparse(url)

        if parsed.netloc in self.robots_txt_dict:
            # 0 means a robots.txt could not be fetched from that site,
            # so assume everything on it may be crawled
            if self.robots_txt_dict[parsed.netloc] == 0:
                return True
            return self.robots_txt_dict[parsed.netloc].can_fetch('*', url)

        try:
            rp = RobotFileParser()
            rp.set_url('{}://{}/robots.txt'.format(parsed.scheme,
                                                   parsed.netloc))
            rp.read()
            self.robots_txt_dict[parsed.netloc] = rp

            return rp.can_fetch('*', url)
        except:
            print('error getting robots.txt')
            self.robots_txt_dict[parsed.netloc] = 0
            return True
Example #30
def scrape_urls(URL, destfile):
    ''' This function will scrape all of the URLs
        from a given site and store them in a file.
        This function accepts two arguments, the
        target URL and a file name of your choosing
        to store the links in, both as strings.'''

    links = []

    #open the robots.txt file, which contains scraping permissions
    robotparser = RobotFileParser()
    robotparser.set_url(parse.urljoin(URL, '/robots.txt'))
    robotparser.read()

    #determine whether you're allowed to scrape
    if robotparser.can_fetch(useragent='Python-urllib/3.6', url=URL):
        #open the page as a file object and save its contents
        with request.urlopen(URL) as response:
            content = response.read()

        #make a beautifulSoup object from the content, to be read as HTML
        soup = BeautifulSoup(content, 'html.parser')

        #'a' is a link in HTML
        for a in soup.find_all('a'):
            #below is necessary to make sure your link has
            #a web address associated with it
            if a.has_attr('href'):
                links.append(a['href'])

        #only proceed if the "links" list was populated in previous step
        if links:
            #save all of the link URLS in an external file
            with open(destfile, 'w') as outfile:
                for link in links:
                    #newline character makes the links separate!
                    outfile.write(link + '\n')
Example #31
class Crawler():
	
	# Variables
	parserobots = False
	output 	= None
	report 	= False

	config 	= None
	domain	= ""

	exclude = []
	skipext = []
	drop    = []
	
	debug	= False

	tocrawl = set([])
	crawled = set([])
	excluded = set([])
	# TODO also search for window.location={.*?}
	linkregex = re.compile(b'<a href=[\'|"](.*?)[\'"].*?>')

	rp = None
	response_code={}
	nb_url=1 # Number of url.
	nb_rp=0 # Number of url blocked by the robots.txt
	nb_exclude=0 # Number of url excluded by extension or word
	
	output_file = None

	target_domain = ""

	def __init__(self, parserobots=False, output=None, report=False ,domain="", exclude=[], skipext=[], drop=[], debug=False):
		self.parserobots = parserobots
		self.output 	= output
		self.report 	= report
		self.domain 	= domain
		self.exclude 	= exclude
		self.skipext 	= skipext
		self.drop		= drop
		self.debug		= debug

		if self.debug:
			logging.basicConfig(level=logging.DEBUG)

		self.tocrawl = set([domain])

		try:
			self.target_domain = urlparse(domain)[1]
		except:
			raise ("Invalid domain")


		if self.output:
			try:
				self.output_file = open(self.output, 'w')
			except:
				logging.debug ("Output file not available.")
				exit(255)

	def run(self):
		print (config.xml_header, file=self.output_file)

		logging.debug("Start the crawling process")
		self.__crawling()
		logging.debug("Crawling as reach the end of all found link")

		print (config.xml_footer, file=self.output_file)


	def __crawling(self):
		crawling = self.tocrawl.pop()

		url = urlparse(crawling)
		self.crawled.add(crawling)
		request = Request(crawling, headers={"User-Agent":config.crawler_user_agent})
		
		try:
			response = urlopen(request)
		except Exception as e:
			if hasattr(e,'code'):
				if e.code in self.response_code:
					self.response_code[e.code]+=1
				else:
					self.response_code[e.code]=1
			logging.debug ("{1} ==> {0}".format(e, crawling))
			return self.__continue_crawling()

		# Read the response
		try:
			msg = response.read()
			if response.getcode() in self.response_code:
				self.response_code[response.getcode()]+=1
			else:
				self.response_code[response.getcode()]=1
			response.close()

			# Get the last modify date
			if 'last-modified' in response.headers:
				date = response.headers['Last-Modified']
			else:
				date = response.headers['Date']

			date = datetime.strptime(date, '%a, %d %b %Y %H:%M:%S %Z')

		except Exception as e:
			logging.debug ("{1} ===> {0}".format(e, crawling))
			return self.__continue_crawling()


		print ("<url><loc>"+url.geturl()+"</loc><lastmod>"+date.strftime('%Y-%m-%dT%H:%M:%S')+"</lastmod></url>", file=self.output_file)
		if self.output_file:
			self.output_file.flush()

		# Found links
		links = self.linkregex.findall(msg)
		for link in links:
			link = link.decode("utf-8")
			#logging.debug("Found : {0}".format(link))		
			if link.startswith('/'):
				link = 'http://' + url[1] + link
			elif link.startswith('#'):
				link = 'http://' + url[1] + url[2] + link
			elif not link.startswith('http'):
				link = 'http://' + url[1] + '/' + link
			
			# Remove the anchor part if needed
			if "#" in link:
				link = link[:link.index('#')]

			# Drop attributes if needed
			for toDrop in self.drop:
				link=re.sub(toDrop,'',link)

			# Parse the url to get domain and file extension
			parsed_link = urlparse(link)
			domain_link = parsed_link.netloc
			target_extension = os.path.splitext(parsed_link.path)[1][1:]

			if (link in self.crawled):
				continue
			if (link in self.tocrawl):
				continue
			if (link in self.excluded):
				continue
			if (domain_link != self.target_domain):
				continue
			if ("javascript" in link):
				continue
			
			# Count one more URL
			self.nb_url+=1

			# Check if the navigation is allowed by the robots.txt
			if (not self.can_fetch(link)):
				self.exclude_link(link)
				self.nb_rp+=1
				continue

			# Check if the current file extension is allowed or not.
			if (target_extension in self.skipext):
				self.exclude_link(link)
				self.nb_exclude+=1
				continue

			# Check if the current url doesn't contain an excluded word
			if (not self.exclude_url(link)):
				self.exclude_link(link)
				self.nb_exclude+=1
				continue

			self.tocrawl.add(link)

		return self.__continue_crawling()

	def __continue_crawling(self):
		if self.tocrawl:
			self.__crawling()

	def exclude_link(self,link):
		if link not in self.excluded:
			self.excluded.add(link)

	def checkRobots(self):
		if self.domain[len(self.domain)-1] != "/":
			self.domain += "/"
		request = Request(self.domain+"robots.txt", headers={"User-Agent":config.crawler_user_agent})
		self.rp = RobotFileParser()
		self.rp.set_url(self.domain+"robots.txt")
		self.rp.read()

	def can_fetch(self, link):
		try:
			if self.parserobots:
				if self.rp.can_fetch("*", link):
					return True
				else:
					logging.debug ("Crawling of {0} disabled by robots.txt".format(link))
					return False

			if not self.parserobots:
				return True

			return True
		except:
			# On error continue!
			logging.debug ("Error during parsing robots.txt")
			return True

	def exclude_url(self, link):
		for ex in self.exclude:
			if ex in link:
				return False
		return True

	def make_report(self):
		print ("Number of found URL : {0}".format(self.nb_url))
		print ("Number of link crawled : {0}".format(len(self.crawled)))
		if self.parserobots:
			print ("Number of link block by robots.txt : {0}".format(self.nb_rp))
		if self.skipext or self.exclude:
			print ("Number of link exclude : {0}".format(self.nb_exclude))

		for code in self.response_code:
			print ("Nb Code HTTP {0} : {1}".format(code, self.response_code[code]))
			
Example #32
File: crawler.py Project: nchikkam/hk
class SiteMap():
    """ This class composes of all the functionalities needed to generate site_map"""

    def __init__(self, main_page=None, robotrules=True, threadcount=1):
        """ctor that checks args and decides to enable single or multithreaded
           generation of sitemap
        """
        logging.info("Consider Robot.txt ? ==> "+str(robotrules))
        self.robotrules = robotrules
        self.site_map = {}

        self.unvisited = set([])
        self.start_page = None

        self.robot_txt_rules = None

        if main_page:
            self.unvisited.add(main_page)
            try:
                self.start_page = urlparse(main_page).netloc
            except:
                logging.error("Improper URL, Please provide a Valid Url:"+main_page)
                exit(0)

        if self.robotrules == "True":
            try:
                logging.info("robot.txt respected")
                self.robot_txt_rules = RobotFileParser()
                self.robot_txt_rules.set_url(main_page + "/robots.txt")
                self.robot_txt_rules.read()
            except:
                logging.error("Unable to read the robot.txt file")
                self.robotrules = False # error reading robot.txt, ignore it forever

        self.threadcount = int(threadcount)

    def execute(self):
        if self.threadcount <= 1: # if single threaded model is chosen, avoid threading
            self.generate()
        else:
            self.start()          # fasten by multi threads

    def start(self):
        """This creates a pool of chosen limit so as to have the control and
           spawns the main function and waits until process and subsequently
           spawned process finish.
        """
        self.pool = pool.Pool(self.threadcount)
        self.pool.spawn(self.generate_parallels)
        self.pool.join()

        self.generate_reports()


    def generate(self):
        """Non multithreaded model method that crawls until all pages are
           crawled and assets are extracted. Once its done, it creates the
           sitemap and assets json file for the given domain.
        """
        while self.unvisited:
            self.crawl()

        self.generate_reports()

    def generate_reports(self):
        """composes the xml tags with the keys in site_map member which are
           nothing but the sitemap urls
        """
        header = """<?xml version="1.0" encoding="UTF-8"?>
                            <urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"
                            xmlns:xhtml="http://www.w3.org/1999/xhtml"
                            xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
                            xsi:schemaLocation="http://www.sitemaps.org/schemas/sitemap/0.9
                            http://www.sitemaps.org/schemas/sitemap/0.9/sitemap.xsd">
                        """
        footer = """\n</urlset>\n"""
        entry = "\t<url><loc>%s</loc></url>\n"

        xml = header
        for url in self.site_map.keys():
            xml += entry % (url)

        xml += footer
        name = self.start_page.replace(".", "_")
        self.create_file("%s.xml" % (name), xml)
        self.create_file("%s_assets.json" % (name), json.dumps(self.site_map, indent=2, sort_keys=True))


    def generate_parallels(self):
        """
            This method is similar to recursive in a way that crawls pages and clears
            the queue, which is self.unvisited. It stops when there are no urls to crawl
            and all threads in pool are empty i.e they are not active anymore due to
            finishing of crawling. Since its spawning a new thread and not calling
            directly, it is a nice way to go about it for now :)
            [Note:] There is a limit for recursion in Python and it can be increased by
            sys.setrecursionlimit(1500)

            An assumption has been made for this implementation that a website that has
            more than 500 nested links needs a bit higher design in terms to store the
            assets which might go above a hundred MB. In such cases, this can just be
            converted into a loop. More over, there is no extra stack variables.
        """
        self.crawl()
        while len(self.unvisited) > 0 and not self.pool.full():
            self.pool.spawn(self.generate_parallels)

    def create_file(self, file, content):
        """writes the given content to the file"""
        f = open(file, 'w')
        f.write(content)
        f.close()

    def compose_url_from_href(self, page, href):
        """composes a proper url from domainlink and intralinks with in the page"""
        url = urlparse(page)

        if href.startswith('/'):
            return "http://%s%s"%(url.netloc, href)
        elif href.startswith('#'):
            return "http://%s%s%s"%(url.netloc, url.path, href)
        elif href.startswith('./'):
            return "http://%s%s"%(url.netloc, href[1:])
        elif not href.startswith('http'):
            return "http://" + url.netloc + '/' + href
        elif href.endswith('/'):
            return href[:-1]

        return href

    def get_out_going_links(self, page, html_body):
        """extracts all the outgoing links and adds links that belong to
           main page domain for further crawling if they are not crawled yet
           This avoids:
            - links that are .zip files
            - links mentioned in href that are javascript methods
            - mailto: links

        """
        soup = BeautifulSoup(html_body, "html.parser")
        valid_links_for_this_page = []
        for a in soup.find_all('a', href=True):

            href = a['href'].lower()
            href = self.compose_url_from_href(page, href)

            # clean the href so that it will have legitimate urls instead of #cluttered ones and q=param prints
            href = urldefrag(href)[0]  # skip intra links [this took time to find out !] ##1
            # remove query params as only the path matters
            if href.find('?') != -1:
                href = href[:href.find('?')]  ##2

            new_page = urlparse(href)

            # add to the queue only it it doesn't cause a cycle
            # assumption: if a link ends with domain.com, assuming it can be crawled to make sitemap complete
            if  not str(new_page.netloc).endswith(self.start_page):          # doesn't belong to domain
                valid_links_for_this_page.append(href)
                continue

            if  self.robot_allows(href) and \
                not href in self.site_map            and \
                not href in self.unvisited                  and \
                not 'javascript:' in href           and \
                not 'mailto:' in href:
                if not ( href.endswith(".zip") or
                             href.endswith(".gz") or
                             href.endswith(".gzip") or
                             href.endswith(".tar") or
                             href.endswith(".bz2") or
                             href.endswith(".jpg") or
                             href.endswith(".png") or
                             href.endswith(".exe")
                         ):
                    self.unvisited.add(href)
                valid_links_for_this_page.append(href)

        return valid_links_for_this_page

    def get_assets(self, page, headers, html_body):
        """A nice feature of response header is that it reports the last-modified
           time of the link on the server. If we are doing regular crawling, we can
           avoid if the link is not updates since the last time. This method is
           useful for indexing the data so as to minimize the crawling effort to
           save execution time.
           It updates the site_map dictionary with the links, css, images and scripts
        """
        if 'last-modified' in headers:
            date = headers['Last-Modified']
        else:
            date = headers['Date']

        soup = BeautifulSoup(html_body, "html.parser")
        img = soup.findAll("img")
        css = soup.findAll("link", {"rel": "stylesheet"})
        js = soup.findAll('script')

        self.site_map[page] = {
            'date': date,
            'links': self.get_out_going_links(page, html_body),
            'css': [c['href'] for c in css],
            'img': [i['src'] for i in img],
            'js': [x.get('src', 'inline jscode') for x in js]
        }


    def crawl(self):
        """This actually opens the url and calls the assets method """
        if len(self.unvisited) <= 0:
            return
        page = self.unvisited.pop()
        if page in self.site_map:
            return
        logging.info("Starting to Crawl Page: " + page)

        try:
            response = self.access_page(page)
            if (response.status_code != 200):
                return None

            html_body = response.text

            self.get_assets(page, response.headers, html_body)
        except:
            logging.error("Issue while opening url: %s" + page)
            return None
        logging.debug("Crawled Pages: {}".format(len(self.site_map)))

    def access_page(self, url):
        """accesses the url from the server. This method was created
            to enable mock tests.
        """
        return requests.get(url)

    def get_site_map(self):
        """exposes site_map"""
        return self.site_map

    def set_start_page(self, url):
        """sets the start page for the crawler"""
        self.start_page = url

    def robot_allows(self, link):
        """method to check if link can be accessed as per robot rules"""
        if not self.robotrules: return True
        try:
            if self.robot_txt_rules.can_fetch("*", link):
                    return True
            return False
        except:
            return True
Example #33
# Copyright(C) 2018 刘珅珅
# Environment: python 3.6.4
# Date: 2018.9.8
# robots.txt protocol test
from urllib.robotparser import RobotFileParser
url = 'http://www.jianshu.com/robots.txt'
rp = RobotFileParser(url)
rp.read()
# In Scrapy, disable the robots middleware if you want to crawl pages that robots.txt disallows
print(rp.can_fetch('*', 'http://www.jianshu.com/p/b67554025d7d'))  # False, crawling is not allowed
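To illustrate the remark in the comment above (assuming a standard Scrapy project), robots.txt enforcement in Scrapy is controlled by the ROBOTSTXT_OBEY setting consumed by its RobotsTxtMiddleware:

# settings.py (sketch): stop Scrapy from filtering requests that robots.txt disallows.
ROBOTSTXT_OBEY = False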
Example #34
class Crawler():

	# Variables
	parserobots = False
	output 	= None
	report 	= False
	config 	= None
	domain	= ""
	
	exclude = []
	skipext = []
	drop    = []

	debug	= False

	tocrawl = set([])
	crawled = set([])
	excluded = set([])

	marked = {}

	not_parseable_ressources = (".epub", ".mobi", ".docx", ".doc", ".opf", ".7z", ".ibooks", ".cbr", ".avi", ".mkv", ".mp4", ".jpg", ".jpeg", ".png", ".gif" ,".pdf", ".iso", ".rar", ".tar", ".tgz", ".zip", ".dmg", ".exe")

	# TODO also search for window.location={.*?}
	linkregex = re.compile(b'<a [^>]*href=[\'|"](.*?)[\'"][^>]*?>')
	imageregex = re.compile (b'<img [^>]*src=[\'|"](.*?)[\'"].*?>')

	rp = None
	response_code={}
	nb_url=1 # Number of url.
	nb_rp=0 # Number of url blocked by the robots.txt
	nb_exclude=0 # Number of url excluded by extension or word

	output_file = None

	target_domain = ""
	scheme		  = ""

	def __init__(self, parserobots=False, output=None, report=False ,domain="",
				 exclude=[], skipext=[], drop=[], debug=False, verbose=False, images=False):
		self.parserobots = parserobots
		self.output 	= output
		self.report 	= report
		self.domain 	= domain
		self.exclude 	= exclude
		self.skipext 	= skipext
		self.drop		= drop
		self.debug		= debug
		self.verbose    = verbose
		self.images     = images

		if self.debug:
			log_level = logging.DEBUG
		elif self.verbose:
			log_level = logging.INFO
		else:
			log_level = logging.ERROR

		logging.basicConfig(level=log_level)

		self.tocrawl = set([self.clean_link(domain)])

		try:
			url_parsed = urlparse(domain)
			self.target_domain = url_parsed.netloc
			self.scheme = url_parsed.scheme
		except:
			logging.error("Invalide domain")
			raise ("Invalid domain")

		if self.output:
			try:
				self.output_file = open(self.output, 'w')
			except:
				logging.error ("Output file not available.")
				exit(255)

	def run(self):
		print(config.xml_header, file=self.output_file)

		if self.parserobots:
			self.check_robots()

		logging.info("Start the crawling process")

		while len(self.tocrawl) != 0:
			self.__crawling(first,domainname,all_link)

		logging.info("Crawling has reached end of all found links")

		print (config.xml_footer, file=self.output_file)


	def __crawling(self,frst,dmname,all_links):
		crawling = self.tocrawl.pop()
		if frst==0:
			dmname=obtaindomain(crawling)
			frst=1
		url = urlparse(crawling)
		self.crawled.add(crawling)
		logging.info("Crawling #{}: {}".format(len(self.crawled), url.geturl()))
		request = Request(crawling, headers={"User-Agent":config.crawler_user_agent})

		# Ignore resources listed in not_parseable_ressources
		# This avoids downloading files like PDFs, etc.
		if not url.path.endswith(self.not_parseable_ressources):
			try:
				response = urlopen(request)
			except Exception as e:
				if hasattr(e,'code'):
					if e.code in self.response_code:
						self.response_code[e.code]+=1
					else:
						self.response_code[e.code]=1

					# Track marked URLs for reporting
					if self.report:
						if e.code in self.marked:
							self.marked[e.code].append(crawling)
						else:
							self.marked[e.code] = [crawling]

				logging.debug ("{1} ==> {0}".format(e, crawling))
				return self.__continue_crawling()
		else:
			logging.debug("Ignore {0} content might be not parseable.".format(crawling))
			response = None

		# Read the response
		if response is not None:
			try:
				msg = response.read()
				if response.getcode() in self.response_code:
					self.response_code[response.getcode()]+=1
				else:
					self.response_code[response.getcode()]=1

				response.close()

				# Get the last modify date
				if 'last-modified' in response.headers:
					date = response.headers['Last-Modified']
				else:
					date = response.headers['Date']

				date = datetime.strptime(date, '%a, %d %b %Y %H:%M:%S %Z')

			except Exception as e:
				logging.debug ("{1} ===> {0}".format(e, crawling))
				return None
		else:
			# Response is None, content not downloaded; just continue and add
			# the link to the sitemap
			msg = "".encode( )
			date = None

		# Image sitemap enabled ?
		image_list = "";
		if self.images:
			# Search for images in the current page.
			images = self.imageregex.findall(msg)
			for image_link in list(set(images)):
				image_link = image_link.decode("utf-8", errors="ignore")

				# Ignore link starting with data:
				if image_link.startswith("data:"):
					continue

				# If path start with // get the current url scheme
				if image_link.startswith("//"):
					image_link = url.scheme + ":" + image_link
				# Append domain if not present
				elif not image_link.startswith(("http", "https")):
					if not image_link.startswith("/"):
						image_link = "/{0}".format(image_link)
					image_link = "{0}{1}".format(self.domain.strip("/"), image_link.replace("./", "/"))

				# Ignore image if path is in the exclude_url list
				if not self.exclude_url(image_link):
					continue

				# Ignore other domain images
				image_link_parsed = urlparse(image_link)
				if image_link_parsed.netloc != self.target_domain:
					continue


				# Only include the image if robots.txt allows fetching it
				if self.can_fetch(image_link):
					logging.debug("Found image : {0}".format(image_link))
					image_list = "{0}<image:image><image:loc>{1}</image:loc></image:image>".format(image_list, self.htmlspecialchars(image_link))

		# Last mod fetched ?
		lastmod = ""
		if date:
			lastmod = "<lastmod>"+date.strftime('%Y-%m-%dT%H:%M:%S+00:00')+"</lastmod>"

		print ("<url><loc>"+self.htmlspecialchars(url.geturl())+"</loc>" + lastmod + image_list + "</url>", file=self.output_file)
		tempdom=obtaindomain(self.htmlspecialchars(url.geturl()))
		templink=self.htmlspecialchars(url.geturl())
		if tempdom==dmname:
			if templink not in all_links:
				outputprint(templink)
				all_links.append(templink)
		if self.output_file:
			self.output_file.flush()

		# Found links
		links = self.linkregex.findall(msg)
		for link in links:
			link = link.decode("utf-8", errors="ignore")
			link = self.clean_link(link)
			logging.debug("Found : {0}".format(link))

			if link.startswith('/'):
				link = url.scheme + '://' + url[1] + link
			elif link.startswith('#'):
				link = url.scheme + '://' + url[1] + url[2] + link
			elif link.startswith(("mailto", "tel")):
				continue
			elif not link.startswith(('http', "https")):
				link = url.scheme + '://' + url[1] + '/' + link

			# Remove the anchor part if needed
			if "#" in link:
				link = link[:link.index('#')]

			# Drop attributes if needed
			for toDrop in self.drop:
				link=re.sub(toDrop,'',link)

			# Parse the url to get domain and file extension
			parsed_link = urlparse(link)
			domain_link = parsed_link.netloc
			target_extension = os.path.splitext(parsed_link.path)[1][1:]

			if link in self.crawled:
				continue
			if link in self.tocrawl:
				continue
			if link in self.excluded:
				continue
			if domain_link != self.target_domain:
				continue
			if parsed_link.path in ["", "/"]:
				continue
			if "javascript" in link:
				continue
			if self.is_image(parsed_link.path):
				continue
			if parsed_link.path.startswith("data:"):
				continue

			# Count one more URL
			self.nb_url+=1

			# Check if the navigation is allowed by the robots.txt
			if not self.can_fetch(link):
				self.exclude_link(link)
				self.nb_rp+=1
				continue

			# Check if the current file extension is allowed or not.
			if (target_extension in self.skipext):
				self.exclude_link(link)
				self.nb_exclude+=1
				continue

			# Check if the current url doesn't contain an excluded word
			if (not self.exclude_url(link)):
				self.exclude_link(link)
				self.nb_exclude+=1
				continue

			self.tocrawl.add(link)

		return None

	def clean_link(self, link):
		l = urlparse(link)
		l_res = list(l)
		l_res[2] = l_res[2].replace("./", "/")
		l_res[2] = l_res[2].replace("//", "/")
		return urlunparse(l_res)

	def is_image(self, path):
		 mt,me = mimetypes.guess_type(path)
		 return mt is not None and mt.startswith("image/")

	def __continue_crawling(self):
		if self.tocrawl:
			self.__crawling(first,domainname,all_link)

	def exclude_link(self,link):
		if link not in self.excluded:
			self.excluded.add(link)

	def check_robots(self):
		robots_url = urljoin(self.domain, "robots.txt")
		self.rp = RobotFileParser()
		self.rp.set_url(robots_url)
		self.rp.read()

	def can_fetch(self, link):
		try:
			if self.parserobots:
				if self.rp.can_fetch("*", link):
					return True
				else:
					logging.debug ("Crawling of {0} disabled by robots.txt".format(link))
					return False

			if not self.parserobots:
				return True

			return True
		except:
			# On error continue!
			logging.debug ("Error during parsing robots.txt")
			return True

	def exclude_url(self, link):
		for ex in self.exclude:
			if ex in link:
				return False
		return True

	def htmlspecialchars(self, text):
		return text.replace("&", "&amp;").replace('"', "&quot;").replace("<", "&lt;").replace(">", "&gt;")

	def make_report(self):
		print ("Number of found URL : {0}".format(self.nb_url))
		print ("Number of link crawled : {0}".format(len(self.crawled)))
		if self.parserobots:
			print ("Number of link block by robots.txt : {0}".format(self.nb_rp))
		if self.skipext or self.exclude:
			print ("Number of link exclude : {0}".format(self.nb_exclude))

		for code in self.response_code:
			print ("Nb Code HTTP {0} : {1}".format(code, self.response_code[code]))

		for code in self.marked:
			print ("Link with status {0}:".format(code))
			for uri in self.marked[code]:
				print ("\t- {0}".format(uri))
Example #35
def robot_can_fetch(robots_txt_content, url):
    parser = RobotFileParser()
    parser.parse(robots_txt_content.splitlines())
    return parser.can_fetch(USER_AGENT, urlparse(url).path)
Example #36
File: robot.py Project: mattgu74/ic05
class _RobotAllowAll:
	def can_fetch(self, a,b):
		return True
class Robot:
	def __init__(self):
		self.__robots = {}

	def can_fetch(self, url):
		parse = urllib.parse.urlparse(url)
		hostname = parse.hostname
		try:
			robot = self.__robots[hostname]
		except Exception:
			roboturl = urllib.parse.urlunparse((parse.scheme,parse.netloc,"robots.txt","","",""))
			robot = RobotFileParser(roboturl)
			try:
				robot.read()
			except Exception:
				robot = _RobotAllowAll()
			self.__robots[hostname] = robot
		return robot.can_fetch("*", url)



if __name__ == "__main__":

	r = RobotFileParser("http://www.letudiant.fr", {})
	r.read()
	print(r.can_fetch("*", "http://www.letudiant.fr/"))
Example #37
File: crawler.py Project: danellis/cosmo
class Crawler(object):
    def __init__(self, database, fetcher, analyzer, verbose=False):
        self.database = database
        self.fetcher = fetcher
        self.analyzer = analyzer
        self.verbose = verbose
        self.queue = set()
        self.robot_parser = RobotFileParser()

    def crawl(self, url):
        """Begin recursively crawling pages starting from the given URL.

        :param url: Starting URL
        :returns: None
        """
        if self.database.is_page_stored(url):
            print("Page is already crawled. Use --flush to flush the database file.", file=sys.stderr)
        else:
            # Because crawling is restricted to pages on the same domain, the
            # robots.txt file can be loaded once at the beginning of the crawl
            self.load_robots_file(url)

            # Add the starting URL to the queue of pages to be crawled, and
            # then keep crawling while there are still URLs in the queue
            self.queue.add(url)
            while len(self.queue) > 0:
                self.crawl_one(self.queue.pop())

    def crawl_one(self, url):
        """Fetch a single page and analyze it for links. The found triples are
        stored in the database, and found links that should be crawled are
        added to the queue.

        :param url: The page to fetch and analyze
        :returns: None
        """
        if self.verbose:
            print(url, file=sys.stderr)

        status, html = self.fetcher.fetch(url)

        if status is None:
            # The status code will be None if retrieval failed
            print("Failed to get {}".format(url), file=sys.stderr)
        else:
            # Search for links and images in the page, and get them as triples
            # of (page URL, link type, link URL)
            triples = self.analyzer.analyze(url, html)

            self.database.store_triples(triples)

            # Any linked URLs that are eligible for crawling are added to the
            # pending crawl queue
            for page_url, link_type, link_url in triples:
                if self.should_crawl(page_url, link_type, link_url):
                    self.queue.add(link_url)

    def should_crawl(self, page_url, link_type, link_url):
        """Determine whether a URL should be crawled.

        :param page_url: The page the link came from.
        :param link_type: The type of link URL.
        :param link_url: The link URL to test.
        :returns: True if the link URL should be crawled, otherwise False.
        """
        # Only HTML pages should be crawled, not other media
        if link_type not in ('page', 'iframe'):
            return False

        # The link should be on the same domain as the page it's linked from
        if not self.have_same_domain(page_url, link_url):
            return False

        # Fetching the link URL should be permitted by robots.txt
        if not self.robot_parser.can_fetch('Cosmo', link_url):
            return False

        # The linked page should not have been crawled already
        if self.database.is_page_stored(link_url):
            return False

        return True

    def have_same_domain(self, url1, url2):
        """Test whether two URLs have the same hostname and port.

        :returns: True if they do, otherwise False
        """
        return urlparse(url1).netloc == urlparse(url2).netloc

    def load_robots_file(self, url):
        """Load the /robots.txt file for the given URL by reusing the scheme
        and authority parts.

        :param url: The URL from which to take the scheme and authority parts.
        :returns: None
        """
        # Create a new URL with the same scheme, host and port, but with a
        # path of /robots.txt
        parsed = urlparse(url)
        robots_url = urlunparse((parsed.scheme, parsed.netloc, '/robots.txt', '', '', ''))

        # Load the robots.txt file using the requests library, because we need
        # to specify the User-Agent header. I noticed on a CloudFlare-fronted
        # site that it returns a 403 for /robots.txt if the user agent is
        # Python-urllib, but 200 if it's Cosmo.
        status, robots_file = self.fetcher.fetch(robots_url)
        if status in (401, 403):
            self.robot_parser.disallow_all = True
        elif status >= 400:
            self.robot_parser.allow_all = True
        else:
            self.robot_parser.parse(robots_file.splitlines())
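For comparison, the 401/403 handling above mirrors what RobotFileParser.read() itself does when it downloads robots.txt via urllib (a sketch modelled on CPython's urllib.robotparser, not code from this project):

from urllib.error import HTTPError
from urllib.request import urlopen
from urllib.robotparser import RobotFileParser

def read_like_stdlib(parser: RobotFileParser, robots_url: str) -> None:
    # Approximation of RobotFileParser.read(): 401/403 mean "deny everything",
    # any other 4xx means "allow everything", otherwise parse the body.
    try:
        raw = urlopen(robots_url).read()
    except HTTPError as err:
        if err.code in (401, 403):
            parser.disallow_all = True
        elif 400 <= err.code < 500:
            parser.allow_all = True
    else:
        parser.parse(raw.decode("utf-8").splitlines())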