Example #1
0
def zap(url):
    """Extract crawlable links from archive.org, robots.txt and sitemap.xml.

    Side effects: adds discovered URLs to the module-level ``internal`` and
    ``robots`` sets. Relies on globals defined elsewhere in the file
    (``args``, ``main_url``, ``host``, ``run``, ``good``, ``verb``, ``get``,
    ``xmlParser``).
    """
    if args.archive:
        from plugins.wayback import timeMachine
        print('%s Fetching URLs from archive.org' % run)
        # NOTE(review): removed a dead `if False:` branch that called
        # timeMachine(domain, 'domain') — only the host branch ever ran.
        archived_urls = timeMachine(host, 'host')
        # -1 presumably discounts a header/self entry in the wayback result
        # — confirm against plugins.wayback.
        print('%s Retrieved %i URLs from archive.org' % (good, len(archived_urls) - 1))
        # Use a distinct loop variable: the original shadowed the `url`
        # parameter, so later robots.txt/sitemap.xml requests hit the
        # last archived URL instead of the target.
        for archived_url in archived_urls:
            verb('Internal page', archived_url)
            internal.add(archived_url)
    response = get(url + '/robots.txt', verify=False).text  # makes request to robots.txt
    if '<body' not in response:  # making sure robots.txt isn't some fancy 404 page
        matches = findall(r'Allow: (.*)|Disallow: (.*)', response)
        if matches:
            for match in matches:  # match is an (allow, disallow) tuple
                match = ''.join(match)  # one item is always empty, so merge both
                if '*' not in match:  # skip wildcard rules
                    # Don't overwrite `url`: the original assignment here made
                    # the sitemap.xml request below use the wrong base URL.
                    robot_url = main_url + match
                    internal.add(robot_url)  # queue for crawling
                    robots.add(robot_url)  # record as robots.txt-sourced
            print('%s URLs retrieved from robots.txt: %s' % (good, len(robots)))
    response = get(url + '/sitemap.xml', verify=False).text  # makes request to sitemap.xml
    if '<body' not in response:  # same fancy-404 guard for sitemap.xml
        matches = xmlParser(response)
        if matches:  # if there are any matches
            print('%s URLs retrieved from sitemap.xml: %s' % (good, len(matches)))
            for match in matches:
                # Log the entry actually being added (original logged the
                # stale `url` variable on every iteration).
                verb('Internal page', match)
                internal.add(match)  # queue sitemap entry for crawling
Example #2
0
def zap(url):
    """Extract links from robots.txt and sitemap.xml.

    Side effects: adds discovered URLs to the module-level ``internal`` and
    ``robots`` sets. Relies on globals defined elsewhere in the file
    (``args``, ``main_url``, ``host``, ``run``, ``good``, ``verb``, ``get``,
    ``xmlParser``).
    """
    if args.archive:
        from plugins.wayback import timeMachine
        print('%s Fetching URLs from archive.org' % run)
        archived_urls = timeMachine(host, 'host')
        # -1 presumably discounts a header/self entry in the wayback result
        # — confirm against plugins.wayback.
        print('%s Retrieved %i URLs from archive.org' %
              (good, len(archived_urls) - 1))
        # Use a distinct loop variable: the original shadowed the `url`
        # parameter, so the robots.txt/sitemap.xml requests below were
        # issued against the last archived URL instead of the target.
        for archived_url in archived_urls:
            verb('Internal page', archived_url)
            internal.add(archived_url)
    # Makes request to robots.txt
    response = get(url + '/robots.txt', verify=False).text
    # Making sure robots.txt isn't some fancy 404 page
    if '<body' not in response:
        matches = findall(r'Allow: (.*)|Disallow: (.*)', response)
        if matches:
            # Iterating over the matches; match is an (allow, disallow) tuple
            for match in matches:
                # One item in match will always be empty, so merge both
                match = ''.join(match)
                # Skip rules that use a wildcard
                if '*' not in match:
                    # Don't overwrite `url`: the original assignment here
                    # made the sitemap.xml request below use the wrong base.
                    robot_url = main_url + match
                    # Queue for crawling
                    internal.add(robot_url)
                    # Record as robots.txt-sourced
                    robots.add(robot_url)
            print('%s URLs retrieved from robots.txt: %s' %
                  (good, len(robots)))
    # Makes request to sitemap.xml
    response = get(url + '/sitemap.xml', verify=False).text
    # Same fancy-404 guard for sitemap.xml
    if '<body' not in response:
        matches = xmlParser(response)
        if matches:  # if there are any matches
            print('%s URLs retrieved from sitemap.xml: %s' %
                  (good, len(matches)))
            for match in matches:
                # Log the entry actually being added (original logged the
                # stale `url` variable on every iteration).
                verb('Internal page', match)
                # Queue sitemap entry for crawling
                internal.add(match)