def test_url_join_case01(self):
    """Joining onto a base whose path ends in a file replaces that file.

    Both a relative ('abc.html') and a root-absolute ('/abc.html')
    argument must resolve to the same URL here.
    """
    base = URL('http://w3af.com/foo.bar')

    relative_result = base.url_join('abc.html').url_string
    self.assertEqual(relative_result, u'http://w3af.com/abc.html')

    absolute_result = base.url_join('/abc.html').url_string
    self.assertEqual(absolute_result, u'http://w3af.com/abc.html')
def test_url_join_case03(self):
    """Root-absolute vs relative joins against a directory-style base URL."""
    base = URL('http://w3af.com/def/jkl/')

    # A leading slash resolves from the host root.
    from_root = base.url_join('/def/abc.html').url_string
    self.assertEqual(from_root, u'http://w3af.com/def/abc.html')

    # Without the leading slash the path is appended to the base directory.
    appended = base.url_join('def/abc.html').url_string
    self.assertEqual(appended, u'http://w3af.com/def/jkl/def/abc.html')
def do_follow_redirect(self, req, fp, code, msg, headers):
    """
    Follow the redirect indicated by an HTTP 3xx response.

    Implementation note: To avoid the server sending us into an
    infinite loop, the request object needs to track what URLs we have
    already seen. Do this by adding a handler-specific attribute to
    the Request object.

    :param req: The urllib2 Request that triggered the redirect
    :param fp: File-like object with the 3xx response body
    :param code: The HTTP response status code
    :param msg: The HTTP response reason phrase
    :param headers: The HTTP response headers
    :return: The response obtained by opening the redirect target
    """
    # Check if we can redirect according to the RFC
    if not self.redirect_allowed_by_rfc(req, code):
        raise self.create_error_from_parts(req, code, msg, headers, fp)

    # Some servers (incorrectly) return multiple Location headers
    # (so probably same goes for URI). Use first header.
    if LOCATION in headers:
        new_url_raw = headers.getheaders(LOCATION)[0]
    elif URI in headers:
        new_url_raw = headers.getheaders(URI)[0]
    else:
        raise self.create_error_from_parts(req, code, msg, headers, fp)

    # Calculate the target URL
    try:
        current_url = URL(req.get_full_url())
        # FIX: join once and reuse the result; the previous code called
        # url_join() twice with the same argument, duplicating the URL
        # parsing work for no benefit.
        new_url_obj = current_url.url_join(new_url_raw)
        new_url_str = new_url_obj.url_string
    except ValueError:
        raise self.create_error_from_parts(req, code, msg, headers, fp)

    # For security reasons we do not allow redirects to protocols
    # other than HTTP or HTTPS
    new_url_lower = new_url_str.lower()
    if not new_url_lower.startswith(('http://', 'https://')):
        raise self.create_error_from_parts(req, code, msg, headers, fp)

    # XXX Probably want to forget about the state of the current
    # request, although that might interact poorly with other
    # handlers that also use handler-specific request attributes
    new_request = self.create_redirect_request(req, fp, code, msg,
                                               headers, new_url_str,
                                               new_url_obj)

    # loop detection
    # .redirect_dict has a key url if url was previously visited.
    if hasattr(req, 'redirect_dict'):
        visited = new_request.redirect_dict = req.redirect_dict
        if (visited.get(new_url_str, 0) >= self.max_repeats
                or len(visited) >= self.max_redirections):
            raise self.create_error_from_parts(req, code, msg,
                                               headers, fp)
    else:
        visited = new_request.redirect_dict = req.redirect_dict = {}

    visited[new_url_str] = visited.get(new_url_str, 0) + 1

    # Don't close the fp until we are sure that we won't use it
    # with HTTPError.
    fp.read()
    fp.close()

    return self.parent.open(new_request, timeout=req.timeout)
def test_url_join_case01(self):
    """Relative and root-absolute joins both replace a trailing file name."""
    base = URL('http://w3af.com/foo.bar')

    # Both spellings must land on the same resolved URL.
    for join_arg in ('abc.html', '/abc.html'):
        self.assertEqual(base.url_join(join_arg).url_string,
                         u'http://w3af.com/abc.html')
def do_follow_redirect(self, req, fp, code, msg, headers):
    """
    Follow the redirect indicated by an HTTP 3xx response.

    :param req: The urllib2 Request that triggered the redirect
    :param fp: File-like object with the 3xx response body
    :param code: The HTTP response status code
    :param msg: The HTTP response reason phrase
    :param headers: The HTTP response headers
    :return: The response obtained by opening the redirect target
    """
    # Check if we can redirect according to the RFC
    if not self.redirect_allowed_by_rfc(req, code):
        raise self.create_error_from_parts(req, code, msg, headers, fp)

    # Some servers (incorrectly) return multiple Location headers
    # (so probably same goes for URI). Use first header.
    if LOCATION in headers:
        new_url_raw = headers.getheaders(LOCATION)[0]
    elif URI in headers:
        new_url_raw = headers.getheaders(URI)[0]
    else:
        raise self.create_error_from_parts(req, code, msg, headers, fp)

    # Calculate the target URL
    try:
        current_url = URL(req.get_full_url())
        # FIX: join once and reuse the result; the previous code called
        # url_join() twice with the same argument, duplicating the URL
        # parsing work for no benefit.
        new_url_obj = current_url.url_join(new_url_raw)
        new_url_str = new_url_obj.url_string
    except ValueError:
        raise self.create_error_from_parts(req, code, msg, headers, fp)

    # For security reasons we do not allow redirects to protocols
    # other than HTTP or HTTPS
    new_url_lower = new_url_str.lower()
    if not new_url_lower.startswith(('http://', 'https://')):
        raise self.create_error_from_parts(req, code, msg, headers, fp)

    # XXX Probably want to forget about the state of the current
    # request, although that might interact poorly with other
    # handlers that also use handler-specific request attributes
    new_request = self.create_redirect_request(req, fp, code, msg,
                                               headers, new_url_str,
                                               new_url_obj)

    # loop detection
    # .redirect_dict has a key url if url was previously visited.
    if hasattr(req, 'redirect_dict'):
        visited = new_request.redirect_dict = req.redirect_dict
        if (visited.get(new_url_str, 0) >= self.max_repeats
                or len(visited) >= self.max_redirections):
            raise self.create_error_from_parts(req, code, msg,
                                               headers, fp)
    else:
        visited = new_request.redirect_dict = req.redirect_dict = {}

    visited[new_url_str] = visited.get(new_url_str, 0) + 1

    # Don't close the fp until we are sure that we won't use it
    # with HTTPError.
    fp.read()
    fp.close()

    return self.parent.open(new_request, timeout=req.timeout)
def test_redirect_uri_relative(self):
    """A relative `uri` header value is joined against the response URL."""
    ws = web_spider()

    body = ''
    url = URL('http://www.w3af.org')
    redir_url = '/redir'
    headers = Headers([('content-type', 'text/html'),
                       ('uri', redir_url)])
    resp = HTTPResponse(200, body, headers, url, url)

    url_generator = ws._headers_url_generator(resp, None)
    extracted_data = list(url_generator)

    expected_data = [(url.url_join(redir_url), None, resp, False)]
    self.assertEqual(extracted_data, expected_data)
class xssed_dot_com(InfrastructurePlugin): """ Search in xssed.com to find xssed pages. :author: Nicolas Crocfer ([email protected]) :author: Raul Siles: set "." in front of the root domain to limit search """ def __init__(self): InfrastructurePlugin.__init__(self) # # Depend on xssed.com # self.XSSED_URL = URL('http://www.xssed.com') self.UNFIXED = 'UNFIXED' self.XSSED_DOMAIN_RE = re.compile("<a href='(/mirror/\d*/)'" " target='_blank'>") self.XSSED_URL_RE = re.compile('URL: (.*?)</th>') @runonce(exc_class=RunOnce) def discover(self, fuzzable_request): """ Search in xssed.com and parse the output. :param fuzzable_request: A fuzzable_request instance that contains (among other things) the URL to test. """ target_domain = fuzzable_request.get_url().get_root_domain() target_path = "/search?key=.%s" % target_domain check_url = self.XSSED_URL.url_join(target_path) try: response = self._uri_opener.GET(check_url) except BaseFrameworkException, e: msg = ('An exception was raised while running xssed_dot_com' ' plugin. Exception: "%s".') om.out.debug(msg % e) else:
class xssed_dot_com(InfrastructurePlugin): """ Search in xssed.com to find xssed pages. :author: Nicolas Crocfer ([email protected]) :author: Raul Siles: set "." in front of the root domain to limit search """ def __init__(self): InfrastructurePlugin.__init__(self) # # Depend on xssed.com # self.XSSED_URL = URL("http://www.xssed.com") self.UNFIXED = "UNFIXED" self.XSSED_DOMAIN_RE = re.compile("<a href='(/mirror/\d*/)'" " target='_blank'>") self.XSSED_URL_RE = re.compile("URL: (.*?)</th>") @runonce(exc_class=RunOnce) def discover(self, fuzzable_request): """ Search in xssed.com and parse the output. :param fuzzable_request: A fuzzable_request instance that contains (among other things) the URL to test. """ target_domain = fuzzable_request.get_url().get_root_domain() target_path = "/search?key=.%s" % target_domain check_url = self.XSSED_URL.url_join(target_path) try: response = self._uri_opener.GET(check_url) except BaseFrameworkException, e: msg = "An exception was raised while running xssed_dot_com" ' plugin. Exception: "%s".' om.out.debug(msg % e) else:
def test_url_join_case07(self):
    """An absolute URL as the join argument completely replaces the base."""
    base = URL('http://w3af.com/')
    joined = base.url_join('http://w3af.org:8080/abc.html')
    self.assertEqual(joined.url_string, u'http://w3af.org:8080/abc.html')
def test_url_join_case05(self):
    """A non-ASCII relative segment is appended to the base path intact."""
    base = URL('http://w3af.com/def/')
    joined = base.url_join(u'тест')
    self.assertEqual(joined.url_string, u'http://w3af.com/def/тест')
def test_url_join_case07(self):
    """Joining a fully-qualified URL ignores the base entirely."""
    base_url = URL('http://w3af.com/')
    result = base_url.url_join('http://w3af.org:8080/abc.html').url_string
    self.assertEqual(result, u'http://w3af.org:8080/abc.html')
def test_url_join_case05(self):
    """Unicode path segments survive url_join unchanged."""
    base_url = URL('http://w3af.com/def/')
    result = base_url.url_join(u'тест').url_string
    self.assertEqual(result, u'http://w3af.com/def/тест')
def test_url_join_with_query_string(self):
    """url_join drops the base query string and resolves '..' segments."""
    base = URL('http://w3af.com/abc/?id=1')
    joined = base.url_join('/../def/').url_string
    self.assertEqual(joined, u'http://w3af.com/def/')
def do_follow_redirect(self, request, response):
    """
    Follow the redirect indicated by the HTTP response, or return the
    original response untouched when the redirect can not / should not
    be followed.

    Implementation note: To avoid the server sending us into an
    infinite loop, the request object needs to track what URLs we have
    already seen. Do this by adding a handler-specific
    attribute to the Request object.

    :param request: The request that produced `response`
    :param response: The HTTP response (expected to carry a redirect)
    :return: The response for the redirect target, or `response` itself
             when following is aborted
    """
    headers = response.info()

    #
    # Some servers incorrectly return multiple `Location` headers
    # (so probably same goes for URI). Use first header
    #
    if LOCATION in headers:
        new_url_raw = headers.getheaders(LOCATION)[0]
    elif URI in headers:
        new_url_raw = headers.getheaders(URI)[0]
    else:
        # There is no location or uri headers
        # Return the original response and continue
        return response

    #
    # Calculate the target URL using urljoin()
    #
    try:
        current_url = URL(request.get_full_url())
        new_url_obj = current_url.url_join(new_url_raw)
        new_url_str = new_url_obj.url_string
    except ValueError:
        # The target URI seems to be invalid
        # Return the original response and continue
        return response

    #
    # For security reasons we do not allow redirects to protocols
    # other than HTTP or HTTPS
    #
    new_url_lower = new_url_str.lower()
    if not (new_url_lower.startswith('http://') or
            new_url_lower.startswith('https://')):
        # The target URI seems to be pointing to file:// or ftp://
        # Return the original response and continue
        return response

    # XXX Probably want to forget about the state of the current
    # request, although that might interact poorly with other
    # handlers that also use handler-specific request attributes
    new_request = self.create_redirect_request(request, response,
                                               new_url_obj)

    # loop detection
    # .redirect_dict has a key url if url was previously visited.
    if hasattr(request, 'redirect_dict'):
        visited = new_request.redirect_dict = request.redirect_dict

        if visited.get(new_url_str, 0) >= self.max_repeats:
            # Already visited the same URL more than max_repeats
            # Return the original response and continue
            return response

        if len(visited) >= self.max_redirections:
            # Already visited more than max_redirections during this process
            # Return the original response and continue
            return response
    else:
        # First redirect in this chain: start tracking visited URLs on
        # both the old and the new request objects
        visited = new_request.redirect_dict = request.redirect_dict = {}

    visited[new_url_str] = visited.get(new_url_str, 0) + 1

    #
    # Send the new HTTP request to the opener
    #
    return self.parent.open(new_request)