def test_clear(self):
    """clear() empties the history table but keeps the table itself."""
    target = URL("http://w3af.com/a/b/c.php")
    req = HTTPRequest(target, data="a=1")
    headers = Headers([("Content-Type", "text/html")])
    resp = HTTPResponse(200, "<html>", headers, target, target)
    resp.set_id(1)

    item = HistoryItem()
    item.request = req
    item.response = resp
    item.save()

    table = item.get_table_name()
    db = get_default_temp_db_instance()
    self.assertTrue(db.table_exists(table))

    self.assertTrue(item.clear())
    self.assertFalse(os.path.exists(item._session_dir),
                     "%s exists." % item._session_dir)

    # Changed the meaning of clear a little bit... now it simply removes
    # all rows from the table, not the table itself
    self.assertTrue(db.table_exists(table))
def test_find(self):
    """Save 500 history items and exercise the find() filter variants."""
    special_id = random.randint(1, 499)
    target = URL("http://w3af.org/a/b/foobar.php?foo=123")
    tag_value = rand_alnum(10)

    for i in xrange(0, 500):
        req = HTTPRequest(target, data="a=1")
        status = 302 if i == special_id else 200
        headers = Headers([("Content-Type", "text/html")])
        resp = HTTPResponse(status, "<html>", headers, target, target)
        resp.set_id(i)

        item = HistoryItem()
        item.request = req
        item.response = resp
        if i == special_id:
            # Exactly one item is marked and tagged
            item.toggle_mark()
            item.update_tag(tag_value)
        item.save()

    finder = HistoryItem()
    self.assertEqual(
        len(finder.find([("tag", "%" + tag_value + "%", "like")])), 1)
    self.assertEqual(len(finder.find([("code", 302, "=")])), 1)
    self.assertEqual(len(finder.find([("mark", 1, "=")])), 1)
    self.assertEqual(len(finder.find([("has_qs", 1, "=")])), 500)
    self.assertEqual(
        len(finder.find([("has_qs", 1, "=")], result_limit=10)), 10)

    results = finder.find([("has_qs", 1, "=")], result_limit=1,
                          orderData=[("id", "desc")])
    self.assertEqual(results[0].id, 499)

    # Sandwich the special id between two bounds; only it matches
    search_data = [("id", special_id + 1, "<"),
                   ("id", special_id - 1, ">")]
    self.assertEqual(len(finder.find(search_data)), 1)
def _new_no_content_resp(self, uri, log_it=False):
    '''
    Return a new NO_CONTENT HTTPResponse object.

    Optionally call the subscribed log handlers.

    :param uri: URI string or request object
    :param log_it: Boolean that indicates whether to log the request
                   and response.
    :raise TypeError: If uri is neither a URL nor an HTTPRequest.
    '''
    # Accept a URL or an HTTPRequest object
    if isinstance(uri, URL):
        req = HTTPRequest(uri)
    elif isinstance(uri, HTTPRequest):
        req = uri
    else:
        # Fixed: the message used to reference the wrong method name
        # (_new_content_resp) and read "of HTTPRequest of URL" instead
        # of "or". Raise TypeError (an Exception subclass, so existing
        # broad handlers still work) since this is a type error.
        msg = ('The uri parameter of ExtendedUrllib._new_no_content_resp()'
               ' has to be of HTTPRequest or URL type.')
        raise TypeError(msg)

    # NOTE(review): `uri` (which may be an HTTPRequest here) is passed
    # as both URL arguments of HTTPResponse, matching the original
    # behavior — confirm whether req.url_object was intended instead.
    no_content_response = HTTPResponse(NO_CONTENT, '', Headers(), uri, uri,
                                       msg='No Content')

    if log_it:
        # This also assigns the id to both objects.
        LogHandler.log_req_resp(req, no_content_response)

    if no_content_response.id is None:
        no_content_response.id = seq_gen.inc()

    return no_content_response
def new_no_content_resp(uri):
    '''
    Build and return a fresh NO_CONTENT HTTPResponse.

    :param uri: URI string or request object
    '''
    resp = HTTPResponse(NO_CONTENT, '', Headers(), uri, uri,
                        msg='No Content')

    # Assign an id from the global sequence only when one is missing
    if resp.id is None:
        resp.id = seq_gen.inc()

    return resp
def new_no_content_resp(uri, add_id=False):
    '''
    Build and return a fresh NO_CONTENT HTTPResponse.

    :param uri: URI string or request object
    :param add_id: When True, assign the next consecutive id to the
                   response.
    '''
    resp = HTTPResponse(NO_CONTENT, '', Headers(), uri, uri,
                        msg='No Content')

    if add_id:
        resp.id = consecutive_number_generator.inc()

    return resp
def test_save_load(self):
    """A saved HistoryItem can be loaded back by its id."""
    item_id = random.randint(1, 499)
    target = URL("http://w3af.com/a/b/c.php")
    req = HTTPRequest(target, data="a=1")
    headers = Headers([("Content-Type", "text/html")])
    resp = HTTPResponse(200, "<html>", headers, target, target)
    resp.set_id(item_id)

    saved = HistoryItem()
    saved.request = req
    saved.response = resp
    saved.save()

    loaded = HistoryItem()
    loaded.load(item_id)

    self.assertEqual(saved.request, loaded.request)
    self.assertEqual(saved.response.body, loaded.response.body)
def test_clear_clear(self):
    """Calling clear() twice in a row must not raise."""
    target = URL("http://w3af.com/a/b/c.php")
    req = HTTPRequest(target, data="a=1")
    headers = Headers([("Content-Type", "text/html")])
    resp = HTTPResponse(200, "<html>", headers, target, target)
    resp.set_id(1)

    item = HistoryItem()
    item.request = req
    item.response = resp
    item.save()

    # The second clear() works against an already-empty history
    item.clear()
    item.clear()
def _load_from_file(self, id):
    '''
    Load the (request, response) pair from the trace file for `id`.

    Waits up to ~1 second (20 x 0.05s) for the file to appear, since
    another thread might still be writing it.

    :param id: The id of the request/response to load.
    :raise IOError: If the trace file never shows up.
    :return: Tuple with (HTTPRequest, HTTPResponse).
    '''
    fname = self._get_fname_for_id(id)

    #
    # Due to some concurrency issues, we need to perform this check
    # before we try to read the .trace file.
    #
    if not os.path.exists(fname):
        # Fixed: xrange() was called with a float (1 / 0.05); make the
        # retry count an explicit integer.
        for _ in xrange(int(1 / 0.05)):
            time.sleep(0.05)

            if os.path.exists(fname):
                break
        else:
            msg = 'Timeout expecting trace file to be written "%s"' % fname
            raise IOError(msg)

    #
    # Ok... the file exists, but it might still be being written
    #
    # Fixed: use a context manager so the descriptor is closed even if
    # msgpack.load() raises.
    with open(fname, 'rb') as req_res:
        request_dict, response_dict = msgpack.load(req_res)

    request = HTTPRequest.from_dict(request_dict)
    response = HTTPResponse.from_dict(response_dict)
    return (request, response)
def test_response_body(self):
    """The sed expression rewrites the response body, nothing else."""
    headers = Headers([('content-type', 'text/html')])
    response = HTTPResponse(200, 'hello user!', headers,
                            self.url, self.url, _id=1)

    option_list = self.plugin.get_options()
    option_list['expressions'].set_value('sb/user/notluser/')
    self.plugin.set_options(option_list)

    mod_request = self.plugin.mangle_request(self.request)
    mod_response = self.plugin.mangle_response(response)

    # Headers and URIs are untouched on both sides
    self.assertEqual(mod_request.get_headers(), self.request.get_headers())
    self.assertEqual(mod_response.get_headers(), response.get_headers())
    self.assertEqual(mod_request.get_uri(), self.request.get_uri())
    self.assertEqual(mod_response.get_uri(), response.get_uri())

    # Only the body changed
    self.assertEqual(mod_response.get_body(), 'hello notluser!')
def test_tag(self):
    """A tag set before save() is there after load()."""
    tagged_id = random.randint(501, 999)
    tag_value = rand_alnum(10)
    target = URL("http://w3af.org/a/b/c.php")

    for i in xrange(501, 1000):
        req = HTTPRequest(target, data="a=1")
        headers = Headers([("Content-Type", "text/html")])
        resp = HTTPResponse(200, "<html>", headers, target, target)
        resp.set_id(i)

        item = HistoryItem()
        item.request = req
        item.response = resp
        if i == tagged_id:
            item.update_tag(tag_value)
        item.save()

    loaded = HistoryItem()
    loaded.load(tagged_id)
    self.assertEqual(loaded.tag, tag_value)
def test_delete(self):
    """delete() removes both the DB row and the on-disk trace file."""
    item_id = random.randint(1, 499)
    target = URL("http://w3af.com/a/b/c.php")
    req = HTTPRequest(target, data="a=1")
    headers = Headers([("Content-Type", "text/html")])
    resp = HTTPResponse(200, "<html>", headers, target, target)
    resp.set_id(item_id)

    item = HistoryItem()
    item.request = req
    item.response = resp
    item.save()

    fname = item._get_fname_for_id(item_id)
    self.assertTrue(os.path.exists(fname))

    item.delete(item_id)

    self.assertRaises(DBException, item.read, item_id)
    self.assertFalse(os.path.exists(fname))
def test_site_protected_against_xss_by_csp_case04(self):
    '''
    Test case in which the site provides CSP features and enables use
    of the javascript "eval()" function in its CSP script policies,
    BUT we do not accept these configurations.
    '''
    header_value = "script-src 'self' unsafe-eval; script-nonce 'AADD'"
    csp_headers = Headers({CSP_HEADER_W3C: header_value}.items())
    http_response = HTTPResponse(200, '', csp_headers, self.url, self.url)

    self.assertFalse(site_protected_against_xss_by_csp(http_response))
def test_parser_re_link(self):
    '''Get a link by applying regular expressions'''
    response = HTTPResponse(200, 'header /index.aspx footer',
                            Headers(), self.url, self.url)

    parser = WMLParser(response)
    re_refs, parsed_refs = parser.get_references()

    # TODO: Shouldn't this be the other way around?!
    self.assertEqual([], re_refs)
    self.assertEqual(len(parsed_refs), 1)
    self.assertEqual(u'http://www.w3af.com/index.aspx',
                     parsed_refs[0].url_string)
def test_redirect_uri_relative(self):
    """A relative 'uri' header yields one fuzzable request at that path."""
    redir_path = '/foo.bar'
    headers = Headers([('content-type', 'text/html'),
                       ('uri', redir_path)])
    http_response = HTTPResponse(200, '', headers, self.url, self.url)

    generated = create_fuzzable_requests(http_response, add_self=False)
    self.assertEqual(len(generated), 1)

    redir_fr = generated[0]
    # Join the relative path onto the base URL (drop its trailing slash)
    self.assertEqual(redir_fr.get_url().url_string,
                     self.url.url_string[:-1] + redir_path)
def test_redirect_location(self):
    """An absolute Location header yields one fuzzable request at it."""
    redir_url = 'http://www.w3af.org/'
    headers = Headers([('content-type', 'text/html'),
                       ('location', redir_url)])
    http_response = HTTPResponse(200, '', headers, self.url, self.url)

    generated = create_fuzzable_requests(http_response, add_self=False)
    self.assertEqual(len(generated), 1)

    redir_fr = generated[0]
    self.assertEqual(redir_fr.get_url().url_string, redir_url)
def test_provides_csp_features_yes_case03(self):
    '''
    Test case in which site provides CSP features using report-only +
    mandatory policies.
    '''
    hrds = {CSP_HEADER_W3C: CSP_DIRECTIVE_OBJECT + " 'self'",
            CSP_HEADER_W3C_REPORT_ONLY: CSP_DIRECTIVE_CONNECTION + " *"}
    csp_headers = Headers(hrds.items())
    http_response = HTTPResponse(200, '', csp_headers, self.url, self.url)

    self.assertTrue(provides_csp_features(http_response))
def test_path_disclosure_calculated_webroot(self):
    """The webroot is derived from the path disclosed in the body."""
    kb.kb.add_url(self.url)

    response = HTTPResponse(200, 'header /var/www/foo/bar.py footer',
                            self.header, self.url, self.url, _id=1)
    self.plugin.grep(self.request, response)

    webroot = kb.kb.raw_read('path_disclosure', 'webroot')
    self.assertEqual(webroot, '/var/www')
def test_cookie(self):
    """A 'cookie' response header ends up on the fuzzable request."""
    redir_path = '/foo.bar'
    headers = Headers([('content-type', 'text/html'),
                       ('uri', redir_path),
                       ('cookie', 'abc=def')])
    http_response = HTTPResponse(200, '', headers, self.url, self.url)

    generated = create_fuzzable_requests(http_response, add_self=False)
    self.assertEqual(len(generated), 1)

    redir_fr_cookie = generated[0]
    self.assertEqual(str(redir_fr_cookie.get_cookie()), 'abc=def;')
def test_http_auth_detect_uri(self):
    """Credentials embedded in a URI inside the body are reported."""
    body = 'ABC ' * 100
    body += 'http://*****:*****@www.w3af.com/foo.bar'
    body += '</br> ' * 50

    response = HTTPResponse(200, body, self.headers,
                            self.url, self.url, _id=1)
    self.plugin.grep(self.request, response)

    # No basic-auth finding, but one user:pass-in-URI finding
    self.assertEqual(len(kb.kb.get('http_auth_detect', 'auth')), 0)
    self.assertEqual(len(kb.kb.get('http_auth_detect', 'userPassUri')), 1)
def test_analyze_cookies_collect(self):
    """Each response carrying a Set-Cookie adds one KB entry."""
    url = URL('http://www.w3af.com/')

    for cookie_value in ('abc=def', '123=456'):
        headers = Headers({'content-type': 'text/html',
                           'Set-Cookie': cookie_value}.items())
        response = HTTPResponse(200, '', headers, url, url, _id=1)
        request = FuzzableRequest(url, method='GET')
        self.plugin.grep(request, response)

    self.assertEqual(len(kb.kb.get('analyze_cookies', 'cookies')), 2)
    self.assertEqual(
        len(kb.kb.get('analyze_cookies', 'invalid-cookies')), 0)
def test_from_dict(self):
    """A response survives a to_dict/msgpack/from_dict round trip."""
    html = 'header <b>ABC</b>-<b>DEF</b>-<b>XYZ</b> footer'
    headers = Headers([('Content-Type', 'text/html')])
    orig_resp = self.create_resp(headers, html)

    packed = msgpack.dumps(orig_resp.to_dict())
    loaded_resp = HTTPResponse.from_dict(msgpack.loads(packed))

    self.assertEqual(orig_resp, loaded_resp)
    self.assertEqual(orig_resp.__dict__.values(),
                     loaded_resp.__dict__.values())
def parse(self, filename):
    '''
    Parse the SWF file at `filename`.

    :param filename: Path to the .swf file on disk.
    :return: A SWFParser wrapping an HTTPResponse built from the file.
    '''
    # Fixed: use a context manager instead of the bare file() builtin so
    # the descriptor is always closed; read in binary mode since SWF is
    # a binary format.
    with open(filename, 'rb') as swf_file:
        body = swf_file.read()

    swf_mime = 'application/x-shockwave-flash'
    hdrs = Headers({'Content-Type': swf_mime}.items())
    response = HTTPResponse(200, body, hdrs,
                            URL('http://moth/xyz/'),
                            URL('http://moth/xyz/'),
                            _id=1)

    return SWFParser(response)
def _handle_send_success(self, req, res, grep, original_url, original_url_inst, start_time): ''' Handle the case in "def _send" where the request was successful and we were able to get a valid HTTP response. :return: An HTTPResponse object. ''' # Everything went well! rdata = req.get_data() if not rdata: msg = ('%s %s returned HTTP code "%s"' % (req.get_method(), urllib.unquote_plus(original_url), res.code)) else: printable_data = urllib.unquote_plus(rdata) if len(rdata) > 75: printable_data = '%s...' % printable_data[:75] printable_data = printable_data.replace('\n', ' ') printable_data = printable_data.replace('\r', ' ') msg = ('%s %s with data: "%s" returned HTTP code "%s"' % ( req.get_method(), original_url, printable_data, res.code) ) from_cache = hasattr(res, 'from_cache') flags = ' (id=%s,from_cache=%i,grep=%i)' % (res.id, from_cache, grep) msg += flags om.out.debug(msg) http_resp = HTTPResponse.from_httplib_resp(res, original_url=original_url_inst) http_resp.set_id(res.id) http_resp.set_wait_time(time.time() - start_time) # Clear the log of failed requests; this request is DONE! req_id = id(req) if req_id in self._error_count: del self._error_count[req_id] self._zero_global_error_count() if grep: self._grep(req, http_resp) return http_resp
def test_mark(self):
    """toggle_mark() before save() persists; other items stay unmarked."""
    marked_id = 3
    target = URL("http://w3af.org/a/b/c.php")

    for i in xrange(0, 500):
        req = HTTPRequest(target, data="a=1")
        headers = Headers([("Content-Type", "text/html")])
        resp = HTTPResponse(200, "<html>", headers, target, target)
        resp.set_id(i)

        item = HistoryItem()
        item.request = req
        item.response = resp
        if i == marked_id:
            item.toggle_mark()
        item.save()

    marked = HistoryItem()
    marked.load(marked_id)
    self.assertTrue(marked.mark)

    unmarked = HistoryItem()
    unmarked.load(marked_id - 1)
    self.assertFalse(unmarked.mark)
def test_provides_csp_features_no_case03(self):
    '''
    Test case in which site provides broken CSP.
    '''
    # Note the errors in the directive:
    #     default-src -> default-source
    #     img-src -> image-src
    header_value = "default-src ' '; img-src ' '"
    csp_headers = Headers({CSP_HEADER_W3C: header_value}.items())
    http_response = HTTPResponse(200, '', csp_headers, self.url, self.url)

    self.assertFalse(provides_csp_features(http_response))
def test_multi(self):
    """Two links with strange parameter values produce two findings."""
    body = '''<html>
<a href="http://moth/abc.jsp?call=SELECT x FROM TABLE">x</a>
<a href="http://moth/abc.jsp?call=s(12,3)">x</a>
</html>'''
    response = HTTPResponse(200, body, self.headers,
                            self.url, self.url, _id=1)

    self.plugin.grep(self.request, response)

    self.assertEquals(
        len(kb.kb.get('strange_parameters', 'strange_parameters')), 2)
def test_url_session_in_url(self):
    """A JSESSIONID in the query string raises a 'Session ID in URL' info."""
    url = URL('http://www.w3af.com/'
              '?JSESSIONID=231badb19b93e44f47da1bd64a8147f2')
    headers = Headers([('content-type', 'text/html')])
    request = FuzzableRequest(url, method='GET')
    response = HTTPResponse(200, 'abc', headers, url, url, _id=1)

    self.plugin.grep(request, response)

    infos = kb.kb.get('url_session', 'url_session')
    self.assertEquals(len(infos), 1)
    self.assertEqual(infos[0].get_name(), 'Session ID in URL')
def setUp(self):
    """Create the plugin under test plus a canned request/response pair."""
    super(TestCORSOrigin, self).setUp()

    self.co = cors_origin()

    self.url = URL('http://moth/')
    self.origin = 'http://moth/'
    self.response = HTTPResponse(200, '', Headers(),
                                 self.url, self.url, _id=3)
    self.request = FuzzableRequest(self.url)
def test_path_disclosure_positive(self):
    """A unix path in the body is reported as a path disclosure."""
    response = HTTPResponse(200, 'header /etc/passwd footer',
                            self.header, self.url, self.url, _id=1)
    self.plugin.grep(self.request, response)

    infos = kb.kb.get('path_disclosure', 'path_disclosure')
    self.assertEquals(len(infos), 1)
    self.assertEqual(infos[0]['path'], '/etc/passwd')
def test_pdf_parser(self):
    """The PDF parser extracts the links from the sample document."""
    # Fixed: use a context manager instead of the bare file() builtin so
    # the descriptor is always closed; read in binary mode since PDF is
    # a binary format.
    with open(self.LINKS_SAMPLE, 'rb') as pdf_file:
        body = pdf_file.read()

    hdrs = Headers({'Content-Type': 'application/pdf'}.items())
    response = HTTPResponse(200, body, hdrs,
                            URL('http://moth/'),
                            URL('http://moth/'),
                            _id=1)

    parser = PDFParser(response)
    parsed, re_refs = parser.get_references()

    self.assertEqual(parsed, [])
    self.assertEqual(re_refs, [URL('http://moth/pdf/')])
def test_analyze_cookies_with_httponly_secure(self):
    """HttpOnly + secure over HTTPS: collected, no security finding."""
    url = URL('https://www.w3af.com/')
    headers = Headers({'content-type': 'text/html',
                       'Set-Cookie': 'abc=def;HttpOnly; secure;'}.items())
    response = HTTPResponse(200, '', headers, url, url, _id=1)
    request = FuzzableRequest(url, method='GET')

    self.plugin.grep(request, response)

    self.assertEqual(len(kb.kb.get('analyze_cookies', 'cookies')), 1)
    self.assertEqual(len(kb.kb.get('analyze_cookies', 'security')), 0)
def test_provides_cors_features_false(self):
    """Without CORS response headers the helper returns False."""
    url = URL('http://moth/')
    fr = FuzzableRequest(url)

    http_response = HTTPResponse(200, '', Headers(), url, url)
    url_opener_mock = Mock()
    url_opener_mock.GET = MagicMock(return_value=http_response)

    cors = provides_cors_features(fr, url_opener_mock)

    # The helper must probe with an Origin header
    call_header = Headers({'Origin': 'www.w3af.org'}.items())
    url_opener_mock.GET.assert_called_with(url, headers=call_header)

    self.assertFalse(cors)
def test_provides_csp_features_yes_case02(self):
    '''
    Test case in which site provides CSP features using only
    report-only policies.
    '''
    header_value = ("default-src 'self'; img-src *; object-src"
                    " media1.example.com media2.example.com"
                    " *.cdn.example.com; script-src"
                    " trustedscripts.example.com")
    csp_headers = Headers({CSP_HEADER_W3C_REPORT_ONLY: header_value}.items())
    http_response = HTTPResponse(200, '', csp_headers, self.url, self.url)

    self.assertTrue(provides_csp_features(http_response))
def test_unsafe_inline_enabled_yes_case02(self):
    '''
    Test case in which site provides "unsafe-inline" related CSP for
    Style.
    '''
    hrds = {CSP_HEADER_FIREFOX: CSP_DIRECTIVE_STYLE + " '" +
            CSP_DIRECTIVE_VALUE_UNSAFE_INLINE + "'",
            CSP_HEADER_W3C: CSP_DIRECTIVE_SCRIPT + " 'self';" +
            CSP_DIRECTIVE_REPORT_URI + " /myrelativeuri"}
    csp_headers = Headers(hrds.items())
    http_response = HTTPResponse(200, '', csp_headers, self.url, self.url)

    self.assertTrue(unsafe_inline_enabled(http_response))
def test_no_clamav_eicar(self, *args):
    """With clamd unreachable, EICAR content yields no malware findings."""
    url = URL('http://www.w3af.com/')
    headers = Headers([('content-type', 'text/html')])
    response = HTTPResponse(200, clamd.EICAR, headers, url, url, _id=1)
    request = FuzzableRequest(url, method='GET')

    # Simulate that we don't have clamd running
    self.plugin._connection_test = Mock(return_value=False)
    self.plugin._scan_http_response = Mock()

    self.plugin.grep(request, response)

    self.assertEqual(len(kb.kb.get('clamav', 'malware')), 0)
    self.assertEqual(self.plugin._scan_http_response.call_count, 0)
def test_cache_control_images(self):
    '''
    No cache control, but the content is not sensitive (is an image)
    so no bug is stored in KB.
    '''
    url = URL('https://www.w3af.com/image.png')
    headers = Headers([('content-type', 'image/jpeg')])
    request = FuzzableRequest(url, method='GET')
    response = HTTPResponse(200, 'abc', headers, url, url, _id=1)

    self.plugin.grep(request, response)
    self.plugin.end()

    self.assertEquals(len(kb.kb.get('cache_control', 'cache_control')), 0)
def test_from_dict_encodings(self):
    """Round-tripping via to_dict/msgpack keeps the decoded body intact."""
    for body, charset in TEST_RESPONSES.values():
        raw_html = body.encode(charset)
        resp = self.create_resp(Headers([('Content-Type', 'text/xml')]),
                                raw_html)

        packed = msgpack.dumps(resp.to_dict())
        loaded_resp = HTTPResponse.from_dict(msgpack.loads(packed))

        expected = smart_unicode(raw_html, DEFAULT_CHARSET,
                                 ESCAPED_CHAR, on_error_guess=False)
        self.assertEquals(expected, loaded_resp.body)
def test_cache_control_empty_body(self):
    '''
    No cache control, but the content is not sensitive (since it is an
    empty string) so no bug is stored in KB.
    '''
    url = URL('https://www.w3af.com/')
    headers = Headers([('content-type', 'text/html')])
    request = FuzzableRequest(url, method='GET')
    response = HTTPResponse(200, '', headers, url, url, _id=1)

    self.plugin.grep(request, response)
    self.plugin.end()

    self.assertEquals(len(kb.kb.get('cache_control', 'cache_control')), 0)
def test_strange_headers_positive(self):
    """An uncommon response header is flagged as 'Strange header'."""
    url = URL('http://www.w3af.com/')
    headers = Headers([('content-type', 'text/html'),
                       ('hello-world', 'yes!')])
    request = FuzzableRequest(url, method='GET')
    resp_positive = HTTPResponse(200, 'Hello world', headers,
                                 url, url, _id=1)

    self.plugin.grep(request, resp_positive)

    infos = kb.kb.get('strange_headers', 'strange_headers')
    self.assertEquals(len(infos), 1)

    info = infos[0]
    self.assertEqual(info.get_name(), 'Strange header')
    self.assertEqual(info.get_url(), url)
def test_from_dict_encodings(self):
    """Each fixture body survives the to_dict/msgpack round trip."""
    for fixture_body, fixture_charset in TEST_RESPONSES.values():
        encoded = fixture_body.encode(fixture_charset)
        original = self.create_resp(
            Headers([('Content-Type', 'text/xml')]), encoded)

        serialized = msgpack.dumps(original.to_dict())
        restored = HTTPResponse.from_dict(msgpack.loads(serialized))

        self.assertEquals(
            smart_unicode(encoded, DEFAULT_CHARSET,
                          ESCAPED_CHAR, on_error_guess=False),
            restored.body)
def test_find_vulns_case04(self):
    '''
    Test case in which we configure correctly policies for all
    directives.
    '''
    header_value = ("default-src 'self';script-src 'self';object-src 'self';"
                    "style-src 'self';img-src 'self';media-src 'self';"
                    "frame-src 'self';font-src 'self';sandbox;"
                    "form-action '/myCtx/act';connect-src 'self';"
                    "plugin-types application/pdf;reflected-xss filter;"
                    "script-nonce AABBCCDDEE;")
    csp_headers = Headers({CSP_HEADER_W3C: header_value}.items())
    http_response = HTTPResponse(200, '', csp_headers, self.url, self.url)

    self.assertEqual(len(find_vulns(http_response)), 0)
def test_no_code_disclosure_xml(self, *args):
    """Plain XML content must not be reported as code disclosure."""
    body = '''
    <?xml version="1.0"?>
    <note>
    <to>Tove</to>
    <from>Jani</from>
    <heading>Reminder</heading>
    <body>Don't forget me this weekend!</body>
    </note>'''
    url = URL('http://www.w3af.com/')
    headers = Headers([('content-type', 'text/html')])
    response = HTTPResponse(200, body, headers, url, url, _id=1)
    request = FuzzableRequest(url, method='GET')

    self.plugin.grep(request, response)

    self.assertEqual(
        len(kb.kb.get('code_disclosure', 'code_disclosure')), 0)
def test_ssn_with_complex_html(self):
    '''
    Test for false positive "...discloses a US Social Security Number:
    "12-56-1011"..."
    '''
    body = '''<select name="servers">
    <option value="0" selected="selected">0</option>
    <option value="1">1</option>
    <option value="2-5">2-5</option>
    <option value="6-10">6-10</option>
    <option value="11-19">11-19</option>
    <option value="20+">20+</option>
    </select>'''
    headers = Headers([('content-type', 'text/html')])
    response = HTTPResponse(200, body, headers,
                            self.url, self.url, _id=1)

    self.plugin.grep(self.request, response)
    self.assertEqual(len(kb.kb.get('ssn', 'ssn')), 0)
def test_provides_cors_features_true(self):
    """An Access-Control-Allow-Origin header makes the helper return True."""
    url = URL('http://moth/')
    fr = FuzzableRequest(url)

    hdrs = {'Access-Control-Allow-Origin': 'http://www.w3af.org/'}.items()
    http_response = HTTPResponse(200, '', Headers(hdrs), url, url)

    url_opener_mock = Mock()
    url_opener_mock.GET = MagicMock(return_value=http_response)

    cors = provides_cors_features(fr, url_opener_mock)

    url_opener_mock.GET.assert_called_with(url)
    self.assertTrue(cors)
def log_req_resp(request, response):
    '''
    Send the request and the response to the output manager.

    :param request: An HTTPRequest instance (enforced below).
    :param response: Either an HTTPResponse or a lower-level response
                     object which is converted to one before logging.
    :raise TypeError: If request is not an HTTPRequest.
    '''
    if not isinstance(response, HTTPResponse):
        url = request.url_object
        resp = HTTPResponse.from_httplib_resp(response, original_url=url)
        resp.set_id(response.id)
    else:
        # Fixed: `resp` was never assigned when the caller already
        # passed an HTTPResponse, which made the log_http() call below
        # raise a NameError.
        resp = response

    if not isinstance(request, HTTPRequest):
        msg = 'There is something odd going on in LogHandler,'\
              ' request should be of type HTTPRequest got %s'\
              ' instead.'
        raise TypeError(msg % type(request))

    om.out.log_http(request, resp)
def store_in_cache(request, response):
    '''
    Convert the low-level response into an HTTPResponse and persist the
    request/response pair through HistoryItem.

    :param request: The HTTPRequest that was sent; gen_hash(request) is
                    used as the cache-lookup alias.
    :param response: The low-level response to convert and store.
    :raise Exception: Re-raised with a descriptive message if save()
                      fails for any reason.
    '''
    # Create the http response object
    resp = HTTPResponse.from_httplib_resp(response,
                                          original_url=request.url_object)
    resp.set_id(response.id)
    # The alias lets later cache lookups find this entry by request hash
    resp.set_alias(gen_hash(request))

    hi = HistoryItem()
    hi.request = request
    hi.response = resp

    # Now save them
    try:
        hi.save()
    except Exception, ex:
        # Log the failing pair before propagating, so the scan log shows
        # exactly which request/response triggered the DB error
        msg = (
            "Exception while inserting request/response to the"
            " database: %s\nThe request/response that generated"
            " the error is: %s %s %s" %
            (ex, resp.get_id(), request.get_uri(), resp.get_code())
        )
        om.out.error(msg)
        raise Exception(msg)
def store_in_cache(request, response):
    '''
    Convert the low-level response into an HTTPResponse and persist the
    request/response pair through HistoryItem.

    :param request: The HTTPRequest that was sent; gen_hash(request) is
                    used as the cache-lookup alias.
    :param response: The low-level response to convert and store.
    :raise w3afMustStopException: On sqlite3 errors (for example a full
                                  disk), since the scan cannot continue
                                  without its database.
    '''
    # Create the http response object
    resp = HTTPResponse.from_httplib_resp(response,
                                          original_url=request.url_object)
    resp.set_id(response.id)
    # The alias lets later cache lookups find this entry by request hash
    resp.set_alias(gen_hash(request))

    hi = HistoryItem()
    hi.request = request
    hi.response = resp

    # Now save them
    try:
        hi.save()
    except sqlite3.Error, e:
        msg = 'A sqlite3 error was raised: "%s".' % e

        # A full disk is the most common cause of this failure
        if 'disk' in str(e).lower():
            msg += ' Please check if your disk is full.'

        raise w3afMustStopException(msg)
msg = ('%s %s with data: "%s" returned HTTP code "%s"' % ( req.get_method(), original_url, printable_data, res.code) ) from_cache = hasattr(res, 'from_cache') flags = ' (id=%s,from_cache=%i,grep=%i)' % (res.id, from_cache, grep) msg += flags om.out.debug(msg) http_resp = HTTPResponse.from_httplib_resp(res, original_url=original_url_inst) http_resp.set_id(res.id) http_resp.set_wait_time(time.time() - start_time) # Let the upper layers know that this response came from the # local cache. if isinstance(res, CachedResponse): http_resp.set_from_cache(True) # Clear the log of failed requests; this request is done! req_id = id(req) if req_id in self._error_count: del self._error_count[req_id] self._zero_global_error_count() if grep: