Code Example #1
File: test_main.py Project: zsdlove/w3af
    def test_json_pet_store(self):
        # http://petstore.swagger.io/v2/swagger.json
        body = file(self.SWAGGER_JSON).read()
        headers = Headers({'Content-Type': 'application/json'}.items())
        response = HTTPResponse(200,
                                body,
                                headers,
                                URL('http://moth/swagger.json'),
                                URL('http://moth/swagger.json'),
                                _id=1)

        parser = OpenAPI(response)
        parser.parse()
        api_calls = parser.get_api_calls()

        json_headers = Headers([('Content-Type', 'application/json')])
        multipart_headers = Headers([('Content-Type', 'multipart/form-data')])
        url_encoded_headers = Headers([('Content-Type',
                                        'application/x-www-form-urlencoded')])
        json_api_headers = Headers([('api_key', 'FrAmE30.'),
                                    ('Content-Type', 'application/json')])

        url_root = 'http://petstore.swagger.io/v2'

        expected_body_1 = ('{"body": {"category": {"id": 42, "name": "John"},'
                           ' "status": "available", "name": "John",'
                           ' "tags": [{"id": 42, "name": "John"}],'
                           ' "photoUrls": ["56"], "id": 42}}')

        expected_body_2 = (
            '{"body": {"username": "******", "firstName": "John",'
            ' "lastName": "Smith", "userStatus": 42,'
            ' "email": "*****@*****.**", "phone": "55550178",'
            ' "password": "******", "id": 42}}')

        expected_body_3 = (
            '{"body": [{"username": "******", "firstName": "John",'
            ' "lastName": "Smith", "userStatus": 42,'
            ' "email": "*****@*****.**", "phone": "55550178",'
            ' "password": "******", "id": 42}]}')

        expected_body_4 = (
            '{"body": {"status": "placed",'
            ' "shipDate": "2017-06-30T23:59:45",'
            ' "complete": true, "petId": 42, "id": 42, "quantity": 42}}')

        e_api_calls = [
            ('GET', '/pet/findByStatus?status=available', json_headers, ''),
            ('POST', '/pet/42/uploadImage', multipart_headers, ''),
            ('POST', '/pet/42', url_encoded_headers, ''),
            ('POST', '/pet', json_headers, expected_body_1),
            ('GET', '/pet/42', json_headers, ''),
            ('GET', '/pet/42', json_api_headers, ''),
            ('GET', '/pet/findByTags?tags=56', json_headers, ''),
            ('PUT', '/pet', json_headers, expected_body_1),
            ('PUT', '/user/John8212', json_headers, expected_body_2),
            ('POST', '/user/createWithList', json_headers, expected_body_3),
            ('POST', '/user', json_headers, expected_body_2),
            ('GET', '/user/John8212', json_headers, ''),
            ('GET', '/user/login?username=John8212&password=FrAmE30.',
             json_headers, ''),
            ('GET', '/user/logout', Headers(), ''),
            ('POST', '/user/createWithArray', json_headers, expected_body_3),
            ('GET', '/store/order/2', json_headers, ''),
            ('GET', '/store/inventory', json_headers, ''),
            ('GET', '/store/inventory', json_api_headers, ''),
            ('POST', '/store/order', json_headers, expected_body_4),
        ]

        for api_call in api_calls:
            method = api_call.get_method()
            headers = api_call.get_headers()
            data = api_call.get_data()

            uri = api_call.get_uri().url_string
            uri = uri.replace(url_root, '')

            data = (method, uri, headers, data)

            self.assertIn(data, e_api_calls)
Code Example #2
File: test_url.py Project: webvul/webfuzzer
 def test_get_directories_path_levels_4(self):
     result = [
         i.url_string for i in URL('http://w3af.com/').get_directories()
     ]
     expected = [u'http://w3af.com/']
     self.assertEqual(result, expected)
Code Example #3
File: test_url.py Project: webvul/webfuzzer
 def test_url_join_case07(self):
     u = URL('http://w3af.com/')
     self.assertEqual(
         u.url_join('http://w3af.org:8080/abc.html').url_string,
         u'http://w3af.org:8080/abc.html')
Code Example #4
File: test_url.py Project: webvul/webfuzzer
 def test_encode_plus(self):
     res_str = URL(u'https://w3af.com:443/file.asp?id=1+2').url_encode()
     expected = 'https://w3af.com/file.asp?id=1%202'
     self.assertEqual(res_str, expected)
Code Example #5
File: test_url.py Project: webvul/webfuzzer
 def test_encode_math(self):
     res_str = URL(u'http://w3af.com/x.py?ec=x*y/2==3').url_encode()
     EXPECTED = 'http://w3af.com/x.py?ec=x%2Ay%2F2%3D%3D3'
     self.assertEqual(res_str, EXPECTED)
Code Example #6
File: afd.py Project: webvul/webfuzzer
        try:
            http_resp = self._uri_opener.GET(original_url, cache=True)
        except BaseFrameworkException, bfe:
            msg = 'Active filter detection plugin failed to receive a' \
                  ' response for the first request. The exception was: "%s".' \
                  ' Can not perform analysis.'
            raise BaseFrameworkException(msg % bfe)
        else:
            orig_resp_body = http_resp.get_body()
            orig_resp_body = orig_resp_body.replace(rnd_param, '')
            orig_resp_body = orig_resp_body.replace(rnd_value, '')

            tests = []
            for offending_string in self._get_offending_strings():
                offending_url = fmt % (fuzzable_request.get_url(), rnd_param,
                                       offending_string)
                offending_url = URL(offending_url)
                tests.append((offending_string, offending_url, orig_resp_body,
                              rnd_param))

            self.worker_pool.map_multi_args(self._send_and_analyze, tests)

            return self._filtered, self._not_filtered

    def _send_and_analyze(self, offending_string, offending_url,
                          original_resp_body, rnd_param):
        """
        Actually send the HTTP request.

        :return: None, everything is saved to the self._filtered and
                 self._not_filtered lists.
        """
Code Example #7
File: test_url.py Project: webvul/webfuzzer
 def decode_get_qs(self, url_str):
     return URL(url_str).url_decode().querystring['id'][0]
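
This helper decodes the URL and returns the first value of the id query-string parameter. A hypothetical test built on top of it might look like this (the method name and values are illustrative, not from the excerpt):

 def test_decode_encoded_digit(self):
     # %31 is the percent-encoded form of u'1'
     self.assertEqual(self.decode_get_qs(u'http://w3af.com/?id=%31'), u'1')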
Code Example #8
 def setUp(self):
     self.url = URL('http://w3af.com')
     self.headers = Headers([(u'content-type', u'text/html')])
     self.mpdoc = MultiProcessingDocumentParser()
Code Example #9
File: test_response.py Project: zsdlove/w3af
    def test_normalize_path(self):
        url = URL('https://w3af.org/a/b/c/')
        normalized_path = FourOhFourResponse.normalize_path(url)

        self.assertEqual(normalized_path, 'https://w3af.org/a/b/path/')
Code Example #10
File: test_baseparser.py Project: llcoolj1/w3af-kali
 def setUp(self):
     self.url = URL('http://www.w3af.com/')
     response = HTTPResponse(200, '', Headers(), self.url, self.url)
     self.bp_inst = BaseParser(response)
Code Example #11
File: spider_man.py Project: chenbremer/w3af-1
from w3af.core.data.request.fuzzable_request import FuzzableRequest
from w3af.core.controllers.daemons.proxy.templates.utils import render
from w3af.core.controllers.plugins.crawl_plugin import CrawlPlugin
from w3af.core.controllers.daemons.proxy import Proxy, ProxyHandler
from w3af.core.controllers.exceptions import RunOnce, ProxyException
from w3af.core.controllers.misc.decorators import runonce

from w3af.core.data.options.opt_factory import opt_factory
from w3af.core.data.options.option_list import OptionList
from w3af.core.data.parsers.doc.url import URL
from w3af.core.data.dc.headers import Headers

# Cohny changed the original http://w3af/spider_man?terminate
# to http://127.7.7.7/spider_man?terminate because in Opera we got
# an error if we used the original one! Thanks Cohny!
TERMINATE_URL = URL('http://127.7.7.7/spider_man?terminate')
TERMINATE_FAVICON_URL = URL('http://127.7.7.7/favicon.ico')


class spider_man(CrawlPlugin):
    """
    SpiderMan is a local proxy that will collect new URLs.

    :author: Andres Riancho ([email protected])
    :author: Alexander Berezhnoy < alexander.berezhnoy |at| gmail.com >
    """
    def __init__(self):
        CrawlPlugin.__init__(self)
        self._first_captured_request = True
        self._proxy = None
Code Example #12
 def setUp(self):
     self.url = URL('http://www.w3af.com/')
Code Example #13
 def setUp(self):
     self.fuzzer_config = {'fuzz_cookies': True}
     self.payloads = ['abc', 'def']
     self.url = URL('http://moth/')
Code Example #14
 def test_full(self):
     p = self.parse('test_full_url.js')
     expected = [], [URL('http://moth/spam.html'),
                     URL('http://moth/eggs.html')]
     self.assertEqual(p.get_references(), expected)
Code Example #15
File: test_sgml.py Project: webvul/webfuzzer
 def test_baseurl(self):
     body = HTML_DOC % {'head': BASE_TAG, 'body': ''}
     resp = build_http_response(self.url, body)
     p = SGMLParser(resp)
     p.parse()
     self.assertEquals(URL('http://www.w3afbase.com/'), p._base_url)
Code Example #16
File: helper.py Project: webvul/webfuzzer
    def _scan(self,
              target,
              plugins,
              debug=False,
              assert_exceptions=True,
              verify_targets=True):
        """
        Set up the environment and start the scan. Typically called from
        child classes' test methods.

        :param target: The target to scan.
        :param plugins: PluginConfig objects to activate and setup before
            the test runs.
        """
        if not isinstance(target, (basestring, tuple)):
            raise TypeError('Expected basestring or tuple in scan target.')

        if isinstance(target, tuple):
            target = tuple([URL(u) for u in target])

        elif isinstance(target, basestring):
            target = (URL(target), )

        if verify_targets and not self.MOCK_RESPONSES:
            self._verify_targets_up(target)

        target_opts = create_target_option_list(*target)
        self.w3afcore.target.set_options(target_opts)

        # Enable plugins to be tested
        for ptype, plugincfgs in plugins.items():
            self.w3afcore.plugins.set_plugins([p.name for p in plugincfgs],
                                              ptype)

            for pcfg in plugincfgs:

                if pcfg.name == 'all':
                    continue

                plugin_instance = self.w3afcore.plugins.get_plugin_inst(
                    ptype, pcfg.name)
                default_option_list = plugin_instance.get_options()
                unit_test_options = pcfg.options

                for option in default_option_list:
                    if option.get_name() not in unit_test_options:
                        unit_test_options.add(option)

                self.w3afcore.plugins.set_plugin_options(
                    ptype, pcfg.name, unit_test_options)

        # Enable text output plugin for debugging
        environ_debug = os.environ.get('DEBUG', '0') == '1'
        if debug or environ_debug:
            self._configure_debug()

        # Set a special user agent to be able to grep the logs and identify
        # requests sent by each test
        custom_test_agent = self.get_custom_agent()
        self.w3afcore.uri_opener.settings.set_user_agent(custom_test_agent)

        # Verify env and start the scan
        self.w3afcore.plugins.init_plugins()
        self.w3afcore.verify_environment()
        self.w3afcore.start()

        #
        # I want to make sure that we don't have *any hidden* exceptions in
        # our tests. This was in tearDown before, but it was moved here
        # because assertions in my test code were failing due to exceptions
        # raised during the scan, and those exceptions were hidden.
        #
        if assert_exceptions:
            caught_exceptions = self.w3afcore.exception_handler.get_all_exceptions()
            tracebacks = [e.get_details() for e in caught_exceptions]
            self.assertEqual(len(caught_exceptions), 0, tracebacks)
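
For context, a child test case would typically drive _scan along these lines (a hedged sketch: the target URL and the PluginConfig-based setup are illustrative, echoing the PluginConfig objects mentioned in the docstring rather than copied from the excerpt):

    def test_scan_finds_xss(self):
        # Hypothetical target and plugin configuration
        target = 'http://moth/'
        plugins = {'audit': (PluginConfig('xss'),)}
        self._scan(target, plugins)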
Code Example #17
File: test_sgml.py Project: webvul/webfuzzer
class TestSGMLParser(unittest.TestCase):
    url = URL('http://w3af.com')

    def test_get_emails_filter(self):
        resp = build_http_response(self.url, '')
        p = SGMLParser(resp)
        p._emails = {'*****@*****.**', '*****@*****.**'}

        self.assertEqual(p.get_emails(), {'*****@*****.**', '*****@*****.**'})

        self.assertEqual(p.get_emails(domain='w3af.com'), ['*****@*****.**'])
        self.assertEqual(p.get_emails(domain='not.com'), ['*****@*****.**'])

    def test_extract_emails_blank(self):
        resp = build_http_response(self.url, '')
        p = SGMLParser(resp)

        self.assertEqual(p.get_emails(), set())

    def test_extract_emails_mailto(self):
        body = u'<a href="mailto:[email protected]">test</a>'
        resp = build_http_response(self.url, body)
        p = SGMLParser(resp)
        p.parse()

        expected_res = {u'*****@*****.**'}
        self.assertEqual(p.get_emails(), expected_res)

    def test_extract_emails_mailto_dup(self):
        body = u'<a href="mailto:[email protected]">a</a>' \
               u'<a href="mailto:[email protected]">b</a>'
        resp = build_http_response(self.url, body)
        p = SGMLParser(resp)
        p.parse()

        expected_res = {u'*****@*****.**'}
        self.assertEqual(p.get_emails(), expected_res)

    def test_extract_emails_mailto_not_dup(self):
        body = u'<a href="mailto:[email protected]">a</a>' \
               u'<a href="mailto:[email protected]">b</a>'
        resp = build_http_response(self.url, body)
        p = SGMLParser(resp)
        p.parse()

        expected_res = {u'*****@*****.**', u'*****@*****.**'}
        self.assertEqual(p.get_emails(), expected_res)

    def test_mailto_subject_body(self):
        body = u'<a href="mailto:[email protected]?subject=testing out mailto' \
               u'&body=Just testing">test</a>'
        resp = build_http_response(self.url, body)
        p = SGMLParser(resp)
        p.parse()

        expected_res = {u'*****@*****.**'}
        self.assertEqual(p.get_emails(), expected_res)

    def test_parser_attrs(self):
        body_content = HTML_DOC % {'head': '', 'body': ''}
        p = SGMLParser(build_http_response(self.url, body_content))

        # Assert parser has these attrs correctly initialized
        self.assertFalse(getattr(p, '_inside_form'))
        self.assertFalse(getattr(p, '_inside_select'))
        self.assertFalse(getattr(p, '_inside_text_area'))
        self.assertFalse(getattr(p, '_inside_script'))

        self.assertEquals(set(), getattr(p, '_tag_and_url'))
        self.assertEquals([], getattr(p, '_forms'))
        self.assertEquals([], getattr(p, '_comments_in_doc'))
        self.assertEquals([], getattr(p, '_meta_redirs'))
        self.assertEquals([], getattr(p, '_meta_tags'))

    def test_baseurl(self):
        body = HTML_DOC % {'head': BASE_TAG, 'body': ''}
        resp = build_http_response(self.url, body)
        p = SGMLParser(resp)
        p.parse()
        self.assertEquals(URL('http://www.w3afbase.com/'), p._base_url)

    def test_meta_tags(self):
        body = HTML_DOC % \
               {'head': META_REFRESH + META_REFRESH_WITH_URL,
                'body': ''}
        resp = build_http_response(self.url, body)
        p = SGMLParser(resp)
        p.parse()
        self.assertEqual(2, len(p.meta_redirs))
        self.assertTrue("2;url=http://crawler.w3af.com/" in p.meta_redirs)
        self.assertTrue("600" in p.meta_redirs)
        self.assertEquals([URL('http://crawler.w3af.com/')], p.references[0])

    def test_case_sensitivity(self):
        """
        Ensure handler methods are *always* called with lower-cased
        tag and attribute names
        """
        def islower(s):
            il = False
            if isinstance(s, basestring):
                il = s.islower()
            else:
                il = all(k.islower() for k in s)
            assert il, "'%s' is not lower-case" % s
            return il

        def start_wrapper(orig_start, tag):
            islower(tag.tag)
            islower(tag.attrib)
            return orig_start(tag)

        tags = (A_LINK_ABSOLUTE, INPUT_CHECKBOX_WITH_NAME, SELECT_WITH_NAME,
                TEXTAREA_WITH_ID_AND_DATA, INPUT_HIDDEN)
        ops = "lower", "upper", "title"

        for indexes in combinations(range(len(tags)), 2):

            body_elems = []

            for index, tag in enumerate(tags):
                ele = tag
                if index in indexes:
                    ele = getattr(tag, choice(ops))()
                body_elems.append(ele)

            body = HTML_DOC % {'head': '', 'body': ''.join(body_elems)}
            resp = build_http_response(self.url, body)
            p = SGMLParser(resp)
            orig_start = p.start
            wrapped_start = partial(start_wrapper, orig_start)
            p.start = wrapped_start
            p.parse()

    def test_parsed_references(self):
        # The *parsed* URLs *must* come from both valid tags and tag attributes.
        # Invalid URLs (like javascript: instructions) must be ignored.
        body = """
        <html>
            <a href="/x.py?a=1" Invalid_Attr="/invalid_url.php">
            <form action="javascript:history.back(1)">
                <tagX href="/py.py"/>
            </form>
        </html>"""
        r = build_http_response(self.url, body)
        p = SGMLParser(r)
        p.parse()
        parsed_refs = p.references[0]
        self.assertEquals(1, len(parsed_refs))
        self.assertEquals('http://w3af.com/x.py?a=1',
                          parsed_refs[0].url_string)

    def test_reference_with_colon(self):
        body = """
        <html>
            <a href="d:url.html?id=13&subid=3">foo</a>
        </html>"""
        r = build_http_response(self.url, body)
        p = SGMLParser(r)
        p.parse()
        parsed_refs = p.references[0]
        #
        #    Finding zero URLs is the correct behavior based on what
        #    I've seen in Opera and Chrome.
        #
        self.assertEquals(0, len(parsed_refs))

    def test_get_clear_text_body(self):
        html = 'header <b>ABC</b>-<b>DEF</b>-<b>XYZ</b> footer'
        clear_text = 'header ABC-DEF-XYZ footer'
        headers = Headers([('Content-Type', 'text/html')])
        r = build_http_response(self.url, html, headers)

        p = SGMLParser(r)
        p.parse()

        self.assertEquals(clear_text, p.get_clear_text_body())

    def test_get_clear_text_body_memoized(self):
        html = 'header <b>ABC</b>-<b>DEF</b>-<b>XYZ</b> footer'
        clear_text = 'header ABC-DEF-XYZ footer'
        headers = Headers([('Content-Type', 'text/html')])
        r = build_http_response(self.url, html, headers)

        p = SGMLParser(r)
        p.parse()

        calculated_clear_text = p.get_clear_text_body()
        self.assertEquals(clear_text, calculated_clear_text)

    def test_get_clear_text_body_encodings(self):

        raise SkipTest('Not sure why this one is failing :S')

        for lang_desc, (body, encoding) in TEST_RESPONSES.iteritems():
            encoding_header = 'text/html; charset=%s' % encoding
            headers = Headers([('Content-Type', encoding_header)])

            encoded_body = body.encode(encoding)
            r = build_http_response(self.url, encoded_body, headers)

            p = SGMLParser(r)
            p.parse()

            ct_body = p.get_clear_text_body()

            # These test strings don't really have tags, so they should be equal
            self.assertEqual(ct_body, body)

    def test_get_clear_text_issue_4402(self):
        """
        :see: https://github.com/andresriancho/w3af/issues/4402
        """
        test_file_path = 'core/data/url/tests/data/encoding_4402.php'
        test_file = os.path.join(ROOT_PATH, test_file_path)
        body = file(test_file, 'rb').read()

        sample_encodings = [
            encoding for _, (_, encoding) in TEST_RESPONSES.iteritems()
        ]
        sample_encodings.extend(['', 'utf-8'])

        for encoding in sample_encodings:
            encoding_header = 'text/html; charset=%s' % encoding
            headers = Headers([('Content-Type', encoding_header)])

            r = build_http_response(self.url, body, headers)

            p = SGMLParser(r)
            p.parse()

            p.get_clear_text_body()
Code Example #18
 def test_unknown_domain(self):
     url = URL('http://longsitethatdoesnotexistfoo.com/')
     self.assertRaises(HTTPRequestException, self.uri_opener.GET, url)
Code Example #19
File: test_url.py Project: webvul/webfuzzer
 def test_copy(self):
     u = URL('http://www.w3af.com/?id=1&id=2')
     self.assertEqual(u, u.copy())
Code Example #20
 def test_file_proto(self):
     url = URL('file://foo/bar.txt')
     self.assertRaises(HTTPRequestException, self.uri_opener.GET, url)
Code Example #21
File: test_url.py Project: webvul/webfuzzer
 def test_encode_simple(self):
     res_str = URL(u'http://w3af.com').url_encode()
     expected = 'http://w3af.com/'
     self.assertEqual(res_str, expected)
Code Example #22
 def test_url_port_closed(self):
     # TODO: Replace 2312 with an always-closed/non-HTTP port
     url = URL('http://127.0.0.1:2312/')
     self.assertRaises(HTTPRequestException, self.uri_opener.GET, url)
Code Example #23
File: test_url.py Project: webvul/webfuzzer
 def test_encode_url_encode_plus(self):
     res_str = URL(u'https://w3af.com/file.asp?id=1%2B2').url_encode()
     EXPECTED = 'https://w3af.com/file.asp?id=1%2B2'
     self.assertEqual(res_str, EXPECTED)
Code Example #24
 def test_pause_stop(self):
     self.uri_opener.pause(True)
     self.uri_opener.stop()
     url = URL(get_moth_http())
     self.assertRaises(ScanMustStopByUserRequest, self.uri_opener.GET, url)
Code Example #25
File: test_url.py Project: webvul/webfuzzer
 def test_encode_param(self):
     res_str = URL(u'http://w3af.com/x.py;id=1?y=3').url_encode()
     EXPECTED = 'http://w3af.com/x.py;id=1?y=3'
     self.assertEqual(res_str, EXPECTED)
Code Example #26
 def test_special_char_header(self):
     url = URL(get_moth_http('/core/headers/echo-headers.py'))
     header_content = u'name=ábc'
     headers = Headers([('Cookie', header_content)])
     http_response = self.uri_opener.GET(url, cache=False, headers=headers)
     self.assertIn(header_content, http_response.body)
Code Example #27
File: test_url.py Project: webvul/webfuzzer
 def test_url_join_case05(self):
     u = URL('http://w3af.com/def/')
     self.assertEqual(
         u.url_join(u'тест').url_string, u'http://w3af.com/def/тест')
Code Example #28
File: base_template.py Project: webvul/webfuzzer
 def __init__(self):
     self.name = ''
     self.url = URL('http://host.tld/')
     self.data = parse_qs('')
     self.method = 'GET'
     self.vulnerable_parameter = ''
Code Example #29
File: test_url.py Project: webvul/webfuzzer
 def test_url_with_repeated_parameter_names(self):
     u = URL('http://w3af.com/?id=3&id=4')
     self.assertEqual(u.get_querystring(),
                      QueryString([(u'id', [u'3', u'4'])]))
Code Example #30
File: test_main.py Project: zsdlove/w3af
    def test_disabling_headers_discovery(self):
        body = file(self.MULTIPLE_PATHS_AND_HEADERS).read()
        headers = Headers({'Content-Type': 'application/json'}.items())
        response = HTTPResponse(200,
                                body,
                                headers,
                                URL('http://moth/swagger.json'),
                                URL('http://moth/swagger.json'),
                                _id=1)

        parser = OpenAPI(response, discover_fuzzable_headers=False)
        parser.parse()
        api_calls = parser.get_api_calls()

        api_calls.sort(by_path)

        self.assertEqual(len(api_calls), 4)

        e_force_fuzzing_headers = []

        #
        # Assertions on call #1
        #
        api_call = api_calls[0]

        e_url = 'http://w3af.org/api/cats'
        e_headers = Headers([('X-Awesome-Header', '2018'),
                             ('X-Foo-Header', 'foo'),
                             ('Content-Type', 'application/json')])

        self.assertEqual(api_call.get_method(), 'GET')
        self.assertEqual(api_call.get_uri().url_string, e_url)
        self.assertEquals(api_call.get_headers(), e_headers)
        self.assertEqual(api_call.get_force_fuzzing_headers(),
                         e_force_fuzzing_headers)

        #
        # Assertions on call #2
        #
        api_call = api_calls[1]

        e_url = 'http://w3af.org/api/cats?limit=42'
        e_headers = Headers([('X-Awesome-Header', '2018'),
                             ('X-Foo-Header', 'foo'),
                             ('Content-Type', 'application/json')])

        self.assertEqual(api_call.get_method(), 'GET')
        self.assertEqual(api_call.get_uri().url_string, e_url)
        self.assertEquals(api_call.get_headers(), e_headers)
        self.assertEqual(api_call.get_force_fuzzing_headers(),
                         e_force_fuzzing_headers)

        #
        # Assertions on call #3
        #
        api_call = api_calls[2]

        e_url = 'http://w3af.org/api/pets'
        e_headers = Headers([('X-Foo-Header', '42'),
                             ('Content-Type', 'application/json')])

        self.assertEqual(api_call.get_method(), 'GET')
        self.assertEqual(api_call.get_uri().url_string, e_url)
        self.assertEquals(api_call.get_headers(), e_headers)
        self.assertEqual(api_call.get_force_fuzzing_headers(),
                         e_force_fuzzing_headers)

        #
        # Assertions on call #4
        #
        api_call = api_calls[3]

        e_url = 'http://w3af.org/api/pets'
        e_headers = Headers([('X-Bar-Header', '56'), ('X-Foo-Header', '42'),
                             ('Content-Type', 'application/json')])

        self.assertEqual(api_call.get_method(), 'GET')
        self.assertEqual(api_call.get_uri().url_string, e_url)
        self.assertEquals(api_call.get_headers(), e_headers)
        self.assertEqual(api_call.get_force_fuzzing_headers(),
                         e_force_fuzzing_headers)