Example #1
    def _sendToServer( self, grep=False ):
        '''
        Send a request that arrived from the browser to the remote web server.
        
        Important variables used here:
            - self.headers : Stores the headers for the request
            - self.rfile : A file like object that stores the postdata
            - self.path : Stores the URL that was requested by the browser
        '''
        self.headers['Connection'] = 'close'

        path = self.path
        uri_instance = url_object(path)

        # See HTTPWrapperClass
        if hasattr(self.server, 'chainedHandler'):
            basePath = "https://" + self.server.chainedHandler.path
            path = basePath + path
            uri_instance = url_object(path)
        
        # Do the request to the remote server
        if 'content-length' in self.headers.dict:
            # most likely a POST request
            postData = self._getPostData()

            try:
                httpCommandMethod = getattr( self._urlOpener, self.command )
                res = httpCommandMethod( uri_instance, data=postData, headers=self.headers )
            except w3afException, w:
                om.out.error('The proxy request failed, error: ' + str(w) )
            except Exception:
                # Bare raise preserves the original traceback.
                raise
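The excerpt above calls self._getPostData() without showing its body. A minimal sketch of what such a helper could look like, assuming only the standard BaseHTTPRequestHandler attributes named in the docstring (self.headers, self.rfile); the real w3af implementation may differ:

    def _getPostData(self):
        # Hypothetical sketch: read exactly Content-Length bytes of
        # postdata from the browser connection.
        content_length = int(self.headers.get('content-length', 0))
        if content_length:
            return self.rfile.read(content_length)
        return None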
Example #2
    def test_add_httpPostDataRequest(self):
        ds = disk_set()
        
        uri = url_object('http://w3af.org/?id=2')
        pdr1 = httpPostDataRequest(uri, method='GET', headers={'Referer': 'http://w3af.org/'})

        uri = url_object('http://w3af.org/?id=3')
        pdr2 = httpPostDataRequest(uri, method='GET', headers={'Referer': 'http://w3af.com/'})
        
        uri = url_object('http://w3af.org/?id=7')
        pdr3 = httpPostDataRequest(uri, method='FOO', headers={'Referer': 'http://w3af.com/'})
        
        ds.add( pdr1 )
        ds.add( pdr2 )
        ds.add( pdr2 )
        ds.add( pdr1 )
        
        self.assertEqual( ds[0] , pdr1)
        self.assertEqual( ds[1] , pdr2)
        self.assertFalse( pdr3 in ds )
        self.assertTrue( pdr2 in ds )
        self.assertEqual( len(ds) , 2)
        
        # This forces an internal change in the URL object
        pdr2.getURL().url_string
        self.assertTrue( pdr2 in ds )
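The membership, indexing and dedup semantics exercised above are plain set behaviour backed by disk. The same expectations with primitive values, assuming disk_set mirrors the built-in set API plus ordered indexing (as the test itself relies on):

    ds = disk_set()
    for item in ('a', 'b', 'a', 'b'):
        ds.add(item)
    # len(ds) == 2, 'a' in ds, ds[0] == 'a', ds[1] == 'b'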
Example #3
    def se_search(self, query, start = 1, count = 100):
        """
        se_search(query, start = 0, count = 10) -> results

        Search the web with yahoo Site Explorer.
        """
        # https://siteexplorer.search.yahoo.com/export?p=http%3A%2F%2Fwww.cybsec.com%2F
        url = 'https://siteexplorer.search.yahoo.com/export?p=http://'
        url += query
        url_instance = url_object(url)
        
        response = self._urlOpener.GET(url_instance, headers=self._headers, useCache=True, grepResult=False)
        
        results = []

        # The export script returns a tab separated file, parse it.
        response_body = response.getBody()
        response_body_lines = response_body.split('\n')[1:]
        for body_line in response_body_lines:
            try:
                text, url, length, content_type = body_line.split('\t')
            except Exception, e:
                msg = 'Something went wrong while parsing the YSE result line: "' + body_line + '"'
                om.out.debug( msg )
            else:
                yse_result = yahooSiteExplorerResult( url_object(url) )
                results.append( yse_result )

        return results
Example #4
    def validate(self, text):
        '''Redefinition of ValidatedEntry's method.

        @param text: the text to validate
        @return: True if the text parses as a URL, False otherwise
        '''
        try:
            url_object(text)
        except Exception:
            return False
        else:        
            return True
Example #5
    def _verifyURL(self, target_url, fileTarget=True):
        '''
        Verify if the URL is valid and raise an exception if w3af doesn't
        support it.
        
        >>> ts = w3af_core_target()        
        >>> ts._verifyURL('ftp://www.google.com/')
        Traceback (most recent call last):
          ...
        w3afException: Invalid format for target URL "ftp://www.google.com/", you have to specify the protocol (http/https/file) and a domain or IP address. Examples: http://host.tld/ ; https://127.0.0.1/ .
        >>> ts._verifyURL('http://www.google.com/')
        >>> ts._verifyURL('http://www.google.com:39/') is None
        True
        
        @param target_url: The target URL to check for validity.
        @return: None. A w3afException is raised on error.
        '''
        try:
            target_url = url_object(target_url)
        except ValueError:
            is_invalid = True
        else:
            protocol = target_url.getProtocol()
            aFile = fileTarget and protocol == 'file' and \
                                                target_url.getDomain() or ''
            aHTTP = protocol in ('http', 'https') and \
                                                target_url.is_valid_domain()
            is_invalid = not (aFile or aHTTP)

        if is_invalid:
            msg = ('Invalid format for target URL "%s", you have to specify '
            'the protocol (http/https/file) and a domain or IP address. '
            'Examples: http://host.tld/ ; https://127.0.0.1/ .' % target_url)
            raise w3afException(msg)
Example #6
    def discover(self, fuzzableRequest ):
        '''
        @parameter fuzzableRequest: A fuzzableRequest instance that contains (among other things) the URL to test.
        '''
        if not self._run:
            # This will remove the plugin from the discovery plugins to be run.
            raise w3afRunOnce()
        else:
            # This plugin will only run one time. 
            self._run = False
            
            pks_se = pks( self._urlOpener)
            
            root_domain = fuzzableRequest.getURL().getRootDomain()
            
            results = pks_se.search( root_domain )
            for result in results:
                i = info.info()
                i.setURL( url_object('http://pgp.mit.edu:11371/') )
                i.setPluginName(self.getName())
                i.setId( [] )
                mail = result.username +'@' + root_domain
                i.setName( mail )
                i.setDesc( 'The mail account: "'+ mail + '" was found in the MIT PKS server. ' )
                i['mail'] = mail
                i['user'] = result.username
                i['name'] = result.name
                i['url_list'] = ['http://pgp.mit.edu:11371/', ]
                kb.kb.append( 'mails', 'mails', i )
                #   Don't save duplicated information in the KB. It's useless.
                #kb.kb.append( self, 'mails', i )
                om.out.information( i.getDesc() )

        return []
Example #7
    def test_bruteforcer_default(self):
        url = url_object('http://www.w3af.org/')
        
        bf = bruteforcer()
        bf.setURL(url)
        bf.init()
        
        expected_combinations = [
                                 ('prueba1', '123abc'),
                                 ('test', 'freedom'),
                                 ('user', 'letmein'),
                                 ('www.w3af.org', 'master'),    # URL feature
                                 ('admin', '7emp7emp'),         # l337 feature
                                 ('user1', ''),                 # No password
                                 ('user1', 'user1')             # User eq password
                                ]
        generated = []
        
        while True:
            try:
                gen_comb = bf.getNext()
                generated.append( gen_comb )
            except:
                # getNext() signals exhaustion by raising
                break

        for gen_comb in expected_combinations:
            self.assertTrue( gen_comb in generated )
Example #8
 def test_find(self):
     find_id = random.randint(1, 499)
     url = url_object('http://w3af.org/a/b/foobar.php?foo=123')
     tag_value = createRandAlNum(10)
     for i in xrange(0, 500):
         fr = FuzzReq(url, dc={'a': ['1']})
         code = 200
         if i == find_id:
             code = 302
         res = httpResponse(code, '<html>', {'Content-Type': 'text/html'}, url, url)
         h1 = HistoryItem()
         h1.request = fr
         res.setId(i)
         h1.response = res
         if i == find_id:
             h1.toggleMark()
             h1.updateTag(tag_value)
         h1.save()
     h2 = HistoryItem()
     self.assertEqual(len(h2.find([('tag', "%"+tag_value+"%", 'like')])), 1)
     self.assertEqual(len(h2.find([('code', 302, '=')])), 1)
     self.assertEqual(len(h2.find([('mark', 1, '=')])), 1)
     self.assertEqual(len(h2.find([('has_qs', 1, '=')])), 500)
     self.assertEqual(len(h2.find([('has_qs', 1, '=')], resultLimit=10)), 10)
     results = h2.find([('has_qs', 1, '=')], resultLimit=1, orderData=[('id','desc')])
     self.assertEqual(results[0].id, 499)
     search_data = []
     search_data.append(('id', find_id + 1, "<"))
     search_data.append(('id', find_id - 1, ">"))
     self.assertEqual(len(h2.find(search_data)), 1)
Example #9
    def discover(self, fuzzableRequest ):
        '''
        Search zone_h and parse the output.
        
        @parameter fuzzableRequest: A fuzzableRequest instance that contains 
                                                    (among other things) the URL to test.
        '''
        if not self._exec :
            # This will remove the plugin from the discovery plugins to be run.
            raise w3afRunOnce()
        else:
            # Only run once
            self._exec = False
                        
            target_domain = fuzzableRequest.getURL().getRootDomain()
            
            # Example URL:
            # http://www.zone-h.org/archive/domain=cyprus-stones.com
        
            # TODO: Keep this URL updated!
            zone_h_url_str = 'http://www.zone-h.org/archive/domain=' + target_domain
            zone_h_url = url_object( zone_h_url_str )

            try:
                response = self._urlOpener.GET( zone_h_url )
            except w3afException, e:
                msg = 'An exception was raised while running zone-h plugin. Exception: ' + str(e)
                om.out.debug( msg )
            else:
                # The excerpt ends here; the original plugin goes on to parse
                # the zone-h archive page from `response`.
                pass
Example #10
    def _createFuzzableRequest(self):
        '''
        Based on the attributes, return a fuzzable request object.
        
        Important variables used here:
            - self.headers : Stores the headers for the request
            - self.rfile : A file like object that stores the postdata
            - self.path : Stores the URL that was requested by the browser
        '''
        # See HTTPWrapperClass
        if hasattr(self.server, 'chainedHandler'):
            basePath = "https://" + self.server.chainedHandler.path
            path = basePath + self.path
        else:
            path = self.path

        fuzzReq = fuzzableRequest(
                              url_object(path), 
                              self.command,
                              self.headers.dict
                              )
        postData = self._getPostData()
        if postData:
            fuzzReq.setData(postData)
        return fuzzReq
Example #11
    def store_in_cache(request, response):
        hi = HistoryItem()
        
        # Set the request
        headers = dict(request.headers)
        headers.update(request.unredirected_hdrs)
    
        req = createFuzzableRequestRaw(method=request.get_method(),
                                      url=request.url_object,
                                      postData=str(request.get_data() or ''),
                                      headers=headers)
        hi.request = req

        # Set the response
        resp = response
        code, msg, hdrs, url, body, id = (resp.code, resp.msg, resp.info(),
                                          resp.geturl(), resp.read(), resp.id)
        # BUGBUG: This is where I create/log the responses that always have
        # 0.2 as the time!
        url_instance = url_object( url )
        resp = httpResponse.httpResponse(code, body, hdrs, url_instance,
                                         request.url_object, msg=msg, id=id,
                                         alias=gen_hash(request))
        hi.response = resp

        # Now save them
        try:
            hi.save()
        except KeyboardInterrupt:
            # Re-raise so the user interrupt propagates to the core.
            raise
Example #12
    def _do_google_search(self):
        
        start = self._start
        res_pages = []
        max_start = start + self._count
        param_dict = {'dc': 'gorganic', 'hl': 'en', 'q': self._query,
                      'sa': 'N', 'source': 'mobileproducts'}
        there_is_more = True
        
        while start < max_start and there_is_more:
            param_dict['start'] = start
            params = urllib.urlencode(param_dict)
            gm_url = self.GOOGLE_SEARCH_URL + params
            gm_url_instance = url_object(gm_url)
            response = self._do_GET( gm_url_instance )               
            
            if GOOGLE_SORRY_PAGE in response:
                raise w3afException(
                      'Google is telling us to stop doing automated tests.')
            
            if not self._has_more_items(response.getBody()):
                there_is_more = False
            
            res_pages.append(response)                          
            start += 10

        return res_pages
Example #13
    def do_ALL(self):
        global global_firstRequest
        if global_firstRequest:
            global_firstRequest = False
            om.out.information('The user is navigating through the spiderMan proxy.')
        
        # Convert to url_object
        path = url_object(self.path)
            
        if path == TERMINATE_URL:
            om.out.information('The user terminated the spiderMan session.')
            self._sendEnd()
            self._spiderMan.stopProxy()
            return

        om.out.debug("[spiderMan] Handling request: %s %s" %
                                                (self.command, path))
        #   Send this information to the plugin so it can send it to the core
        freq = self._createFuzzableRequest()
        self._spiderMan.append_fuzzable_request( freq )
        
        grep = path.getDomain() == self.server.w3afLayer.targetDomain
            
        try:
            response = self._sendToServer(grep=grep)
        except Exception, e:
            self._sendError( e )
Example #14
 def _find_relative(self, doc_str):
     '''
     Detect relative URLs (also using regular expressions).
     '''
     res = set()
     filter_false_urls = self._filter_false_urls
     
     # TODO: Also matches //foo/bar.txt and http://host.tld/foo/bar.txt
     # I'm removing those matches manually below
     for match_tuple in filter( filter_false_urls, self.RELATIVE_URL_RE.findall(doc_str) ):
         
         match_str = match_tuple[0]
                     
         try:
             url = self._baseUrl.urlJoin(match_str).url_string
             url = url_object(self._decode_url(url),
                              encoding=self._encoding)
         except ValueError:
             # In some cases the relative URL is invalid and triggers a
             # ValueError: Invalid URL "%s" exception. All we can do at this
             # point is ignore the fake relative URL.
             pass
         else:
             url_lower = url.url_string.lower()
             if url_lower.startswith('http://') or url_lower.startswith('https://'):
                 res.add(url)
     
     return res
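urlJoin() above is assumed to follow the usual RFC 1808 relative-resolution rules, much like the standard library's urlparse.urljoin; its exact behaviour is not shown in these excerpts. A small illustrative sketch under that assumption:

    base = url_object('http://host.tld/a/b.html')
    joined = base.urlJoin('../c.png')
    # joined.url_string would be 'http://host.tld/c.png' under these assumptions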
Example #15
    def _do_google_search(self):
        '''
        Performs a google set search.
        http://labs.google.com/sets
        '''
        
        results = []
        
        if self._word_list:
            # I'll use the first 5 inputs
            _word_list = self._word_list[:5]
        
            # This is a search for a set with input blue and white
            # http://labs.google.com/sets?hl=en&q1=blue&q2=white&q3=&q4=
            #&q5=&btn=Small+Set+%2815+items+or+fewer%29
            url = self.GOOGLE_SEARCH_URL
            q_param = 1
            
            for word in _word_list:
                url += '&q' + str(q_param) + '=' + urllib.quote_plus(word)
                q_param += 1

            url += '&btn=Small+Set+%2815+items+or+fewer%29'
            url_instance = url_object( url )
            # Now I get the results
            response = self._do_GET(url_instance)
            results.append(response)

        return results
Example #16
 def _send_requests( self, fuzzableRequest ):
     '''
     Actually send the requests that might be blocked.
     @parameter fuzzableRequest: The fuzzableRequest to modify in order to see if it's blocked
     '''
     rnd_param = createRandAlNum(7)
     rnd_value = createRandAlNum(7)
     original_url_str = fuzzableRequest.getURL() + '?' + rnd_param + '=' + rnd_value
     original_url = url_object(original_url_str)
     
     try:
         original_response_body = self._urlOpener.GET( original_url , useCache=True ).getBody()
     except Exception:
         msg = 'Active filter detection plugin failed to receive a '
         msg += 'response for the first request. Cannot perform analysis.'
         raise w3afException( msg )
     else:
         original_response_body = original_response_body.replace( rnd_param, '' )
         original_response_body = original_response_body.replace( rnd_value, '' )
         
         for offending_string in self._get_offending_strings():
             offending_URL = fuzzableRequest.getURL() + '?' + rnd_param + '=' + offending_string
             
             # Perform requests in different threads
             targs = (offending_string, offending_URL, original_response_body, rnd_param)
             self._tm.startFunction( target=self._send_and_analyze, args=targs, ownerObj=self )
         
         # Wait for threads to finish
         self._tm.join( self )
         
         # Analyze the results
         return self._filtered, self._not_filtered
Example #17
    def _mutate_path(self, url):
        '''
        Mutate the path instead of the file.
        
        @parameter url: A url_object to transform.
        @return: A generator of url_objects that mutate the original URL.

        >>> from core.data.parsers.urlParser import url_object
        >>> u = urlFuzzer()
        >>> url = url_object( 'http://www.w3af.com/' )
        >>> list(u._mutate_path(url))
        []
        
        >>> url = url_object( 'http://www.w3af.com/foo.html' )
        >>> list(u._mutate_path(url))
        []
        
        >>> url = url_object('http://www.w3af.com/foo/bar.html' )
        >>> mutants = list(u._mutate_path(url))
        >>> url_object('http://www.w3af.com/foo.tar.gz') in mutants
        True
        >>> url_object('http://www.w3af.com/foo.old') in mutants
        True
        >>> url_object('http://www.w3af.com/foo.zip') in mutants
        True
        '''
        url_string = url.url_string
        
        if url_string.count('/') > 3:
            # Create the new path
            url_string = url_string[:url_string.rfind('/')]
            to_append_list = self._appendables
            for to_append in to_append_list:
                newurl = url_object(url_string + to_append)
                yield newurl
Example #18
 def discover(self, fuzzableRequest ):
     '''
     If the URL is not in _already_tested, append ?wsdl and check the response.
     
     @parameter fuzzableRequest: A fuzzableRequest instance that contains (among other things) the URL to test.
     '''
     url = fuzzableRequest.getURL().uri2url()
     url_string = url.url_string
     
     if url_string not in self._already_tested:
         self._already_tested.add( url_string )
         
         # perform the requests
         for wsdl_parameter in self._get_WSDL():
             url_to_request = url_string + wsdl_parameter
             url_instance = url_object(url_to_request)
             
             #   Send the requests using threads:
             targs = ( url_instance, )
             self._tm.startFunction( target=self._do_request, args=targs, ownerObj=self )
     
         # Wait for all threads to finish
         self._tm.join( self )
     
     return self._new_fuzzable_requests
Example #19
 def setOptions( self, optionsMap ):
     '''
     This method sets all the options that are configured using the user interface 
     generated by the framework using the result of getOptions().
     
     @parameter optionsMap: A dictionary with the options for the plugin.
     @return: No value is returned.
     '''
     cf.cf.save('fuzzableCookie', optionsMap['fuzzCookie'].getValue() )
     cf.cf.save('fuzzFileContent', optionsMap['fuzzFileContent'].getValue() )
     cf.cf.save('fuzzFileName', optionsMap['fuzzFileName'].getValue() )
     cf.cf.save('fuzzFCExt', optionsMap['fuzzFCExt'].getValue() )
     cf.cf.save('fuzzFormComboValues', optionsMap['fuzzFormComboValues'].getValue() )
     cf.cf.save('autoDependencies', optionsMap['autoDependencies'].getValue() )
     cf.cf.save('maxDiscoveryTime', optionsMap['maxDiscoveryTime'].getValue() )
     
     max_threads = optionsMap['maxThreads'].getValue()
     if max_threads > 100:
         raise w3afException('The maximum valid number of threads is 100.')
     cf.cf.save('maxThreads', max_threads )
     tm.setMaxThreads( max_threads )
     
     cf.cf.save('fuzzableHeaders', optionsMap['fuzzableHeaders'].getValue() )
     cf.cf.save('interface', optionsMap['interface'].getValue() )
     cf.cf.save('localAddress', optionsMap['localAddress'].getValue() )
     cf.cf.save('demo', optionsMap['demo'].getValue()  )
     
     url_list = []
     for url_str in optionsMap['nonTargets'].getValue():
         url_list.append( url_object( url_str ) )
     cf.cf.save('nonTargets', url_list )
     
     cf.cf.save('exportFuzzableRequests', optionsMap['exportFuzzableRequests'].getValue() )
     
     cf.cf.save('msf_location', optionsMap['msf_location'].getValue() )
Example #20
    def _do_google_search(self):
        res_pages = []
        
        start = self._start
        max_start = start + self._count
        there_is_more = True
        
        while start < max_start and there_is_more:
            params = urllib.urlencode({'hl': 'en', 'q': self._query,
                                       'start': start, 'sa': 'N'})
            
            google_url_instance = url_object(self.GOOGLE_SEARCH_URL + params)
            response = self._do_GET( google_url_instance )
            
            # httpResponse objects implement a fast "__contains__", so
            # `string in response` is faster than
            # `string in response.getBody()`
            if GOOGLE_SORRY_PAGE in response:
                raise w3afException(
                      'Google is telling us to stop doing automated tests.')
            if not self._has_more_items(response.getBody()):
                there_is_more = False

            # Save the result page
            res_pages.append(response)
            
            start += 10

        return res_pages
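urllib.urlencode simply turns the parameter dict into a query string, so GOOGLE_SEARCH_URL is presumably a URL ending in '?' (or '&'). For reference:

    import urllib

    params = urllib.urlencode({'hl': 'en', 'q': 'w3af', 'start': 0, 'sa': 'N'})
    # e.g. 'sa=N&q=w3af&start=0&hl=en' -- dict ordering is unspecified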
Example #21
    def _send(self, req, cache=False, useMultipart=False, grep=True):
        '''
        Actually send the request object.
        
        @param req: The HTTPRequest object that represents the request.
        @return: An httpResponse object.
        '''
        # This is the place where I hook the pause and stop feature
        # And some other things like memory usage debugging.
        self._callBeforeSend()

        # Sanitize the URL
        self._checkURI(req)
        
        # Evasion
        original_url = req._Request__original
        original_url_inst = req.url_object
        req = self._evasion(req)
        
        start_time = time.time()
        res = None

        req.get_from_cache = cache
        
        try:
            res = self._opener.open(req)
        except urllib2.HTTPError, e:
            # We usually get here for response codes like 404, 403, 401, ...
            msg = '%s %s returned HTTP code "%s" - id: %s' % \
                            (req.get_method(), original_url, e.code, e.id)
            if hasattr(e, 'from_cache'):
                msg += ' - from cache.'
            om.out.debug(msg)
            
            # Return this info to the caller
            code = int(e.code)
            info = e.info()
            geturl_instance = url_object(e.geturl())
            read = self._readRespose(e)
            httpResObj = httpResponse(code, read, info, geturl_instance,
                                      original_url_inst, id=e.id,
                                      time=time.time()-start_time, msg=e.msg,
                                      charset=getattr(e.fp, 'encoding', None))
            
            # Clear the log of failed requests; this request is done!
            req_id = id(req)
            if req_id in self._errorCount:
                del self._errorCount[req_id]

            # Reset errors counter
            self._zeroGlobalErrorCount()
        
            if grep:
                self._grep(req, httpResObj)
            else:
                om.out.debug('No grep for: "%s", the plugin sent '
                             'grep=False.' % geturl_instance)

            return httpResObj
Example #22
 def test_no_code_disclosure_blank(self):
     body = ''
     url = url_object('http://www.w3af.com/')
     headers = {'content-type': 'text/html'}
     response = httpResponse(200, body, headers, url, url)
     request = fuzzableRequest(url, method='GET')
     self.plugin.grep(request, response)
     self.assertTrue( len(kb.kb.getData('codeDisclosure', 'codeDisclosure')) == 0 )
Example #23
 def http_request( self, req ):
     url_instance = url_object( req.get_full_url() )
     new_url = url_instance.setParam( self._url_parameter )
     
     new_request = HTTPRequest(new_url, headers=req.headers,
         origin_req_host=req.get_origin_req_host(),
         unverifiable=req.is_unverifiable())
     return new_request
Example #24
 def _parse( self, content_text ):
     # Get the URLs using a regex
     url_regex = '((http|https):[A-Za-z0-9/](([A-Za-z0-9$_.+!*(),;/?:@&~=-])|'
     url_regex += '%[A-Fa-f0-9]{2})+(#([a-zA-Z0-9][a-zA-Z0-9$_.+!*(),;/?:@&~=%-]*))?)'
     self._re_URLs = [ url_object( x[0] ) for x in re.findall(url_regex, content_text ) ]
     
     # Get the email addresses
     self.findEmails( content_text )
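For reference, a quick check of the regex built above against a sample string (the outermost group is the full match that feeds url_object):

    import re

    url_regex = ('((http|https):[A-Za-z0-9/](([A-Za-z0-9$_.+!*(),;/?:@&~=-])|'
                 '%[A-Fa-f0-9]{2})+(#([a-zA-Z0-9][a-zA-Z0-9$_.+!*(),;/?:@&~=%-]*))?)')
    matches = re.findall(url_regex, 'see http://w3af.org/x?a=1 for details')
    # matches[0][0] == 'http://w3af.org/x?a=1'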
Example #25
    def setURI( self, uri ):
        if not isinstance(uri, url_object):
            msg = 'The "uri" parameter of setURI @ fuzzableRequest'
            msg += ' must be of urlParser.url_object type.'
            raise ValueError( msg )

        self._uri = url_object( uri.url_string.replace(' ', '%20') )
        self._url = self._uri.uri2url()
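Several excerpts rely on the same small url_object surface: url_string for the textual form and uri2url() to strip the query string (see Example #18). A hedged sketch of that assumed behaviour:

    u = url_object('http://w3af.org/index.php?id=1')
    u.url_string        # 'http://w3af.org/index.php?id=1'
    u.uri2url()         # assumed equal to url_object('http://w3af.org/index.php')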
Example #26
 def _get_fuzzed_request(self, fuzzableRequest):
     '''
     Generate a new URL by adding or removing the trailing '/' character.
     @param fuzzableRequest: The original fuzzableRequest
     @return: The modified fuzzableRequest.
     '''
     fr = fuzzableRequest.copy()
     
     url_string = str(fuzzableRequest.getURL()) 
     
     if url_string.endswith('/'):
         new_url = url_object(url_string.rstrip('/'))
     else:
         new_url = url_object(url_string + '/')
     
     fr.setURL(new_url)
     return fr
Example #27
 def test_ASP_code_disclosure(self):
     body = 'header <% Response.Write("Hello World!") %> footer'
     url = url_object('http://www.w3af.com/')
     headers = {'content-type': 'text/html'}
     response = httpResponse(200, body, headers, url, url)
     request = fuzzableRequest(url, method='GET')
     self.plugin.grep(request, response)
     self.assertTrue( len(kb.kb.getData('codeDisclosure', 'codeDisclosure')) == 1 )
Example #28
 def test_meta_tags(self):
     body = HTML_DOC % {"head": META_REFRESH + META_REFRESH_WITH_URL, "body": ""}
     resp = _build_http_response(URL, body)
     p = _SGMLParser(resp)
     p._parse(resp)
     self.assertEquals(2, len(p.meta_redirs))
     self.assertTrue("2;url=http://crawler.w3af.com/" in p.meta_redirs)
     self.assertTrue("600" in p.meta_redirs)
     self.assertEquals([url_object("http://crawler.w3af.com/")], p.references[0])
Example #29
 def test_none(self):
     body = '<an object="1"> <or applet=2> <apple>'
     url = url_object('http://www.w3af.com/')
     headers = {'content-type': 'text/html'}
     response = httpResponse(200, body, headers, url, url)
     request = fuzzableRequest(url, method='GET')
     self.plugin.grep(request, response)
     
     self.assertEquals( len(kb.kb.getData('objects', 'objects')), 0 )
Example #30
    def _extract_links(self, pages):
        links = []

        for page in pages:
            # Update results list
            parsed_page = json.loads(page.getBody())
            links += [googleResult( url_object( res['url'] ) ) for res in \
                        parsed_page['responseData']['results']]
        return links[:self._count]
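The parsing above assumes the Google AJAX Search API response layout. A self-contained sketch of the shape _extract_links() expects (the sample body below is fabricated for illustration):

    import json

    page_body = '{"responseData": {"results": [{"url": "http://w3af.org/"}]}}'
    parsed = json.loads(page_body)
    urls = [res['url'] for res in parsed['responseData']['results']]
    # urls == ['http://w3af.org/']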