Example #1
    def _upload_file( self, domain_path, randFile ):
        '''
        Upload the file using author.dll
        
        @parameter domain_path: http://localhost/f00/
        @parameter randFile: fj01afka.html
        '''
        file_path = urlParser.getPath(domain_path) + randFile
        
        # TODO: The frontpage version should be obtained from the information saved in the kb
        # by the discovery.frontpage_version plugin!
        # The 4.0.2.4715 version should be dynamic!
        # The information is already saved in the discovery plugin in the line:
        # i['version'] = version_match.group(1)
        content = "method=put document:4.0.2.4715&service_name=&document=[document_name="
        content += file_path
        content += ";meta_info=[]]&put_option=overwrite&comment=&keep_checked_out=false"
        content += '\n'
        # The content of the file I'm uploading is the file name reversed
        content += randFile[::-1]
        
        # TODO: The _vti_bin and _vti_aut directories should be PARSED from the _vti_inf file
        # inside the discovery.frontpage_version plugin, and then used here
        targetURL = urlParser.urlJoin( domain_path, '_vti_bin/_vti_aut/author.dll' )

        try:
            res = self._urlOpener.POST( targetURL, data=content )
        except w3afException, e:
            om.out.debug('Exception while uploading file using author.dll: ' + str(e))
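
For reference, a minimal standalone sketch of the same payload construction, using the sample values from the method's docstring and the Python 2 stdlib urlparse module in place of w3af's urlParser:

    # Rebuild the author.dll PUT payload with the docstring's sample values.
    # urlparse/urljoin stand in for urlParser.getPath/urlJoin here.
    from urlparse import urlparse, urljoin

    domain_path = 'http://localhost/f00/'
    rand_file = 'fj01afka.html'

    file_path = urlparse(domain_path).path + rand_file        # '/f00/fj01afka.html'
    target_url = urljoin(domain_path, '_vti_bin/_vti_aut/author.dll')

    content = ('method=put document:4.0.2.4715&service_name=&document=[document_name=' +
               file_path +
               ';meta_info=[]]&put_option=overwrite&comment=&keep_checked_out=false\n' +
               rand_file[::-1])                                # body: the file name reversed
    # POSTing content to target_url is what the plugin then attempts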
Example #2
 def grep(self, request, response):
     '''
     Plugin entry point, search for directory indexing.
     @parameter request: The HTTP request object.
     @parameter response: The HTTP response object
     @return: None
     '''
     if urlParser.getDomainPath(response.getURL()) in self._already_visited:
         # Already worked for this URL, no reason to work twice
         return
     
     else:
         # Save it,
         self._already_visited.add( urlParser.getDomainPath(response.getURL()) )
         
         # Work,
         if response.is_text_or_html():
             html_string = response.getBody()
             for indexing_regex in self._compiled_regex_list:
                 if indexing_regex.search( html_string ):
                     v = vuln.vuln()
                     v.setPluginName(self.getName())
                     v.setURL( response.getURL() )
                     msg = 'The URL: "' + response.getURL() + '" has a directory '
                     msg += 'indexing vulnerability.'
                     v.setDesc( msg )
                     v.setId( response.id )
                     v.setSeverity(severity.LOW)
                     path = urlParser.getPath( response.getURL() )
                     v.setName( 'Directory indexing - ' + path )
                     
                     kb.kb.append( self , 'directory' , v )
                     break
Example #3
    def _verify_reference(self, reference, original_request, originalURL, possibly_broken):
        """
        This method GET's every new link and parses it in order to get new links and forms.
        """
        fuzzable_request_list = []
        is_forward = self._is_forward(reference)
        if not self._only_forward or is_forward:
            response = None
            #
            #   Remember that this "breaks" the useCache=True in most cases!
            #
            # headers = { 'Referer': originalURL }
            #
            #   But this does not, and it is friendlier than simply ignoring the referer
            #
            referer = urlParser.getDomainPath(originalURL).replace(urlParser.getPath(originalURL), "")
            if not referer.endswith("/"):
                referer += "/"
            headers = {"Referer": referer}

            try:
                response = self._urlOpener.GET(reference, useCache=True, headers=headers)
            except KeyboardInterrupt, e:
                raise e
            except w3afException, w3:
                om.out.error(str(w3))
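
Outside of w3af, the same step (a GET request that carries an explicit Referer header) can be sketched with the Python 2 stdlib, with urllib2 standing in for self._urlOpener:

    import urllib2

    def get_with_referer(reference, referer):
        # Send the GET with a Referer header, as the code above does via _urlOpener
        request = urllib2.Request(reference, headers={'Referer': referer})
        return urllib2.urlopen(request).read()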
Example #4
 def discover(self, fuzzableRequest ):
     '''
     Sends the special request.
     
      @parameter fuzzableRequest: A fuzzableRequest instance that contains
                                  (among other things) the URL to test.
     '''
      domain = urlParser.getDomain(fuzzableRequest.getURL())
      # Presumably the extension was intended here; the original snippet repeated getDomain()
      extension = urlParser.getExtension(fuzzableRequest.getURL())
     
     if (domain, extension) not in self._already_tested:
         
         # Do it only one time
         self._already_tested.append( (domain, extension) )
         
         # Generate the new URL
         domain += '.'
         path = urlParser.getPath( fuzzableRequest.getURL() )
         protocol = urlParser.getProtocol( fuzzableRequest.getURL() )
         new_URL = protocol + '://' + domain + path
         try:
             # GET the original response
             original_response = self._urlOpener.GET( fuzzableRequest.getURL(), useCache=False )
             # GET the response with the modified domain (with the trailing dot)
             response = self._urlOpener.GET( new_URL, useCache=False )
          except KeyboardInterrupt, e:
              raise e
          except w3afException, w3:
              om.out.error( str(w3) )
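
A standalone sketch of the URL rewrite this plugin performs (append a trailing dot to the host name, keep protocol and path), with a made-up sample URL and the stdlib urlparse standing in for w3af's urlParser:

    from urlparse import urlparse

    url = 'http://example.com/abc/index.html'      # sample value, not from the plugin
    parts = urlparse(url)
    new_URL = parts.scheme + '://' + parts.netloc + '.' + parts.path
    # new_URL == 'http://example.com./abc/index.html'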
Example #5
 def _get_site_directories(self):
     '''
     @return: A list of the website directories.
     '''
     url_list = kb.kb.getData('urls','urlList')
     url_list = [ urlParser.getPath(i) for i in url_list ]
     # And now remove the filename
     url_list = [ i[:i.rfind('/')] for i in url_list ]
     # uniq
     url_list = list(set(url_list))
     return url_list
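
A worked example of the filename-stripping and de-duplication steps, with made-up paths standing in for the urlParser.getPath() results:

    paths = ['/a/b/c.html', '/a/b/d.html', '/x.php', '/']
    dirs = list(set(p[:p.rfind('/')] for p in paths))
    # dirs contains '/a/b' and '' -- resources sitting directly under the web
    # root collapse to the empty string, since p[:p.rfind('/')] drops
    # everything from the last '/' onwards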
Example #6
    def api_read(self, parameters):
        self.result = {}

        #
        #    Parameter validation
        #
        if len(parameters) != 1:
            msg = "You need to specify an output directory where the "
            msg += "remote application source will be downloaded."
            raise Exception(msg)

        else:
            output_directory = parameters[0]
            if not os.path.isdir(output_directory):
                msg = 'The output directory "%s" is invalid.'
                raise Exception(msg % output_directory)

            elif not os.access(output_directory, os.W_OK):
                msg = 'Failed to open "%s" for writing.'
                raise Exception(msg % output_directory)

            else:
                #
                #    The real stuff
                #

                apache_root_directory = self.exec_payload("apache_root_directory")
                webroot_list = apache_root_directory["apache_root_directory"]

                url_list = kb.kb.getData("urls", "urlList")

                for webroot in webroot_list:
                    for url in url_list:

                        path_and_file = getPath(url)
                        relative_path_file = path_and_file[1:]
                        remote_full_path = os.path.join(webroot, relative_path_file)

                        file_content = self.shell.read(remote_full_path)
                        if file_content:
                            #
                            #    Now I write the file to the local disk
                            #    I have to maintain the remote file structure
                            #

                            #    Create the file path to be written to disk
                            local_full_path = os.path.join(output_directory, webroot[1:], relative_path_file)

                            #    Create the local directories (if needed)
                            local_directory = os.path.dirname(local_full_path)
                            if not os.path.exists(local_directory):
                                os.makedirs(local_directory)

                            #    Write the file!
                            fh = file(local_full_path, "w")
                            fh.write(file_content)
                            fh.close()

                            self.result[remote_full_path] = local_full_path

        return self.result
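
The path arithmetic in the loop is easy to get wrong, so here is a worked example with made-up values; stripping the leading '/' from webroot and from the URL path matters because os.path.join() discards everything before an absolute component:

    import os

    webroot = '/var/www'                    # sample value, not from the payload
    output_directory = '/tmp/source_dump'   # sample value
    path_and_file = '/a/b.php'              # what getPath(url) would return

    remote_full_path = os.path.join(webroot, path_and_file[1:])
    # '/var/www/a/b.php'
    local_full_path = os.path.join(output_directory, webroot[1:], path_and_file[1:])
    # '/tmp/source_dump/var/www/a/b.php'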
Example #7
    def _update_KB_path_list(self):
        """
        If a path disclosure was found, I can create a list of full paths to all URLs ever visited.
        This method updates that list.
        """
        path_disc_vulns = kb.kb.getData("pathDisclosure", "pathDisclosure")
        if len(path_disc_vulns) == 0:
            # I can't calculate the list !
            pass
        else:
            # Init the kb variables
            kb.kb.save(self, "listFiles", [])

            # Note that this list is recalculated every time a new page is accessed;
            # this is good :P
            url_list = kb.kb.getData("urls", "urlList")

            # Now I find the longest match between one of the URLs that w3af has
            # discovered, and one of the path disclosure strings that this plugin has
            # found. I use the longest match because a shorter match has a higher
            # probability of being a mistake.
            longest_match = ""
            longest_path_disc_vuln = None
            for path_disc_vuln in path_disc_vulns:
                for url in url_list:
                    path_and_file = urlParser.getPath(url)

                    if path_disc_vuln["path"].endswith(path_and_file):
                        if len(longest_match) < len(path_and_file):
                            longest_match = path_and_file
                            longest_path_disc_vuln = path_disc_vuln

            # Now I recalculate the place where all the resources are in disk, all this
            # is done taking the longest_match as a reference, so... if we don't have a
            # longest_match, then nothing is actually done
            if longest_match:

                # Get the webroot
                webroot = longest_path_disc_vuln["path"].replace(longest_match, "")

                #
                #   This if fixes a strange case reported by Olle
                #           if webroot[0] == '/':
                #           IndexError: string index out of range
                #   That seems to be because the webroot == ''
                #
                if webroot:
                    kb.kb.save(self, "webroot", webroot)

                    # Check what path separator we should use (linux / windows)
                    if webroot[0] == "/":
                        path_sep = "/"
                    else:
                        # windows
                        path_sep = "\\"

                    # Create the remote locations
                    remote_locations = []
                    for url in url_list:
                        remote_path = urlParser.getPath(url).replace("/", path_sep)
                        remote_locations.append(webroot + remote_path)
                    remote_locations = list(set(remote_locations))

                    kb.kb.save(self, "listFiles", remote_locations)
Example #8
        
        if self._analyzeResult( response, expected_response, parameters, url ):
            kb.kb.append( self, 'url', response.getURL() )
            
            v = vuln.vuln()
            v.setPluginName(self.getName())
            v.setURI( response.getURI() )
            v.setMethod( method )
            vuln_desc = 'pykto plugin found a vulnerability at URL: "' + v.getURL() + '". '
            vuln_desc += 'Vulnerability description: "' + desc.strip() + '"'
            if not vuln_desc.endswith('.'):
                vuln_desc += '.'
            v.setDesc( vuln_desc )
            v.setId( response.id )

            if not urlParser.getPath(response.getURL()).endswith('/'):
                msg = 'Insecure file - ' + urlParser.getPath(response.getURL())
            else:
                msg = 'Insecure directory - ' + urlParser.getPath(response.getURL())
            v.setName( msg )
            v.setSeverity(severity.LOW)

            kb.kb.append( self, 'vuln', v )
            om.out.vulnerability( v.getDesc(), severity=v.getSeverity() )
            
            self._new_fuzzable_requests.extend( self._createFuzzableRequests( response ) )
        
    def _analyzeResult( self , response , expected_response, parameters, uri ):
        '''
        Analyzes the result of a _send()