def process_link(self, fuzzresult, link_url):
    """Enqueue *link_url* for crawling if it is an in-domain web link not seen before.

    Links are deduplicated across the plugin's lifetime via ``self.list_links``,
    keyed by the parsed link's cache key relative to the base request URL.
    """
    parsed_link = parse_url(link_url)

    # Only schemeless or http/https links on the target domain are followed.
    is_web_link = not parsed_link.scheme or parsed_link.scheme in ("http", "https")
    if not is_web_link or not self.from_domain(fuzzresult, parsed_link):
        return

    cache_key = parsed_link.cache_key(self.base_fuzz_res.history.urlp)
    if cache_key in self.list_links:
        return

    self.list_links.add(cache_key)
    self.enqueue_link(fuzzresult, link_url, parsed_link)
def process(self, fuzzresult):
    """Scan the response body with each configured regex and enqueue every
    unique in-domain http(s)/schemeless link found.

    Deduplication is per-call, keyed by the parsed link's cache key relative
    to the base request URL.
    """
    # Example match: <a href="www.owasp.org/index.php/OWASP_EU_Summit_2008">
    # parses as ParseResult(scheme='', netloc='',
    #   path='www.owasp.org/index.php/OWASP_EU_Summit_2008', params='', query='', fragment='')
    seen_keys = set()

    for regex in self.regex:
        for link_url in regex.findall(fuzzresult.history.content):
            if not link_url:
                continue

            parsed_link = parse_url(link_url)
            is_web_link = not parsed_link.scheme or parsed_link.scheme in ("http", "https")
            if not is_web_link or not self.from_domain(fuzzresult, parsed_link):
                continue

            cache_key = parsed_link.cache_key(self.base_fuzz_res.history.urlp)
            if cache_key in seen_keys:
                continue

            seen_keys.add(cache_key)
            self.enqueue_link(fuzzresult, link_url, parsed_link)
def process(self, fuzzresult):
    """Extract relative web links from the response body and queue both the
    link's containing directory and the link target itself for fuzzing.

    Only schemeless or http/https links with a path and no netloc (i.e.
    relative references) are considered; duplicates within this call are
    skipped.
    """
    # Example match: <a href="www.owasp.org/index.php/OWASP_EU_Summit_2008">
    # parses as ParseResult(scheme='', netloc='',
    #   path='www.owasp.org/index.php/OWASP_EU_Summit_2008', params='', query='', fragment='')
    #
    # A set gives O(1) dedup; the original list made each membership test O(n).
    seen_links = set()
    for r in self.regex:
        for i in r.findall(fuzzresult.history.content):
            parsed_link = parse_url(i)
            if (not parsed_link.scheme or parsed_link.scheme == "http"
                    or parsed_link.scheme == "https") and \
                    (not parsed_link.netloc and parsed_link.path):
                if i not in seen_links:
                    seen_links.add(i)

                    # dir path: queue the directory that contains the link target
                    split_path = parsed_link.path.split("/")
                    newpath = '/'.join(split_path[:-1]) + "/"
                    self.queue_url(urljoin(fuzzresult.url, newpath))

                    # file path: queue the full link target
                    self.queue_url(urljoin(fuzzresult.url, i))
def process(self, fuzzresult):
    """Extract relative web links from the response body and queue both the
    link's containing directory and the link target itself for fuzzing.

    Only schemeless or http/https links with a path and no netloc (i.e.
    relative references) are considered; duplicates within this call are
    skipped.
    """
    # Example match: <a href="www.owasp.org/index.php/OWASP_EU_Summit_2008">
    # parses as ParseResult(scheme='', netloc='',
    #   path='www.owasp.org/index.php/OWASP_EU_Summit_2008', params='', query='', fragment='')
    #
    # Renamed from the ambiguous `l` (PEP 8 / E741) and switched to a set:
    # O(1) dedup vs the original list's O(n) membership scans.
    seen_links = set()
    for r in self.regex:
        for i in r.findall(fuzzresult.history.content):
            parsed_link = parse_url(i)
            if (not parsed_link.scheme or parsed_link.scheme == "http"
                    or parsed_link.scheme == "https") and \
                    (not parsed_link.netloc and parsed_link.path):
                if i not in seen_links:
                    seen_links.add(i)

                    # dir path: queue the directory that contains the link target
                    split_path = parsed_link.path.split("/")
                    newpath = '/'.join(split_path[:-1]) + "/"
                    self.queue_url(urljoin(fuzzresult.url, newpath))

                    # file path: queue the full link target
                    self.queue_url(urljoin(fuzzresult.url, i))
def queue_url(self, url):
    """Queue *url* for processing unless it is blacklisted.

    Returns True if the URL was queued, False if it was rejected by the
    blacklist check.
    """
    if parse_url(url).isbllist:
        return False
    BasePlugin.queue_url(self, url)
    return True