def scan(self):
    # Requires: import traceback, urlparse; plus this project's crawl,
    # _requests, headers and exploit_* helpers.
    try:
        r = _requests(self.target, headers=headers)
        # _requests is assumed to return a bool on failure (see sketch below).
        if isinstance(r, bool):
            print "invalid url, please input a correct url"
            return
        self.target_domain = urlparse.urlparse(self.target).netloc

        print "start crawl"
        print "*********************"
        hand = crawl(self.target, self.depth, self.concurrent_num)
        crawl_urls = hand.scan()
        print "*********************"
        print "crawl finished"

        dirs = self.get_dir(crawl_urls)

        print "*********************"
        print "load server path"
        server_result = exploit_server_path(self.target)
        print "*********************"
        print "load backup path"
        backup_result = exploit_backup_path(self.target, dirs)
        print "*********************"
        print "load directory path"
        directory_result = exploit_directory_path(self.target, dirs)
        print "*********************"
        print "load common file path"
        common_file_result = exploit_common_file(self.target, self.extion, dirs)
        print "************************"
        print "finish scan :: {}".format(self.target)
        print "************************"

        # Write each non-empty result set under its banner; the four
        # near-identical blocks of the original are folded into one loop.
        results = [
            ("server path", server_result),
            ("backup path", backup_result),
            ("directory path", directory_result),
            ("common file path", common_file_result),
        ]
        if any(result for _, result in results):
            # The original's trailing f.close() was redundant inside the
            # with block and is dropped.
            with open("./report/" + self.target_domain + ".txt", 'w') as f:
                for title, result in results:
                    if result:
                        f.write("************{}************\n".format(title))
                        for url in result:
                            f.write(url + '\n')
                        f.write("************{}************\n\n\n".format(title))
    except Exception:
        traceback.print_exc()
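# The isinstance(r, bool) checks above rely on a convention where the
# _requests wrapper returns False on any network error and the response
# object on success. A minimal sketch of that convention, assuming the
# standard requests library (the real helper lives elsewhere in this
# project, so its signature and behaviour here are assumptions):
import requests

def _requests(url, headers=None, timeout=10):
    """Return the response on success, False on any request failure."""
    try:
        return requests.get(url, headers=headers, timeout=timeout)
    except requests.RequestException:
        return False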
def scan(self):
    try:
        # If the target has no scheme, probe http and https and keep the
        # first one that answers.
        if not self.target.startswith("http"):
            self.targets = ["http://" + self.target, "https://" + self.target]
            for target in self.targets:
                r = _requests(target, headers=headers)
                if not isinstance(r, bool):
                    self.target = target
                    break
        r = _requests(self.target, headers=headers)
        if isinstance(r, bool):
            print "{} is an invalid url".format(self.target)
            return
        self.target_domain = urlparse.urlparse(self.target).netloc

        hand = crawl(self.target, self.depth, self.concurrent_num)
        crawl_urls = hand.scan()
        dirs = self.get_dir(crawl_urls)

        server_result = exploit_server_path(self.target)
        backup_result = exploit_backup_path(self.target, dirs)
        directory_result = exploit_directory_path(self.target, dirs)

        # Include the web root ("") so common files are probed there too.
        dirs.append("")
        if self.parse_extion:
            # A user-supplied extension takes precedence.
            common_file_result = exploit_common_file(
                self.target, self.parse_extion, dirs)
        else:
            # Otherwise try to infer the extension from the server,
            # falling back to the default extension list.
            extion = get_extion_by_sever(self.target)
            if extion:
                common_file_result = exploit_common_file(
                    self.target, extion, dirs)
            else:
                common_file_result = exploit_common_file(
                    self.target, self.extion, dirs)

        # Fold the four repeated report blocks into one loop; the centred
        # 49-column banners reproduce the original output exactly.
        results = [
            ("server path", server_result),
            ("backup path", backup_result),
            ("directory path", directory_result),
            ("common file path", common_file_result),
        ]
        if any(result for _, result in results):
            with open("report/" + self.target_domain + ".txt", 'w') as f:
                for title, result in results:
                    if result:
                        banner = "{:*^49}\n".format(" " + title + " ")
                        f.write(banner)
                        for url in result:
                            f.write(url + '\n')
                        f.write(banner)
    except Exception:
        traceback.print_exc()
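# get_extion_by_sever() is called above but not defined in this file. A
# minimal sketch of the idea, assuming it guesses the site's script
# extension from the X-Powered-By / Server response headers (the name is
# kept from the call site; the body is a hypothetical illustration, not
# the project's actual implementation):
import requests

def get_extion_by_sever(target, timeout=10):
    """Guess a likely script extension from server banners, or None."""
    try:
        r = requests.get(target, timeout=timeout)
    except requests.RequestException:
        return None
    banner = "{} {}".format(r.headers.get("X-Powered-By", ""),
                            r.headers.get("Server", "")).lower()
    if "php" in banner:
        return "php"
    if "asp.net" in banner:
        return "aspx"
    if "tomcat" in banner or "jsp" in banner or "servlet" in banner:
        return "jsp"
    return None

# Example: get_extion_by_sever("http://example.com") would return "php"
# for a host sending "X-Powered-By: PHP/7.4.3".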