Example #1
def fuzz_start(siteurl, file_ext):
    output = CLIOutput()

    if not siteurl.startswith(('http://', 'https://')):
        siteurl = 'http://%s' % siteurl

    # Check whether a WAF is present (disabled in this variant)
    #checkWaf(url=siteurl, header=headers, proxy=proxies, timeout=timeout, allow_redirects=allow_redirects)

    global dir_exists
    dir_exists = []

    global blacklist
    blacklist = []
    # Probe two random non-existent paths; whatever content they return is
    # recorded as the wildcard/soft-404 signature and blacklisted.
    for probe in ('/fdhasuyfgryufgasfkdsfeowueir47738473943fhu.html',
                  '/fdhasuyfgryufgasfkdsfeowueir477384dd43fhu'):
        resp = dir_check(siteurl + probe)
        if resp != requests.HTTPError:
            blacklist.append(resp.content)

    # Build the queue of candidate URLs
    queue = Queue.Queue()

    for line in FileUtils.getLines(using_dic):
        line = '%s/%s' % (siteurl.rstrip('/'), line.replace('%EXT%', file_ext))
        queue.put(line)

    output.printHeader('-' * 60)
    output.printTarget(siteurl)
    output.printConfig(file_ext, str(threads_count), str(queue.qsize()))
    output.printHeader('-' * 60)

    # Initialize the worker threads
    threads = []
    for i in xrange(threads_count):
        threads.append(WyWorker(queue))
    # Start the threads
    for t in threads:
        t.start()
    # Wait for every thread to finish before returning to the main thread
    for t in threads:
        t.join()

    output.printHeader('-' * 60)
    for url in dir_exists:
        output.printWarning(url)
    output.printHeader('-' * 60)
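
Every example in this listing hands the queue to a WyWorker thread class that is not shown. The snippet below is a minimal, hypothetical sketch of what such a worker could look like, assuming it drains the shared queue, issues a GET with requests, and appends hits to the module-level dir_exists list; the 200/403 filter and the blacklist comparison are assumptions, not the original projects' code.

import threading
import requests

# Shared state mirroring the globals used by fuzz_start (names assumed).
dir_exists = []
blacklist = []


class WyWorker(threading.Thread):
    """Hypothetical worker: drains the URL queue and records responsive paths."""

    def __init__(self, queue):
        threading.Thread.__init__(self)
        self.queue = queue

    def run(self):
        while not self.queue.empty():
            url = self.queue.get()
            try:
                resp = requests.get(url, timeout=5, allow_redirects=False)
            except requests.RequestException:
                continue
            # Keep 200/403 hits whose body does not match the wildcard blacklist.
            if resp.status_code in (200, 403) and resp.content not in blacklist:
                dir_exists.append('[%s] %s' % (resp.status_code, url))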
Example #2
def fuzz_start(siteurl, file_ext):
    output = CLIOutput()

    if not siteurl.startswith(('http://', 'https://')):
        siteurl = 'http://%s' % siteurl

    # Check whether a WAF is present
    checkWaf(url=siteurl,
             header=headers,
             proxy=proxies,
             timeout=timeout,
             allow_redirects=allow_redirects)

    global dir_exists
    dir_exists = []

    # Build the queue of candidate URLs
    queue = Queue.Queue()

    for line in FileUtils.getLines(using_dic):
        line = '%s/%s' % (siteurl.rstrip('/'), line.replace('%EXT%', file_ext))
        queue.put(line)

    output.printHeader('-' * 60)
    output.printTarget(siteurl)
    output.printConfig(file_ext, str(threads_count), str(queue.qsize()))
    output.printHeader('-' * 60)

    # Initialize the worker threads
    threads = []
    for i in xrange(threads_count):
        threads.append(WyWorker(queue))
    # Start the threads
    for t in threads:
        t.start()
    # Wait for every thread to finish before returning to the main thread
    for t in threads:
        t.join()

    output.printHeader('-' * 60)
    for url in dir_exists:
        if url.startswith('[{}]'.format(requests.codes.ok)):
            output.printInfo(url)
        elif url.startswith('[{}]'.format(requests.codes.forbidden)):
            output.printWarning(url)
    output.printHeader('-' * 60)
Example #3
def fuzz_start(siteurl):
    output = CLIOutput()

    global dir_exists
    dir_exists = []

    # Build the queue of candidate URLs
    queue = Queue.Queue()

    for line in FileUtils.getLines(using_dic):
        line1 = '%s/%s' % (siteurl.rstrip('/'), line.replace('%EXT%', 'asp'))
        line2 = '%s/%s' % (siteurl.rstrip('/'), line.replace('%EXT%', 'php'))
        line3 = '%s/%s' % (siteurl.rstrip('/'), line.replace('%EXT%', 'jsp'))
        # print "line1"+line1
        # print "line2"+line2

        queue.put(line1)
        queue.put(line2)
        queue.put(line3)

    #	output.printHeader('-' * 60)
    #	output.printTarget(siteurl)
    #	output.printConfig( str(threads_count), str(queue.qsize()))
    #	output.printHeader('-' * 60)

    # Initialize the worker threads
    threads = []
    for i in xrange(threads_count):
        threads.append(WyWorker(queue))
    # Start the threads
    for t in threads:
        t.start()
    # Wait for every thread to finish before returning to the main thread
    for t in threads:
        t.join()
    #print type(dir_exists)

    # De-duplicate the hits while preserving their order
    abuff = []
    for a in dir_exists:
        if a not in abuff:
            abuff.append(a)
    dir_exists = abuff

    return dir_exists
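
Unlike the other variants, this one returns the de-duplicated hit list instead of printing it, so the caller handles the output. A minimal usage sketch, assuming threads_count, using_dic and WyWorker are defined in the surrounding module; the target URL is just an example:

if __name__ == '__main__':
    hits = fuzz_start('http://www.example.com')
    for url in hits:
        print url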
Example #4
def fuzz_start(siteurl, file_ext):

    output = CLIOutput()

    # Prepend a scheme only when the target has neither http:// nor https://
    if not siteurl.startswith(('http://', 'https://')):
        siteurl = 'http://%s' % siteurl

    global dir_exists
    dir_exists = []

    # Build the queue of candidate URLs
    queue = Queue.Queue()

    for line in FileUtils.getLines(using_dic):
        line = '%s/%s' % (siteurl.rstrip('/'), line.replace('%EXT%', file_ext))
        queue.put(line)

    output.printHeader('-' * 60)
    output.printTarget(siteurl)
    output.printConfig(file_ext, str(threads_count), str(queue.qsize()))
    output.printHeader('-' * 60)

    # Initialize the worker threads
    threads = []
    for i in xrange(threads_count):
        threads.append(WyWorker(queue))
    # Start the threads
    for t in threads:
        t.start()
    # Wait for every thread to finish before returning to the main thread
    for t in threads:
        t.join()

    output.printHeader('-' * 60)
    for url in dir_exists:
        output.printWarning(url)
    output.printHeader('-' * 60)
Example #5
File: dirbrute.py  Project: Xyntax/DirBrute
def fuzz_start(siteurl, file_ext):
    output = CLIOutput()

    if not siteurl.startswith("http://"):
        siteurl = "http://%s" % siteurl

    # Check whether a WAF is present
    checkWaf(url=siteurl, header=headers, proxy=proxies, timeout=timeout, allow_redirects=allow_redirects)

    global dir_exists
    dir_exists = []

    # Build the queue of candidate URLs
    queue = Queue.Queue()

    for line in FileUtils.getLines(using_dic):
        line = "%s/%s" % (siteurl.rstrip("/"), line.replace("%EXT%", file_ext))
        queue.put(line)

    output.printHeader("-" * 60)
    output.printTarget(siteurl)
    output.printConfig(file_ext, str(threads_count), str(queue.qsize()))
    output.printHeader("-" * 60)

    # Initialize the worker threads
    threads = []
    for i in xrange(threads_count):
        threads.append(WyWorker(queue))
    # Start the threads
    for t in threads:
        t.start()
    # Wait for every thread to finish before returning to the main thread
    for t in threads:
        t.join()

    output.printHeader("-" * 60)
    for url in dir_exists:
        output.printWarning(url)
    output.printHeader("-" * 60)
Example #6
def fuzz_start(siteurl, file_ext):

    output = CLIOutput()

    # Prepend a scheme only when the target has neither http:// nor https://
    if not siteurl.startswith(('http://', 'https://')):
        siteurl = 'http://%s' % siteurl

    global dir_exists
    dir_exists = []

    # Build the queue of candidate URLs
    queue = Queue.Queue()

    for line in FileUtils.getLines(using_dic):
        line = '%s/%s' % (siteurl.rstrip('/'), line.replace('%EXT%', file_ext))
        queue.put(line)

    output.printHeader('-' * 60)
    output.printTarget(siteurl)
    output.printConfig(file_ext, str(threads_count), str(queue.qsize()))
    output.printHeader('-' * 60)

    # Initialize the worker threads
    threads = []
    for i in xrange(threads_count):
        threads.append(WyWorker(queue))
    # Start the threads
    for t in threads:
        t.start()
    # Wait for every thread to finish before returning to the main thread
    for t in threads:
        t.join()

    output.printHeader('-' * 60)
    for url in dir_exists:
        output.printWarning(url)
    output.printHeader('-' * 60)
Example #7
                    jb = {}
                    jb["method"] = "httpsbanner"
                    jb["target"] = str(self.queue.get())
                    getIndex.delay(jb)
                print "upload 50 new jobs"
                event.clear()
                continue
            else:
                print "event wait"
                event.wait()
                continue





for line in FileUtils.getLines(sys.argv[2]):
    line = line.strip()
    jobsQueue.put(line)


t1 = checkJobsThread()
t1.start()

t2 = sendJobsThread(jobsQueue)
t2.start()

t1.join()
t2.join()
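
Example #7 starts in the middle of the dispatch loop, so the enclosing thread classes are not visible. The sketch below shows one self-contained way the producer/consumer pair could be wired together with a shared threading.Event; the batch size of 50 and the "httpsbanner" job format come from the fragment above, while checkJobsThread's trigger condition and the dispatch() stub (standing in for the original Celery task getIndex.delay) are assumptions.

import threading
import time
import Queue

jobsQueue = Queue.Queue()
event = threading.Event()


def dispatch(jb):
    # Stand-in for the project's Celery call getIndex.delay(jb).
    print "would dispatch: %r" % jb


class checkJobsThread(threading.Thread):
    """Hypothetical monitor: signals the sender whenever more jobs are wanted."""

    def run(self):
        while not jobsQueue.empty():
            event.set()
            time.sleep(1)


class sendJobsThread(threading.Thread):
    """Hypothetical sender: pushes jobs in batches of 50 when signalled."""

    def __init__(self, queue):
        threading.Thread.__init__(self)
        self.queue = queue

    def run(self):
        while not self.queue.empty():
            if event.is_set():
                for _ in xrange(50):
                    if self.queue.empty():
                        break
                    jb = {"method": "httpsbanner", "target": str(self.queue.get())}
                    dispatch(jb)
                print "upload 50 new jobs"
                event.clear()
            else:
                print "event wait"
                event.wait()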
Example #8
                      default=10,
                      type='int',
                      help='Number of threads. default = 10')
    parser.add_option('-d',
                      '--dic',
                      dest='dic_path',
                      default='./dics/dirs.txt',
                      type='string',
                      help='Default dictionary: ./dics/dirs.txt')
    parser.add_option('-f',
                      '--file',
                      dest='target_file_path',
                      default='',
                      type='string',
                      help='File of targets, one per line. default: \'\'')
    (options, args) = parser.parse_args()

    if options.dic_path:
        using_dic = options.dic_path
    if options.threads_num:
        threads_count = options.threads_num
    if options.target_file_path:
        target_file_path = options.target_file_path
        for line in FileUtils.getLines(target_file_path):
            fuzz_start(line, options.ext)
    elif len(sys.argv) > 1:
        fuzz_start(sys.argv[1], options.ext)
    else:
        parser.print_help()
        sys.exit(0)
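
The snippet above begins partway through the first parser.add_option call, so the threads option it belongs to, and the extension option later read as options.ext, are cut off. Below is a minimal sketch of a complete optparse setup consistent with how options.threads_num, options.ext, options.dic_path and options.target_file_path are used above; the -t/--threads and -e/--ext flags and their defaults are assumptions.

from optparse import OptionParser

parser = OptionParser(usage='usage: %prog [options] target')
# Assumed flags: -t/--threads and -e/--ext are not visible in the snippet above.
parser.add_option('-t', '--threads', dest='threads_num',
                  default=10, type='int',
                  help='Number of threads. default = 10')
parser.add_option('-e', '--ext', dest='ext',
                  default='php', type='string',
                  help='File extension substituted into the wordlist. default = php')
parser.add_option('-d', '--dic', dest='dic_path',
                  default='./dics/dirs.txt', type='string',
                  help='Default dictionary: ./dics/dirs.txt')
parser.add_option('-f', '--file', dest='target_file_path',
                  default='', type='string',
                  help='File of targets, one per line. default: \'\'')
(options, args) = parser.parse_args()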