Example #1
import re
import sys

# Crawler, status_code_range, catch_keyboard_interrupt, eprint and output_urls
# come from dirhunt's own modules.

def hunt(urls, threads, exclude_flags, interesting_extensions,
         interesting_files, stdout_flags, progress_enabled):
    """Find web directories without bruteforce.

    :param int threads: number of concurrent crawler workers
    :type exclude_flags: list
    """
    # Expand status code ranges such as "400-499" into individual codes.
    for code in tuple(exclude_flags):
        match = re.match(r'^(\d{3})-(\d{3})$', code)
        if match:
            exclude_flags.remove(code)
            exclude_flags += list(
                map(str, status_code_range(*map(int, match.groups()))))
    # If not set explicitly, show progress only when attached to a terminal.
    progress_enabled = (sys.stdout.isatty() or sys.stderr.isatty()
                        ) if progress_enabled is None else progress_enabled
    crawler = Crawler(max_workers=threads,
                      interesting_extensions=interesting_extensions,
                      interesting_files=interesting_files,
                      echo=print if sys.stdout.isatty() else eprint,
                      progress_enabled=progress_enabled)
    crawler.add_init_urls(*urls)
    try:
        catch_keyboard_interrupt(crawler.print_results,
                                 crawler.restart)(set(exclude_flags))
    except SystemExit:
        crawler.close()
    # When stdout is piped, also dump the collected urls so other tools can consume them.
    if not sys.stdout.isatty():
        output_urls(crawler, stdout_flags)
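
The helpers `status_code_range` and `eprint` are not shown in the listing. A minimal sketch of what they might look like, assuming `status_code_range` yields every HTTP status code in an inclusive range and `eprint` is simply `print` redirected to stderr (the bodies are guesses, not dirhunt's actual code):

import sys

def status_code_range(start, end):
    # Assumed behavior: inclusive range, e.g. (400, 404) -> 400..404.
    return list(range(start, end + 1))

def eprint(*args, **kwargs):
    # Assumed behavior: print to stderr so piped stdout stays clean.
    print(*args, file=sys.stderr, **kwargs)

# Example: "400-404" from the exclude flags expands to
# ['400', '401', '402', '403', '404'].
print(list(map(str, status_code_range(400, 404))))
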
Example #2
def hunt(urls, threads, exclude_flags, include_flags, interesting_extensions, interesting_files, stdout_flags,
         progress_enabled, timeout, max_depth, not_follow_subdomains, exclude_sources, proxies, delay,
         not_allow_redirects):
    """Find web directories without bruteforce
    """
    if exclude_flags and include_flags:
        # Note: click >= 7 expects BadOptionUsage(option_name, message).
        raise BadOptionUsage('--exclude-flags and --include-flags are mutually exclusive.')
    welcome()
    if not urls:
        click.echo('•_•) OOPS! Add urls to analyze.\nFor example: dirhunt http://domain/path\n\n'
                   'Need help? Then use dirhunt --help', err=True)
        return
    exclude_flags, include_flags = flags_range(exclude_flags), flags_range(include_flags)
    progress_enabled = (sys.stdout.isatty() or sys.stderr.isatty()) if progress_enabled is None else progress_enabled
    crawler = Crawler(max_workers=threads, interesting_extensions=interesting_extensions,
                      interesting_files=interesting_files, std=sys.stdout if sys.stdout.isatty() else sys.stderr,
                      progress_enabled=progress_enabled, timeout=timeout, depth=max_depth,
                      not_follow_subdomains=not_follow_subdomains, exclude_sources=exclude_sources,
                      not_allow_redirects=not_allow_redirects, proxies=proxies, delay=delay)
    crawler.add_init_urls(*urls)
    try:
        catch_keyboard_interrupt(crawler.print_results, crawler.restart)(set(exclude_flags), set(include_flags))
    except SystemExit:
        crawler.close()
    crawler.print_urls_info()
    if not sys.stdout.isatty():
        output_urls(crawler, stdout_flags)
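
Example #2 replaces the inline loop from Example #1 with a `flags_range` helper applied to both the exclude and include lists. A minimal sketch of such a helper, assuming it accepts the same "NNN-NNN" syntax and leaves plain codes untouched (a reconstruction, not dirhunt's actual implementation):

import re

def flags_range(flags):
    # Expand "400-499" style entries into individual status codes,
    # keeping every other flag as-is.
    result = []
    for flag in flags:
        match = re.match(r'^(\d{3})-(\d{3})$', flag)
        if match:
            start, end = map(int, match.groups())
            result += [str(code) for code in range(start, end + 1)]
        else:
            result.append(flag)
    return result
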
Example #3
def hunt(urls, threads, exclude_flags, include_flags, interesting_extensions, interesting_files, stdout_flags,
         progress_enabled, timeout, max_depth, not_follow_subdomains, exclude_sources, proxies, delay,
         not_allow_redirects, limit, to_file):
    """Find web directories without bruteforce
    """
    if exclude_flags and include_flags:
        raise BadOptionUsage('--exclude-flags and --include-flags are mutually exclusive.')
    welcome()
    urls = flat_list(urls)
    proxies = multiplier_args(proxies)
    if not urls:
        click.echo('•_•) OOPS! Add urls to analyze.\nFor example: dirhunt http://domain/path\n\n'
                   'Need help? Then use dirhunt --help', err=True)
        return
    exclude_flags, include_flags = flags_range(exclude_flags), flags_range(include_flags)
    progress_enabled = (sys.stdout.isatty() or sys.stderr.isatty()) if progress_enabled is None else progress_enabled
    crawler = Crawler(max_workers=threads, interesting_extensions=interesting_extensions,
                      interesting_files=interesting_files, std=sys.stdout if sys.stdout.isatty() else sys.stderr,
                      progress_enabled=progress_enabled, timeout=timeout, depth=max_depth,
                      not_follow_subdomains=not_follow_subdomains, exclude_sources=exclude_sources,
                      not_allow_redirects=not_allow_redirects, proxies=proxies, delay=delay, limit=limit,
                      to_file=to_file)
    if os.path.exists(crawler.get_resume_file()):
        click.echo('Resuming the previous program execution...')
        try:
            crawler.resume(crawler.get_resume_file())
        except IncompatibleVersionError as e:
            click.echo(e)
    crawler.add_init_urls(*urls)
    while True:
        # On Ctrl-C the user chooses to (a)bort, (c)ontinue or show (r)esults.
        choice = catch_keyboard_interrupt_choices(
            crawler.print_results, ['abort', 'continue', 'results'], 'a'
        )(set(exclude_flags), set(include_flags))
        if choice == 'a':
            crawler.close(True)
            click.echo('Created resume file "{}". Run again using the same parameters to resume.'.format(
                crawler.get_resume_file())
            )
            return
        elif choice == 'c':
            crawler.restart()
            continue
        else:
            break
    crawler.print_urls_info()
    if not sys.stdout.isatty():
        output_urls(crawler, stdout_flags)
    if to_file:
        crawler.create_report(to_file)
    if not to_file and os.path.exists(crawler.get_resume_file()):
        # The crawl finished, so the leftover resume file is no longer needed.
        os.remove(crawler.get_resume_file())
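
Example #3 drives its main loop through `catch_keyboard_interrupt_choices`, which is also not shown. A rough, hypothetical sketch of the idea, assuming the wrapper runs the function and, on Ctrl-C, asks the user which of the given choices to take (the real helper's prompt and return convention may differ):

import click

def catch_keyboard_interrupt_choices(fn, choices, default):
    # Hypothetical wrapper: run fn normally; on KeyboardInterrupt, prompt for
    # one of the choices and return its first letter ('a', 'c' or 'r').
    def wrapper(*args, **kwargs):
        try:
            fn(*args, **kwargs)
        except KeyboardInterrupt:
            answer = click.prompt('Choose: {}'.format('/'.join(choices)),
                                  default=default)
            return answer[:1].lower()
        # A normal finish falls through to the final "break" branch in the loop.
        return 'results'
    return wrapper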