Example #1
def hunt(urls, threads, exclude_flags, include_flags, interesting_extensions, interesting_files, stdout_flags,
         progress_enabled, timeout, max_depth, not_follow_subdomains, exclude_sources, proxies, delay,
         not_allow_redirects, limit, to_file):
    """Find web directories without bruteforce
    """
    if exclude_flags and include_flags:
        raise BadOptionUsage('--exclude-flags and --include-flags are mutually exclusive.')
    welcome()
    urls = flat_list(urls)
    proxies = multiplier_args(proxies)
    if not urls:
        click.echo('•_•) OOPS! Add urls to analyze.\nFor example: dirhunt http://domain/path\n\n'
                   'Need help? Then use dirhunt --help', err=True)
        return
    exclude_flags, include_flags = flags_range(exclude_flags), flags_range(include_flags)
    progress_enabled = (sys.stdout.isatty() or sys.stderr.isatty()) if progress_enabled is None else progress_enabled
    crawler = Crawler(max_workers=threads, interesting_extensions=interesting_extensions,
                      interesting_files=interesting_files, std=sys.stdout if sys.stdout.isatty() else sys.stderr,
                      progress_enabled=progress_enabled, timeout=timeout, depth=max_depth,
                      not_follow_subdomains=not_follow_subdomains, exclude_sources=exclude_sources,
                      not_allow_redirects=not_allow_redirects, proxies=proxies, delay=delay, limit=limit,
                      to_file=to_file)
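    # Resume a previous crawl if a resume file from an earlier run is still on disk.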
    if os.path.exists(crawler.get_resume_file()):
        click.echo('Resuming the previous program execution...')
        try:
            crawler.resume(crawler.get_resume_file())
        except IncompatibleVersionError as e:
            click.echo(e)
    crawler.add_init_urls(*urls)
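    # Interactive loop: Ctrl+C lets the user abort (saving a resume file), continue crawling, or jump to the results.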
    while True:
        choice = catch_keyboard_interrupt_choices(crawler.print_results, ['abort', 'continue', 'results'], 'a')\
            (set(exclude_flags), set(include_flags))
        if choice == 'a':
            crawler.close(True)
            click.echo('Created resume file "{}". Run again using the same parameters to resume.'.format(
                crawler.get_resume_file())
            )
            return
        elif choice == 'c':
            crawler.restart()
            continue
        else:
            break
    crawler.print_urls_info()
    if not sys.stdout.isatty():
        output_urls(crawler, stdout_flags)
    if to_file:
        crawler.create_report(to_file)
    if not to_file and os.path.exists(crawler.get_resume_file()):
        # The resume file exists. Deleting...
        os.remove(crawler.get_resume_file())
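In example #1, the while loop expects catch_keyboard_interrupt_choices(fn, choices, default) to return the first letter of whichever option the user picks after pressing Ctrl+C ('a' to abort, 'c' to continue), or to fall through to the results branch when the crawl finishes on its own. The helper below is a hypothetical sketch of that contract, assuming a click-based prompt; the name interrupt_choices and the prompt text are illustrative, not dirhunt's actual implementation.

import click

def interrupt_choices(fn, choices, default):
    # Wrap fn so that Ctrl+C becomes an interactive choice instead of a traceback.
    def wrapper(*args, **kwargs):
        try:
            fn(*args, **kwargs)
            return choices[-1][0]  # finished normally: treat it like choosing "results"
        except KeyboardInterrupt:
            # Ask the user what to do next and return the first letter of the answer.
            return click.prompt(
                'Select an option: {}'.format(', '.join(choices)),
                type=click.Choice([choice[0] for choice in choices]),
                default=default,
            )
    return wrapper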
Example #2
def hunt(urls, threads, exclude_flags, include_flags, interesting_extensions,
         interesting_files, stdout_flags, progress_enabled, timeout, max_depth,
         not_follow_subdomains, exclude_sources, proxies, delay,
         not_allow_redirects, limit):
    """Find web directories without bruteforce
    """
    if exclude_flags and include_flags:
        raise BadOptionUsage(
            '--exclude-flags and --include-flags are mutually exclusive.')
    welcome()
    urls = flat_list(urls)
    proxies = multiplier_args(proxies)
    if not urls:
        click.echo(
            '•_•) OOPS! Add urls to analyze.\nFor example: dirhunt http://domain/path\n\n'
            'Need help? Then use dirhunt --help',
            err=True)
        return
    exclude_flags, include_flags = flags_range(exclude_flags), flags_range(
        include_flags)
    progress_enabled = (sys.stdout.isatty() or sys.stderr.isatty()
                        ) if progress_enabled is None else progress_enabled
    crawler = Crawler(max_workers=threads,
                      interesting_extensions=interesting_extensions,
                      interesting_files=interesting_files,
                      std=sys.stdout if sys.stdout.isatty() else sys.stderr,
                      progress_enabled=progress_enabled,
                      timeout=timeout,
                      depth=max_depth,
                      not_follow_subdomains=not_follow_subdomains,
                      exclude_sources=exclude_sources,
                      not_allow_redirects=not_allow_redirects,
                      proxies=proxies,
                      delay=delay,
                      limit=limit)
    crawler.add_init_urls(*urls)
    try:
        catch_keyboard_interrupt(crawler.print_results,
                                 crawler.restart)(set(exclude_flags),
                                                  set(include_flags))
    except SystemExit:
        crawler.close()
    crawler.print_urls_info()
    if not sys.stdout.isatty():
        output_urls(crawler, stdout_flags)
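Example #2 is an earlier variant of the same command: catch_keyboard_interrupt(fn, restart) wraps the result printer so that Ctrl+C either restarts the crawl or exits, and the SystemExit branch closes the crawler cleanly. A hypothetical sketch of such a wrapper, again assuming click for the confirmation prompt (illustrative names, not the real dirhunt code):

import sys
import click

def keyboard_interrupt_wrapper(fn, restart_callback):
    # Keep running fn; on Ctrl+C either restart and retry, or raise SystemExit
    # so the caller can close the crawler in its "except SystemExit" branch.
    def wrapper(*args, **kwargs):
        while True:
            try:
                return fn(*args, **kwargs)
            except KeyboardInterrupt:
                if click.confirm('Interrupted. Continue crawling?'):
                    restart_callback()
                    continue
                sys.exit(0)
    return wrapper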
Example #3
def test_with_sublists(self):
    self.assertEqual(flat_list([1, [2, 3], 4]), [1, 2, 3, 4])
Example #4
def test_without_sublists(self):
    self.assertEqual(flat_list([1, 2, 3]), [1, 2, 3])
Example #5
def test_without_items(self):
    self.assertEqual(flat_list([]), [])
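Taken together, examples #3 to #5 pin down the behaviour expected from flat_list: one level of nested sublists is expanded in place, already-flat lists pass through unchanged, and an empty list stays empty. A minimal sketch consistent with those assertions (the real dirhunt helper may differ) looks like this:

def flat_list(items):
    # Expand one level of nested lists/tuples; leave every other item untouched.
    flattened = []
    for item in items:
        if isinstance(item, (list, tuple)):
            flattened.extend(item)
        else:
            flattened.append(item)
    return flattened

assert flat_list([1, [2, 3], 4]) == [1, 2, 3, 4]
assert flat_list([1, 2, 3]) == [1, 2, 3]
assert flat_list([]) == []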