Example #1
def full_recon():
	from modules.sslinfo import cert
	from modules.crawler import crawler
	from modules.headers import headers
	from modules.dns import dnsrec
	from modules.traceroute import troute
	from modules.whois import whois_lookup
	from modules.dirrec import hammer
	from modules.portscan import ps
	from modules.subdom import subdomains
	headers(target, output, data)
	cert(hostname, output, data)
	whois_lookup(ip, output, data)
	dnsrec(domain, output, data)
	# Sub-domain enumeration only applies when the target is a domain, not an IP.
	if not type_ip:
		subdomains(domain, tout, output, data)
	troute(ip, mode, port, tr_tout, output, data)
	ps(ip, output, data)
	crawler(target, output, data)
	hammer(target, threads, tout, wdlist, redir, sslv, dserv, output, data)
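
The function above assumes that its inputs (target, hostname, ip, domain, type_ip, and the various option values) are defined at module level by the caller. A minimal sketch of that state, with placeholder values that are assumptions for illustration only:

# Hypothetical setup for illustration; the real values come from the caller's
# argument parsing and DNS resolution.
target = 'http://example.com'        # full target URL
hostname = 'example.com'             # host extracted from the URL
ip = '203.0.113.10'                  # resolved via socket.gethostbyname(hostname)
domain = 'example.com'               # domain used for DNS and sub-domain lookups
type_ip = False                      # True when the target was given as an IP address
output, data = 'txt', {}             # output format and shared result store (types assumed)
tout, tr_tout, threads = 30, 30, 50  # timeouts and thread count (values assumed)
mode, port = 'TCP', 80               # traceroute mode and port (values assumed)
wdlist, redir, sslv, dserv = 'dirs.txt', False, True, '1.1.1.1'  # directory-scan options (assumed)
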
Example #2
def main():
	# Initialize necessary variables
	inputfile = outputfile = ''
	cpause = 0
	cdepth = 1

	# Get arguments with argparse
	parser = argparse.ArgumentParser(
		description="TorCrawl.py is a python script to crawl and extract (regular or onion) webpages through TOR network.")

	# General
	parser.add_argument(
		'-v',
		'--verbose',
		action='store_true',
		help='Show more information about the progress'
	)
	parser.add_argument(
		'-u',
		'--url',
		required=True,
		help='URL of webpage to crawl or extract'
	)
	parser.add_argument(
		'-w',
		'--without',
		action='store_true',
		help='Run without using the TOR relay'
	)

	# Extract
	parser.add_argument(
		'-e',
		'--extract',
		action='store_true',
		help='Extract the page\'s code to the terminal or to a file'
	)
	parser.add_argument(
		'-i',
		'--input',
		help='Input file with URL(s) (separated by line)'
	)
	parser.add_argument(
		'-o',
		'--output',
		help='Output page(s) to file(s) (for one page)'
	)

	# Crawl
	parser.add_argument(
		'-c',
		'--crawl',
		action='store_true',
		help='Crawl website (Default output on /links.txt)'
	)
	parser.add_argument(
		'-d',
		'--cdepth',
		help='Set the crawl depth (Default: 1)'
	)
	parser.add_argument(
		'-p',
		'--pause',
		help='The time the crawler pauses between requests (Default: 0)'
	)
	parser.add_argument(
		'-l',
		'--log',
		action='store_true',
		help='Save a log of visited URLs and their response codes'
	)
	parser.add_argument(
		'-f',
		'--folder',
		help='The root directory which will contain the generated files'
	)

	args = parser.parse_args()

	# Parse arguments to variables
	if args.input:
		inputfile = args.input
	if args.output:
		outputfile = args.output
	if args.cdepth:
		cdepth = args.cdepth
	if args.pause:
		cpause = args.pause

	# Connect to TOR
	if args.without is False:
		checktor(args.verbose)
		connecttor()

	if args.verbose:
		checkip()
		print('## URL: ' + args.url)

	# Canonicalize the website URL and create the output path
	if len(args.url) > 0:
		global website
		global outpath
		website = urlcanon(args.url, args.verbose)
		if args.folder is not None:
			outpath = folder(args.folder, args.verbose)
		else:
			outpath = folder(extract_domain(website), args.verbose)

	if args.crawl:
		lst = crawler(website, cdepth, cpause, outpath, args.log, args.verbose)
		with open(outpath + '/links.txt', 'w+') as lstfile:
			for item in lst:
				lstfile.write("%s\n" % item)
		print("## File created on " + os.getcwd() + "/" + outpath + "/links.txt")
		if args.extract:
			inputfile = outpath + "/links.txt"
			extractor(website, args.crawl, outputfile, inputfile, outpath)
	else:
		extractor(website, args.crawl, outputfile, inputfile, outpath)
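
One thing worth noting about the listing above: since neither --cdepth nor --pause is declared with type=, argparse hands both back as strings. A small sketch of an explicit cast (assuming the downstream crawler expects numbers) would be:

	# Cast the optional numeric flags explicitly; argparse returns strings
	# when no type= is given in add_argument().
	if args.cdepth:
		cdepth = int(args.cdepth)
	if args.pause:
		cpause = float(args.pause)
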
Example #3
    if headinfo:
        from modules.headers import headers
        headers(target, output, data)

    if sslinfo:
        from modules.sslinfo import cert
        cert(hostname, sslp, output, data)

    if whois:
        from modules.whois import whois_lookup
        whois_lookup(ip, output, data)

    if crawl:
        from modules.crawler import crawler
        crawler(target, output, data)

    if dns:
        from modules.dns import dnsrec
        dnsrec(domain, output, data)

    if subd and not type_ip:
        from modules.subdom import subdomains
        subdomains(domain, tout, output, data)
    elif subd and type_ip:
        print(R + '[-]' + C +
              ' Sub-Domain Enumeration is Not Supported for IP Addresses' + W +
              '\n')
        os.remove(pid_path)
        sys.exit()
    else:
        pass
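
The boolean switches used above (headinfo, sslinfo, whois, crawl, dns, subd, type_ip) are set elsewhere in the program; a hypothetical sketch of how such flags could be wired up with argparse (the flag names here are illustrative, not the tool's actual interface):

import argparse

# Illustrative only: map command-line switches to the booleans used above.
parser = argparse.ArgumentParser()
for flag in ('headers', 'sslinfo', 'whois', 'crawl', 'dns', 'sub'):
    parser.add_argument('--' + flag, action='store_true')
args = parser.parse_args()
headinfo, sslinfo, whois = args.headers, args.sslinfo, args.whois
crawl, dns, subd = args.crawl, args.dns, args.sub
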
Example #4
def all():
    headers(target)
    cert(hostname)
    whois_lookup(ip)
    crawler(target)
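
A side note on the helper above: naming it all() shadows Python's built-in all(). A sketch of the same helper under a non-shadowing name (the new name is only illustrative):

def run_all_checks():
    # Same calls as all() above, without shadowing the built-in.
    headers(target)
    cert(hostname)
    whois_lookup(ip)
    crawler(target)

Any caller, such as the full branch in Example #5, would then call run_all_checks() instead.
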
Example #5
    try:
        ip = socket.gethostbyname(hostname)
        print(G + '[+]' + C + ' IP Address : ' + W + str(ip))
    except Exception as e:
        print(R + '[-]' + C + ' Unable to Get IP : ' + W + str(e))
        # Errno -2 means the hostname could not be resolved; abort in that case.
        if '[Errno -2]' in str(e):
            exit()

    if headinfo is True:
        headers(target)
    elif sslinfo is True:
        cert(hostname)
    elif whois is True:
        whois_lookup(ip)
    elif crawl is True:
        crawler(target)
    elif full is True:
        all()
    else:
        print(R + '[-] Error : ' + C +
              'At Least One Argument is Required with URL' + W)
        exit()

    print(G + '[+]' + C + ' Completed!' + W)
except KeyboardInterrupt:
    print(R + '[-]' + C + ' Keyboard Interrupt.')
    exit()
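
The '[Errno -2]' check above matches the error text socket raises when a hostname cannot be resolved; a sketch that catches socket.gaierror directly avoids depending on that message (hostname here is a placeholder):

import socket
import sys

hostname = 'example.com'  # placeholder

try:
    ip = socket.gethostbyname(hostname)
except socket.gaierror as e:
    # DNS resolution failed (e.g. "Name or service not known"); stop here.
    print('Unable to resolve ' + hostname + ': ' + str(e))
    sys.exit(1)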