Example #1
	def spider_current_level(page):
		dirnames = []
		levelsoup = BeautifulSoup(page.text, 'html.parser')
		try:
			spans = levelsoup.findAll('span', {'class': "css-truncate css-truncate-target"})
			for s in spans:
				subtags = s.findAll('a', {'class': "js-navigation-open"}, href=True)
				for st in subtags:
					if '/blob/' in st['href']:
						lnk = st['href'].replace('blob/', '')
						if verbosity == 'on':
							lib.PrintStatus(f"file: {lnk}")
						full = baseraw + lnk
						fileaddrs.append(full)
					else:
						if verbosity == 'on':
							lib.PrintStatus(f"dir: {st['href']}")
						dirnames.append(st['href'])
			if len(dirnames) == 0:
				if verbosity == 'on':
					lib.PrintStatus("Branch exhausted")
			else:
				for subdir in dirnames:
					subdir_addr = baselink + subdir
					subdir_page = connect(subdir_addr)
					spider_current_level(subdir_page)
		except AttributeError:
			# TODO: find and fix
			lib.PrintFailure("Unusual file behavior detected, ending spidering with current resources...")
Example #2
	def spider_current_level(page):
		dirnames = []
		levelsoup = BeautifulSoup(page.text, 'html.parser')
		spans = levelsoup.findAll('span', {'class': "css-truncate css-truncate-target"})
		for s in spans:
			subtags = s.findAll('a', {'class': "js-navigation-open"}, href=True)
			for st in subtags:
				if '/blob/' in st['href']:
					lnk = st['href'].replace('blob/', '')
					if verbosity == 'y':
						lib.PrintStatus(f"File: {lnk}")
					full = baseraw + lnk
					fileaddrs.append(full)
				else:
					if verbosity == 'y':
						lib.PrintStatus(f"Directory: {st['href']}")
					if directory_filtering is True:
						# the final path segment is the directory's name
						directory_name = st['href'].split('/')[-1]
						if directory_name not in blacklisted_directories:
							dirnames.append(st['href'])
					else:
						dirnames.append(st['href'])
		if len(dirnames) == 0:
			if verbosity == 'y':
				lib.PrintStatus("Branch exhausted")
		else:
			for subdir in dirnames:
				subdir_addr = baselink + subdir
				subdir_page = connect(subdir_addr)
				spider_current_level(subdir_page)
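Both variants above lean on module-level state from the surrounding project (connect, baselink, baseraw, fileaddrs, verbosity). A minimal, hedged sketch of how the spider might be kicked off, with illustrative values standing in for that state:

# hypothetical driver; the URL values mirror GitHub's HTML and raw hosts
baselink = 'https://github.com'
baseraw = 'https://raw.githubusercontent.com'
fileaddrs = []

root_page = connect(f'{baselink}/example-user/example-repo')  # connect() is the project's fetch helper
spider_current_level(root_page)
print(f'{len(fileaddrs)} file URLs collected')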
Example #3
def load_config():
	if isdir(f'{curdir}/KRconfig') is False:
		lib.PrintError(f"Config directory not detected in {curdir}...")
		lib.PrintStatus(f"Making config directory in {curdir}...")
		mkdir(f'{curdir}/KRconfig')
	config_files = {}
	count = 0
	onlyfiles = [f for f in listdir(f'{curdir}/KRconfig') if isfile(join(f'{curdir}/KRconfig', f))]
	for file in onlyfiles:
		if file.endswith('.ini'):
			count += 1
			config_files[file] = count
	if count == 0:
		lib.PrintStatus("No config files detected, making default...")
		with codecs.open(f'{curdir}/KRconfig/defaultconfig.ini', 'w', 'utf-8') as dconf:
			dconf.write(
'''[initial_vars]
displaymode = b
[scraping_vars]
scrape_input_method = m
limiter = 5
repo_crawl = False
link_type = regular
directory_filtering = False
blacklisted_directories = []
verbosity = off''')
		config_files['Default Configuration'] = 1
		count += 1
	for k in config_files.keys():
		print(f"[{config_files[k]}]: {k}")
	while True:
		try:
			load_choice = int(input("Select which config file to load: "))
			if load_choice < 1 or load_choice > count:
				raise ValueError
			break
		except ValueError:
			lib.PrintFailure("Invalid Input. Please enter the integer that corresponds with the desired config file.")
			continue
	for k in config_files.keys():
		if load_choice == config_files[k]:
			selected_file = k
	parser.read(f"{curdir}/KRconfig/{selected_file}", encoding='utf-8')
	# Initial Variables
	displaymode = parser.get('initial_vars', 'displaymode')
	# Scraping Variables
	scrape_input_method = parser.get('scraping_vars', 'scrape_input_method')
	limiter = int(parser.get('scraping_vars', 'limiter'))
	repo_crawl = parser.getboolean('scraping_vars', 'repo_crawl')
	link_type = parser.get('scraping_vars', 'link_type')
	directory_filtering = parser.getboolean('scraping_vars', 'directory_filtering')
	blacklisted_directories = parser.get('scraping_vars', 'blacklisted_directories')
	verbosity = parser.get('scraping_vars', 'verbosity')
	return displaymode, scrape_input_method, limiter, repo_crawl, link_type, directory_filtering, blacklisted_directories, verbosity
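One subtlety: parser.get() returns blacklisted_directories as a raw string such as '[]', not a list, so code that iterates it (as in Example #2) would walk characters. A minimal sketch of converting it, assuming the stored value keeps Python list syntax:

import ast

raw = parser.get('scraping_vars', 'blacklisted_directories')  # e.g. "['docs', 'tests']"
blacklisted_directories = ast.literal_eval(raw)  # safely evaluate the list literal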
Example #4
def manual_setup():
	while True:
		local_dir = input("Enter the local directory to exfiltrate data to: ")
		if isdir(local_dir) is True:
			break
		else:
			lib.PrintError("No such directory found, check input")
			continue
	while True:
		lib.PrintStatus("DataHound can utilize either Shodan for targeted results, scan the entire internet using Masscan, or read IP addresses from file.")
		lib.PrintStatus("Note that if you select Masscan, you must have Masscan installed on your system, due to the python-masscan library requirements.")
		search_type = input("[s]hodan, [m]asscan, or [f]ile: ")
		if search_type.lower() not in ['s', 'm', 'f']:
			lib.PrintError("Invalid Input.")
			continue
		else:
			search_type = search_type.lower()
			if search_type == 's':
				shodan_key = input("Enter your shodan API key: ")
			elif search_type == 'f':
				while True:
					address_file = input("Enter the filepath: ")
					if isfile(address_file) is True:
						break
					else:
						lib.PrintError("Invalid filepath, check input.")
						continue
			break
	while True:
		save_choice = input("Save configuration for repeated use? [y]/[n]: ")
		if save_choice.lower() not in ['y', 'n']:
			lib.PrintError("Invalid Input")
			continue
		else:
			if save_choice.lower() == 'n':
				break
			else:
				config_name = input("Enter the name for this configuration: ")
				with codecs.open(f'{syspath[0]}/{config_name}.ini', 'w', 'utf-8') as cfile:
					cfile.write('[vars]')
					cfile.write('\n')
					cfile.write(f'local_dir = {local_dir}')
					cfile.write('\n')
					cfile.write(f'search_type = {search_type}')
					cfile.write('\n')
					if search_type == 's':
						cfile.write(f'shodan_key = {shodan_key}')
					elif search_type == 'f':
						cfile.write(f'address_file = {address_file}')
				break
	if search_type == 's':
		ret_dict = {'local_dir': local_dir, 'search_type': search_type, 'shodan_key':shodan_key}
	elif search_type == 'f':
		ret_dict = {'local_dir':local_dir, 'search_type':search_type, 'address_file':address_file}
	else:
		ret_dict = {'local_dir': local_dir, 'search_type': search_type}
	return ret_dict
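A hedged sketch of loading one of these saved configurations back in, assuming the same [vars] layout written above (myconfig.ini is a hypothetical file name):

from configparser import ConfigParser
from sys import path as syspath

parser = ConfigParser()
parser.read(f'{syspath[0]}/myconfig.ini', encoding='utf-8')
local_dir = parser.get('vars', 'local_dir')
search_type = parser.get('vars', 'search_type')
if search_type == 's':
	shodan_key = parser.get('vars', 'shodan_key')
elif search_type == 'f':
	address_file = parser.get('vars', 'address_file')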
Example #5
def shodan_search(displaymode, page, repo_crawl, verbosity):
	if repo_crawl is False or verbosity == 'on':
		lib.PrintStatus("Searching for Shodan keys...")
	shodan_pattern = r'\b[a-zA-Z0-9]{32}\b'
	pagetext = page.text
	keyset = []
	for k in re.findall(shodan_pattern, pagetext):
		keyset.append(k)
	if not keyset:
		if repo_crawl is False or verbosity == 'on':
			lib.PrintFailure("No valid shodan keys found in set.")
	else:
		valid_paid_keys = {}
		valid_unpaid_keys = []
		for key in set(keyset):
			api = shodan.Shodan(key)
			try:
				keydata = api.info()
				usage_limits = keydata['usage_limits']
				if keydata['plan'] == 'dev' or keydata['plan'] == 'edu':
					credits_tuple = (usage_limits['scan_credits'], usage_limits['query_credits'])
					valid_paid_keys[key] = credits_tuple
				elif keydata['plan'] == 'oss':
					valid_unpaid_keys.append(key)
			except Exception:
				# invalid, rate-limited, or otherwise unusable key
				pass
		if displaymode == 's' or displaymode == 'b':
			shodan_output = f'{curdir}/Output/ShodanKeys.txt'
			if not exists(dirname(shodan_output)):
				try:
					makedirs(dirname(shodan_output))
				except OSError as racecondition:
					if racecondition.errno != errno.EEXIST:
						raise
			with open(shodan_output, 'a') as sofile:
				sofile.write('----------VALID KEYS----------\n')
				for pkey in valid_paid_keys.keys():
					sofile.write(f"Key: {pkey}\nCredits (scan, query): {valid_paid_keys[pkey][0]}, {valid_paid_keys[pkey][1]}\n\n")
				sofile.write('----------UNPAID KEYS----------\n')
				for upkeys in set(valid_unpaid_keys):
					sofile.write(f'Key: {upkeys}\n')
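For reference, the shodan client raises shodan.APIError for invalid or exhausted keys, so the bare except above can be narrowed; a minimal sketch:

import shodan

def key_info(key):
	"""Return the account info dict for a working key, or None if Shodan rejects it."""
	try:
		return shodan.Shodan(key).info()
	except shodan.APIError:
		return None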
Example #6
File: Keyring.py Project: PoeBlu/Keyring
def scrape(scrape_input_method, displaymode, limiter, repo_crawl, verbosity):
    if scrape_input_method.lower() == 'm':
        url = input("Enter the URL: ")
        urlpage = connect(url)
        if urlpage == 'connection failed':
            lib.PrintError(
                "Connection to specified URL could not be established.")
            exit()
        else:
            lib.PrintStatus('Status: [200], Searching for API Keys...')
            if repo_crawl is False:
                search_execute(displaymode, urlpage)
            else:
                repository_list = get_repos(url)
                file_addresses = traverse_repos(repository_list, verbosity)
                executor = ThreadPoolExecutor(max_workers=max(len(file_addresses), 1))
                for addr in set(file_addresses):
                    urlpage = connect(addr)
                    # pass the callable and its arguments separately so the
                    # pool runs search_execute, rather than calling it here
                    executor.submit(search_execute, displaymode, urlpage)
                    sleep(limiter)
            lib.PrintSuccess("Scanning complete.")
    else:
        while True:
            url_file = input("Enter the full path to the input file: ")
            if isfile(url_file) is True:
                break
            elif str(url_file) == "":
                lib.DoNothing()
            else:
                lib.PrintError("No Such File Found.")
                continue
        with open(url_file) as ufile:
            count = 0
            for line in ufile.readlines():
                if repo_crawl is False:
                    count += 1
                    urlpage = connect(line.rstrip())
                    if urlpage == 'connection failed':
                        lib.PrintFailure(
                            f"[Line: {count}] Connection failed on host {line}"
                        )
                    else:
                        search_execute(displaymode, urlpage)
                        sleep(limiter)
                else:
                    repository_list = get_repos(line)
                    file_addresses = traverse_repos(repository_list, verbosity)
                    executor = ThreadPoolExecutor(max_workers=max(len(file_addresses), 1))
                    for addr in set(file_addresses):
                        urlpage = connect(addr)
                        executor.submit(search_execute, displaymode, urlpage)
                        sleep(limiter)
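The fix above hinges on Executor.submit taking the callable and its arguments separately; calling the function inline runs it on the current thread and submits its return value instead. A minimal demonstration:

from concurrent.futures import ThreadPoolExecutor

def work(x):
    return x * 2

with ThreadPoolExecutor(max_workers=2) as pool:
    future = pool.submit(work, 21)  # correct: work(21) runs on the pool
    # pool.submit(work(21))         # wrong: work(21) runs here, and its
    #                               # result is submitted as the "callable"
    print(future.result())          # 42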
Example #7
def generic_search(key, displaymode, page, repo_crawl, verbosity):
	if repo_crawl is False or verbosity == 'on':
		lib.PrintStatus(f"Searching for {key[0]} keys...")
	pagetext = page.text
	for k in re.findall(lib.patterns_dict[key], pagetext):
		if key[1] is True:
			lib.PrintHighSeverity("Warning: High Severity Item Found")
		# membership tests so that displaymode 'b' (both) saves and prints
		if displaymode in ('s', 'b'):
			gen_output = f'{curdir}/Output/{key[0]}.txt'
			if not exists(dirname(gen_output)):
				try:
					makedirs(dirname(gen_output))
				except OSError as racecondition:
					if racecondition.errno != errno.EEXIST:
						raise
			with open(gen_output, 'a') as gofile:
				gofile.write(f'Potential Key: {k}\n')
		if displaymode in ('p', 'b'):
			lib.PrintSuccess(f'Potential Key: {k}')
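generic_search is driven by lib.patterns_dict; from the key[0]/key[1] usage above, its keys are presumably (name, high_severity) tuples mapped to regex strings. A hedged sketch of that shape, with illustrative entries, where page is a fetched response as in the other search functions:

# hypothetical structure inferred from the key[0]/key[1] usage above
patterns_dict = {
	('Twilio', False): r"SK[a-z0-9]{32}",
	('AWS', True): r"AKIA[0-9A-Z]{16}",
}

for key in patterns_dict:
	generic_search(key, 'b', page, repo_crawl=False, verbosity='off')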
Example #8
def misc_database_secrets(displaymode, page, repo_crawl, verbosity):
	if repo_crawl is False or verbosity == 'on':
		lib.PrintStatus("Searching for miscellaneous database secrets...")
	pagetext = page.text
	database_secrets = ['DB_USER', 'DB_PASSWORD', 'SUPERUSER_NAME', 'SUPERUSER_PASSWORD', 'DB_NAME']
	for ds in set(database_secrets):
		if ds in pagetext:
			lib.PrintHighSeverity('Warning: High Severity Item Found')
			if displaymode in ('s', 'b'):
				db_output = f'{curdir}/Output/DatabaseSecrets.txt'
				if not exists(dirname(db_output)):
					try:
						makedirs(dirname(db_output))
					except OSError as racecondition:
						if racecondition.errno != errno.EEXIST:
							raise
				with open(db_output, 'a') as gofile:
					gofile.write(f'Database Secret: {ds}\n')
			if displaymode in ('p', 'b'):
				print(f"Database secret: {ds}")
Example #9
def ssh_keys_search(displaymode, page, repo_crawl, verbosity):
	if repo_crawl is False or verbosity == 'on':
		lib.PrintStatus("Scanning for SSH Keys...")
	pagetext = page.text
	ssh_keys_identifiers = ["-----BEGIN OPENSSH PRIVATE KEY-----", "-----BEGIN DSA PRIVATE KEY-----", "-----BEGIN EC PRIVATE KEY-----"]
	for pattern in set(ssh_keys_identifiers):
		if pattern in pagetext:
			if displaymode in ('s', 'b'):
				ssh_output = f'{curdir}/Output/SSHKeys.txt'
				if not exists(dirname(ssh_output)):
					try:
						makedirs(dirname(ssh_output))
					except OSError as racecondition:
						if racecondition.errno != errno.EEXIST:
							raise
				with open(ssh_output, 'a') as gofile:
					gofile.write(f'SSH Key: {pattern}\n')
			if displaymode in ('p', 'b'):
				lib.PrintSuccess(f'SSH Key: {pattern}')
			lib.PrintHighSeverity('Warning: High Severity Item Found')
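Note that this records only the BEGIN marker, not the key material itself. A hedged sketch of capturing the full key block, assuming matching BEGIN/END headers:

import re

ssh_key_block = re.compile(
	r'-----BEGIN (OPENSSH|DSA|EC|RSA) PRIVATE KEY-----'
	r'.*?'
	r'-----END \1 PRIVATE KEY-----',
	re.DOTALL)

def extract_ssh_keys(pagetext):
	# returns each complete PEM-style block found in the page text
	return [m.group(0) for m in ssh_key_block.finditer(pagetext)]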
Example #10
def redis_search(displaymode, page, repo_crawl, verbosity):
	if repo_crawl is False or verbosity == 'on':
		lib.PrintStatus("Scanning for Redis secrets...")
	pagetext = page.text
	redis_pattern = r'redis://[0-9a-zA-Z:@.-]+'
	redis_artifacts = ['REDIS_PASSWORD', 'REDIS_CACHE_DATABASE', 'REDIS_HOST', 'REDIS_DATABASE']
	for k in re.findall(redis_pattern, pagetext):
		lib.PrintHighSeverity('Warning: High Severity Item Found')
		if displaymode in ('s', 'b'):
			redis_output = f'{curdir}/Output/Redis/RedisLinks.txt'
			if not exists(dirname(redis_output)):
				try:
					makedirs(dirname(redis_output))
				except OSError as racecondition:
					if racecondition.errno != errno.EEXIST:
						raise
			with open(redis_output, 'a') as gofile:
				gofile.write(f'Potential link: {k}\n')
		if displaymode in ('p', 'b'):
			lib.PrintSuccess(f'Potential link: {k}')
	for ra in set(redis_artifacts):
		if ra in pagetext:
			lib.PrintHighSeverity('Warning: High Severity Item Found')
			if displaymode in ('s', 'b'):
				redis_artifacts_output = f'{curdir}/Output/Redis/RedisArtifacts.txt'
				if not exists(dirname(redis_artifacts_output)):
					try:
						makedirs(dirname(redis_artifacts_output))
					except OSError as racecondition:
						if racecondition.errno != errno.EEXIST:
							raise
				with open(redis_artifacts_output, 'a') as rafile:
					rafile.write(f'Artifact found: {ra}\n')
			if displaymode in ('p', 'b'):
				lib.PrintSuccess(f'Artifact Found: {ra}')
Example #11
def ftp_operations(host, output_location): # TODO: Find out how to check and record file sizes in relation to FTP
	if not output_location.endswith('/'):
		output_location = output_location + '/'
	ftp_connection = ftplib.FTP(host)
	try:
		ftp_connection.login()
		lib.PrintSuccess('Login Status: 200')
		lib.PrintStatus(f'Exfiltrating files to {output_location}')
		filenames = ftp_connection.nlst()
		for filename in filenames:
			local_filename = path.join(output_location, filename)
			# context manager closes each local file once the transfer completes
			with open(local_filename, 'wb') as file:
				ftp_connection.retrbinary('RETR ' + filename, file.write)
	except Exception as e:
		lib.PrintError(f'{e}')
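On the TODO: ftplib exposes FTP.size(), which issues the SIZE command; a hedged sketch (SIZE reports byte counts reliably only in binary mode, and some servers refuse it):

def record_sizes(ftp_connection):
	ftp_connection.voidcmd('TYPE I')  # binary mode, so SIZE returns byte counts
	for filename in ftp_connection.nlst():
		try:
			size = ftp_connection.size(filename)
			lib.PrintStatus(f'{filename}: {size} bytes')
		except ftplib.error_perm:
			pass  # directory entry, or server without SIZE support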
Example #12
def google_api_search(displaymode, page):
	lib.PrintStatus("Scanning for google API keys...")
	pagetext = page.text
	google_api_pattern = r'AIzaSy[0-9a-zA-Z_-]{33}'
	for k in re.findall(google_api_pattern, pagetext):
		if displaymode in ('s', 'b'):
			gapi_output = f'{curdir}/Output/GoogleAPIPotentialKeys.txt'
			if not exists(dirname(gapi_output)):
				try:
					makedirs(dirname(gapi_output))
				except OSError as racecondition:
					if racecondition.errno != errno.EEXIST:
						raise
			with open(gapi_output, 'a') as gofile:
				gofile.write(f'Potential Key: {k}\n')
		if displaymode in ('p', 'b'):
			lib.PrintSuccess(f'Potential Key: {k}')
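The create-or-ignore-EEXIST dance repeated in every writer here predates Python 3.2; on any modern interpreter it collapses to one call. A minimal sketch, with output_path standing in for any of the *_output paths above:

from os import makedirs
from os.path import dirname

makedirs(dirname(output_path), exist_ok=True)  # no-op when the directory already exists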
Example #13
File: Keyring.py Project: PoeBlu/Keyring
def slack_api_search(displaymode, page):
    lib.PrintStatus("Scanning for slack API keys...")
    pagetext = page.text
    slack_api_pattern = r"xoxp-\d+-\d+-\d+-[0-9a-f]+"
    for k in re.findall(slack_api_pattern, pagetext):
        if displaymode in ('s', 'b'):
            sapi_output = f'{curdir}/Output/SlackAPIPotentialKeys.txt'
            if not exists(dirname(sapi_output)):
                try:
                    makedirs(dirname(sapi_output))
                except OSError as racecondition:
                    if racecondition.errno != errno.EEXIST:
                        raise
            with open(sapi_output, 'a') as gofile:
                gofile.write(f'Potential Key: {k}\n')
        if displaymode in ('p', 'b'):
            lib.PrintSuccess(f'Potential Key: {k}')
Example #14
def slack_webhook_search(displaymode, page):
	lib.PrintStatus("Scanning for slack webhooks...")
	pagetext = page.text
	slack_webhook_pattern = r"https://hooks\.slack\.com/services/T[a-zA-Z0-9_]{8}/B[a-zA-Z0-9_]{8}/[a-zA-Z0-9_]{24}"
	for k in re.findall(slack_webhook_pattern, pagetext):
		if displaymode in ('s', 'b'):
			slack_webhook_output = f'{curdir}/Output/SlackWebhooks.txt'
			if not exists(dirname(slack_webhook_output)):
				try:
					makedirs(dirname(slack_webhook_output))
				except OSError as racecondition:
					if racecondition.errno != errno.EEXIST:
						raise
			with open(slack_webhook_output, 'a') as gofile:
				gofile.write(f'Potential Hook: {k}\n')
		if displaymode in ('p', 'b'):
			lib.PrintSuccess(f'Potential Hook: {k}')
Example #15
def slack_bot_search(displaymode, page):
	lib.PrintStatus("Scanning for slack bot tokens...")
	pagetext = page.text
	slack_bot_pattern = r"xoxb-\d+-[0-9a-zA-Z]+"
	for k in re.findall(slack_bot_pattern, pagetext):
		if displaymode in ('s', 'b'):
			slack_bot_output = f'{curdir}/Output/SlackBotPotentialTokens.txt'
			if not exists(dirname(slack_bot_output)):
				try:
					makedirs(dirname(slack_bot_output))
				except OSError as racecondition:
					if racecondition.errno != errno.EEXIST:
						raise
			with open(slack_bot_output, 'a') as gofile:
				gofile.write(f'Potential Token: {k}\n')
		if displaymode in ('p', 'b'):
			lib.PrintSuccess(f'Potential Token: {k}')
Example #16
def github_search(displaymode, page):
	lib.PrintStatus("Searching for Github keys...")
	github_api = r"[g|G][i|I][t|T][h|H][u|U][b|B].{0,30}['\"\s][0-9a-zA-Z]{35,40}['\"\s]"
	pagetext = page.text
	for k in re.findall(github_api, pagetext):
		if displaymode in ('s', 'b'):
			github_output = f'{curdir}/Output/GithubPotentialKeys.txt'
			if not exists(dirname(github_output)):
				try:
					makedirs(dirname(github_output))
				except OSError as racecondition:
					if racecondition.errno != errno.EEXIST:
						raise
			with open(github_output, 'a') as gofile:
				gofile.write(f'Potential Key: {k}\n')
		if displaymode in ('p', 'b'):
			lib.PrintSuccess(f'Potential Key: {k}')
Example #17
def discord_nitro_search(displaymode, page):
	lib.PrintStatus("Scanning for discord nitro links...")
	pagetext = page.text
	# the original class [a-z{1,16}] treated the braces as literals; gift codes are short alphanumeric slugs
	discord_nitro_pattern = r"(https:\/\/discord\.gift\/[a-zA-Z0-9]{1,16})"
	for k in re.findall(discord_nitro_pattern, pagetext):
		if displaymode in ('s', 'b'):
			discord_nitro_output = f'{curdir}/Output/DiscordNitroPotentialLinks.txt'
			if not exists(dirname(discord_nitro_output)):
				try:
					makedirs(dirname(discord_nitro_output))
				except OSError as racecondition:
					if racecondition.errno != errno.EEXIST:
						raise
			with open(discord_nitro_output, 'a') as gofile:
				gofile.write(f'Potential link: {k}\n')
		if displaymode in ('p', 'b'):
			lib.PrintSuccess(f'Potential link: {k}')
Example #18
def twilio_search(displaymode, page):
	lib.PrintStatus("Scanning for twilio keys...")
	pagetext = page.text
	twilio_pattern = r"SK[a-z0-9]{32}"
	for k in re.findall(twilio_pattern, pagetext):
		if displaymode in ('s', 'b'):
			twilio_output = f'{curdir}/Output/TwilioKeys.txt'
			if not exists(dirname(twilio_output)):
				try:
					makedirs(dirname(twilio_output))
				except OSError as racecondition:
					if racecondition.errno != errno.EEXIST:
						raise
			with open(twilio_output, 'a') as gofile:
				gofile.write(f'Potential Key: {k}\n')
		if displaymode in ('p', 'b'):
			lib.PrintSuccess(f'Potential Key: {k}')
Example #19
def heroku_search(displaymode, page):
	lib.PrintStatus("Scanning for Heroku API keys...")
	pagetext = page.text
	heroku_pattern = r"[h|H][e|E][r|R][o|O][k|K][u|U].{0,30}[0-9A-F]{8}-[0-9A-F]{4}-[0-9A-F]{4}-[0-9A-F]{4}-[0-9A-F]{12}"
	for k in re.findall(heroku_pattern, pagetext):
		if displaymode in ('s', 'b'):
			heroku_output = f'{curdir}/Output/HerokuKeys.txt'
			if not exists(dirname(heroku_output)):
				try:
					makedirs(dirname(heroku_output))
				except OSError as racecondition:
					if racecondition.errno != errno.EEXIST:
						raise
			with open(heroku_output, 'a') as gofile:
				gofile.write(f'Potential Key: {k}\n')
		if displaymode in ('p', 'b'):
			lib.PrintSuccess(f'Potential Key: {k}')
Example #20
def discord_webhook_search(displaymode, page):
	lib.PrintStatus("Scanning for discord webhooks...")
	pagetext = page.text
	discord_webhook_pattern = r"(https:\/\/discordapp\.com\/api\/webhooks\/[\d]+\/[\w]+)"
	for k in re.findall(discord_webhook_pattern, pagetext):
		if displaymode in ('s', 'b'):
			discord_webhook_output = f'{curdir}/Output/DiscordWebhooks.txt'
			if not exists(dirname(discord_webhook_output)):
				try:
					makedirs(dirname(discord_webhook_output))
				except OSError as racecondition:
					if racecondition.errno != errno.EEXIST:
						raise
			with open(discord_webhook_output, 'a') as gofile:
				gofile.write(f'Potential Hook: {k}\n')
		if displaymode in ('p', 'b'):
			lib.PrintSuccess(f'Potential Hook: {k}')
Example #21
def discord_bot_search(displaymode, page):
	lib.PrintStatus("Scanning for discord bot tokens...")
	pagetext = page.text
	# tightened from r"([\w\-\.]+[\-\.][\w\-\.]+)", which matched nearly any
	# dotted string; bot tokens follow a three-part dot-separated shape
	discord_token_pattern = r"[\w-]{24}\.[\w-]{6}\.[\w-]{27}"
	for k in re.findall(discord_token_pattern, pagetext):
		if displaymode in ('s', 'b'):
			discord_bot_output = f'{curdir}/Output/DiscordBotPotentialTokens.txt'
			if not exists(dirname(discord_bot_output)):
				try:
					makedirs(dirname(discord_bot_output))
				except OSError as racecondition:
					if racecondition.errno != errno.EEXIST:
						raise
			with open(discord_bot_output, 'a') as gofile:
				gofile.write(f'Potential Token: {k}\n')
		if displaymode in ('p', 'b'):
			lib.PrintSuccess(f'Potential Token: {k}')
Example #22
def nonspecific_api_search(displaymode, page):
	lib.PrintStatus("Scanning for nonspecific API keys...")
	pagetext = page.text
	nonspecific_pattern = r"[a|A][p|P][i|I][_]?[k|K][e|E][y|Y].{0,30}['\"\s][0-9a-zA-Z]{32,45}['\"\s]"
	for k in re.findall(nonspecific_pattern, pagetext):
		if displaymode in ('s', 'b'):
			nonspecific_output = f'{curdir}/Output/NonspecificPotentialKeys.txt'
			if not exists(dirname(nonspecific_output)):
				try:
					makedirs(dirname(nonspecific_output))
				except OSError as racecondition:
					if racecondition.errno != errno.EEXIST:
						raise
			with open(nonspecific_output, 'a') as gofile:
				gofile.write(f'Potential Key: {k}\n')
		if displaymode in ('p', 'b'):
			lib.PrintSuccess(f'Potential Key: {k}')
Example #23
def AWS_search(displaymode, page):
	lib.PrintStatus("Searching for AWS Access Keys...")
	aws_pattern = r"AKIA[0-9A-Z]{16}"
	pagetext = page.text
	for k in re.findall(aws_pattern, pagetext):
		lib.PrintHighSeverity('\nWarning: High Severity Item Found\n')
		if displaymode in ('s', 'b'):
			aws_output = f'{curdir}/Output/AWSPotentialTokens.txt'
			if not exists(dirname(aws_output)):
				try:
					makedirs(dirname(aws_output))
				except OSError as racecondition:
					if racecondition.errno != errno.EEXIST:
						raise
			with open(aws_output, 'a') as gofile:
				gofile.write(f'Potential Token: {k}\n')
		if displaymode in ('p', 'b'):
			lib.PrintSuccess(f'Potential Token: {k}')
Example #24
def redis_search(displaymode, page):
	lib.PrintStatus("Scanning for Redis URLs...")
	pagetext = page.text
	redis_pattern = r'redis://[0-9a-zA-Z:@.-]+'
	for k in re.findall(redis_pattern, pagetext):
		lib.PrintHighSeverity('\nWarning: High Severity Item Found\n')
		if displaymode in ('s', 'b'):
			redis_output = f'{curdir}/Output/RedisLinks.txt'
			if not exists(dirname(redis_output)):
				try:
					makedirs(dirname(redis_output))
				except OSError as racecondition:
					if racecondition.errno != errno.EEXIST:
						raise
			with open(redis_output, 'a') as gofile:
				gofile.write(f'Potential link: {k}\n')
		if displaymode in ('p', 'b'):
			lib.PrintSuccess(f'Potential link: {k}')
Example #25
def google_access_token_search(displaymode, page):
	lib.PrintStatus("Scanning for google access tokens...")
	pagetext = page.text
	gat_pattern = r'ya29\.[0-9a-zA-Z_-]{68}'
	for k in re.findall(gat_pattern, pagetext):
		lib.PrintHighSeverity('\nWarning: High Severity Item Found\n')
		if displaymode in ('s', 'b'):
			gat_output = f'{curdir}/Output/GoogleAccessPotentialTokens.txt'
			if not exists(dirname(gat_output)):
				try:
					makedirs(dirname(gat_output))
				except OSError as racecondition:
					if racecondition.errno != errno.EEXIST:
						raise
			with open(gat_output, 'a') as gofile:
				gofile.write(f'Potential Token: {k}\n')
		if displaymode in ('p', 'b'):
			lib.PrintSuccess(f'Potential Token: {k}')
Example #26
def facebook_OAUTH(displaymode, page):
	lib.PrintStatus("Scanning for facebook OAUTH secrets...")
	pagetext = page.text
	fauth_pattern = r"[f|F][a|A][c|C][e|E][b|B][o|O][o|O][k|K].{0,30}['\"\s][0-9a-f]{32}['\"\s]"
	for k in re.findall(fauth_pattern, pagetext):
		lib.PrintHighSeverity('\nWarning: High Severity Item Found\n')
		if displaymode in ('s', 'b'):
			fauth_output = f'{curdir}/Output/FacebookOAUTHSecrets.txt'
			if not exists(dirname(fauth_output)):
				try:
					makedirs(dirname(fauth_output))
				except OSError as racecondition:
					if racecondition.errno != errno.EEXIST:
						raise
			with open(fauth_output, 'a') as gofile:
				gofile.write(f'Potential Secret: {k}\n')
		if displaymode in ('p', 'b'):
			lib.PrintSuccess(f'Potential Secret: {k}')
Example #27
def google_oauth_search(displaymode, page):
	lib.PrintStatus("Scanning for google OAUTH secrets...")
	pagetext = page.text
	gauth_pattern = r"(\"client_secret\":\"[a-zA-Z0-9-_]{24}\")"
	for k in re.findall(gauth_pattern, pagetext):
		lib.PrintHighSeverity('\nWarning: High Severity Item Found\n')
		if displaymode in ('s', 'b'):
			gauth_output = f'{curdir}/Output/GoogleOAUTHSecrets.txt'
			if not exists(dirname(gauth_output)):
				try:
					makedirs(dirname(gauth_output))
				except OSError as racecondition:
					if racecondition.errno != errno.EEXIST:
						raise
			with open(gauth_output, 'a') as gofile:
				gofile.write(f'Potential Secret: {k}\n')
		if displaymode in ('p', 'b'):
			lib.PrintSuccess(f'Potential Secret: {k}')
Example #28
File: BinBot.py Project: hpitas/BinBot
def ArchiveSearch(stop, amode):
    arch_runs = 0
    while True:
        if arch_runs > 0:
            lib.PrintStatus("Runs: " + str(arch_runs))
            if stop is not True and arch_runs >= stop:
                lib.PrintSuccess("Runs Complete, Operation Finished... [" +
                                 str(datetime.now().strftime('%X')) + "]")
                exit()
            else:
                lib.PrintStatus("Pastes fetched, cooling down for " +
                                str(cooldown) + " seconds... [" +
                                str(datetime.now().strftime('%X')) + "]")
                sleep(cooldown / 2)
                lib.PrintStatus("Halfway through at [" +
                                str(datetime.now().strftime('%X')) + "]")
                sleep(cooldown / 2)
                lib.PrintStatus("resuming... [" +
                                str(datetime.now().strftime('%X')) + "]")
        if arch_runs < stop or stop is True:
            arch_page, arch_filename = archive_connect()
            arch_soup = BeautifulSoup(arch_page.text, 'html.parser')
            sleep(2)
            lib.PrintStatus("Getting archived pastes... [" +
                            str(datetime.now().strftime('%X')) + "]")
            if AccessDeniedError in arch_page.text:
                lib.PrintError(
                    "IP temporarily suspended, pausing until the ban is lifted. Estimated time: one hour... ["
                    + str(datetime.now().strftime('%X')) + "]")
                sleep(cooldown)
                lib.PrintStatus("Process resumed... [" +
                                str(datetime.now().strftime('%X')) + "]")
                continue
            else:
                pass
            lib.PrintStatus("Finding params... [" +
                            str(datetime.now().strftime('%X')) + "]")

            table = arch_soup.find(
                "table",
                class_="maintable")  # Fetch the table of recent pastes
            while True:
                try:
                    tablehrefs = table.findAll(
                        'a', href=True)  # Find the <a> tags for every paste
                    break
                except AttributeError:
                    lib.PrintError(
                        "IP temporarily suspended, pausing until the ban is lifted. Estimated time: one hour... ["
                        + str(datetime.now().strftime('%X')) + "]")
                    sleep(cooldown)
                    lib.PrintStatus("Process resumed... [" +
                                    str(datetime.now().strftime('%X')) + "]")
                    continue

            for h in tablehrefs:
                proch = h['href']  # fetch the URL param for each paste
                lib.PrintSuccess("params fetched... [" +
                                 str(datetime.now().strftime('%X')) + "]")
                lib.PrintStatus("Acting on param " + str(proch) + "... [" +
                                str(datetime.now().strftime('%X')) + "]")
                full_archpage, full_arch_url = parameter_connect(proch)
                sleep(5)
                item_soup = BeautifulSoup(full_archpage.text, 'html.parser')
                unprocessed = item_soup.find(
                    'textarea')  # Fetch the raw text in the paste.
                for tag in taglist:
                    unprocessed = str(unprocessed).replace(
                        tag, ""
                    )  # process the raw text by removing all html elements
                if amode == 'r':
                    if path.isdir(workpath) is True:
                        if blacklisting is True:
                            flagged = False
                            compare_text = re.sub(
                                r'\s+', '', unprocessed
                            )  # strip all whitespace for comparison
                            for b in blacklist:
                                b = re.sub(
                                    r'\s+', '',
                                    b)  # strip all whitespace for comparison
                                if b.lower() in compare_text.lower():
                                    lib.PrintStatus(
                                        "Blacklisted phrase detected, passing..."
                                    )
                                    flagged = True

                            if flagged is True:
                                continue
                            else:
                                arch_final_file = codecs.open(
                                    str(workpath) + str(full_arch_url).replace(
                                        ":", "-").replace("/", "-") + ".txt",
                                    'w+', 'utf-8')
                                arch_final_file.write(unprocessed)
                                arch_final_file.close()
                                arch_runs += 1
                                continue
                        elif blacklisting is False:
                            arch_final_file = codecs.open(
                                str(workpath) + str(full_arch_url).replace(
                                    ":", "-").replace("/", "-") + ".txt",
                                'w+', 'utf-8')
                            arch_final_file.write(unprocessed)
                            arch_final_file.close()
                            arch_runs += 1
                            continue
                    else:
                        lib.PrintStatus("Making directory... [" +
                                        str(datetime.now().strftime('%X')) +
                                        "]")
                        makedirs(workpath)  # create workpath before writing into it (assumes makedirs is imported from os)
                        if blacklisting is True:
                            flagged = False
                            compare_text = re.sub(
                                r'\s+', '', unprocessed
                            )  # strip all whitespace for comparison
                            for b in blacklist:
                                b = re.sub(
                                    r'\s+', '',
                                    b)  # strip all whitespace for comparison
                                if b.lower() in compare_text.lower():
                                    lib.PrintStatus(
                                        "Blacklisted phrase detected, passing..."
                                    )
                                    flagged = True

                            if flagged is True:
                                continue
                            else:
                                arch_final_file = codecs.open(
                                    str(workpath) + str(full_arch_url).replace(
                                        ":", "-").replace("/", "-") + ".txt",
                                    'w+', 'utf-8')
                                arch_final_file.write(unprocessed)
                                arch_final_file.close()
                                arch_runs += 1
                                continue
                        elif blacklisting is False:
                            arch_final_file = codecs.open(
                                str(workpath) + str(full_arch_url).replace(
                                    ":", "-").replace("/", "-") + ".txt",
                                'w+', 'utf-8')
                            arch_final_file.write(unprocessed)
                            arch_final_file.close()
                            arch_runs += 1
                            continue
                elif amode == 'f':
                    if path.isdir(workpath) is True:
                        lib.PrintStatus("Running engine... [" +
                                        str(datetime.now().strftime('%X')) +
                                        "]")
                        if blacklisting is True:
                            flagged = False
                            compare_text = re.sub(
                                r'\s+', '', unprocessed
                            )  # strip all whitespace for comparison
                            for b in blacklist:
                                b = re.sub(
                                    r'\s+', '',
                                    b)  # strip all whitespace for comparison
                                if b.lower() in compare_text.lower():
                                    lib.PrintStatus(
                                        "Blacklisted phrase detected, passing..."
                                    )
                                    flagged = True

                            if flagged is True:
                                continue
                            else:
                                archive_engine(unprocessed, keylisting,
                                               reglisting)
                                arch_runs += 1
                                continue
                        else:
                            lib.PrintStatus(
                                "Running engine... [" +
                                str(datetime.now().strftime('%X')) + "]")
                            archive_engine(unprocessed, keylisting, reglisting)
                            arch_runs += 1
                            continue
        else:
            lib.PrintSuccess("Operation Finished... [" +
                             str(datetime.now().strftime('%X')) + "]")
            break
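The whitespace-stripped blacklist comparison is repeated four times above; a hedged sketch of hoisting it into one helper:

import re

def is_blacklisted(text, blacklist):
    """True when any blacklisted phrase occurs in text, ignoring whitespace."""
    compare_text = re.sub(r'\s+', '', text).lower()
    return any(re.sub(r'\s+', '', b).lower() in compare_text for b in blacklist)

# each inline check could then become: if is_blacklisted(unprocessed, blacklist): continue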
Example #29
File: BinBot.py Project: hpitas/BinBot
                if list_choice.lower() == 'y':
                    blacklisting = True

                    while True:
                        bfile_input = input(
                            "Read blacklisted terms from file? [y]/[n]: ")
                        if bfile_input.lower() == 'n':
                            blacklist_input = input(
                                "Enter the phrases you wish to blacklist separated by a comma: "
                            ).split(",")
                            for b in blacklist_input:
                                blacklist.append(b)
                            break
                        elif bfile_input.lower() == 'y':
                            lib.PrintStatus(
                                "File should be structured with one term per line, with no comma."
                            )
                            bpath = input("Enter the full path of the file: ")
                            if path.isfile(bpath) is True:
                                print("Blacklist file detected...")
                                with open(bpath) as bfile:
                                    for bline in bfile.readlines():
                                        blacklist.append(bline.rstrip())
                                break
                            else:
                                lib.PrintError("No such file found, check input.")
                    break

                elif list_choice.lower() == 'n':
                    blacklisting = False
                    break
                else:
                    lib.PrintError("invalid input.")
Example #30
def manual_setup():
	while True:
		displaymode = input("[p]rint to screen, [s]ave to file, or [b]oth: ")
		if displaymode.lower() not in ['p', 's', 'b']:
			lib.PrintError("Invalid Input")
			continue
		break
	while True:
		scrape_input_method = input("[m]anual input (single url) or load from [f]ile: ")
		if scrape_input_method.lower() not in ['m', 'f']:
			lib.PrintError("Invalid Input")
			continue
		break
	while True:
		try:
			limiter = int(input("Enter the time between requests, in seconds: "))
			if limiter < 0:
				raise ValueError
			break
		except ValueError:
			lib.PrintError("Invalid Input. Enter a positive integer.")
			continue
	lib.PrintStatus("\nIf provided links to one (or multiple) github profiles, Keyring can crawl all repositories for secrets.")
	lib.PrintStatus("However, this means Keyring WILL NOT FUNCTION CORRECTLY if provided links to other pages in the same text file.")
	lib.PrintStatus("Large profiles will also take a fairly long time, as Keyring fetches ALL files from ALL repos.\n")
	while True:
		repocrawlchoice = input("Enable repo crawling? [y]/[n]: ")
		if repocrawlchoice.lower() not in ['y', 'n']:
			lib.PrintError("Invalid Input.")
			continue
		elif repocrawlchoice.lower() == 'y':
			repo_crawl = True
			while True:
				lib.PrintHighSeverity("Warning: Turning on verbosity will output a LOT when spidering large profiles.")
				verbosity = input("Select verbosity for spidering: [off]/[on]: ")
				if verbosity.lower() not in ['off', 'on']:
					lib.PrintError("Invalid Input.")
					continue
				else:
					break
			break
		elif repocrawlchoice.lower() == 'n':
			repo_crawl = False
			verbosity = 'off'
			break
	while True:
		savechoice = input("Save choices as config file? [y]/[n]: ")
		if savechoice.lower() == 'n':
			break
		elif savechoice.lower() == 'y':
			if isdir(f'{curdir}/KRconfig') is False:
				lib.PrintError(f"Config directory not detected in {curdir}...")
				lib.PrintStatus("Making config directory...")
				mkdir(f'{curdir}/KRconfig')
			configname = input("Enter the name for this configuration: ")
			with open(f'{curdir}/KRconfig/{configname}.ini', 'w') as cfile:
				cfile.write(
f'''[initial_vars]
displaymode = {displaymode}
[scraping_vars]
scrape_input_method = {scrape_input_method}
limiter = {limiter}
repo_crawl = {repo_crawl}
verbosity = {verbosity}
''')
				break
		else:
			lib.PrintError("Invalid Input.")
	return displaymode, scrape_input_method, limiter, repo_crawl, verbosity