Example #1
def main():
	try:
		while True:
			initchoice = input("[L]oad config file or [m]anually enter?: ")
			if initchoice.lower() == 'l':
				displaymode, scrape_input_method, limiter, repo_crawl, verbosity = load_config()
				if scrape_input_method == 'f':
					while True:
						addressfile = input("Enter the full path to the address file: ")
						if isfile(addressfile) is True:
							break
						else:
							lib.PrintError("No such file found.")
							continue
				break
			elif initchoice.lower() == 'm':
				displaymode, scrape_input_method, limiter, repo_crawl, verbosity = manual_setup()
				break
			elif initchoice == "":
				lib.DoNothing()
			else:
				lib.PrintError("Invalid Input.")
				continue
		scrape(scrape_input_method, displaymode, limiter, repo_crawl, verbosity)
	except KeyboardInterrupt:
		print()
		lib.PrintError("Search canceled.")
Example #2
def main():
	try:
		while True:
			initchoice = input("[L]oad config file or [m]anually enter?: ")
			if initchoice.lower() == 'l':
				displaymode, scrape_input_method, limiter, repo_crawl, link_type, directory_filtering, blacklisted_directories, verbosity = load_config()
				if scrape_input_method == 'f':
					while True:
						addressfile = input("Enter the full path to the address file: ")
						if isfile(addressfile) is True:
							break
						else:
							lib.PrintError("No such file found.")
							continue
				break
			elif initchoice.lower() == 'm':
				displaymode, scrape_input_method, limiter, repo_crawl, link_type, directory_filtering, blacklisted_directories, verbosity = manual_setup()
				break
			elif initchoice == "":
				pass
			else:
				lib.PrintError("Invalid Input.")
				continue
		scrape(scrape_input_method, displaymode, limiter, repo_crawl, link_type, directory_filtering, blacklisted_directories, verbosity)
	except KeyboardInterrupt:
		print()  # start on a fresh line after ^C, as in Example #1
		lib.PrintError("Search canceled.")
Example #3
def manual_setup():
	while True:
		local_dir = input("Enter the local directory to exfiltrate data to: ")
		if isdir(local_dir) is True:
			break
		else:
			lib.PrintError("No such directory found, check input")
			continue
	while True:
		lib.PrintStatus("DataHound can utilize either Shodan for targeted results, scan the entire internet using Masscan, or read IP addresses from file.")
		lib.PrintStatus("Note that if you select Masscan, you must have Masscan installed on your system, due to the python-masscan library requirements.")
		search_type = input("[s]hodan, [m]asscan, or [f]ile: ")
		if search_type.lower() not in ['s', 'm', 'f']:
			lib.PrintError("Invalid Input.")
			continue
		else:
			if search_type.lower() == 's':
				shodan_key = input("Enter your shodan API key: ")
				break
			elif search_type.lower() == 'f':
				while True:
					address_file = input("Enter the filepath: ")
					if isfile(address_file) is True:
						break
					else:
						lib.PrintError("Invalid filepath, check input.")
						continue
				break
			else:
				# Masscan ('m') needs no further input here.
				break
	while True:
		save_choice = input("Save configuration for repeated use? [y]/[n]: ")
		if save_choice.lower() not in ['y', 'n']:
			lib.PrintError("Invalid Input")
			continue
		else:
			if save_choice.lower() == 'n':
				break
			else:
				config_name = input("Enter the name for this configuration: ")
				with codecs.open(f'{syspath[0]}/{config_name}.ini', 'w', 'utf-8') as cfile:
					cfile.write('[vars]')
					cfile.write('\n')
					cfile.write(f'local_dir = {local_dir}')
					cfile.write('\n')
					cfile.write(f'search_type = {search_type}')
					cfile.write('\n')
					if search_type == 's':
						cfile.write(f'shodan_key = {shodan_key}')
					elif search_type == 'f':
						cfile.write(f'address_file = {address_file}')
				break
	if search_type == 's':
		ret_dict = {'local_dir': local_dir, 'search_type': search_type, 'shodan_key': shodan_key}
	elif search_type == 'f':
		ret_dict = {'local_dir': local_dir, 'search_type': search_type, 'address_file': address_file}
	else:
		ret_dict = {'local_dir': local_dir, 'search_type': search_type}
	return ret_dict
Example #4
def scrape(scrape_input_method, displaymode, limiter, repo_crawl, verbosity):
    if scrape_input_method.lower() == 'm':
        url = input("Enter the URL: ")
        urlpage = connect(url)
        if urlpage == 'connection failed':
            lib.PrintError(
                "Connection to specified URL could not be established.")
            exit()
        else:
            lib.PrintStatus('Status: [200], Searching for API Keys...')
            if repo_crawl is False:
                search_execute(displaymode, urlpage)
            else:
                repository_list = get_repos(url)
                file_addresses = traverse_repos(repository_list, verbosity)
                executor = ThreadPoolExecutor(
                    max_workers=max(len(file_addresses), 1))  # max_workers must be >= 1
                for addr in set(file_addresses):
                    urlpage = connect(addr)
                    # Pass the callable and its args; calling it inline runs it
                    # synchronously and submits its return value instead.
                    executor.submit(search_execute, displaymode, urlpage)
                    sleep(limiter)
            lib.PrintSuccess("Scanning complete.")
    else:
        while True:
            url_file = input("Enter the full path to the input file: ")
            if isfile(url_file) is True:
                break
            elif str(url_file) == "":
                lib.DoNothing()
            else:
                lib.PrintError("No Such File Found.")
                continue
        with open(url_file) as ufile:
            count = 0
            for line in ufile.readlines():
                if repo_crawl is False:
                    count += 1
                    urlpage = connect(line.rstrip())
                    if urlpage == 'connection failed':
                        lib.PrintFailure(
                            f"[Line: {count}] Connection failed on host {line}"
                        )
                    else:
                        search_execute(displaymode, urlpage)
                        sleep(limiter)
                else:
                    repository_list = get_repos(line)
                    file_addresses = traverse_repos(repository_list, verbosity)
                    executor = ThreadPoolExecutor(
                        max_workers=max(len(file_addresses), 1))
                    for addr in set(file_addresses):
                        urlpage = connect(addr)
                        executor.submit(search_execute, displaymode, urlpage)
                        sleep(limiter)
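
For reference, a minimal, self-contained sketch of the ThreadPoolExecutor pattern used above: `submit` takes the callable and its arguments separately, and the returned futures can be collected with `as_completed`. The worker function and addresses here are illustrative stand-ins, not project code.

from concurrent.futures import ThreadPoolExecutor, as_completed
from time import sleep

def fetch_and_scan(address):
    # Hypothetical stand-in for connect() followed by search_execute().
    sleep(0.1)  # simulate network latency
    return f"scanned {address}"

addresses = {"https://example.com/a", "https://example.com/b"}

# max(..., 1) avoids ValueError when the address set is empty.
with ThreadPoolExecutor(max_workers=max(len(addresses), 1)) as executor:
    futures = [executor.submit(fetch_and_scan, addr) for addr in addresses]
    for future in as_completed(futures):
        print(future.result())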
Example #5
def traverse_repos(repolist, link_type, directory_filtering, blacklisted_directories, verbosity): # Here Be Recursion
	# Note: baseraw and baselink are presumably module-level URL prefixes
	# (the raw-content host and the site root) defined elsewhere in the project.
	fileaddrs = []
	def spider_current_level(page):
		dirnames = []
		levelsoup = BeautifulSoup(page.text, 'html.parser')
		spans = levelsoup.findAll('span', {'class': "css-truncate css-truncate-target"})
		for s in spans:
			subtags = s.findAll('a', {'class': "js-navigation-open"}, href=True)
			for st in subtags:
				if '/blob/' in st['href']:
					lnk = st['href'].replace('blob/', '')
					if verbosity == 'y':
						lib.PrintStatus(f"File: {lnk}")
					full = baseraw + lnk
					fileaddrs.append(full)
				else:
					if verbosity == 'y':
						lib.PrintStatus(f"Directory: {st['href']}")
					if directory_filtering is True:
						# The final path component is the directory name.
						directory_name = st['href'].split('/')[-1]
						if directory_name not in set(blacklisted_directories):
							dirnames.append(st['href'])
					else:
						dirnames.append(st['href'])
		if len(dirnames) == 0:
			if verbosity == 'y':
				lib.PrintStatus("Branch exhausted")
		else:
			for subdir in dirnames:
				subdir_addr = baselink + subdir
				subdir_page = connect(subdir_addr)
				spider_current_level(subdir_page)
	if link_type == 'profile':
		for i in repolist:
			repopage = connect(i)
			if repopage == 'connection_failed':
				lib.PrintError(f'Connection to {i} failed.')
			else:
				spider_current_level(repopage)
	elif link_type == 'repo':
		repopage = connect(repolist)
		if repopage == 'connection_failed':
			lib.PrintError(f'Connection to {repolist} failed.')
		else:
			spider_current_level(repopage)
	return fileaddrs
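
For reference, a self-contained sketch of the selector logic `spider_current_level` applies, run against a tiny inline fragment. The class names are copied from the function; the HTML itself is illustrative.

from bs4 import BeautifulSoup

html = '''
<span class="css-truncate css-truncate-target">
  <a class="js-navigation-open" href="/user/repo/blob/master/config.py">config.py</a>
</span>
<span class="css-truncate css-truncate-target">
  <a class="js-navigation-open" href="/user/repo/tree/master/src">src</a>
</span>
'''

soup = BeautifulSoup(html, 'html.parser')
for span in soup.find_all('span', {'class': 'css-truncate css-truncate-target'}):
    for a in span.find_all('a', {'class': 'js-navigation-open'}, href=True):
        # Links containing /blob/ are files; everything else is a directory.
        kind = 'File' if '/blob/' in a['href'] else 'Directory'
        print(f"{kind}: {a['href']}")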
Example #6
def main():
	lib.PrintTitle()
	while True:
		conf = input("[L]oad configuration file or do [m]anual setup: ")
		if conf.lower() not in ['l', 'm']:
			lib.PrintError("Invalid Input")
			continue
		if conf.lower() == 'l':
			vars_dict = load_config()
		else:
			vars_dict = manual_setup()
		if vars_dict['search_type'] == 's':
			shodan_scan(vars_dict['shodan_key'], vars_dict['local_dir'])
		elif vars_dict['search_type'] == 'f':
			file_scan(vars_dict['address_file'], vars_dict['local_dir'])
		else:
			masscan_scan(vars_dict['local_dir'])
		lib.PrintSuccess("Scan Complete")
		break
Example #7
def load_config():
	if isdir(f'{curdir}/KRconfig') is False:
		lib.PrintError(f"Config directory not detected in {curdir}...")
		lib.PrintStatus(f"Making config directory in {curdir}...")
		mkdir(f'{curdir}/KRconfig')
	config_files = {}
	count = 0
	onlyfiles = [f for f in listdir(f'{curdir}/KRconfig') if isfile(join(f'{curdir}/KRconfig', f))]
	for file in onlyfiles:
		if file.endswith('.ini'):
			count += 1
			config_files[file] = count
	if count == 0:
		lib.PrintStatus("No config files detected, making default...")
		with codecs.open(f'{curdir}/KRconfig/defaultconfig.ini', 'w', 'utf-8') as dconf:
			dconf.write(
'''[initial_vars]
displaymode = b
[scraping_vars]
scrape_input_method = m
limiter = 5
repo_crawl = False
link_type = regular
directory_filtering = False
blacklisted_directories = []
verbosity = off''')
		config_files['Default Configuration'] = 1
		count += 1
	for k in config_files.keys():
		print(f"[{config_files[k]}]: {k}")
	while True:
		try:
			load_choice = int(input("Select which config file to load: "))
			if load_choice > count or load_choice < 1:
				raise ValueError
			break
			else:
				break
		except ValueError:
			lib.PrintFailure("Invalid Input. Please enter the integer that corresponds with the desired config file.")
			continue
	for k in config_files.keys():
		if load_choice == config_files[k]:
			selected_file = k
	parser.read(f"{curdir}/KRconfig/{selected_file}", encoding='utf-8')
	# Initial Variables
	displaymode = parser.get('initial_vars', 'displaymode')
	# Scraping Variables
	scrape_input_method = parser.get('scraping_vars', 'scrape_input_method')
	limiter = int(parser.get('scraping_vars', 'limiter'))
	repo_crawl = parser.getboolean('scraping_vars', 'repo_crawl')
	link_type = parser.get('scraping_vars', 'link_type')
	directory_filtering = parser.getboolean('scraping_vars', 'directory_filtering')
	# blacklisted_directories is stored as a Python list literal (e.g. []), so
	# parse it back into a list (requires: from ast import literal_eval).
	blacklisted_directories = literal_eval(parser.get('scraping_vars', 'blacklisted_directories'))
	verbosity = parser.get('scraping_vars', 'verbosity')
	return displaymode, scrape_input_method, limiter, repo_crawl, link_type, directory_filtering, blacklisted_directories, verbosity
Example #8
def ftp_operations(host, output_location): # TODO: Find out how to check and record file sizes in relation to FTP
	if output_location.endswith('/') is False:
		output_location = output_location + '/'
	ftp_connection = ftplib.FTP(host)
	try:
		ftp_connection.login()
		lib.PrintSuccess('Login Status: 200')
		lib.PrintStatus(f'Exfiltrating files to {output_location}')
		filenames = ftp_connection.nlst()
		for filename in filenames:
			local_filename = path.join(output_location, filename)
			# Use a context manager so each file is closed even if the transfer fails.
			with open(local_filename, 'wb') as file:
				ftp_connection.retrbinary('RETR ' + filename, file.write)
	except Exception as e:
		lib.PrintError(f'{e}')
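
On the TODO above: the stdlib ftplib exposes the FTP SIZE command through FTP.size(), which most servers only honor in binary (TYPE I) mode. A minimal sketch, assuming the same anonymous-login setup as ftp_operations:

import ftplib

def record_file_sizes(host):
    """Print the size of every file in the FTP root directory."""
    ftp = ftplib.FTP(host)
    try:
        ftp.login()                # anonymous login, as above
        ftp.sendcmd('TYPE I')      # SIZE is only reliable in binary mode
        for filename in ftp.nlst():
            try:
                print(f'{filename}: {ftp.size(filename)} bytes')
            except ftplib.error_perm:
                print(f'{filename}: size unavailable (likely a directory)')
    finally:
        ftp.quit()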
Example #9
def shodan_search(displaymode, page):
    lib.PrintStatus("Searching for Shodan keys...")
    shodan_pattern = r'\b[a-zA-Z0-9]{32}\b'
    pagetext = page.text
    keyset = re.findall(shodan_pattern, pagetext)
    if not keyset:
        lib.PrintFailure("no keys found")
    else:
        valid_paid_keys = {}
        valid_unpaid_keys = []
        for key in set(keyset):
            api = shodan.Shodan(key)
            try:
                keydata = api.info()
                usage_limits = keydata['usage_limits']
                if keydata['plan'] == 'dev' or keydata['plan'] == 'edu':
                    credits_tuple = (usage_limits['scan_credits'],
                                     usage_limits['query_credits'])
                    valid_paid_keys[key] = credits_tuple
                elif keydata['plan'] == 'oss':
                    valid_unpaid_keys.append(key)
            except Exception as e:
                lib.PrintError(f"{e}.")
        if displaymode == 's' or displaymode == 'b':
            shodan_output = f'{curdir}\\Output\\ShodanKeys.txt'
            if not exists(dirname(shodan_output)):
                try:
                    makedirs(dirname(shodan_output))
                except OSError as racecondition:
                    if racecondition.errno != errno.EEXIST:
                        raise
            with open(shodan_output, 'a') as sofile:
                sofile.write('----------VALID KEYS----------\n')
                for pkey in valid_paid_keys.keys():
                    sofile.write(
                        f"Key: {pkey}\nCredits (scan, query): {valid_paid_keys[pkey][0]}, {valid_paid_keys[pkey][1]}\n\n"
                    )
                sofile.write('----------UNPAID KEYS----------\n')
                for upkeys in set(valid_unpaid_keys):
                    sofile.write(f'Key: {upkeys}\n')
Example #10
def load_config():
	parser = ConfigParser()
	while True:
		config_path = input("Enter the filepath (include .ini file extension) to the configuration file: ")
		if isfile(config_path) is True:
			break
		else:
			lib.PrintError("Invalid Filepath, Check Input")
			continue
	parser.read(config_path, encoding='utf-8')
	local_dir = parser.get('vars', 'local_dir')
	search_type = parser.get('vars', 'search_type')
	if search_type == 's':
		shodan_key = parser.get('vars', 'shodan_key')
		ret_dict = {'local_dir': local_dir, 'search_type': search_type, 'shodan_key': shodan_key}
	elif search_type == 'f':
		address_file = parser.get('vars', 'address_file')
		ret_dict = {'local_dir': local_dir, 'search_type': search_type, 'address_file': address_file}
	else:
		ret_dict = {'local_dir': local_dir, 'search_type': search_type}
	return ret_dict
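
For reference, a sketch of the [vars] layout this loader expects — the same layout manual_setup in Example #3 writes. The values below are illustrative:

from configparser import ConfigParser

sample = """[vars]
local_dir = /tmp/exfil
search_type = s
shodan_key = 0123456789abcdef0123456789abcdef
"""

parser = ConfigParser()
parser.read_string(sample)
print(parser.get('vars', 'search_type'))  # -> s
print(parser.get('vars', 'shodan_key'))   # -> 0123456789abcdef0123456789abcdef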
Example #11
def ArchiveSearch(stop, amode):
    arch_runs = 0
    while True:
        if arch_runs > 0:
            lib.PrintStatus("Runs: " + str(arch_runs))
            # stop is either True (run indefinitely) or an integer run limit.
            if stop is not True and arch_runs >= stop:
                lib.PrintSuccess("Runs Complete, Operation Finished... [" +
                                 str(datetime.now().strftime('%X')) + "]")
                exit()
            else:
                lib.PrintStatus("Pastes fetched, cooling down for " +
                                str(cooldown) + " seconds... [" +
                                str(datetime.now().strftime('%X')) + "]")
                sleep(cooldown / 2)
                lib.PrintStatus("Halfway through at [" +
                                str(datetime.now().strftime('%X')) + "]")
                sleep(cooldown / 2)
                lib.PrintStatus("resuming... [" +
                                str(datetime.now().strftime('%X')) + "]")
        if arch_runs < stop or stop is True:
            arch_page, arch_filename = archive_connect()
            arch_soup = BeautifulSoup(arch_page.text, 'html.parser')
            sleep(2)
            lib.PrintStatus("Getting archived pastes... [" +
                            str(datetime.now().strftime('%X')) + "]")
            if AccessDeniedError in arch_page.text:
                lib.PrintError(
                    "IP temporarily suspended; pausing until the ban is lifted. Estimated time: one hour... ["
                    + str(datetime.now().strftime('%X')) + "]")
                sleep(cooldown)
                lib.PrintStatus("Process resumed... [" +
                                str(datetime.now().strftime('%X')) + "]")
                continue
            else:
                pass
            lib.PrintStatus("Finding params... [" +
                            str(datetime.now().strftime('%X')) + "]")

            table = arch_soup.find(
                "table",
                class_="maintable")  # Fetch the table of recent pastes
            while True:
                try:
                    tablehrefs = table.findAll(
                        'a', href=True)  # Find the <a> tags for every paste
                    break
                except AttributeError:
                    lib.PrintError(
                        "IP temporarily suspended; pausing until the ban is lifted. Estimated time: one hour... ["
                        + str(datetime.now().strftime('%X')) + "]")
                    sleep(cooldown)
                    lib.PrintStatus("Process resumed... [" +
                                    str(datetime.now().strftime('%X')) + "]")
                    # Refetch the archive page; retrying findAll on the same stale
                    # soup would raise AttributeError forever.
                    arch_page, arch_filename = archive_connect()
                    arch_soup = BeautifulSoup(arch_page.text, 'html.parser')
                    table = arch_soup.find("table", class_="maintable")
                    continue

            for h in tablehrefs:
                proch = h['href']  # fetch the URL param for each paste
                lib.PrintSuccess("params fetched... [" +
                                 str(datetime.now().strftime('%X')) + "]")
                lib.PrintStatus("Acting on param " + str(proch) + "... [" +
                                str(datetime.now().strftime('%X')) + "]")
                full_archpage, full_arch_url = parameter_connect(proch)
                sleep(5)
                item_soup = BeautifulSoup(full_archpage.text, 'html.parser')
                unprocessed = item_soup.find(
                    'textarea')  # Fetch the raw text in the paste.
                for tag in taglist:
                    unprocessed = str(unprocessed).replace(
                        tag, ""
                    )  # process the raw text by removing all html elements
                if amode == 'r':
                    if path.isdir(workpath) is True:
                        if blacklisting is True:
                            flagged = False
                            compare_text = re.sub(
                                r'\s+', '', unprocessed
                            )  # strip all whitespace for comparison
                            for b in blacklist:
                                b = re.sub(
                                    r'\s+', '',
                                    b)  # strip all whitespace for comparison
                                if b.lower() in compare_text.lower():
                                    lib.PrintStatus(
                                        "Blacklisted phrase detected, passing..."
                                    )
                                    flagged = True

                            if flagged is True:
                                continue
                            else:
                                arch_final_file = codecs.open(
                                    str(workpath) + str(full_arch_url).replace(
                                        ":", "-").replace(":", "-").replace(
                                            "/", "-") + ".txt", 'w+', 'utf-8')
                                arch_final_file.write(unprocessed)
                                arch_final_file.close()
                                arch_runs += 1
                                continue
                        elif blacklisting is False:
                            arch_final_file = codecs.open(
                                str(workpath) +
                                str(full_arch_url).replace(":", "-").replace(
                                    ":", "-").replace("/", "-") + ".txt", 'w+',
                                'utf-8')
                            arch_final_file.write(unprocessed)
                            arch_final_file.close()
                            arch_runs += 1
                            continue
                    else:
                        lib.PrintStatus("Making directory... [" +
                                        str(datetime.now().strftime('%X')) +
                                        "]")
                        makedirs(workpath)  # create it before writing (requires: from os import makedirs)
                        if blacklisting is True:
                            flagged = False
                            compare_text = re.sub(
                                r'\s+', '', unprocessed
                            )  # strip all whitespace for comparison
                            for b in blacklist:
                                b = re.sub(
                                    r'\s+', '',
                                    b)  # strip all whitespace for comparison
                                if b.lower() in compare_text.lower():
                                    lib.PrintStatus(
                                        "Blacklisted phrase detected, passing..."
                                    )
                                    flagged = True

                            if flagged is True:
                                continue
                            else:
                                arch_final_file = codecs.open(
                                    str(workpath) + str(full_arch_url).replace(
                                        ":", "-").replace(":", "-").replace(
                                            "/", "-") + ".txt", 'w+', 'utf-8')
                                arch_final_file.write(unprocessed)
                                arch_final_file.close()
                                arch_runs += 1
                                continue
                        elif blacklisting is False:
                            arch_final_file = codecs.open(
                                str(workpath) +
                                str(full_arch_url).replace(":", "-").replace(
                                    ":", "-").replace("/", "-") + ".txt", 'w+',
                                'utf-8')
                            arch_final_file.write(unprocessed)
                            arch_final_file.close()
                            arch_runs += 1
                            continue
                elif amode == 'f':
                    if path.isdir(workpath) is True:
                        lib.PrintStatus("Running engine... [" +
                                        str(datetime.now().strftime('%X')) +
                                        "]")
                        if blacklisting is True:
                            flagged = False
                            compare_text = re.sub(
                                r'\s+', '', unprocessed
                            )  # strip all whitespace for comparison
                            for b in blacklist:
                                b = re.sub(
                                    r'\s+', '',
                                    b)  # strip all whitespace for comparison
                                if b.lower() in compare_text.lower():
                                    lib.PrintStatus(
                                        "Blacklisted phrase detected, passing..."
                                    )
                                    flagged = True

                            if flagged is True:
                                continue
                            else:
                                archive_engine(unprocessed, keylisting,
                                               reglisting)
                                arch_runs += 1
                                continue
                        else:
                            lib.PrintStatus(
                                "Running engine... [" +
                                str(datetime.now().strftime('%X')) + "]")
                            archive_engine(unprocessed, keylisting, reglisting)
                            arch_runs += 1
                            continue
        else:
            lib.PrintSuccess("Operation Finished... [" +
                             str(datetime.now().strftime('%X')) + "]")
            break
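
The blacklist test and the paste-writing code are repeated four times in this function. A possible refactor (a sketch under the same assumptions as the original, not the project's actual code) pulls them into two helpers:

import codecs
import re

def is_blacklisted(text, blacklist):
    """True if any blacklisted phrase occurs in text, comparing with all
    whitespace stripped and case ignored, as the original loop does."""
    compare_text = re.sub(r'\s+', '', text).lower()
    return any(re.sub(r'\s+', '', phrase).lower() in compare_text
               for phrase in blacklist)

def write_paste(workpath, paste_url, text):
    """Save a paste under workpath, deriving a filesystem-safe name from its URL."""
    safe_name = str(paste_url).replace(':', '-').replace('/', '-')
    with codecs.open(f'{workpath}{safe_name}.txt', 'w+', 'utf-8') as outfile:
        outfile.write(text)

With these, each branch of ArchiveSearch reduces to a guard plus one call: skip when `blacklisting and is_blacklisted(unprocessed, blacklist)`, otherwise `write_paste(workpath, full_arch_url, unprocessed)` and increment arch_runs.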
Example #12
def print_connecterror():
    # Nested helper: e and archive_url are bound in the enclosing scope.
    lib.PrintError(f"""
Exception occurred: {e}
Possible causes: Poor/Non-functioning Internet connection or pastebin is unreachable
Possible fixes: Troubleshoot internet connection or check status of {archive_url}
        """)
Example #13
def scrape(scrape_input_method, displaymode, limiter, repo_crawl, link_type, directory_filtering, blacklisted_directories, verbosity):
	if scrape_input_method.lower() == 'm':
		url = input("Enter the URL: ").rstrip()
		urlpage = connect(url)
		if urlpage == 'connection failed':
			lib.PrintError("Connection to specified URL could not be established.")
			exit()
		else:
			lib.PrintStatus('Status: [200], Searching for API Keys...')
			if repo_crawl is False:
				better_search_execute(displaymode, urlpage, repo_crawl, verbosity)
			else:
				if link_type == 'profile':
					resources = get_repos(url)
					file_addresses = traverse_repos(resources, link_type, directory_filtering, blacklisted_directories, verbosity)
				elif link_type == 'repo':
					file_addresses = traverse_repos(url, link_type, directory_filtering, blacklisted_directories, verbosity)
				if len(file_addresses) > 0:
					executor = ThreadPoolExecutor(max_workers=len(file_addresses))
				else:
					lib.PrintError("Fatal Error: No File Addresses Were Returned")
					lib.PrintError("This is likely a mistyped, but valid, URL in the input.")
					lib.PrintError("This also occurs if a github repo link is provided when the profile option is enabled, or vice versa")
					exit()
				for addr in set(file_addresses):
					urlpage = connect(addr)
					# Pass the callable and its args; calling it inline would run it
					# synchronously and submit its return value instead.
					executor.submit(better_search_execute, displaymode, urlpage, repo_crawl, verbosity)
					sleep(limiter)
			lib.PrintSuccess("Scanning complete.")
	else:
		while True:
			url_file = input("Enter the full path to the input file: ")
			if isfile(url_file) is True:
				break
			elif str(url_file) == "":
				pass
			else:
				lib.PrintError("No Such File Found.")
				continue
		with open(url_file) as ufile:
			count = 0
			for line in ufile.readlines():
				if repo_crawl is False:
					count += 1
					urlpage = connect(line.rstrip())
					if urlpage == 'connection failed':
						lib.PrintFailure(f"[Line: {count}] Connection failed on host {line}")
					else:
						better_search_execute(displaymode, urlpage, repo_crawl, verbosity)
						sleep(limiter)
				else:
					if link_type == 'profile':
						resources = get_repos(line)
					elif link_type == 'repo':
						resources = line
					file_addresses = traverse_repos(resources, link_type, directory_filtering, blacklisted_directories, verbosity)
					# Guard against an empty list; ThreadPoolExecutor rejects max_workers=0.
					executor = ThreadPoolExecutor(max_workers=max(len(file_addresses), 1))
					for addr in set(file_addresses):
						urlpage = connect(addr)
						executor.submit(better_search_execute, displaymode, urlpage, repo_crawl, verbosity)
						sleep(limiter)
Example #14
         if stop_input == 'True':
             stop_input = True
         else:
             stop_input = int(stop_input)
         limiter = int(parser.get('initial_vars', 'limiter'))
         cooldown = int(parser.get('initial_vars', 'cooldown'))
         # getboolean converts the stored True/False text; a plain get() returns
         # the string 'True', which the `blacklisting is True` checks never match.
         blacklisting = parser.getboolean('initial_vars', 'blacklisting')
         blacklist = parser.get('initial_vars', 'blacklist')
         reglisting = parser.getboolean('initial_vars', 'reglisting')
         reglist = parser.get('initial_vars', 'reglist')
         keylisting = parser.getboolean('initial_vars', 'keylisting')
         key_list = parser.get('initial_vars', 'key_list')
         arch_mode = parser.get('initial_vars', 'arch_mode')
         ArchiveSearch(stop_input, arch_mode)
     else:
         lib.PrintError("No such file found")
         continue
 elif configchoice.lower() == 'n':
     while True:
         workpath = input(
             "Enter the path you wish to save text documents to (enter curdir for current directory): "
         )
         if workpath.lower() == 'curdir':
             workpath = curdir
         if path.isdir(workpath):
             lib.PrintSuccess("Valid Path...")
             if not workpath.endswith('\\'):
                 workpath = workpath + '\\'
             break
Example #15
def manual_setup():
	while True:
		displaymode = input("[p]rint to screen, [s]ave to file, or [b]oth: ")
		if displaymode.lower() not in ['p', 's', 'b']:
			lib.PrintError("Invalid Input")
			continue
		break
	while True:
		scrape_input_method = input("[m]anual input (single url) or load from [f]ile: ")
		if scrape_input_method.lower() not in ['m', 'f']:
			lib.PrintError("Invalid Input")
			continue
		break
	while True:
		try:
			limiter = int(input("Enter the time between requests, in seconds: "))
			if limiter < 0:
				continue
			break
		except ValueError:
			lib.PrintError("Invalid Input. Enter a positive integer.")
			continue
	lib.PrintStatus("\nIf provided links to one (or multiple) github profiles, Keyring can crawl all repositories for secrets.")
	lib.PrintStatus("However, this means Keyring WILL NOT FUNCTION CORRECTLY if provided links to other pages in the same text file.")
	lib.PrintStatus("Large profiles will also take a fairly long time, as Keyring fetches ALL files from ALL repos.\n")
	while True:
		repocrawlchoice = input("Enable repo crawling? [y]/[n]: ")
		if repocrawlchoice.lower() not in ['y', 'n']:
			lib.PrintError("Invalid Input.")
			continue
		elif repocrawlchoice.lower() == 'y':
			repo_crawl = True
			while True:
				lib.PrintHighSeverity("Warning: Turning on verbosity will output a LOT when spidering large profiles.")
				verbosity = input("Select verbosity for spidering: [off]/[on]: ")
				if verbosity.lower() not in ['off', 'on']:
					lib.PrintError("Invalid Input.")
					continue
				else:
					break
			break
		elif repocrawlchoice.lower() == 'n':
			repo_crawl = False
			verbosity = 'off'
			break
	while True:
		savechoice = input("Save choices as config file? [y]/[n]: ")
		if savechoice.lower() == 'n':
			break
		elif savechoice.lower() == 'y':
			if isdir(f'{curdir}/KRconfig') is False:
				lib.PrintError(f"Config directory not detected in {curdir}...")
				lib.PrintStatus("Making config directory...")
				mkdir(f'{curdir}/KRconfig')
			configname = input("Enter the name for this configuration: ")
			with open(f'{curdir}/KRconfig/{configname}.ini', 'w') as cfile:
				cfile.write(
f'''[initial_vars]
displaymode = {displaymode}
[scraping_vars]
scrape_input_method = {scrape_input_method}
limiter = {limiter}
repo_crawl = {repo_crawl}
verbosity = {verbosity}
''')
				break
		else:
			lib.PrintError("Invalid Input.")
			continue
	return displaymode, scrape_input_method, limiter, repo_crawl, verbosity
Example #16
def load_config():
	while True:
		if isdir(f'{curdir}/KRconfig') is False:
			lib.PrintError(f"Config directory not detected in {curdir}...")
			lib.PrintError(f"Please move KRconfig directory into {curdir}")
			cont = input('Continue? [y/n]: ')
			if cont.lower() == 'y':
				continue
			elif cont.lower() == 'n':
				exit()
			elif cont == "":
				lib.DoNothing()
			else:
				lib.PrintFailure("Invalid Input")
				continue
		else:
			break
	config_files = {}
	count = 0
	onlyfiles = [f for f in listdir(f'{curdir}/KRconfig') if isfile(join(f'{curdir}/KRconfig', f))]
	for file in onlyfiles:
		if file.endswith('.ini'):
			count += 1
			config_files[file] = count
	if count == 0:
		lib.PrintStatus("No config files detected, making default...")
		with codecs.open(f'{curdir}/KRconfig/defaultconfig.ini', 'w', 'utf-8') as dconf:
			dconf.write(
'''[initial_vars]
displaymode = b
[scraping_vars]
scrape_input_method = m
limiter = 5
repo_crawl = False
verbosity = off''')
		config_files['Default Configuration'] = 1
		count += 1
	for k in config_files.keys():
		print(f"[{config_files[k]}]: {k}")
	while True:
		try:
			load_choice = int(input("Select which config file to load: "))
			if load_choice > count:
				raise ValueError
			break
		except ValueError:
			lib.PrintFailure("Invalid Input. Please enter the integer that corresponds with the desired config file.")
			continue
	for k in config_files.keys():
		if load_choice == config_files[k]:
			selected_file = k
	parser.read(f"{curdir}/KRconfig/{selected_file}", encoding='utf-8')
	# Initial Variables
	displaymode = parser.get('initial_vars', 'displaymode')
	# Scraping Variables
	scrape_input_method = parser.get('scraping_vars', 'scrape_input_method')
	limiter = int(parser.get('scraping_vars', 'limiter'))
	repo_crawl = parser.getboolean('scraping_vars', 'repo_crawl')
	verbosity = parser.get('scraping_vars', 'verbosity')
	return displaymode, scrape_input_method, limiter, repo_crawl, verbosity
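
ConfigParser can do the string-to-boolean conversion itself, as load_config in Example #7 already does with parser.getboolean. A minimal sketch of its behavior:

from configparser import ConfigParser

parser = ConfigParser()
parser.read_string("""[scraping_vars]
repo_crawl = True
verbosity = off
""")

# getboolean accepts 1/yes/true/on and 0/no/false/off, case-insensitively.
print(parser.getboolean('scraping_vars', 'repo_crawl'))  # -> True
print(parser.getboolean('scraping_vars', 'verbosity'))   # -> False ('off')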
Example #17
def manual_setup():
	while True:
		displaymode = input("[p]rint to screen, [s]ave to file, or [b]oth: ")
		if displaymode == "":
			pass
			continue
		elif displaymode.lower() == 'p' or 's' or 'b':
			break
		else:
			lib.PrintError("Invalid Input.")
			continue
	while True:
		scrape_input_method = input("[m]anual input (single url) or load from [f]ile: ")
		if scrape_input_method == "":
			continue
		elif scrape_input_method.lower() in ('m', 'f'):
			break
		else:
			lib.PrintError("Invalid Input.")
			continue
	while True:
		try:
			limiter = int(input("Enter the time between requests, in seconds: "))
			if limiter < 0:
				continue
			break
		except ValueError:
			lib.PrintError("Invalid Input. Enter a positive integer.")
			continue
	lib.PrintStatus("If provided links to one (or multiple) github profiles, Keyring can crawl all repositories for secrets.")
	lib.PrintStatus("If provided links to github repositories, Keyring can crawl all files in that repository.")
	lib.PrintStatus("However, this means Keyring WILL NOT FUNCTION CORRECTLY if provided links to other pages in the same text file, or if profile and repo links are mixed.")
	lib.PrintStatus("Large profiles will also take a fairly long time, as Keyring fetches ALL files from ALL repos.")
	while True:
		repocrawlchoice = input("Enable repo crawling? [y]/[n]: ")
		if repocrawlchoice == "":
			pass
			continue
		elif repocrawlchoice.lower() == 'y':
			repo_crawl = True
			while True:
				lib.PrintHighSeverity("Warning: Turning on verbosity will output a LOT when spidering large profiles.")
				verbosity = input("Enable verbosity for spidering: [y]/[n]: ")
				if verbosity == "":
					pass
					continue
				elif verbosity.lower() == 'y' or 'n':
					break
				else:
					lib.PrintError("Invalid Input.")
					continue
			while True:
				link_type_input = input("Github [p]rofile links or Github [r]epository links?: ")
				if link_type_input == "":
					pass
					continue
				elif link_type_input.lower() == 'p':
					link_type = 'profile'
					break
				elif link_type_input.lower() == 'r':
					link_type = 'repo'
					break
				else:
					lib.PrintError("Invalid Input.")
					continue
			while True:
				lib.PrintStatus("Repositories may contain large directories with no value in crawling, such as dependency folders.")
				directory_filtering_status = input("Enable directory filtering: [y]/[n]: ")
				if directory_filtering_status.lower() == 'y':
					directory_filtering = True
					blacklisted_directories = input("Enter the directory names you wish to filter (separated by a single comma): ").split(',')
					break
				elif directory_filtering_status.lower() == 'n':
					directory_filtering = False
					blacklisted_directories = [] #placeholder for configparser
					break
				elif directory_filtering_status == "":
					pass
					continue
				else:
					lib.PrintError("Invalid Input.")
					continue
			break
		elif repocrawlchoice.lower() == 'n':
			repo_crawl = False
			link_type = 'regular'
			directory_filtering = False
			blacklisted_directories = []
			verbosity = 'off'
			break
		else:
			lib.PrintError("Invalid Input.")
			continue
	while True:
		savechoice = input("Save choices as config file? [y]/[n]: ")
		if savechoice.lower() == 'n':
			break
		elif savechoice.lower() == 'y':
			if isdir(f'{curdir}/KRconfig') is False:
				lib.PrintError(f"Config directory not detected in {curdir}...")
				lib.PrintStatus("Making config directory...")
				mkdir(f'{curdir}/KRconfig')
			configname = input("Enter the name for this configuration: ")
			with open(f'{curdir}/KRconfig/{configname}.ini', 'w') as cfile:
				cfile.write(
f'''[initial_vars]
displaymode = {displaymode}
[scraping_vars]
scrape_input_method = {scrape_input_method}
limiter = {limiter}
repo_crawl = {repo_crawl}
link_type = {link_type}
directory_filtering = {directory_filtering}
blacklisted_directories = {blacklisted_directories}
verbosity = {verbosity}
''')
		else:
			lib.PrintError("Invalid Input.")
			continue
		break
	return displaymode, scrape_input_method, limiter, repo_crawl, link_type, directory_filtering, blacklisted_directories, verbosity
Example #18
def print_timeouterror():
    # Nested helper: e and archive_url are bound in the enclosing scope.
    lib.PrintError(f"""
Exception occurred: {e}
Possible causes: Too many requests made to {archive_url}
Possible fixes: Check firewall settings and check the status of {archive_url}.
        """)
Example #19
	def PrintGenericError():
		# Nested helper: e is bound in the enclosing scope.
		lib.PrintError(f"""
Exception occurred: {e}
		""")
Example #20
def print_genericerror():
    # Nested helper: e is bound in the enclosing scope.
    lib.PrintError(f"""
Exception occurred: {e}
        """)
Example #21
	def PrintConnectError():
		# Nested helper: e and url are bound in the enclosing scope.
		lib.PrintError(f"""
Exception occurred: {e}
Possible causes: Poor/Non-functioning Internet connection or {url} is unreachable
Possible fixes: Troubleshoot internet connection or check status of {url}
		""")