def ExecuteModule(self, Task_queue, Results_queue, domain):
    """Worker loop: pull module names off Task_queue, run each module's
    execute() against domain, and push every gathered email onto
    Results_queue. Exits when a None sentinel is read from the queue.
    """
    while True:
        Task = Task_queue.get()
        # A None sentinel means the queue is drained: exit this process.
        if Task is None:
            break
        # Instantiate the module class for this task.
        try:
            Task = self.modules[Task]
            Module = Task.ClassName(domain)
            name = "[*] Starting: " + Module.name
            print(helpers.color(name, status=True))
            # Run the module; execute() returns emails as a list.
            try:
                Emails = Module.execute()
                if Emails:
                    count = len(Emails)
                    Length = "[*] " + Module.name + \
                        ": Gathered " + str(count) + " Email(s)!"
                    print(helpers.color(Length, status=True))
                    for Email in Emails:
                        Results_queue.put(Email)
                else:
                    Message = "[*] " + Module.name + \
                        " has completed with no Email(s)"
                    print(helpers.color(Message, status=True))
            except Exception as e:
                error = "[!] Error During Runtime in Module " + \
                    Module.name + ": " + str(e)
                print(helpers.color(error, warning=True))
        except Exception as e:
            error = "[!] Error Loading Module: " + str(e)
            print(helpers.color(error, warning=True))
def CleanResults(self, domain):
    """Filter gathered results down to the target domain and drop duplicates.

    Returns (FinalList, HtmlFinalList): deduplicated email results and
    deduplicated HTML-source results, preserving first-seen order.
    """
    # Keep only items mentioning the requested domain. The check is loose
    # on purpose: better a stray hit than a missed one.
    SecondList = [item for item in self.ConsumerList if domain in item]
    HtmlSecondList = [item for item in self.HtmlList if domain in item]

    # Order-preserving dedupe; the set makes membership O(1) instead of
    # the original O(n) 'not in list' scan per item.
    FinalList = []
    seen = set()
    for item in SecondList:
        if item not in seen:
            seen.add(item)
            FinalList.append(item)

    # Same dedupe for HTML sources: duplicate emails are expected across
    # sources, but duplicate sources are not wanted.
    HtmlFinalList = []
    seen = set()
    for item in HtmlSecondList:
        if item not in seen:
            seen.add(item)
            HtmlFinalList.append(item)

    print(helpers.color("[*] Completed Cleaning Results", status=True))
    return FinalList, HtmlFinalList
def process(self):
    """Page through GitHub code-search results for the target domain and
    accumulate the raw HTML of every linked result page into self.Html.
    """
    # Example query:
    # https://github.com/search?p=2&q=enron.com+&ref=searchresults&type=Code&utf8=✓
    UrlList = []
    while self.Counter <= self.Depth:
        try:
            url = "https://github.com/search?p=" + str(self.Counter) + "&q=" + \
                str(self.domain) + "+&ref=searchresults&type=Code&utf8=✓"
            r = requests.get(url, timeout=2)
            # Anything other than 200 means the results ran out (or we
            # were blocked), so stop paging.
            if r.status_code != 200:
                break
        except Exception as e:
            # Fixed: the message had "self.Counter += 1" pasted into it,
            # and the counter was never advanced here, so a persistent
            # failure looped forever and left 'r' undefined below.
            error = "[!] Major issue with GitHub Search:" + str(e)
            print(helpers.color(error, warning=True))
            self.Counter += 1
            continue
        RawHtml = r.content
        # Parse the result page for candidate repo/file links.
        soup = BeautifulSoup(RawHtml)
        for a in soup.findAll('a', href=True):
            a = a['href']
            if a.startswith('/'):
                UrlList.append(a)
        self.Counter += 1
    # Now take all gathered URLs and fetch their HTML content.
    for Url in UrlList:
        try:
            Url = "https://github.com" + Url
            html = requests.get(Url, timeout=2)
            self.Html += html.content
        except Exception as e:
            error = "[!] Connection Timed out on Github Search:" + str(e)
            print(helpers.color(error, warning=True))
def process(self):
    """Query the EmailHunter trial API for the target domain and append
    every returned address to self.results.
    """
    try:
        # The API returns a JSON object.
        url = "https://emailhunter.co/trial/v1/search?offset=0&domain=" + \
            self.domain + "&format=json"
        r = requests.get(url)
    except Exception as e:
        error = "[!] Major issue with EmailHunter Search:" + str(e)
        print(helpers.color(error, warning=True))
    try:
        results = r.json()
        # Make sure the API actually returned data before indexing it.
        if results['status'] == "success":
            EmailCount = int(results['results'])
            # The emails array is indexed 0..EmailCount-1.
            for x in range(EmailCount):
                self.results.append(results['emails'][x]['value'])
    except Exception:
        # Best effort: a failed request or malformed response is ignored.
        pass
    if self.verbose:
        # Fixed typo: was "TEmal Hunter".
        p = '[*] Email Hunter completed JSON request'
        print(helpers.color(p, firewall=True))
def TaskControler(version):
    """Parse CLI options and dispatch the requested work (list modules,
    test a single module, or run everything) to the TaskController.
    """
    cli_all, cli_domain, cli_list, cli_test, cli_scope, cli_verbose = cli_parser()
    cli_domain = cli_domain.lower()

    Task = TaskController.Conducter()
    Task.load_modules()

    # Just list the available modules, check version, and exit.
    if cli_list:
        Task.ListModules()
        V = VersionCheck.VersionCheck(version)
        V.VersionRequest()
        sys.exit(0)

    # A usable domain is required for everything below.
    if len(cli_domain) <= 1:
        print(helpers.color("[*] No Domain Supplied to start up!\n", warning=True))
        sys.exit(0)

    # Run a single named module as a quick test.
    if cli_test:
        V = VersionCheck.VersionCheck(version)
        V.VersionRequest()
        Task.TestModule(cli_domain, cli_test, verbose=cli_verbose,
                        scope=cli_scope)

    # Run every module against the domain.
    if cli_all:
        V = VersionCheck.VersionCheck(version)
        V.VersionRequest()
        Task.TaskSelector(cli_domain, verbose=cli_verbose, scope=cli_scope)
def __init__(self, domain, verbose=False):
    """Load Hunter API settings from Common/SimplyEmail.ini."""
    self.apikey = True
    self.name = "Hunter API"
    self.description = "Search the Hunter DB for potential emails"
    self.domain = domain
    self.results = []
    self.verbose = verbose
    config = configparser.ConfigParser()
    try:
        self.logger = logging.getLogger("SimplyEmail.Hunter")
        config.read('Common/SimplyEmail.ini')
        self.UserAgent = str(config['GlobalSettings']['UserAgent'])
        self.apikeyv = str(config['APIKeys']['Hunter'])
        self.RequestLimit = int(config['Hunter']['RequestLimit'])
        self.QuotaLimit = int(config['Hunter']['QuotaLimit'])
        self.EmailType = str(config['Hunter']['EmailType'])
        # Map the configured email type onto the API query fragment
        # (self.type) and the JSON counter field we read back (self.etype).
        if self.EmailType == "Both":
            self.type, self.etype = "", "total"
        elif self.EmailType == "Personal":
            self.type, self.etype = "&type=personal", "personal_emails"
        elif self.EmailType == "Generic":
            self.type, self.etype = "&type=generic", "generic_emails"
        else:
            raise Exception("Email Type setting invalid")
    except Exception as e:
        self.logger.critical("Hunter module failed to __init__: " + str(e))
        print(helpers.color(" [*] Error in Hunter settings: " + str(e) + "\n",
                            warning=True))
def __init__(self, Domain, verbose=False):
    """Load Google XLSX dorking settings from Common/SimplyEmail.ini."""
    self.apikey = False
    self.name = "Google XLSX Search for Emails"
    self.description = "Uses Google Dorking to search for emails"
    config = configparser.ConfigParser()
    try:
        self.logger = logging.getLogger("SimplyEmail.GoogleXlsxSearch")
        config.read("Common/SimplyEmail.ini")
        self.Domain = Domain
        self.Quanity = int(config["GoogleXlsxSearch"]["StartQuantity"])
        self.Limit = int(config["GoogleXlsxSearch"]["QueryLimit"])
        self.UserAgent = {"User-Agent": helpers.getua()}
        self.Counter = int(config["GoogleXlsxSearch"]["QueryStart"])
        # Sleep/Jitter throttle the query rate between Google requests.
        self.Sleep = int(config["SleepConfig"]["QuerySleep"])
        self.Jitter = int(config["SleepConfig"]["QueryJitter"])
        self.verbose = verbose
        self.urlList = []
        self.Text = ""
    except Exception as e:
        self.logger.critical(
            "GoogleXlsxSearch module failed to load: " + str(e))
        print(helpers.color(
            " [*] Major Settings for GoogleXlsxSearch are missing, EXITING!\n",
            warning=True,
        ))
def get_lat_lng(self, mac_address=None):
    """Resolve a BSSID (MAC) to a location via WIGLE.

    Returns the parsed location response, or an explanatory string when
    the BSSID is not known to WIGLE.
    """
    # Start with a credential check.
    try:
        self.response = send_user_check()
        response = self.check_cred_login()
        if response == 'false':
            print(" [*] Unable to validate this user...")
    except Exception:
        # Reaching here means the login check short-circuited: treat the
        # user as already validated. (Narrowed from a bare 'except:'.)
        print(helpers.color('[*] WIGLE: This user was validated', bold=False))
    if mac_address is None:
        mac_address = self.mac_address
    # WIGLE expects colon-separated MACs.
    if '-' in mac_address:
        mac_address = mac_address.replace('-', ':')
    try:
        self.query_response = self.send_query(mac_address)
        # The "message" key is not always present in query responses,
        # so probe it defensively.
        try:
            message = self.check_query_limit()
            if message == "too many queries":
                print(" [*]" + message)
        except Exception:
            # No message key means there is no error to report.
            pass
        response = self.parse_response()
    except IndexError:
        # Unknown BSSID: return the explanation directly.
        return 'BSSID (MAC) location not known'
    # Fixed: this success message previously sat after an unconditional
    # early return and was unreachable.
    print(helpers.color(
        '[*] WIGLE: Lat / Long and SSID have been retrived', bold=False))
    return response
def TaskControler(version):
    """Parse CLI options and dispatch the requested work (list modules,
    test a single module, or run everything) to the TaskController.
    """
    cli_all, cli_domain, cli_list, cli_test, cli_verbose = cli_parser()
    cli_domain = cli_domain.lower()

    Task = TaskController.Conducter()
    Task.load_modules()

    # Just list the available modules, check version, and exit.
    if cli_list:
        Task.ListModules()
        V = VersionCheck.VersionCheck(version)
        V.VersionRequest()
        sys.exit(0)

    # A usable domain is required for everything below.
    if len(cli_domain) <= 1:
        print(helpers.color("[*] No Domain Supplied to start up!\n", warning=True))
        sys.exit(0)

    # Run a single named module as a quick test.
    if cli_test:
        V = VersionCheck.VersionCheck(version)
        V.VersionRequest()
        Task.TestModule(cli_domain, cli_test, verbose=cli_verbose)

    # Run every module against the domain.
    if cli_all:
        V = VersionCheck.VersionCheck(version)
        V.VersionRequest()
        Task.TaskSelector(cli_domain, verbose=cli_verbose)
def search(self):
    """Page through Google results for "@domain" mentions, accumulating
    the raw HTML of every page into self.Html, 100 results at a time.
    """
    while self.Counter <= self.Limit and self.Counter <= 1000:
        # Light throttle to stay friendly with Google.
        time.sleep(1)
        # Fixed: an unused second variant of this query string was built
        # on every pass; only this form was ever requested.
        urly = (
            "http://www.google.com/search?num="
            + str(self.Quanity)
            + "&start="
            + str(self.Counter)
            + '&hl=en&meta=&q=%40"'
            + self.Domain
            + '"'
        )
        try:
            r = requests.get(urly, headers=self.UserAgent)
        except Exception as e:
            error = "[!] Fail during Request to Google (Check Connection):" + str(e)
            print(helpers.color(error, warning=True))
            # Fixed: 'r' would be undefined below after a failed request;
            # skip to the next page instead of crashing.
            self.Counter += 100
            continue
        self.Html += r.content
        self.Counter += 100
def __init__(self, domain, verbose=False):
    """Read the wget-style HtmlScrape options from Common/SimplyEmail.ini."""
    self.apikey = False
    # Descriptions are required!!!
    self.name = "HTML Scrape of Target Website"
    self.description = "Html Scrape the target website for emails and data"
    config = configparser.ConfigParser()
    try:
        config.read('Common/SimplyEmail.ini')
        self.verbose = verbose
        self.domain = domain
        # Each option is pre-rendered as a wget command-line flag.
        self.useragent = "--user-agent=" + \
            str(config['GlobalSettings']['UserAgent'])
        self.depth = "--level=" + str(config['HtmlScrape']['Depth'])
        self.wait = "--wait=" + str(config['HtmlScrape']['Wait'])
        self.limit_rate = "--limit-rate=" + \
            str(config['HtmlScrape']['LimitRate'])
        self.timeout = "--read-timeout=" + \
            str(config['HtmlScrape']['Timeout'])
        self.save = "--directory-prefix=" + \
            str(config['HtmlScrape']['Save']) + str(self.domain)
        self.remove = str(config['HtmlScrape']['RemoveHTML'])
    except Exception:
        # Narrowed from a bare 'except:' so BaseExceptions are not hidden.
        print(helpers.color(" [*] Major Settings for HTML are missing, EXITING!\n",
                            warning=True))
def TaskStarter(version):
    """Parse CLI options, start logging, and dispatch the requested work
    (list modules, test one module, or run them all) to the TaskController.
    """
    log = helpers.log()
    log.start()
    cli_all, cli_domain, cli_list, cli_test, cli_scope, cli_names, cli_verify, cli_verbose = cli_parser()
    cli_domain = cli_domain.lower()

    Task = TaskController.Conducter()
    Task.load_modules()

    # Just list the available modules, check version, and exit.
    if cli_list:
        log.infomsg("Tasked to List Modules", "Main")
        Task.ListModules()
        V = VersionCheck.VersionCheck(version)
        V.VersionRequest()
        sys.exit(0)

    # A usable domain is required for everything below.
    if len(cli_domain) <= 1:
        log.warningmsg("Domain not supplied", "Main")
        print(helpers.color("[*] No Domain Supplied to start up!\n", warning=True))
        sys.exit(0)

    # Run a single named module as a quick test.
    if cli_test:
        log.infomsg("Tasked to Test Module: " + cli_test, "Main")
        V = VersionCheck.VersionCheck(version)
        V.VersionRequest()
        Task.TestModule(cli_domain, cli_test, verbose=cli_verbose,
                        scope=cli_scope, Names=cli_names, Verify=cli_verify)

    # Run every module against the domain.
    if cli_all:
        log.infomsg("Tasked to run all Modules on domain: " + cli_domain, "Main")
        V = VersionCheck.VersionCheck(version)
        V.VersionRequest()
        Task.TaskSelector(cli_domain, verbose=cli_verbose, scope=cli_scope,
                          Names=cli_names, Verify=cli_verify)
def process(self):
    """Page through GitHub code-search results for the target domain and
    accumulate the raw HTML of every linked result page into self.Html.
    """
    # Example query:
    # https://github.com/search?p=2&q=enron.com+&ref=searchresults&type=Code&utf8=✓
    UrlList = []
    while self.Counter <= self.Depth:
        try:
            url = "https://github.com/search?p=" + str(self.Counter) + "&q=" + \
                str(self.domain) + "+&ref=searchresults&type=Code&utf8=✓"
            r = requests.get(url, timeout=2)
            # Anything other than 200 means the results ran out (or we
            # were blocked), so stop paging.
            if r.status_code != 200:
                break
        except Exception as e:
            # Fixed: the message had "self.Counter += 1" pasted into it,
            # and the counter was never advanced here, so a persistent
            # failure looped forever and left 'r' undefined below.
            error = "[!] Major issue with GitHub Search:" + str(e)
            print(helpers.color(error, warning=True))
            self.Counter += 1
            continue
        RawHtml = r.content
        # Parse the result page for candidate repo/file links.
        soup = BeautifulSoup(RawHtml)
        for a in soup.findAll('a', href=True):
            a = a['href']
            if a.startswith('/'):
                UrlList.append(a)
        self.Counter += 1
    # Now take all gathered URLs and fetch their HTML content.
    for Url in UrlList:
        try:
            Url = "https://github.com" + Url
            html = requests.get(Url, timeout=2)
            self.Html += html.content
        except Exception as e:
            error = "[!] Connection Timed out on Github Search:" + str(e)
            print(helpers.color(error, warning=True))
def process(self):
    """Fetch the Flickr search page for @domain; store raw HTML in self.results."""
    try:
        url = "https://www.flickr.com/search/?text=%40" + self.domain
        r = requests.get(url)
    except Exception as e:
        error = "[!] Major issue with Flickr Search:" + str(e)
        print(helpers.color(error, warning=True))
        # Fixed: 'r' is undefined after a failed request; stop here
        # instead of raising NameError below.
        return
    self.results = r.content
def process(self):
    """Query the HackerTarget whois API; store the raw response in self.results."""
    try:
        url = "http://api.hackertarget.com/whois/?q=" + \
            self.domain
        r = requests.get(url)
    except Exception as e:
        error = "[!] Major issue with Whois Search:" + str(e)
        print(helpers.color(error, warning=True))
        # Fixed: 'r' is undefined after a failed request; stop here
        # instead of raising NameError below.
        return
    self.results = r.content
def process(self):
    """Fetch the Whoisology archive page for the domain into self.results."""
    try:
        url = "https://whoisology.com/archive_11/" + \
            self.domain
        r = requests.get(url)
    except Exception as e:
        error = "[!] Major issue with Whoisology Search:" + str(e)
        print(helpers.color(error, warning=True))
        # Fixed: 'r' is undefined after a failed request; stop here
        # instead of raising NameError below.
        return
    self.results = r.content
def process(self):
    """Query the PGP key-server index for the domain; store raw HTML in self.results."""
    try:
        url = "http://pgp.rediris.es:11371/pks/lookup?search=" + \
            self.domain + "&op=index"
        r = requests.get(url)
    except Exception as e:
        error = "[!] Major issue with PGP Search:" + str(e)
        print(helpers.color(error, warning=True))
        # Fixed: 'r' is undefined after a failed request; stop here
        # instead of raising NameError below.
        return
    self.results = r.content
def __init__(self, domain):
    """Load Whois search settings from Common/SimplyEmail.ini."""
    self.name = "Searching Whois"
    self.description = "Search the Whois database for potential POC emails"
    self.domain = domain
    config = configparser.ConfigParser()
    self.results = ""
    try:
        config.read('Common/SimplyEmail.ini')
        self.UserAgent = str(config['GlobalSettings']['UserAgent'])
    except Exception:
        # Narrowed from a bare 'except:' so BaseExceptions are not hidden.
        print(helpers.color("[*] Major Settings for Search Whois are missing, EXITING!\n",
                            warning=True))
def __init__(self, domain):
    """Load EmailHunter trial-API settings from Common/SimplyEmail.ini."""
    self.name = "EmailHunter Trial API"
    self.description = "Search the EmailHunter DB for potential emails"
    self.domain = domain
    config = configparser.ConfigParser()
    self.results = []
    try:
        config.read('Common/SimplyEmail.ini')
        self.UserAgent = str(config['GlobalSettings']['UserAgent'])
    except Exception:
        # Narrowed from a bare 'except:' so BaseExceptions are not hidden.
        print(helpers.color("[*] Major Settings for EmailHunter are missing, EXITING!\n",
                            warning=True))
def process(self):
    """Query the HackerTarget whois API (verbose-aware); store the raw
    response in self.results.
    """
    try:
        if self.verbose:
            p = '[*] Requesting API on HackerTarget whois'
            print(helpers.color(p, firewall=True))
        url = "http://api.hackertarget.com/whois/?q=" + \
            self.domain
        r = requests.get(url)
    except Exception as e:
        error = "[!] Major issue with Whois Search:" + str(e)
        print(helpers.color(error, warning=True))
        # Fixed: 'r' is undefined after a failed request; stop here
        # instead of raising NameError below.
        return
    self.results = r.content
def __init__(self, domain):
    """Load GitHub code-search settings from Common/SimplyEmail.ini."""
    self.name = "Searching GitHub Code"
    self.description = "Search GitHub code for emails using a large pool of code searches"
    self.domain = domain
    config = configparser.ConfigParser()
    self.Html = ""
    try:
        config.read('Common/SimplyEmail.ini')
        self.Depth = int(config['GitHubSearch']['PageDepth'])
        self.Counter = int(config['GitHubSearch']['QueryStart'])
    except Exception:
        # Narrowed from a bare 'except:' so BaseExceptions are not hidden.
        print(helpers.color("[*] Major Settings for GitHubSearch are missing, EXITING!\n",
                            warning=True))
def process(self):
    """Pull the Flickr search page for @domain via the Download helper and
    append the raw HTML to self.results.
    """
    dl = Download.Download(verbose=self.verbose)
    try:
        url = "https://www.flickr.com/search/?text=%40" + self.domain
        rawhtml = dl.requesturl(url, useragent=self.UserAgent)
    except Exception as e:
        error = " [!] Major issue with Flickr Search:" + str(e)
        print(helpers.color(error, warning=True))
        # Fixed: 'rawhtml' is undefined after a failed request; stop here
        # instead of raising NameError below.
        return
    self.results += rawhtml
    if self.verbose:
        p = ' [*] FlickrSearch has completed'
        print(helpers.color(p, firewall=True))
def process(self):
    """Fetch the Whoisology archive page (verbose-aware) into self.results."""
    try:
        if self.verbose:
            p = '[*] Yahoo Whoisology request underway'
            print(helpers.color(p, firewall=True))
        url = "https://whoisology.com/archive_11/" + \
            self.domain
        r = requests.get(url)
    except Exception as e:
        error = "[!] Major issue with Whoisology Search:" + str(e)
        print(helpers.color(error, warning=True))
        # Fixed: 'r' is undefined after a failed request; stop here
        # instead of raising NameError below.
        return
    self.results = r.content
def __init__(self, domain, verbose=False):
    """Load EmailHunter trial-API settings from Common/SimplyEmail.ini."""
    self.name = "EmailHunter Trial API"
    self.description = "Search the EmailHunter DB for potential emails"
    self.domain = domain
    config = configparser.ConfigParser()
    self.results = []
    self.verbose = verbose
    try:
        config.read('Common/SimplyEmail.ini')
        self.UserAgent = str(config['GlobalSettings']['UserAgent'])
    except Exception:
        # Narrowed from a bare 'except:' so BaseExceptions are not hidden.
        print(helpers.color("[*] Major Settings for EmailHunter are missing, EXITING!\n",
                            warning=True))
def ListModules(self):
    """Pretty-print the loaded module names, numbered and separated by a
    blank line whenever the top-level directory changes.
    """
    print(helpers.color(" [*] Available Modules are:\n", blue=True))
    lastBase = None
    for index, name in enumerate(self.modules, start=1):
        base = name.split("/")[0]
        # Blank line between groups that share a base directory.
        if lastBase and base != lastBase:
            print("")
        lastBase = base
        print("\t%s)\t%s" % (index, '{0: <24}'.format(name)))
    print("")
def __init__(self, Domain, verbose=False):
    """Load OnInstagram search settings from Common/SimplyEmail.ini."""
    self.apikey = False
    self.name = "OnInstagram Search For Instagram Users"
    self.description = "Uses OnInstagrams search engine"
    config = configparser.ConfigParser()
    try:
        config.read('Common/SimplyEmail.ini')
        self.Domain = Domain
        self.Html = ""
        self.verbose = verbose
    except Exception:
        # Narrowed from a bare 'except:' so BaseExceptions are not hidden.
        print(helpers.color(" [*] Major Settings for OnInstagram are missing, EXITING!\n",
                            warning=True))
def __init__(self, domain):
    """Load Canary PasteBin search settings from Common/SimplyEmail.ini."""
    self.name = "Searching Canary Paste Bin"
    self.description = "Search Canary for paste potential data dumps, this can take a bit but a great source"
    self.domain = domain
    config = configparser.ConfigParser()
    self.Html = ""
    try:
        config.read('Common/SimplyEmail.ini')
        self.Depth = int(config['CanaryPasteBin']['PageDepth'])
        self.Counter = int(config['CanaryPasteBin']['QueryStart'])
    except Exception:
        # Narrowed from a bare 'except:' so BaseExceptions are not hidden.
        print(helpers.color("[*] Major Settings for Canary PasteBin Search are missing, EXITING!\n",
                            warning=True))
def __init__(self, domain):
    """Load Flickr search settings from Common/SimplyEmail.ini."""
    self.name = "Searching Flicker"
    self.description = "Search the Flicker top relvant results for emails"
    self.domain = domain
    config = configparser.ConfigParser()
    self.results = ""
    try:
        config.read('Common/SimplyEmail.ini')
        self.HostName = str(config['FlickrSearch']['Hostname'])
        self.UserAgent = str(config['GlobalSettings']['UserAgent'])
    except Exception:
        # Narrowed from a bare 'except:' so BaseExceptions are not hidden.
        print(helpers.color("[*] Major Settings for FlickrSearch are missing, EXITING!\n",
                            warning=True))
def process(self):
    """Fetch OnInstagram profile-search results for the domain into self.Html."""
    try:
        # The page seems to dynamically expand, so one request suffices.
        url = "http://www.oninstagram.com/profile/search?query=" + \
            self.Domain
        r = requests.get(url)
    except Exception as e:
        error = " [!] Major issue with OnInstagram Search:" + str(e)
        print(helpers.color(error, warning=True))
        # Fixed: 'r' is undefined after a failed request; stop here
        # instead of raising NameError below.
        return
    if self.verbose:
        p = ' [*] Instagram search Complete'
        print(helpers.color(p, firewall=True))
    self.Html = r.content
def __init__(self, domain):
    """Load Whois search settings from Common/SimplyEmail.ini."""
    self.name = "Searching Whois"
    self.description = "Search the Whois database for potential POC emails"
    self.domain = domain
    config = configparser.ConfigParser()
    self.results = ""
    try:
        config.read('Common/SimplyEmail.ini')
        self.UserAgent = str(config['GlobalSettings']['UserAgent'])
    except Exception:
        # Narrowed from a bare 'except:' so BaseExceptions are not hidden.
        print(helpers.color(
            "[*] Major Settings for Search Whois are missing, EXITING!\n",
            warning=True))
def search(self):
    """Google-dork for PDF files on the target domain, download each hit,
    and append its extracted text to self.Text.
    """
    while self.Counter <= self.Limit and self.Counter <= 100:
        time.sleep(1)
        if self.verbose:
            p = '[*] Google PDF Search on page: ' + str(self.Counter)
            print(helpers.color(p, firewall=True))
        urly = "https://www.google.com/search?q=site:" + self.Domain + \
            "+filetype:pdf&start=" + str(self.Counter)
        try:
            r = requests.get(urly)
        except Exception as e:
            error = "[!] Fail during Request to Google (Check Connection):" + \
                str(e)
            print(helpers.color(error, warning=True))
            # Fixed: 'r' would be undefined below after a failed request.
            self.Counter += 10
            continue
        soup = BeautifulSoup(r.content)
        for a in soup.findAll('a'):
            try:
                # Google wraps result links as /url?q=<real-url>&...;
                # unwrap and keep only real, non-cache external links.
                # https://stackoverflow.com/questions/21934004/not-getting-proper-links-
                # from-google-search-results-using-mechanize-and-beautifu/22155412#22155412?
                # newreg=01f0ed80771f4dfaa269b15268b3f9a9
                link = urlparse.parse_qs(urlparse.urlparse(
                    a['href']).query)['q'][0]
                if link.startswith('http') or link.startswith('www'):
                    if "webcache.googleusercontent.com" not in link:
                        self.urlList.append(link)
            except (KeyError, IndexError):
                # Not a /url?q= result link; skip it.
                pass
        self.Counter += 10
    # Now download the required files.
    if not self.urlList:
        print(helpers.color("[*] No PDF's to download from Google!\n",
                            firewall=True))
        return
    for url in self.urlList:
        if self.verbose:
            p = '[*] Google PDF search downloading: ' + str(url)
            print(helpers.color(p, firewall=True))
        FileName = None
        try:
            FileName = self.download_file(url)
            self.Text += self.convert_pdf_to_txt(FileName)
        except Exception:
            # Best effort: one bad PDF should not stop the run.
            pass
        try:
            # Fixed: guard against 'FileName' never being assigned when
            # download_file itself raised.
            if FileName:
                os.remove(FileName)
        except Exception as e:
            print(e)
def process(self):
    """Download the Whoisology archive page for the domain into self.results."""
    try:
        if self.verbose:
            self.logger.info("Whoisology request started")
            p = ' [*] Whoisology request started'
            print(helpers.color(p, firewall=True))
        url = "https://whoisology.com/archive_11/" + \
            self.domain
        r = requests.get(url)
    except Exception as e:
        error = "[!] Major issue with Whoisology Search:" + str(e)
        self.logger.error("Whoisology can download source (Check Connection)")
        print(helpers.color(error, warning=True))
        # Fixed: 'r' is undefined after a failed request; stop here
        # instead of raising NameError below.
        return
    self.results = r.content
def title_screen(self):
    # Print the SimplyEmail ASCII-art startup banner.
    # NOTE(review): the banner bytes are kept exactly as found; do not
    # reflow this literal or the art will change.
    offtext = """------------------------------------------------------------ ______ ________ __ __ / \/ | / / | /$$$$$$ $$$$$$$$/ _____ ____ ______ $$/$$ | $$ \__$$/$$ |__ / \/ \ / \/ $$ | $$ \$$ | $$$$$$ $$$$ |$$$$$$ $$ $$ | $$$$$$ $$$$$/ $$ | $$ | $$ |/ $$ $$ $$ | / \__$$ $$ |_____$$ | $$ | $$ /$$$$$$$ $$ $$ | $$ $$/$$ $$ | $$ | $$ $$ $$ $$ $$ | $$$$$$/ $$$$$$$$/$$/ $$/ $$/ $$$$$$$/$$/$$/ ------------------------------------------------------------"""
    print helpers.color(offtext, bold=False)
def __init__(self, domain):
    """Load GitHub code-search settings from Common/SimplyEmail.ini."""
    self.name = "Searching GitHub Code"
    self.description = "Search GitHub code for emails using a large pool of code searches"
    self.domain = domain
    config = configparser.ConfigParser()
    self.Html = ""
    try:
        config.read('Common/SimplyEmail.ini')
        self.Depth = int(config['GitHubSearch']['PageDepth'])
        self.Counter = int(config['GitHubSearch']['QueryStart'])
    except Exception:
        # Narrowed from a bare 'except:' so BaseExceptions are not hidden.
        print(helpers.color(
            "[*] Major Settings for GitHubSearch are missing, EXITING!\n",
            warning=True))
def __init__(self, Domain):
    """Load Ask search settings from Common/SimplyEmail.ini."""
    self.name = "Ask Search for Emails"
    self.description = "Simple Ask Search for Emails"
    config = configparser.ConfigParser()
    try:
        config.read('Common/SimplyEmail.ini')
        self.UserAgent = {
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}
        self.PageLimit = int(config['AskSearch']['QueryPageLimit'])
        self.Counter = int(config['AskSearch']['QueryStart'])
        self.Domain = Domain
        self.Html = ""
    except Exception:
        # Narrowed from a bare 'except:' so BaseExceptions are not hidden.
        print(helpers.color("[*] Major Settings for Ask Search are missing, EXITING!\n",
                            warning=True))
def __init__(self, domain, verbose=False):
    """Load PGP key-server search settings from Common/SimplyEmail.ini."""
    self.name = "Searching PGP"
    self.description = "Search the PGP database for potential emails"
    self.domain = domain
    config = configparser.ConfigParser()
    self.results = ""
    try:
        config.read('Common/SimplyEmail.ini')
        self.server = str(config['SearchPGP']['KeyServer'])
        self.hostname = str(config['SearchPGP']['Hostname'])
        self.UserAgent = str(config['GlobalSettings']['UserAgent'])
        self.verbose = verbose
    except Exception:
        # Narrowed from a bare 'except:' so BaseExceptions are not hidden.
        print(helpers.color("[*] Major Settings for SearchPGP are missing, EXITING!\n",
                            warning=True))
def __init__(self, domain, verbose=False):
    """Load Flickr search settings from Common/SimplyEmail.ini."""
    self.apikey = False
    self.name = "Searching Flicker"
    self.description = "Search the Flicker top relvant results for emails"
    self.domain = domain
    config = configparser.ConfigParser()
    self.results = ""
    self.verbose = verbose
    try:
        config.read('Common/SimplyEmail.ini')
        self.HostName = str(config['FlickrSearch']['Hostname'])
        self.UserAgent = str(config['GlobalSettings']['UserAgent'])
    except Exception:
        # Narrowed from a bare 'except:' so BaseExceptions are not hidden.
        print(helpers.color("[*] Major Settings for FlickrSearch are missing, EXITING!\n",
                            warning=True))
def process(self):
    """Fetch the PGP key-server index page for the domain into self.results."""
    try:
        url = "http://pgp.rediris.es:11371/pks/lookup?search=" + \
            self.domain + "&op=index"
        self.logger.info("Requesting PGP keys")
        r = requests.get(url)
    except Exception as e:
        error = " [!] Major issue with PGP Search:" + str(e)
        self.logger.error("Major issue with PGP search: " + str(e))
        print(helpers.color(error, warning=True))
        # Fixed: 'r' is undefined after a failed request; stop here
        # instead of raising NameError below.
        return
    if self.verbose:
        p = ' [*] Searching PGP Complete'
        self.logger.info("SearchPGP Completed search")
        print(helpers.color(p, firewall=True))
    self.results = r.content
def __init__(self, domain, verbose=False):
    """Load Canar.io API settings from Common/SimplyEmail.ini."""
    self.apikey = True
    self.name = "Canar.io API PasteBin search"
    self.description = "Search Canar.io for paste potential data dumps, this can take a bit but a great source"
    self.domain = domain
    self.verbose = verbose
    config = configparser.ConfigParser()
    self.Html = ""
    try:
        config.read('Common/SimplyEmail.ini')
        self.Depth = int(config['CanaryPasteBin']['PageDepth'])
        self.Counter = int(config['CanaryPasteBin']['QueryStart'])
        self.apikeyv = str(config['APIKeys']['Canario'])
    except Exception:
        # Narrowed from a bare 'except:' so BaseExceptions are not hidden.
        print(helpers.color(" [*] Major Settings for Canar.io Search are missing, EXITING!\n",
                            warning=True))
def __init__(self, Domain, verbose=False):
    """Load Ask search settings from Common/SimplyEmail.ini."""
    self.name = "Ask Search for Emails"
    self.description = "Simple Ask Search for Emails"
    config = configparser.ConfigParser()
    try:
        config.read('Common/SimplyEmail.ini')
        self.UserAgent = {
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}
        self.PageLimit = int(config['AskSearch']['QueryPageLimit'])
        self.Counter = int(config['AskSearch']['QueryStart'])
        self.Domain = Domain
        self.verbose = verbose
        self.Html = ""
    except Exception:
        # Narrowed from a bare 'except:' so BaseExceptions are not hidden.
        print(helpers.color("[*] Major Settings for Ask Search are missing, EXITING!\n",
                            warning=True))
def title_screen(self):
    # Log, then print the SimplyEmail ASCII-art startup banner.
    # NOTE(review): the banner bytes are kept exactly as found; do not
    # reflow this literal or the art will change.
    self.logger.debug("Title_screen executed")
    offtext = """------------------------------------------------------------ ______ ________ __ __ / \/ | / / | /$$$$$$ $$$$$$$$/ _____ ____ ______ $$/$$ | $$ \__$$/$$ |__ / \/ \ / \/ $$ | $$ \$$ | $$$$$$ $$$$ |$$$$$$ $$ $$ | $$$$$$ $$$$$/ $$ | $$ | $$ |/ $$ $$ $$ | / \__$$ $$ |_____$$ | $$ | $$ /$$$$$$$ $$ $$ | $$ $$/$$ $$ | $$ | $$ $$ $$ $$ $$ | $$$$$$/ $$$$$$$$/$$/ $$/ $$/ $$$$$$$/$$/$$/ ------------------------------------------------------------"""
    print helpers.color(offtext, bold=False)
def __init__(self, domain, verbose=False):
    """Load EmailHunter trial-API settings from Common/SimplyEmail.ini."""
    self.apikey = False
    self.name = "EmailHunter Trial API"
    self.description = "Search the EmailHunter DB for potential emails"
    self.domain = domain
    self.results = []
    self.verbose = verbose
    config = configparser.ConfigParser()
    try:
        self.logger = logging.getLogger("SimplyEmail.EmailHunter")
        config.read('Common/SimplyEmail.ini')
        self.UserAgent = str(config['GlobalSettings']['UserAgent'])
    except Exception as e:
        self.logger.critical(
            "EmailHunter module failed to __init__: " + str(e))
        print(helpers.color(" [*] Major Settings for EmailHunter are missing, EXITING!\n",
                            warning=True))