def get_list_of_credentials():
    """Return the list of credentials read from the accounts.cfg file.

    If accounts.cfg does not exist in the application folder, it is created
    by copying the default file shipped with the application and an empty
    list is returned (the freshly copied default contains no usable
    credentials).

    Returns:
        list: A list of tuples of the form
            ("<platform>", "<username>", "<password>"), one per section of
            the configuration file whose values are all non-empty.

    Raises:
        errors.ConfigurationFileNotFoundError: If neither the user
            configuration file nor the default one could be read.
    """
    list_creds = []

    # If an accounts.cfg has not been found, create it by copying the default
    config_path = os.path.join(configuration.get_config_path()["appPath"],
                               "accounts.cfg")

    # Checking if the configuration file exists
    if not os.path.exists(config_path):
        # Copy the data from the default folder
        default_config_path = os.path.join(
            configuration.get_config_path()["appPathDefaults"], "accounts.cfg")
        try:
            with open(default_config_path) as input_file:
                cont = input_file.read()
            with open(config_path, "w") as output_file:
                output_file.write(cont)
        except Exception:
            # Neither file is usable: signal a configuration problem
            raise errors.ConfigurationFileNotFoundError(
                config_path, default_config_path)
        # The default file ships with empty values, so nothing to parse
        return list_creds

    # Reading the configuration file
    config = ConfigParser()
    config.read(config_path)

    # Each section of the file represents a platform
    for platform in config.sections():
        creds = {}
        incomplete = False

        for param, value in config.items(platform):
            if value == '':
                # A single empty value makes the whole section unusable
                incomplete = True
                break
            creds[param] = value

        if not incomplete:
            try:
                list_creds.append(
                    (platform, creds["login"], creds["password"]))
            except KeyError:
                # Section lacks a "login" or "password" key: skip it
                pass
    return list_creds
def get_list_of_api_keys():
    """Return the API keys read from the api_keys.cfg file.

    If api_keys.cfg does not exist in the application folder, it is created
    by copying the default file shipped with the application and an empty
    dictionary is returned (the freshly copied default contains no usable
    keys).

    Returns:
        dict: A dictionary keyed by platform name whose values are
            dictionaries with the parameters required by each platform.

    Raises:
        errors.ConfigurationFileNotFoundError: If neither the user
            configuration file nor the default one could be read.
    """
    dict_api_keys = {}

    # If an api_keys.cfg has not been found, create it by copying the default
    config_path = os.path.join(configuration.get_config_path()["appPath"],
                               "api_keys.cfg")

    # Checking if the configuration file exists
    if not os.path.exists(config_path):
        # Copy the data from the default folder
        default_config_path = os.path.join(
            configuration.get_config_path()["appPathDefaults"], "api_keys.cfg")
        try:
            with open(default_config_path) as input_file:
                cont = input_file.read()
            with open(config_path, "w") as output_file:
                output_file.write(cont)
        except Exception:
            # Neither file is usable: signal a configuration problem
            raise errors.ConfigurationFileNotFoundError(
                config_path, default_config_path)
        # The default file ships with empty values, so nothing to parse
        return dict_api_keys

    # Reading the configuration file
    config = ConfigParser()
    config.read(config_path)

    # Each section of the file represents a platform
    for platform in config.sections():
        platform_api = {}
        incomplete = False

        for param, value in config.items(platform):
            if value == '':
                # A single empty value makes the whole section unusable
                incomplete = True
                break
            platform_api[param] = value

        # Fix: the original assigned to an undefined name `dictAPIeys`,
        # raising a NameError for any fully populated section
        if not incomplete:
            dict_api_keys[platform] = platform_api
    return dict_api_keys
def __init__(self):
    """Initialise the Browser from the browser.cfg configuration file.

    Sets defaults for the authentication, user agents, proxies and timeout
    and then overrides them with the contents of browser.cfg found in the
    application folder. If that file is missing it is created by copying
    the default one; if neither can be read, hard-coded defaults are kept.
    """
    self.auth = None
    self.user_agents = []
    self.proxies = {}
    self.timeout = 2

    # Trying to read the configuration
    # --------------------------------
    # If a browser.cfg has not been found, create it by copying the default
    config_path = os.path.join(configuration.get_config_path()["appPath"],
                               "browser.cfg")

    # Checking if the configuration file exists
    if not os.path.exists(config_path):
        try:
            # Copy the data from the default folder
            default_config_path = os.path.join(
                configuration.get_config_path()["appPathDefaults"],
                "browser.cfg")
            with open(default_config_path) as input_file:
                cont = input_file.read()
            with open(config_path, "w") as output_file:
                output_file.write(cont)
        except Exception as e:
            # Fix: the original did not bind the exception (`except
            # Exception:`) yet printed `e`, raising a NameError here
            print(
                "WARNING. No configuration file could be found and the default file was not found either, so configuration will be set as default."
            )
            print(str(e))
            print()
            # Storing configuration as default
            self.user_agents = [
                'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/55.0.2883.87 Chrome/55.0.2883.87 Safari/537.36'
            ]
            self.proxies = {}
            return None

    # Reading the configuration file
    config = ConfigParser()
    config.read(config_path)

    proxy = {}

    # Iterating through all the sections
    for conf in config.sections():
        if conf == "Browser":
            # Iterating through parameters
            for param, value in config.items(conf):
                if param == "user_agent":
                    if value != '':
                        self.user_agents.append(value)
                    else:
                        self.user_agents = [
                            'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/55.0.2883.87 Chrome/55.0.2883.87 Safari/537.36'
                        ]
                if param == "timeout":
                    try:
                        self.timeout = int(value)
                    except ValueError:
                        # Non-numeric value: fall back to the default
                        self.timeout = 2
        else:
            # Any other section defines a proxy
            proxy[conf] = {}
            for param, value in config.items(conf):
                if value != '':
                    proxy[conf][param] = value

    # Configuring the proxy as it will be used by br.set_proxies
    for p in proxy.keys():
        # Section names look like "ProxyHTTP": strip the "Proxy" prefix
        # to obtain the protocol name (e.g. "http")
        protocol = p.lower()[5:]
        try:
            # Adding credentials if they exist
            self.proxies[protocol] = (
                proxy[p]["username"] + ":" + proxy[p]["password"] + "@" +
                proxy[p]["host"] + ":" + proxy[p]["port"])
        except KeyError:
            try:
                self.proxies[protocol] = (
                    proxy[p]["host"] + ":" + proxy[p]["port"])
            except KeyError:
                # Incomplete definition: this protocol will not be proxied
                pass
def get_all_platform_objects(mode=None):
    """Recover ALL the list of <Platform> objects to be processed.

    The method dynamically loads all the "official" wrappers found in the
    osrframework.wrappers package, as well as the user-defined wrappers
    stored under [OSRFrameworkHOME]/plugins/wrappers/. A user wrapper whose
    string representation matches an official one replaces it.

    Args:
        mode (str): The mode of the search. The following can be chosen:
            ["phonefy", "usufy", "searchfy"]. If None, all platforms are
            returned.

    Returns:
        list: A list of <Platform> objects, optionally filtered by mode.
    """
    list_all = []

    # --------------------------------------------------------------------
    # Dynamically collecting all the "official" modules
    # --------------------------------------------------------------------
    all_modules = [
        "osrframework.wrappers." + name
        for _, name, _ in pkgutil.iter_modules(osrframework.wrappers.__path__)
    ]

    # Iterating through all the module names to grab them
    for module_name in all_modules:
        my_module = importlib.import_module(module_name)
        # Grab only the classes defined in this very module (not imported)
        class_names = [
            m[0] for m in inspect.getmembers(my_module, inspect.isclass)
            if m[1].__module__ == module_name
        ]
        # Each wrapper module is expected to define exactly one class
        MyClass = getattr(my_module, class_names[0])
        list_all.append(MyClass())

    # --------------------------------------------------------------------
    # Loading user-defined wrappers under [OSRFrameworkHOME]/plugins/wrappers/
    # --------------------------------------------------------------------
    paths = configuration.get_config_path()
    new_path = os.path.abspath(paths["appPathWrappers"])

    # Inserting in the system path (fix: idiomatic `not in` membership test)
    if new_path not in sys.path:
        sys.path.append(new_path)

    user_imported_modules = {}
    for module in os.listdir(new_path):
        if module.endswith('.py'):
            # Fix: slicing trims only the extension, whereas the original
            # `replace('.py', '')` also mangled names containing ".py"
            current = module[:-3]
            user_imported_modules[current] = __import__(current)
    del new_path

    user_classes = []
    # Iterating through all the user modules
    for user_module_name, my_module in user_imported_modules.items():
        class_names = [
            m[0] for m in inspect.getmembers(my_module, inspect.isclass)
            if m[1].__module__ == user_module_name
        ]
        # Each wrapper module is expected to define exactly one class
        MyClass = getattr(my_module, class_names[0])
        user_classes.append(MyClass())

    # --------------------------------------------------------------------
    # Overwriting original modules with the user plugins
    # --------------------------------------------------------------------
    list_to_add = []
    for user_class in user_classes:
        overwritten = False
        for i, official_class in enumerate(list_all):
            # Checking if the name is the same
            if str(user_class) == str(official_class):
                # Replacing the official module with the user one
                list_all[i] = user_class
                overwritten = True
                break
        if not overwritten:
            # Appending the brand-new user class
            list_to_add.append(user_class)

    # Merging list_all and list_to_add
    list_all = list_all + list_to_add

    # Attaching stored credentials, if any, to the platforms
    creds = credentials.get_credentials()
    for p in list_all:
        if p.platformName.lower() in creds.keys():
            p.setCredentials(creds[p.platformName.lower()])

    if mode is None:
        return list_all

    # Returning only those platforms which are required by the mode
    return [p for p in list_all if p._mode_is_valid(mode)]
def main(params=None):
    """Main function to launch mailfy

    The function is created in this way so as to let other applications make
    use of the full configuration capabilities of the application. The
    parameters received are used as parsed by this modules `get_parser()`.

    Args:
        params: A list with the parameters as grabbed by the terminal. It is
            None when this is called by an entry_point. If it is called by
            osrf the data is already parsed.

    Returns:
        list. A list of i3visio entities.
    """
    if params is None:
        parser = get_parser()
        args = parser.parse_args(params)
    else:
        args = params

    results = []

    if not args.quiet:
        print(general.title(banner.text))

        saying_hello = f"""
    Mailfy | Copyright (C) Yaiza Rubio & FĂ©lix Brezo (i3visio) 2014-2020

This program comes with ABSOLUTELY NO WARRANTY. This is free software, and you
are welcome to redistribute it under certain conditions. For additional info,
visit <{general.LICENSE_URL}>.
"""
        print(general.info(saying_hello))

        # Displaying a warning if this is being run in a windows system
        if sys.platform == 'win32':
            print(
                general.warning(
                    """OSRFramework has detected that you are running mailfy in a Windows system.
As the "emailahoy" library is NOT working properly there, "validate_email" will
be used instead. Verification may be slower though."""))

    if args.license:
        general.showLicense()
    else:
        # Processing only the given domains and excluding the ones provided
        extra_domains = []
        for d in args.domains:
            if d not in args.exclude and not d == "all":
                extra_domains.append(d)

        # Two different arrays are maintained since there are some domains
        # that cannot be safely verified
        if args.create_emails:
            potentially_existing_emails = grab_emails(
                nicks_file=args.create_emails,
                domains=EMAIL_DOMAINS + extra_domains,
                exclude_domains=args.exclude)
            potentially_leaked_emails = grab_emails(
                nicks_file=args.create_emails,
                domains=LEAKED_DOMAINS + extra_domains,
                exclude_domains=args.exclude)
        else:
            potentially_existing_emails = grab_emails(
                emails=args.emails,
                emails_file=args.emails_file,
                nicks=args.nicks,
                nicks_file=args.nicks_file,
                domains=EMAIL_DOMAINS + extra_domains,
                exclude_domains=args.exclude)
            potentially_leaked_emails = grab_emails(
                emails=args.emails,
                emails_file=args.emails_file,
                nicks=args.nicks,
                nicks_file=args.nicks_file,
                domains=LEAKED_DOMAINS + extra_domains,
                exclude_domains=args.exclude)

        emails = list(
            set(potentially_leaked_emails + potentially_existing_emails))

        if not args.quiet:
            start_time = dt.datetime.now()
            print(
                f"\n{start_time}\t{general.emphasis('Step 1/5')}. Trying to determine if any of the following {general.emphasis(str(len(potentially_existing_emails)))} emails exist using emailahoy3...\n{general.emphasis(json.dumps(potentially_existing_emails, indent=2))}\n"
            )
            print(
                general.emphasis("\tPress <Ctrl + C> to skip this step...\n"))

        # Perform searches, using different Threads
        try:
            results = verify_with_emailahoy_step_1(potentially_existing_emails,
                                                   num_threads=args.threads)
        except KeyboardInterrupt:
            print(
                general.warning("\tStep 1 manually skipped by the user...\n"))
            results = []

        # Grabbing the <Platform> objects
        platforms = platform_selection.get_platforms_by_name(args.platforms,
                                                             mode="mailfy")
        names = [p.platformName for p in platforms]

        if not args.quiet:
            now = dt.datetime.now()
            print(
                f"\n{now}\t{general.emphasis('Step 2/5')}. Checking if the emails have been used to register accounts in {general.emphasis(str(len(platforms)))} platforms...\n{general.emphasis(json.dumps(names, indent=2))}\n"
            )
            print(
                general.emphasis("\tPress <Ctrl + C> to skip this step...\n"))

        try:
            registered = process_mail_list_step_2(platforms=platforms,
                                                  emails=emails)
        except KeyboardInterrupt:
            print(
                general.warning("\tStep 2 manually skipped by the user...\n"))
            registered = []

        results += registered

        if not args.quiet:
            # Fix: the original tested len(results), so step-1 hits masked an
            # empty step 2 and "No account found." was never printed
            if len(registered) > 0:
                for r in registered:
                    print(
                        f"\t[*] Linked account found: {general.success(r['value'])}"
                    )
            else:
                print("\t[*] No account found.")

            now = dt.datetime.now()
            print(
                f"\n{now}\t{general.emphasis('Step 3/5')}. Verifying if the provided emails have been leaked somewhere using HaveIBeenPwned.com...\n"
            )
            print(
                general.emphasis("\tPress <Ctrl + C> to skip this step...\n"))

        all_keys = config_api_keys.get_list_of_api_keys()

        try:
            # Verify the existence of the mails found as leaked emails.
            for query in potentially_leaked_emails:
                # Iterate through the different leak platforms
                leaks = hibp.check_if_email_was_hacked(
                    query, api_key=all_keys["haveibeenpwned_com"]["api_key"])
                if len(leaks) > 0:
                    if not args.quiet:
                        print(
                            f"\t[*] '{general.success(query)}' has been found in at least {general.success(len(leaks))} different leaks."
                        )
                else:
                    if not args.quiet:
                        print(
                            f"\t[*] '{general.error(query)}' has NOT been found on any leak yet."
                        )
                results += leaks
        except KeyError:
            # API_Key not found
            config_path = os.path.join(
                configuration.get_config_path()["appPath"], "api_keys.cfg")
            print(
                "\t[*] " + general.warning("No API found for HaveIBeenPwned") +
                f". Request one at <https://haveibeenpwned.com/API/Key> and add it to '{config_path}'."
            )
        except KeyboardInterrupt:
            print(
                general.warning("\tStep 3 manually skipped by the user...\n"))

        if not args.quiet:
            now = dt.datetime.now()
            print(
                f"\n{now}\t{general.emphasis('Step 4/5')}. Verifying if the provided emails have been leaked somewhere using Dehashed.com...\n"
            )
            print(
                general.emphasis("\tPress <Ctrl + C> to skip this step...\n"))

        try:
            # Verify the existence of the mails found as leaked emails.
            for query in emails:
                try:
                    # Iterate through the different leak platforms
                    leaks = dehashed.check_if_email_was_hacked(query)
                    if len(leaks) > 0:
                        if not args.quiet:
                            print(
                                f"\t[*] '{general.success(query)}' has been found in at least {general.success(len(leaks))} different leaks as shown by Dehashed.com."
                            )
                    else:
                        if not args.quiet:
                            print(
                                f"\t[*] '{general.error(query)}' has NOT been found on any leak yet."
                            )
                    results += leaks
                except Exception:
                    # Fix: the original interpolated an undefined name
                    # `email` here, raising a NameError inside the handler
                    print(
                        general.warning(
                            f"Something happened when querying Dehashed.com about '{query}'. Omitting..."
                        ))
        except KeyboardInterrupt:
            print(
                general.warning("\tStep 4 manually skipped by the user...\n"))

        if not args.quiet:
            now = dt.datetime.now()
            print(
                f"\n{now}\t{general.emphasis('Step 5/5')}. Verifying if the provided emails have registered a domain using ViewDNS.info...\n"
            )
            print(
                general.emphasis("\tPress <Ctrl + C> to skip this step...\n"))

        try:
            # Verify the existence of the mails found as leaked emails.
            for query in potentially_leaked_emails:
                try:
                    # Iterate through the different leak platforms
                    domains = viewdns.check_reverse_whois(query)
                    if len(domains) > 0:
                        if not args.quiet:
                            print(
                                f"\t[*] '{general.success(query)}' has registered at least {general.success(len(domains))} different domains as shown by ViewDNS.info."
                            )
                    else:
                        if not args.quiet:
                            print(
                                f"\t[*] '{general.error(query)}' has NOT registered a domain yet."
                            )
                    results += domains
                except Exception:
                    print(
                        general.warning(
                            f"Something happened when querying Viewdns.info about '{query}'. Omitting..."
                        ))
        except KeyboardInterrupt:
            print(
                general.warning("\tStep 5 manually skipped by the user...\n"))

        # Trying to store the information recovered
        if args.output_folder is not None:
            if not os.path.exists(args.output_folder):
                os.makedirs(args.output_folder)
            # Grabbing the results
            file_header = os.path.join(args.output_folder, args.file_header)
            for ext in args.extension:
                # Generating output files
                general.export_usufy(results, ext, file_header)

        # Showing the information gathered if requested
        if not args.quiet:
            now = dt.datetime.now()
            print(f"\n{now}\tResults obtained:\n")
            print(general.success(general.osrf_to_text_export(results)))

            # Fix: guard the file listing; the original referenced the
            # header variable even when no output folder had been set,
            # raising a NameError
            if args.output_folder is not None:
                now = dt.datetime.now()
                print(
                    f"\n{now}\tYou can find all the information collected in the following files:"
                )
                for ext in args.extension:
                    # Showing the output files
                    print(general.emphasis("\t" + file_header + "." + ext))

        # Showing the execution time...
        if not args.quiet:
            end_time = dt.datetime.now()
            # Fix: the original string lacked the `f` prefix, printing the
            # literal "{end_time}" placeholder
            print(f"\n{end_time}\tFinishing execution...\n")
            print("Total time used:\t" +
                  general.emphasis(str(end_time - start_time)))

        if not args.quiet:
            # Urging users to place an issue on Github...
            print(banner.footer)

    if params:
        return results
HERE = os.path.abspath(os.path.dirname(__file__)) # Importing the temporal scripts for the setup and taking the new version number NEW_VERSION = osrframework.__version__ print("[*] OSRFramework > Reading requirements...") with open("requirements.txt") as file: requirements = file.read().splitlines() # Depending on the place in which the project is going to be upgraded def read(fname): return open(os.path.join(os.path.dirname(__file__), fname)).read() # Creating the application paths paths = configuration.get_config_path() print("[*] OSRFramework > Launching the installation of the osrframework module...") # Launching the setup setup( name="osrframework", version=NEW_VERSION, description="OSRFramework - A set of AGPLv3+ OSINT tools developed by i3visio analysts for online research.", author="Felix Brezo and Yaiza Rubio", author_email="*****@*****.**", url="http://github.com/i3visio/osrframework", license="COPYING", keywords="python osint harvesting profiling username socialmedia forums", entry_points={ 'console_scripts': [ 'alias_generator = osrframework.alias_generator:main',