def test_steam(): info = extract( parse('https://steamcommunity.com/id/GabrielSantosMariano/')[0]) assert info.get('uid') == '76561198315585536' assert info.get('username') == 'GabrielSantosMariano' assert info.get('name') == 'Gabriel! Santos, Mariano.'
def test_behance(): info = extract(parse('https://www.behance.net/Skyratov', 'ilo0=1')[0]) assert info.get('uid') == '39065909' assert info.get('username') == 'Skyratov' assert info.get('last_name') == 'Skuratov' assert info.get('first_name') == 'Vasiliy'
def extract_ids_from_page(url, logger, timeout=5) -> dict: results = {} # url, headers reqs: List[Tuple[str, set]] = [(url, set())] try: # temporary workaround for URL mutations MVP from socid_extractor import mutate_url reqs += list(mutate_url(url)) except Exception as e: logger.warning(e) for req in reqs: url, headers = req print(f'Scanning webpage by URL {url}...') page, _ = parse(url, cookies_str='', headers=headers, timeout=timeout) logger.debug(page) info = extract(page) if not info: print('Nothing extracted') else: print(get_dict_ascii_tree(info.items(), new_line=False), ' ') for k, v in info.items(): if 'username' in k: results[v] = 'username' if k in SUPPORTED_IDS: results[v] = k return results
def test_medium(): info = extract(parse('https://medium.com/@lys1n')[0]) assert info.get('uid') == '4894fec6b289' assert info.get('username') == 'lys1n' assert info.get('twitter_username') == 'lys1n' assert info.get('name') == 'Марк Лясин' assert info.get('facebook_uid') == '1726256597385716'
def test_livejournal(): info = extract(parse('https://julia-klay.livejournal.com/')[0]) assert info.get('uid') == '83505610' assert info.get('name') == 'julia_klay' assert info.get('username') == 'julia_klay' assert info.get('is_personal') == 'True' assert info.get('is_community') == 'False'
def test_yandex_realty_offer_cookies(): cookies = open('yandex.test.cookies').read() info = extract( parse('https://realty.yandex.ru/offer/363951114410351104/', cookies)[0]) assert info.get('uid') == '86903473' assert info.get('name') == 'Севостьянова Мария Владимировна'
def get_messenger_info(self) -> dict: url = 'https://yandex.ru/messenger/api/registry/api/' data = {"method": "get_users_data", "params": {"guids": [self.identifier]}} r = requests.post(url, headers=HEADERS, cookies=self.cookies, files={'request': (None, json.dumps(data))}) info = extract(r.text) if info: info['URL'] = f'https://yandex.ru/chat#/user/{self.identifier}' return info
def test_google_documents_cookies(): cookies = open('google.test.cookies').read() info = extract( parse( 'https://docs.google.com/spreadsheets/d/1HtZKMLRXNsZ0HjtBmo0Gi03nUPiJIA4CC4jTYbCAnXw/edit#gid=0', cookies)[0]) assert info.get('org_domain') == 'breakoutcommerce.com' assert info.get('org_name') == 'Gooten'
def get_messenger_search_info(self) -> dict: url = 'https://yandex.ru/messenger/api/registry/api/' data = {"method": "search", "params": {"query": self.identifier, "limit": 10, "entities": ["messages", "users_and_chats"]}} r = requests.post(url, headers=HEADERS, cookies=self.cookies, files={'request': (None, json.dumps(data))}) info = extract(r.text) if info and info.get('yandex_messenger_guid'): info['URL'] = f'https://yandex.ru/chat#/user/{info["yandex_messenger_guid"]}' return info
def test_yandex_znatoki_user_profile(): info = extract( parse( 'https://yandex.ru/znatoki/user/e3795016-b18e-58ba-9112-21c301e53f37/' )[0]) assert info.get('uid') == 'e3795016-b18e-58ba-9112-21c301e53f37' assert info.get('yandex_uid') == '980797984' assert info.get('username') == 'uid-hwcuuacg' assert info.get('name') == 'Настя Рогозинская'
def simple_get_info_request(self, url: str, headers_updates: dict = {}, orig_url: str = None) -> dict: headers = dict(HEADERS) headers.update(headers_updates) r = requests.get(url, headers=headers, cookies=self.cookies) if 'enter_captcha_value' in r.text: info = {'Error': 'Captcha detected'} else: info = extract(r.text) if info: info['URL'] = orig_url or url return info
def test_500px(): info = extract(parse('https://500px.com/the-maksimov')[0]) assert info.get('uid') == '23896' assert info.get('username') == 'The-Maksimov' assert info.get('name') == 'Maxim Maximov' assert info.get('qq_uid') == None assert info.get('fb_uid') == None assert info.get('instagram_username') == 'the.maksimov' assert info.get('twitter_username') == 'The_Maksimov' assert info.get('website') == 'vk.com/id156603747' assert info.get('facebook_page') == 'facebook.com/the.maksimov' assert info.get('facebook_uid') == '100001789363632'
def simple_get_info_request(self, url: str, headers_updates: dict = None, orig_url: str = None) -> dict: headers = dict(HEADERS) headers.update(headers_updates if headers_updates else {}) r = requests.get(url, headers=headers, cookies=self.cookies) if 'enter_captcha_value' in r.text: info = {'Error': 'Captcha detected'} else: try: info = extract(r.text) except Exception as e: print(f'Error for URL {url}: {e}\n') info = {} if info: info['URL'] = orig_url or url return info
async def main(): version_string = '\n'.join([ f'%(prog)s {__version__}', f'Socid-extractor: {socid_version}', f'Aiohttp: {aiohttp.__version__}', f'Requests: {requests.__version__}', f'Python: {platform.python_version()}', ]) parser = ArgumentParser(formatter_class=RawDescriptionHelpFormatter, description=f"Maigret v{__version__}") parser.add_argument("--version", action="version", version=version_string, help="Display version information and dependencies.") parser.add_argument("--info", "-vv", action="store_true", dest="info", default=False, help="Display service information.") parser.add_argument("--verbose", "-v", action="store_true", dest="verbose", default=False, help="Display extra information and metrics.") parser.add_argument( "-d", "--debug", "-vvv", action="store_true", dest="debug", default=False, help="Saving debugging information and sites responses in debug.txt.") parser.add_argument( "--site", action="append", metavar='SITE_NAME', dest="site_list", default=[], help= "Limit analysis to just the listed sites (use several times to specify more than one)" ) parser.add_argument( "--proxy", "-p", metavar='PROXY_URL', action="store", dest="proxy", default=None, help="Make requests over a proxy. e.g. socks5://127.0.0.1:1080") parser.add_argument( "--json", "-j", metavar="JSON_FILE", dest="json_file", default=None, help="Load data from a JSON file or an online, valid, JSON file.") parser.add_argument( "--timeout", action="store", metavar='TIMEOUT', dest="timeout", type=timeout_check, default=10, help="Time (in seconds) to wait for response to requests." "Default timeout of 10.0s." "A longer timeout will be more likely to get results from slow sites." "On the other hand, this may cause a long delay to gather all results." ) parser.add_argument("-n", "--max-connections", action="store", type=int, dest="connections", default=100, help="Allowed number of concurrent connections.") parser.add_argument("-a", "--all-sites", action="store_true", dest="all_sites", default=False, help="Use all sites for scan.") parser.add_argument( "--top-sites", action="store", default=500, type=int, help="Count of sites for scan ranked by Alexa Top (default: 500).") parser.add_argument("--print-not-found", action="store_true", dest="print_not_found", default=False, help="Print sites where the username was not found.") parser.add_argument( "--print-errors", action="store_true", dest="print_check_errors", default=False, help= "Print errors messages: connection, captcha, site country ban, etc.") parser.add_argument("--no-color", action="store_true", dest="no_color", default=False, help="Don't color terminal output") parser.add_argument("--browse", "-b", action="store_true", dest="browse", default=False, help="Browse to all results on default bowser.") parser.add_argument( "--no-recursion", action="store_true", dest="disable_recursive_search", default=False, help= "Disable parsing pages for other usernames and recursive search by them." ) parser.add_argument( "--self-check", action="store_true", default=False, help= "Do self check for sites and database and disable non-working ones.") parser.add_argument( "--use-disabled-sites", action="store_true", default=False, help="Use disabled sites to search (may cause many false positives).") parser.add_argument( "--parse", dest="parse_url", default='', help="Parse page by URL and extract username and IDs to use for search." 
) parser.add_argument("--id-type", dest="id_type", default='username', help="Specify identifier(s) type (default: username).") parser.add_argument( "username", nargs='+', metavar='USERNAMES', action="store", help="One or more usernames to check with social networks.") parser.add_argument("--tags", dest="tags", default='', help="Specify tags of sites.") # reports options parser.add_argument( "--folderoutput", "-fo", dest="folderoutput", default="reports", help= "If using multiple usernames, the output of the results will be saved to this folder." ) parser.add_argument("-T", "--txt", action="store_true", dest="txt", default=False, help="Create a TXT report (one report per username).") parser.add_argument("-C", "--csv", action="store_true", dest="csv", default=False, help="Create a CSV report (one report per username).") parser.add_argument( "-H", "--html", action="store_true", dest="html", default=False, help="Create an HTML report file (general report on all usernames).") parser.add_argument( "-X", "--xmind", action="store_true", dest="xmind", default=False, help="Generate an XMind 8 mindmap report (one report per username).") parser.add_argument( "-P", "--pdf", action="store_true", dest="pdf", default=False, help="Generate a PDF report (general report on all usernames).") args = parser.parse_args() # Logging log_level = logging.ERROR logging.basicConfig( format= '[%(filename)s:%(lineno)d] %(levelname)-3s %(asctime)s %(message)s', datefmt='%H:%M:%S', level=log_level) if args.debug: log_level = logging.DEBUG elif args.info: log_level = logging.INFO elif args.verbose: log_level = logging.WARNING logger = logging.getLogger('maigret') logger.setLevel(log_level) # Usernames initial list usernames = {u: args.id_type for u in args.username if u not in ['-']} recursive_search_enabled = not args.disable_recursive_search # Make prompts if args.proxy is not None: print("Using the proxy: " + args.proxy) if args.parse_url: page, _ = parse(args.parse_url, cookies_str='') info = extract(page) text = 'Extracted ID data from webpage: ' + ', '.join( [f'{a}: {b}' for a, b in info.items()]) print(text) for k, v in info.items(): if 'username' in k: usernames[v] = 'username' if k in supported_recursive_search_ids: usernames[v] = k if args.tags: args.tags = list(set(str(args.tags).split(','))) if args.json_file is None: args.json_file = \ os.path.join(os.path.dirname(os.path.realpath(__file__)), "resources/data.json" ) if args.top_sites == 0 or args.all_sites: args.top_sites = sys.maxsize # Create object with all information about sites we are aware of. try: db = MaigretDatabase().load_from_file(args.json_file) site_data = db.ranked_sites_dict(top=args.top_sites, tags=args.tags, names=args.site_list) except Exception as error: print(f"ERROR: {error}") sys.exit(1) # Database self-checking if args.self_check: print('Maigret sites database self-checking...') is_need_update = await self_check(db, site_data, logger, max_connections=args.connections) if is_need_update: if input('Do you want to save changes permanently? 
[yYnN]\n' ).lower() == 'y': db.save_to_file(args.json_file) print('Database was successfully updated.') else: print( 'Updates will be applied only for current search session.') print(db.get_stats(site_data)) # Make reports folder is not exists os.makedirs(args.folderoutput, exist_ok=True) report_path = args.folderoutput # Define one report filename template report_filepath_tpl = os.path.join(args.folderoutput, 'report_{username}{postfix}') # Database consistency enabled_count = len( list(filter(lambda x: not x.disabled, site_data.values()))) print( f'Sites in database, enabled/total: {enabled_count}/{len(site_data)}') if not enabled_count: print('No sites to check, exiting!') sys.exit(2) if usernames == ['-']: # magic params to exit after init print('No usernames to check, exiting.') sys.exit(0) # Create notify object for query results. query_notify = QueryNotifyPrint( result=None, verbose=args.verbose, print_found_only=not args.print_not_found, skip_check_errors=not args.print_check_errors, color=not args.no_color) already_checked = set() general_results = [] while usernames: username, id_type = list(usernames.items())[0] del usernames[username] if username.lower() in already_checked: continue else: already_checked.add(username.lower()) # check for characters do not supported by sites generally found_unsupported_chars = set(unsupported_characters).intersection( set(username)) if found_unsupported_chars: pretty_chars_str = ','.join( map(lambda s: f'"{s}"', found_unsupported_chars)) print( f'Found unsupported URL characters: {pretty_chars_str}, skip search by username "{username}"' ) continue results = await maigret( username, dict(site_data), query_notify, proxy=args.proxy, timeout=args.timeout, recursive_search=recursive_search_enabled, id_type=id_type, debug=args.verbose, logger=logger, forced=args.use_disabled_sites, max_connections=args.connections, ) username_result = (username, id_type, results) general_results.append((username, id_type, results)) # TODO: tests for website_name in results: dictionary = results[website_name] # TODO: fix no site data issue if not dictionary: continue new_usernames = dictionary.get('ids_usernames') if new_usernames: for u, utype in new_usernames.items(): usernames[u] = utype # reporting for a one username if args.xmind: filename = report_filepath_tpl.format(username=username, postfix='.xmind') save_xmind_report(filename, username, results) print(f'XMind report for {username} saved in {filename}') if args.csv: filename = report_filepath_tpl.format(username=username, postfix='.csv') save_csv_report(filename, username, results) print(f'CSV report for {username} saved in {filename}') if args.txt: filename = report_filepath_tpl.format(username=username, postfix='.txt') save_txt_report(filename, username, results) print(f'TXT report for {username} saved in {filename}') # reporting for all the result if general_results: if args.html or args.pdf: print('Generating report info...') report_context = generate_report_context(general_results) # determine main username username = report_context['username'] if args.html: filename = report_filepath_tpl.format(username=username, postfix='.html') save_html_report(filename, report_context) print(f'HTML report on all usernames saved in {filename}') if args.pdf: filename = report_filepath_tpl.format(username=username, postfix='.pdf') save_pdf_report(filename, report_context) print(f'PDF report on all usernames saved in {filename}') # update database db.save_to_file(args.json_file)
async def main(): version_string = f"%(prog)s {__version__}\n" + \ f"{requests.__description__}: {requests.__version__}\n" + \ f"Python: {platform.python_version()}" parser = ArgumentParser( formatter_class=RawDescriptionHelpFormatter, description=f"{module_name} (Version {__version__})") parser.add_argument("--version", action="version", version=version_string, help="Display version information and dependencies.") parser.add_argument("--verbose", "-v", action="store_true", dest="verbose", default=False, help="Display extra information and metrics.") parser.add_argument( "-d", "--debug", action="store_true", dest="debug", default=False, help="Saving debugging information and sites responses in debug.txt.") parser.add_argument( "--rank", "-r", action="store_true", dest="rank", default=False, help= "Present websites ordered by their Alexa.com global rank in popularity." ) parser.add_argument( "--folderoutput", "-fo", dest="folderoutput", help= "If using multiple usernames, the output of the results will be saved to this folder." ) parser.add_argument( "--output", "-o", dest="output", help= "If using single username, the output of the result will be saved to this file." ) parser.add_argument( "--tor", "-t", action="store_true", dest="tor", default=False, help= "Make requests over Tor; increases runtime; requires Tor to be installed and in system path." ) parser.add_argument( "--unique-tor", "-u", action="store_true", dest="unique_tor", default=False, help= "Make requests over Tor with new Tor circuit after each request; increases runtime; requires Tor to be installed and in system path." ) parser.add_argument("--csv", action="store_true", dest="csv", default=False, help="Create Comma-Separated Values (CSV) File.") parser.add_argument( "--site", action="append", metavar='SITE_NAME', dest="site_list", default=None, help= "Limit analysis to just the listed sites. Add multiple options to specify more than one site." ) parser.add_argument( "--proxy", "-p", metavar='PROXY_URL', action="store", dest="proxy", default=None, help="Make requests over a proxy. e.g. socks5://127.0.0.1:1080") parser.add_argument( "--json", "-j", metavar="JSON_FILE", dest="json_file", default=None, help="Load data from a JSON file or an online, valid, JSON file.") parser.add_argument( "--timeout", action="store", metavar='TIMEOUT', dest="timeout", type=timeout_check, default=10, help="Time (in seconds) to wait for response to requests." "Default timeout of 10.0s." "A longer timeout will be more likely to get results from slow sites." "On the other hand, this may cause a long delay to gather all results." ) parser.add_argument( "--print-found", action="store_true", dest="print_found_only", default=False, help="Do not output sites where the username was not found.") parser.add_argument( "--skip-errors", action="store_true", dest="skip_check_errors", default=False, help= "Do not print errors messages: connection, captcha, site country ban, etc." ) parser.add_argument("--no-color", action="store_true", dest="no_color", default=False, help="Don't color terminal output") parser.add_argument("--browse", "-b", action="store_true", dest="browse", default=False, help="Browse to all results on default bowser.") parser.add_argument( "--ids", "-i", action="store_true", dest="ids_search", default=False, help= "Make scan of pages for other usernames and recursive search by them.") parser.add_argument( "--parse", dest="parse_url", default='', help="Parse page by URL and extract username and IDs to use for search." 
) parser.add_argument( "username", nargs='+', metavar='USERNAMES', action="store", help="One or more usernames to check with social networks.") parser.add_argument("--tags", dest="tags", default='', help="Specify tags of sites.") args = parser.parse_args() # Logging log_level = logging.ERROR logging.basicConfig( format= '[%(filename)s:%(lineno)d] %(levelname)-3s %(asctime)s %(message)s', datefmt='%H:%M:%S', level=logging.ERROR) if args.debug: log_level = logging.DEBUG elif args.verbose: log_level = logging.WARNING logger = logging.getLogger('maigret') logger.setLevel(log_level) # Usernames initial list usernames = {u: 'username' for u in args.username if u not in ('-')} # TODO regex check on args.proxy if args.tor and (args.proxy is not None): raise Exception("Tor and Proxy cannot be set at the same time.") # Make prompts if args.proxy is not None: print("Using the proxy: " + args.proxy) if args.tor or args.unique_tor: print("Using Tor to make requests") print( "Warning: some websites might refuse connecting over Tor, so note that using this option might increase connection errors." ) # Check if both output methods are entered as input. if args.output is not None and args.folderoutput is not None: print("You can only use one of the output methods.") sys.exit(1) # Check validity for single username output. if args.output is not None and len(args.username) != 1: print("You can only use --output with a single username") sys.exit(1) if args.parse_url: page, _ = parse(args.parse_url, cookies_str='') info = extract(page) text = 'Extracted ID data from webpage: ' + ', '.join( [f'{a}: {b}' for a, b in info.items()]) print(text) for k, v in info.items(): if 'username' in k: usernames[v] = 'username' if k in supported_recursive_search_ids: usernames[v] = k if args.tags: args.tags = set(args.tags.split(',')) #Create object with all information about sites we are aware of. try: sites = SitesInformation(args.json_file) except Exception as error: print(f"ERROR: {error}") sys.exit(1) #Create original dictionary from SitesInformation() object. #Eventually, the rest of the code will be updated to use the new object #directly, but this will glue the two pieces together. site_data_all = {} for site in sites: site_data_all[site.name] = site.information if args.site_list is None: # Not desired to look at a sub-set of sites site_data = site_data_all else: # User desires to selectively run queries on a sub-set of the site list. # Make sure that the sites are supported & build up pruned site database. site_data = {} site_missing = [] for site in args.site_list: for existing_site in site_data_all: if site.lower() == existing_site.lower(): site_data[existing_site] = site_data_all[existing_site] if not site_data: # Build up list of sites not supported for future error message. site_missing.append(f"'{site}'") if site_missing: print( f"Error: Desired sites not found: {', '.join(site_missing)}.") sys.exit(1) if args.rank: # Sort data by rank site_dataCpy = dict(site_data) ranked_sites = sorted( site_data, key=lambda k: ("rank" not in k, site_data[k].get("rank", sys.maxsize))) site_data = {} for site in ranked_sites: site_data[site] = site_dataCpy.get(site) #Create notify object for query results. 
query_notify = QueryNotifyPrint(result=None, verbose=args.verbose, print_found_only=args.print_found_only, skip_check_errors=args.skip_check_errors, color=not args.no_color) already_checked = set() while usernames: username, id_type = list(usernames.items())[0] del usernames[username] if username.lower() in already_checked: continue else: already_checked.add(username.lower()) # check for characters do not supported by sites generally found_unsupported_chars = set(unsupported_characters).intersection( set(username)) if found_unsupported_chars: pretty_chars_str = ','.join( map(lambda s: f'"{s}"', found_unsupported_chars)) print( f'Found unsupported URL characters: {pretty_chars_str}, skip search by username "{username}"' ) continue results = await sherlock(username, site_data, query_notify, tor=args.tor, unique_tor=args.unique_tor, proxy=args.proxy, timeout=args.timeout, ids_search=args.ids_search, id_type=id_type, tags=args.tags, debug=args.verbose, logger=logger) if args.output: result_file = args.output elif args.folderoutput: # The usernames results should be stored in a targeted folder. # If the folder doesn't exist, create it first os.makedirs(args.folderoutput, exist_ok=True) result_file = os.path.join(args.folderoutput, f"{username}.txt") else: result_file = f"{username}.txt" with open(result_file, "w", encoding="utf-8") as file: exists_counter = 0 for website_name in results: dictionary = results[website_name] new_usernames = dictionary.get('ids_usernames') if new_usernames: for u, utype in new_usernames.items(): usernames[u] = utype if dictionary.get("status").status == QueryStatus.CLAIMED: exists_counter += 1 file.write(dictionary["url_user"] + "\n") file.write( f"Total Websites Username Detected On : {exists_counter}") if args.csv: with open(username + ".csv", "w", newline='', encoding="utf-8") as csv_report: writer = csv.writer(csv_report) writer.writerow([ 'username', 'name', 'url_main', 'url_user', 'exists', 'http_status', 'response_time_s' ]) for site in results: response_time_s = results[site]['status'].query_time if response_time_s is None: response_time_s = "" writer.writerow([ username, site, results[site]['url_main'], results[site]['url_user'], str(results[site]['status'].status), results[site]['http_status'], response_time_s ])
async def sherlock(username, site_data, query_notify, logger, tor=False, unique_tor=False, proxy=None, timeout=None, ids_search=False, id_type='username', tags=[], debug=False): """Run Sherlock Analysis. Checks for existence of username on various social media sites. Keyword Arguments: username -- String indicating username that report should be created against. site_data -- Dictionary containing all of the site data. query_notify -- Object with base type of QueryNotify(). This will be used to notify the caller about query results. tor -- Boolean indicating whether to use a tor circuit for the requests. unique_tor -- Boolean indicating whether to use a new tor circuit for each request. proxy -- String indicating the proxy URL timeout -- Time in seconds to wait before timing out request. Default is no timeout. ids_search -- Search for other usernames in website pages & recursive search by them. Return Value: Dictionary containing results from report. Key of dictionary is the name of the social network site, and the value is another dictionary with the following keys: url_main: URL of main site. url_user: URL of user on site (if account exists). status: QueryResult() object indicating results of test for account existence. http_status: HTTP status code of query which checked for existence on site. response_text: Text that came back from request. May be None if there was an HTTP error when checking for existence. """ #Notify caller that we are starting the query. query_notify.start(username, id_type) # Create session based on request methodology if tor or unique_tor: #Requests using Tor obfuscation underlying_request = TorRequest() underlying_session = underlying_request.session else: #Normal requests underlying_session = requests.session() underlying_request = requests.Request() #Limit number of workers to 20. #This is probably vastly overkill. if len(site_data) >= 20: max_workers = 20 else: max_workers = len(site_data) # TODO: connector connector = aiohttp.TCPConnector(ssl=False) session = aiohttp.ClientSession(connector=connector) # Results from analysis of all sites results_total = {} # First create futures for all requests. This allows for the requests to run in parallel for social_network, net_info in site_data.items(): # print(id_type) # print(social_network) if net_info.get('type', 'username') != id_type: continue site_tags = set(net_info.get('tags', [])) if tags: if not tags.intersection(site_tags): continue if 'disabled' in net_info and net_info['disabled']: continue # Results from analysis of this specific site results_site = {} # Record URL of main site results_site['url_main'] = net_info.get("urlMain") # A user agent is needed because some sites don't return the correct # information since they think that we are bots (Which we actually are...) headers = { 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 11.1; rv:55.0) Gecko/20100101 Firefox/55.0', } if "headers" in net_info: # Override/append any extra headers required by a given site. headers.update(net_info["headers"]) # URL of user on site (if it exists) url = net_info.get('url').format(username) # Don't make request if username is invalid for the site regex_check = net_info.get("regexCheck") if regex_check and re.search(regex_check, username) is None: # No need to do the check at the site: this user name is not allowed. 
results_site['status'] = QueryResult(username, social_network, url, QueryStatus.ILLEGAL) results_site["url_user"] = "" results_site['http_status'] = "" results_site['response_text'] = "" query_notify.update(results_site['status']) else: # URL of user on site (if it exists) results_site["url_user"] = url url_probe = net_info.get("urlProbe") if url_probe is None: # Probe URL is normal one seen by people out on the web. url_probe = url else: # There is a special URL for probing existence separate # from where the user profile normally can be found. url_probe = url_probe.format(username) if (net_info["errorType"] == 'status_code' and net_info.get("request_head_only", True) == True): #In most cases when we are detecting by status code, #it is not necessary to get the entire body: we can #detect fine with just the HEAD response. request_method = session.head else: #Either this detect method needs the content associated #with the GET response, or this specific website will #not respond properly unless we request the whole page. request_method = session.get if net_info["errorType"] == "response_url": # Site forwards request to a different URL if username not # found. Disallow the redirect so we can capture the # http status from the original URL request. allow_redirects = False else: # Allow whatever redirect that the site wants to do. # The final result of the request will be what is available. allow_redirects = True def parse_cookies(cookies_str): cookies = SimpleCookie() cookies.load(cookies_str) return {key: morsel.value for key, morsel in cookies.items()} if os.path.exists(cookies_file): cookies_obj = cookielib.MozillaCookieJar(cookies_file) cookies_obj.load(ignore_discard=True, ignore_expires=True) else: cookies_obj = [] # This future starts running the request in a new thread, doesn't block the main thread if proxy is not None: proxies = {"http": proxy, "https": proxy} future = request_method( url=url_probe, headers=headers, proxies=proxies, allow_redirects=allow_redirects, timeout=timeout, ) else: future = request_method( url=url_probe, headers=headers, allow_redirects=allow_redirects, timeout=timeout, ) # Store future in data for access later net_info["request_future"] = future # Reset identify for tor (if needed) if unique_tor: underlying_request.reset_identity() # Add this site's results into final dictionary with all of the other results. 
results_total[social_network] = results_site # TODO: move into top-level function async def update_site_data_from_response(site, site_info): future = site_info.get('request_future') if not future: # ignore: search by incompatible id type return error_type = site_info['errorType'] site_data[site]['resp'] = await get_response(request_future=future, error_type=error_type, social_network=site, logger=logger) tasks = [] for social_network, net_info in site_data.items(): future = asyncio.ensure_future( update_site_data_from_response(social_network, net_info)) tasks.append(future) await asyncio.gather(*tasks) await session.close() # TODO: split to separate functions for social_network, net_info in site_data.items(): # Retrieve results again results_site = results_total.get(social_network) if not results_site: continue # Retrieve other site information again url = results_site.get("url_user") logger.debug(url) status = results_site.get("status") if status is not None: # We have already determined the user doesn't exist here continue # Get the expected error type error_type = net_info["errorType"] # Get the failure messages and comments failure_errors = net_info.get("errors", {}) # TODO: refactor resp = net_info.get('resp') if not resp: logger.error(f'No response for {social_network}') continue html_text, status_code, error_text, expection_text = resp # TODO: add elapsed request time counting response_time = None if debug: with open('debug.txt', 'a') as f: status = status_code or 'No response' f.write(f'url: {url}\nerror: {str(error_text)}\nr: {status}\n') if html_text: f.write(f'code: {status}\nresponse: {str(html_text)}\n') # TODO: move info separate module def detect_error_page(html_text, status_code, fail_flags, ignore_403): # Detect service restrictions such as a country restriction for flag, msg in fail_flags.items(): if flag in html_text: return 'Some site error', msg # Detect common restrictions such as provider censorship and bot protection for flag, msg in common_errors.items(): if flag in html_text: return 'Error', msg # Detect common site errors if status_code == 403 and not ignore_403: return 'Access denied', 'Access denied, use proxy/vpn' elif status_code >= 500: return f'Error {status_code}', f'Site error {status_code}' return None, None if status_code and not error_text: error_text, site_error_text = detect_error_page( html_text, status_code, failure_errors, 'ignore_403' in net_info) if error_text is not None: result = QueryResult(username, social_network, url, QueryStatus.UNKNOWN, query_time=response_time, context=error_text) elif error_type == "message": absence_flags = net_info.get("errorMsg") is_absence_flags_list = isinstance(absence_flags, list) absence_flags_set = set( absence_flags) if is_absence_flags_list else set( {absence_flags}) # Checks if the error message is in the HTML is_absence_detected = any([(absence_flag in html_text) for absence_flag in absence_flags_set]) if not is_absence_detected: result = QueryResult(username, social_network, url, QueryStatus.CLAIMED, query_time=response_time) else: result = QueryResult(username, social_network, url, QueryStatus.AVAILABLE, query_time=response_time) elif error_type == "status_code": # Checks if the status code of the response is 2XX if not status_code >= 300 or status_code < 200: result = QueryResult(username, social_network, url, QueryStatus.CLAIMED, query_time=response_time) else: result = QueryResult(username, social_network, url, QueryStatus.AVAILABLE, query_time=response_time) elif error_type == "response_url": # For 
this detection method, we have turned off the redirect. # So, there is no need to check the response URL: it will always # match the request. Instead, we will ensure that the response # code indicates that the request was successful (i.e. no 404, or # forward to some odd redirect). if 200 <= status_code < 300: result = QueryResult(username, social_network, url, QueryStatus.CLAIMED, query_time=response_time) else: result = QueryResult(username, social_network, url, QueryStatus.AVAILABLE, query_time=response_time) else: #It should be impossible to ever get here... raise ValueError(f"Unknown Error Type '{error_type}' for " f"site '{social_network}'") extracted_ids_data = '' if ids_search and result.status == QueryStatus.CLAIMED: try: extracted_ids_data = extract(html_text) except Exception as e: logger.warning(f'Error while parsing {social_network}: {e}', exc_info=True) if extracted_ids_data: new_usernames = {} for k, v in extracted_ids_data.items(): if 'username' in k: new_usernames[v] = 'username' if k in supported_recursive_search_ids: new_usernames[v] = k results_site['ids_usernames'] = new_usernames result.ids_data = extracted_ids_data #Notify caller about results of query. query_notify.update(result) # Save status of request results_site['status'] = result # Save results from request results_site['http_status'] = status_code results_site['response_text'] = html_text # Add this site's results into final dictionary with all of the other results. results_total[social_network] = results_site #Notify caller that all queries are finished. query_notify.finish() return results_total
def test_facebook_user_profile(): info = extract(parse('https://ru-ru.facebook.com/anatolijsharij/')[0]) assert info.get('uid') == '1486042157' assert info.get('username') == 'anatolijsharij'
def process_site_result(response, query_notify, logger, results_info: QueryResultWrapper, site: MaigretSite): if not response: return results_info fulltags = site.tags # Retrieve other site information again username = results_info["username"] is_parsing_enabled = results_info["parsing_enabled"] url = results_info.get("url_user") logger.info(url) status = results_info.get("status") if status is not None: # We have already determined the user doesn't exist here return results_info # Get the expected check type check_type = site.check_type # TODO: refactor if not response: logger.error(f"No response for {site.name}") return results_info html_text, status_code, check_error = response # TODO: add elapsed request time counting response_time = None if logger.level == logging.DEBUG: debug_response_logging(url, html_text, status_code, check_error) # additional check for errors if status_code and not check_error: check_error = detect_error_page(html_text, status_code, site.errors_dict, site.ignore403) # parsing activation is_need_activation = any( [s for s in site.activation.get("marks", []) if s in html_text]) if site.activation and html_text and is_need_activation: method = site.activation["method"] try: activate_fun = getattr(ParsingActivator(), method) # TODO: async call activate_fun(site, logger) except AttributeError: logger.warning( f"Activation method {method} for site {site.name} not found!") except Exception as e: logger.warning( f"Failed activation {method} for site {site.name}: {str(e)}", exc_info=True, ) # TODO: temporary check error site_name = site.pretty_name # presense flags # True by default presense_flags = site.presense_strs is_presense_detected = False if html_text: if not presense_flags: is_presense_detected = True site.stats["presense_flag"] = None else: for presense_flag in presense_flags: if presense_flag in html_text: is_presense_detected = True site.stats["presense_flag"] = presense_flag logger.debug(presense_flag) break def build_result(status, **kwargs): return QueryResult( username, site_name, url, status, query_time=response_time, tags=fulltags, **kwargs, ) if check_error: logger.warning(check_error) result = QueryResult( username, site_name, url, QueryStatus.UNKNOWN, query_time=response_time, error=check_error, context=str(CheckError), tags=fulltags, ) elif check_type == "message": # Checks if the error message is in the HTML is_absence_detected = any([(absence_flag in html_text) for absence_flag in site.absence_strs]) if not is_absence_detected and is_presense_detected: result = build_result(QueryStatus.CLAIMED) else: result = build_result(QueryStatus.AVAILABLE) elif check_type in "status_code": # Checks if the status code of the response is 2XX if 200 <= status_code < 300: result = build_result(QueryStatus.CLAIMED) else: result = build_result(QueryStatus.AVAILABLE) elif check_type == "response_url": # For this detection method, we have turned off the redirect. # So, there is no need to check the response URL: it will always # match the request. Instead, we will ensure that the response # code indicates that the request was successful (i.e. no 404, or # forward to some odd redirect). if 200 <= status_code < 300 and is_presense_detected: result = build_result(QueryStatus.CLAIMED) else: result = build_result(QueryStatus.AVAILABLE) else: # It should be impossible to ever get here... 
raise ValueError(f"Unknown check type '{check_type}' for " f"site '{site.name}'") extracted_ids_data = {} if is_parsing_enabled and result.status == QueryStatus.CLAIMED: try: extracted_ids_data = extract(html_text) except Exception as e: logger.warning(f"Error while parsing {site.name}: {e}", exc_info=True) if extracted_ids_data: new_usernames = {} for k, v in extracted_ids_data.items(): if "username" in k: new_usernames[v] = "username" if k in SUPPORTED_IDS: new_usernames[v] = k results_info["ids_usernames"] = new_usernames links = ascii_data_display(extracted_ids_data.get("links", "[]")) if "website" in extracted_ids_data: links.append(extracted_ids_data["website"]) results_info["ids_links"] = links result.ids_data = extracted_ids_data # Save status of request results_info["status"] = result # Save results from request results_info["http_status"] = status_code results_info["is_similar"] = site.similar_search # results_site['response_text'] = html_text results_info["rank"] = site.alexa_rank return results_info
def sherlock(username, site_data, query_notify, tor=False, unique_tor=False, proxy=None, timeout=None, ids_search=False, id_type='username', tags=[]): """Run Sherlock Analysis. Checks for existence of username on various social media sites. Keyword Arguments: username -- String indicating username that report should be created against. site_data -- Dictionary containing all of the site data. query_notify -- Object with base type of QueryNotify(). This will be used to notify the caller about query results. tor -- Boolean indicating whether to use a tor circuit for the requests. unique_tor -- Boolean indicating whether to use a new tor circuit for each request. proxy -- String indicating the proxy URL timeout -- Time in seconds to wait before timing out request. Default is no timeout. ids_search -- Search for other usernames in website pages & recursive search by them. Return Value: Dictionary containing results from report. Key of dictionary is the name of the social network site, and the value is another dictionary with the following keys: url_main: URL of main site. url_user: URL of user on site (if account exists). status: QueryResult() object indicating results of test for account existence. http_status: HTTP status code of query which checked for existence on site. response_text: Text that came back from request. May be None if there was an HTTP error when checking for existence. """ #Notify caller that we are starting the query. query_notify.start(username, id_type) # Create session based on request methodology if tor or unique_tor: #Requests using Tor obfuscation underlying_request = TorRequest() underlying_session = underlying_request.session else: #Normal requests underlying_session = requests.session() underlying_request = requests.Request() #Limit number of workers to 20. #This is probably vastly overkill. if len(site_data) >= 20: max_workers = 20 else: max_workers = len(site_data) #Create multi-threaded session for all requests. session = SherlockFuturesSession(max_workers=max_workers, session=underlying_session) # Results from analysis of all sites results_total = {} # First create futures for all requests. This allows for the requests to run in parallel for social_network, net_info in site_data.items(): # print(id_type) # print(social_network) if net_info.get('type', 'username') != id_type: continue site_tags = set(net_info.get('tags', [])) if tags: if not tags.intersection(site_tags): continue # Results from analysis of this specific site results_site = {} # Record URL of main site results_site['url_main'] = net_info.get("urlMain") # A user agent is needed because some sites don't return the correct # information since they think that we are bots (Which we actually are...) headers = { 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 11.1; rv:55.0) Gecko/20100101 Firefox/55.0', } if "headers" in net_info: # Override/append any extra headers required by a given site. headers.update(net_info["headers"]) # URL of user on site (if it exists) url = net_info.get('url').format(username) # Don't make request if username is invalid for the site regex_check = net_info.get("regexCheck") if regex_check and re.search(regex_check, username) is None: # No need to do the check at the site: this user name is not allowed. 
results_site['status'] = QueryResult(username, social_network, url, QueryStatus.ILLEGAL) results_site["url_user"] = "" results_site['http_status'] = "" results_site['response_text'] = "" query_notify.update(results_site['status']) else: # URL of user on site (if it exists) results_site["url_user"] = url url_probe = net_info.get("urlProbe") if url_probe is None: # Probe URL is normal one seen by people out on the web. url_probe = url else: # There is a special URL for probing existence separate # from where the user profile normally can be found. url_probe = url_probe.format(username) if (net_info["errorType"] == 'status_code' and net_info.get("request_head_only", True) == True): #In most cases when we are detecting by status code, #it is not necessary to get the entire body: we can #detect fine with just the HEAD response. request_method = session.head else: #Either this detect method needs the content associated #with the GET response, or this specific website will #not respond properly unless we request the whole page. request_method = session.get if net_info["errorType"] == "response_url": # Site forwards request to a different URL if username not # found. Disallow the redirect so we can capture the # http status from the original URL request. allow_redirects = False else: # Allow whatever redirect that the site wants to do. # The final result of the request will be what is available. allow_redirects = True def parse_cookies(cookies_str): cookies = SimpleCookie() cookies.load(cookies_str) return {key: morsel.value for key, morsel in cookies.items()} # cookies_str = 'collections_gid=117041; cph=948; cpw=550; yandexuid=6894705951593339704; i=NJAxWCDEQdhKbNGBppYN/5sl4XuX2Lq/lgZELKOVfjfX3boBnqOMyP0s0MSwcBbeuqPaRqjWPrsSXORVLDlLJ7Qi+RI=; font_loaded=YSv1; yuidss=6894705951593339704; ymex=1908699704.yrts.1593339704; _ym_wasSynced=%7B%22time%22%3A1593339704889%2C%22params%22%3A%7B%22eu%22%3A0%7D%2C%22bkParams%22%3A%7B%7D%7D; gdpr=0; _ym_uid=1593339705197323602; _ym_d=1593339705; mda=0; _ym_isad=2; ar=1593339710541646-252043; _ym_visorc_10630330=b; spravka=dD0xNTkzMzM5NzIyO2k9NS4yMjguMjI0LjM3O3U9MTU5MzMzOTcyMjM2MDI4NTg0MjtoPWM3NThkYjU0MzYyMzViZDEwMzU3ZGY3NTUwYzViNDE1' cookies_str = '' if 'yandex' in url_probe: # import logging # logging.error(cookies_str) cookies = parse_cookies(cookies_str) else: cookies = None # This future starts running the request in a new thread, doesn't block the main thread if proxy is not None: proxies = {"http": proxy, "https": proxy} future = request_method( url=url_probe, headers=headers, proxies=proxies, allow_redirects=allow_redirects, timeout=timeout, # cookies=cookies ) else: future = request_method( url=url_probe, headers=headers, allow_redirects=allow_redirects, timeout=timeout, # cookies=cookies ) # Store future in data for access later net_info["request_future"] = future # Reset identify for tor (if needed) if unique_tor: underlying_request.reset_identity() # Add this site's results into final dictionary with all of the other results. results_total[social_network] = results_site # Open the file containing account links # Core logic: If tor requests, make them here. 
If multi-threaded requests, wait for responses for social_network, net_info in site_data.items(): # Retrieve results again results_site = results_total.get(social_network) if not results_site: continue # Retrieve other site information again url = results_site.get("url_user") status = results_site.get("status") if status is not None: # We have already determined the user doesn't exist here continue # Get the expected error type error_type = net_info["errorType"] # Get the failure messages and comments failure_errors = net_info.get("errors", {}) # Retrieve future and ensure it has finished future = net_info["request_future"] r, error_text, expection_text = get_response( request_future=future, error_type=error_type, social_network=social_network) #Get response time for response of our request. try: response_time = r.elapsed except AttributeError: response_time = None # Attempt to get request information try: http_status = r.status_code except: http_status = "?" try: response_text = r.text.encode(r.encoding) # Extract IDs data from page except: response_text = "" # Detect failures such as a country restriction for text, comment in failure_errors.items(): if r.text and text in r.text: error_context = "Some error" error_text = comment break # workaround for 403 empty page if not r is None and r.status_code == 403: error_context = "Access denied" error_text = "Access denied, use proxy/vpn" # TODO: return error for captcha and some specific cases (CashMe) # make all result invalid extracted_ids_data = "" if ids_search and r: # print(r.text) extracted_ids_data = extract(r.text) if extracted_ids_data: new_usernames = {} for k, v in extracted_ids_data.items(): if 'username' in k: new_usernames[v] = 'username' if k in ('yandex_public_id', 'wikimapia_uid', 'gaia_id'): new_usernames[v] = k results_site['ids_usernames'] = new_usernames if error_text is not None: result = QueryResult(username, social_network, url, QueryStatus.UNKNOWN, ids_data=extracted_ids_data, query_time=response_time, context=error_text) elif error_type == "message": error = net_info.get("errorMsg") errors_set = set(error) if type(error) == list else set({error}) # Checks if the error message is in the HTML error_found = any([(err in r.text) for err in errors_set]) if not error_found: result = QueryResult(username, social_network, url, QueryStatus.CLAIMED, ids_data=extracted_ids_data, query_time=response_time) else: result = QueryResult(username, social_network, url, QueryStatus.AVAILABLE, ids_data=extracted_ids_data, query_time=response_time) elif error_type == "status_code": # Checks if the status code of the response is 2XX if not r.status_code >= 300 or r.status_code < 200: result = QueryResult(username, social_network, url, QueryStatus.CLAIMED, ids_data=extracted_ids_data, query_time=response_time) else: result = QueryResult(username, social_network, url, QueryStatus.AVAILABLE, ids_data=extracted_ids_data, query_time=response_time) elif error_type == "response_url": # For this detection method, we have turned off the redirect. # So, there is no need to check the response URL: it will always # match the request. Instead, we will ensure that the response # code indicates that the request was successful (i.e. no 404, or # forward to some odd redirect). 
if 200 <= r.status_code < 300: result = QueryResult(username, social_network, url, QueryStatus.CLAIMED, ids_data=extracted_ids_data, query_time=response_time) else: result = QueryResult(username, social_network, url, QueryStatus.AVAILABLE, ids_data=extracted_ids_data, query_time=response_time) else: #It should be impossible to ever get here... raise ValueError(f"Unknown Error Type '{error_type}' for " f"site '{social_network}'") #Notify caller about results of query. query_notify.update(result) # Save status of request results_site['status'] = result # Save results from request results_site['http_status'] = http_status results_site['response_text'] = response_text # Add this site's results into final dictionary with all of the other results. results_total[social_network] = results_site #Notify caller that all queries are finished. query_notify.finish() return results_total
def test_habr(): info = extract(parse('https://habr.com/ru/users/m1rko/')[0]) assert info.get('uid') == '1371978' assert info.get('username') == 'm1rko'
def process_site_result(response, query_notify, logger, results_info, net_info, social_network): if not response: return results_info fulltags = [] if ("tags" in net_info.keys()): fulltags = net_info["tags"] # Retrieve other site information again username = results_info['username'] is_parsing_enabled = results_info['parsing_enabled'] url = results_info.get("url_user") logger.debug(url) status = results_info.get("status") if status is not None: # We have already determined the user doesn't exist here return results_info # Get the expected error type error_type = net_info["errorType"] # Get the failure messages and comments failure_errors = net_info.get("errors", {}) # TODO: refactor if not response: logger.error(f'No response for {social_network}') return results_info html_text, status_code, error_text, expection_text = response site_error_text = '?' # TODO: add elapsed request time counting response_time = None if logger.level == logging.DEBUG: with open('debug.txt', 'a') as f: status = status_code or 'No response' f.write(f'url: {url}\nerror: {str(error_text)}\nr: {status}\n') if html_text: f.write(f'code: {status}\nresponse: {str(html_text)}\n') if status_code and not error_text: error_text, site_error_text = detect_error_page( html_text, status_code, failure_errors, 'ignore_403' in net_info) # presense flags # True by default presense_flags = net_info.get("presenseStrs", []) is_presense_detected = html_text and all([ (presense_flag in html_text) for presense_flag in presense_flags ]) or not presense_flags if error_text is not None: logger.debug(error_text) result = QueryResult(username, social_network, url, QueryStatus.UNKNOWN, query_time=response_time, context=f'{error_text}: {site_error_text}', tags=fulltags) elif error_type == "message": absence_flags = net_info.get("errorMsg") is_absence_flags_list = isinstance(absence_flags, list) absence_flags_set = set(absence_flags) if is_absence_flags_list else { absence_flags } # Checks if the error message is in the HTML is_absence_detected = any([(absence_flag in html_text) for absence_flag in absence_flags_set]) if not is_absence_detected and is_presense_detected: result = QueryResult(username, social_network, url, QueryStatus.CLAIMED, query_time=response_time, tags=fulltags) else: result = QueryResult(username, social_network, url, QueryStatus.AVAILABLE, query_time=response_time, tags=fulltags) elif error_type == "status_code": # Checks if the status code of the response is 2XX if (not status_code >= 300 or status_code < 200) and is_presense_detected: result = QueryResult(username, social_network, url, QueryStatus.CLAIMED, query_time=response_time, tags=fulltags) else: result = QueryResult(username, social_network, url, QueryStatus.AVAILABLE, query_time=response_time, tags=fulltags) elif error_type == "response_url": # For this detection method, we have turned off the redirect. # So, there is no need to check the response URL: it will always # match the request. Instead, we will ensure that the response # code indicates that the request was successful (i.e. no 404, or # forward to some odd redirect). if 200 <= status_code < 300 and is_presense_detected: result = QueryResult(username, social_network, url, QueryStatus.CLAIMED, query_time=response_time, tags=fulltags) else: result = QueryResult(username, social_network, url, QueryStatus.AVAILABLE, query_time=response_time, tags=fulltags) else: # It should be impossible to ever get here... 
raise ValueError(f"Unknown Error Type '{error_type}' for " f"site '{social_network}'") extracted_ids_data = {} if is_parsing_enabled and result.status == QueryStatus.CLAIMED: try: extracted_ids_data = extract(html_text) except Exception as e: logger.warning(f'Error while parsing {social_network}: {e}', exc_info=True) if extracted_ids_data: new_usernames = {} for k, v in extracted_ids_data.items(): if 'username' in k: new_usernames[v] = 'username' if k in supported_recursive_search_ids: new_usernames[v] = k results_info['ids_usernames'] = new_usernames result.ids_data = extracted_ids_data is_similar = net_info.get('similarSearch', False) # Notify caller about results of query. query_notify.update(result, is_similar) # Save status of request results_info['status'] = result # Save results from request results_info['http_status'] = status_code results_info['is_similar'] = is_similar # results_site['response_text'] = html_text results_info['rank'] = net_info.get('rank', 0) return results_info
def test_twitter(): info = extract(parse('https://twitter.com/esquireru')[0]) assert info.get('uid') == '163060799' assert info.get('username') == 'Esquire Russia' assert info.get('name') == 'esquireru'
def test_github(): info = extract(parse('https://github.com/soxoj')[0]) assert info.get('uid') == '31013580' assert info.get('username') == 'soxoj'
def test_facebook_group(): info = extract(parse('https://www.facebook.com/discordapp/')[0]) assert info.get('uid') == '858412104226521' assert info.get('username') == 'discordapp'
async def main(): version_string = '\n'.join([ f'%(prog)s {__version__}', f'Socid-extractor: {socid_version}', f'Aiohttp: {aiohttp.__version__}', f'Requests: {requests.__version__}', f'Python: {platform.python_version()}', ]) parser = ArgumentParser(formatter_class=RawDescriptionHelpFormatter, description=f"Maigret v{__version__}") parser.add_argument("--version", action="version", version=version_string, help="Display version information and dependencies.") parser.add_argument("--info", "-vv", action="store_true", dest="info", default=False, help="Display service information.") parser.add_argument("--verbose", "-v", action="store_true", dest="verbose", default=False, help="Display extra information and metrics.") parser.add_argument( "-d", "--debug", "-vvv", action="store_true", dest="debug", default=False, help="Saving debugging information and sites responses in debug.txt.") parser.add_argument( "--site", action="append", metavar='SITE_NAME', dest="site_list", default=[], help= "Limit analysis to just the listed sites (use several times to specify more than one)" ) parser.add_argument( "--proxy", "-p", metavar='PROXY_URL', action="store", dest="proxy", default=None, help="Make requests over a proxy. e.g. socks5://127.0.0.1:1080") parser.add_argument( "--db", metavar="DB_FILE", dest="db_file", default=None, help= "Load Maigret database from a JSON file or an online, valid, JSON file." ) parser.add_argument("--cookies-jar-file", metavar="COOKIE_FILE", dest="cookie_file", default=None, help="File with cookies.") parser.add_argument( "--timeout", action="store", metavar='TIMEOUT', dest="timeout", type=timeout_check, default=10, help="Time (in seconds) to wait for response to requests." "Default timeout of 10.0s. " "A longer timeout will be more likely to get results from slow sites." "On the other hand, this may cause a long delay to gather all results." ) parser.add_argument("-n", "--max-connections", action="store", type=int, dest="connections", default=100, help="Allowed number of concurrent connections.") parser.add_argument("-a", "--all-sites", action="store_true", dest="all_sites", default=False, help="Use all sites for scan.") parser.add_argument( "--top-sites", action="store", default=500, type=int, help="Count of sites for scan ranked by Alexa Top (default: 500).") parser.add_argument("--print-not-found", action="store_true", dest="print_not_found", default=False, help="Print sites where the username was not found.") parser.add_argument( "--print-errors", action="store_true", dest="print_check_errors", default=False, help= "Print errors messages: connection, captcha, site country ban, etc.") parser.add_argument("--submit", metavar='EXISTING_USER_URL', type=str, dest="new_site_to_submit", default=False, help="URL of existing profile in new site to submit.") parser.add_argument("--no-color", action="store_true", dest="no_color", default=False, help="Don't color terminal output") parser.add_argument("--no-progressbar", action="store_true", dest="no_progressbar", default=False, help="Don't show progressbar.") parser.add_argument("--browse", "-b", action="store_true", dest="browse", default=False, help="Browse to all results on default bowser.") parser.add_argument( "--no-recursion", action="store_true", dest="disable_recursive_search", default=False, help="Disable recursive search by additional data extracted from pages." 
) parser.add_argument( "--no-extracting", action="store_true", dest="disable_extracting", default=False, help="Disable parsing pages for additional data and other usernames.") parser.add_argument( "--self-check", action="store_true", default=False, help= "Do self check for sites and database and disable non-working ones.") parser.add_argument("--stats", action="store_true", default=False, help="Show database statistics.") parser.add_argument( "--use-disabled-sites", action="store_true", default=False, help="Use disabled sites to search (may cause many false positives).") parser.add_argument( "--parse", dest="parse_url", default='', help="Parse page by URL and extract username and IDs to use for search." ) parser.add_argument("--id-type", dest="id_type", default='username', help="Specify identifier(s) type (default: username).") parser.add_argument( "--ignore-ids", action="append", metavar='IGNORED_IDS', dest="ignore_ids_list", default=[], help="Do not make search by the specified username or other ids.") parser.add_argument( "username", nargs='+', metavar='USERNAMES', action="store", help="One or more usernames to check with social networks.") parser.add_argument("--tags", dest="tags", default='', help="Specify tags of sites.") # reports options parser.add_argument( "--folderoutput", "-fo", dest="folderoutput", default="reports", help= "If using multiple usernames, the output of the results will be saved to this folder." ) parser.add_argument("-T", "--txt", action="store_true", dest="txt", default=False, help="Create a TXT report (one report per username).") parser.add_argument("-C", "--csv", action="store_true", dest="csv", default=False, help="Create a CSV report (one report per username).") parser.add_argument( "-H", "--html", action="store_true", dest="html", default=False, help="Create an HTML report file (general report on all usernames).") parser.add_argument( "-X", "--xmind", action="store_true", dest="xmind", default=False, help="Generate an XMind 8 mindmap report (one report per username).") parser.add_argument( "-P", "--pdf", action="store_true", dest="pdf", default=False, help="Generate a PDF report (general report on all usernames).") parser.add_argument( "-J", "--json", action="store", metavar='REPORT_TYPE', dest="json", default='', type=check_supported_json_format, help= f"Generate a JSON report of specific type: {', '.join(SUPPORTED_JSON_REPORT_FORMATS)}" " (one report per username).") args = parser.parse_args() # Logging log_level = logging.ERROR logging.basicConfig( format= '[%(filename)s:%(lineno)d] %(levelname)-3s %(asctime)s %(message)s', datefmt='%H:%M:%S', level=log_level) if args.debug: log_level = logging.DEBUG elif args.info: log_level = logging.INFO elif args.verbose: log_level = logging.WARNING logger = logging.getLogger('maigret') logger.setLevel(log_level) # Usernames initial list usernames = { u: args.id_type for u in args.username if u not in ['-'] and u not in args.ignore_ids_list } parsing_enabled = not args.disable_extracting recursive_search_enabled = not args.disable_recursive_search # Make prompts if args.proxy is not None: print("Using the proxy: " + args.proxy) if args.parse_url: # url, headers reqs = [(args.parse_url, set())] try: # temporary workaround for URL mutations MVP from socid_extractor import mutate_url reqs += list(mutate_url(args.parse_url)) except: pass for req in reqs: url, headers = req print(f'Scanning webpage by URL {url}...') page, _ = parse(url, cookies_str='', headers=headers) info = extract(page) if not info: print('Nothing 
extracted') else: print(get_dict_ascii_tree(info.items(), new_line=False), ' ') for k, v in info.items(): if 'username' in k: usernames[v] = 'username' if k in supported_recursive_search_ids: usernames[v] = k if args.tags: args.tags = list(set(str(args.tags).split(','))) if args.db_file is None: args.db_file = \ os.path.join(os.path.dirname(os.path.realpath(__file__)), "resources/data.json" ) if args.top_sites == 0 or args.all_sites: args.top_sites = sys.maxsize # Create notify object for query results. query_notify = QueryNotifyPrint( result=None, verbose=args.verbose, print_found_only=not args.print_not_found, skip_check_errors=not args.print_check_errors, color=not args.no_color) # Create object with all information about sites we are aware of. db = MaigretDatabase().load_from_file(args.db_file) get_top_sites_for_id = lambda x: db.ranked_sites_dict(top=args.top_sites, tags=args.tags, names=args.site_list, disabled=False, id_type=x) site_data = get_top_sites_for_id(args.id_type) if args.new_site_to_submit: is_submitted = await submit_dialog(db, args.new_site_to_submit, args.cookie_file) if is_submitted: db.save_to_file(args.db_file) # Database self-checking if args.self_check: print('Maigret sites database self-checking...') is_need_update = await self_check(db, site_data, logger, max_connections=args.connections) if is_need_update: if input('Do you want to save changes permanently? [Yn]\n').lower( ) == 'y': db.save_to_file(args.db_file) print('Database was successfully updated.') else: print( 'Updates will be applied only for current search session.') print(db.get_scan_stats(site_data)) if args.stats: print(db.get_db_stats(db.sites_dict)) # Make reports folder is not exists os.makedirs(args.folderoutput, exist_ok=True) # Define one report filename template report_filepath_tpl = os.path.join(args.folderoutput, 'report_{username}{postfix}') # Database stats # TODO: verbose info about filtered sites # enabled_count = len(list(filter(lambda x: not x.disabled, site_data.values()))) # print(f'Sites in database, enabled/total: {enabled_count}/{len(site_data)}') if usernames == {}: # magic params to exit after init query_notify.warning('No usernames to check, exiting.') sys.exit(0) if not site_data: query_notify.warning('No sites to check, exiting!') sys.exit(2) else: query_notify.warning( f'Starting a search on top {len(site_data)} sites from the Maigret database...' ) if not args.all_sites: query_notify.warning( f'You can run search by full list of sites with flag `-a`', '!') already_checked = set() general_results = [] while usernames: username, id_type = list(usernames.items())[0] del usernames[username] if username.lower() in already_checked: continue else: already_checked.add(username.lower()) if username in args.ignore_ids_list: query_notify.warning( f'Skip a search by username {username} cause it\'s marked as ignored.' 
) continue # check for characters do not supported by sites generally found_unsupported_chars = set(unsupported_characters).intersection( set(username)) if found_unsupported_chars: pretty_chars_str = ','.join( map(lambda s: f'"{s}"', found_unsupported_chars)) query_notify.warning( f'Found unsupported URL characters: {pretty_chars_str}, skip search by username "{username}"' ) continue sites_to_check = get_top_sites_for_id(id_type) results = await maigret( username=username, site_dict=dict(sites_to_check), query_notify=query_notify, proxy=args.proxy, timeout=args.timeout, is_parsing_enabled=parsing_enabled, id_type=id_type, debug=args.verbose, logger=logger, cookies=args.cookie_file, forced=args.use_disabled_sites, max_connections=args.connections, no_progressbar=args.no_progressbar, ) general_results.append((username, id_type, results)) # TODO: tests for website_name in results: dictionary = results[website_name] # TODO: fix no site data issue if not dictionary or not recursive_search_enabled: continue new_usernames = dictionary.get('ids_usernames') if new_usernames: for u, utype in new_usernames.items(): usernames[u] = utype for url in dictionary.get('ids_links', []): for s in db.sites: u = s.detect_username(url) if u: usernames[u] = 'username' # reporting for a one username if args.xmind: filename = report_filepath_tpl.format(username=username, postfix='.xmind') save_xmind_report(filename, username, results) query_notify.warning( f'XMind report for {username} saved in {filename}') if args.csv: filename = report_filepath_tpl.format(username=username, postfix='.csv') save_csv_report(filename, username, results) query_notify.warning( f'CSV report for {username} saved in {filename}') if args.txt: filename = report_filepath_tpl.format(username=username, postfix='.txt') save_txt_report(filename, username, results) query_notify.warning( f'TXT report for {username} saved in {filename}') if args.json: filename = report_filepath_tpl.format(username=username, postfix=f'_{args.json}.json') save_json_report(filename, username, results, report_type=args.json) query_notify.warning( f'JSON {args.json} report for {username} saved in {filename}') # reporting for all the result if general_results: if args.html or args.pdf: query_notify.warning('Generating report info...') report_context = generate_report_context(general_results) # determine main username username = report_context['username'] if args.html: filename = report_filepath_tpl.format(username=username, postfix='.html') save_html_report(filename, report_context) query_notify.warning( f'HTML report on all usernames saved in {filename}') if args.pdf: filename = report_filepath_tpl.format(username=username, postfix='.pdf') save_pdf_report(filename, report_context) query_notify.warning( f'PDF report on all usernames saved in {filename}') # update database db.save_to_file(args.db_file)
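Both argument parsers above pass a timeout_check callable as the argparse type for --timeout. Below is a minimal sketch of such a validator following the usual argparse contract (string in, converted value out, ArgumentTypeError on bad input); it is illustrative and not necessarily the project's exact implementation.

from argparse import ArgumentTypeError

def timeout_check(value):
    # argparse passes the raw string; return the parsed value or raise ArgumentTypeError
    try:
        timeout = float(value)
    except ValueError:
        raise ArgumentTypeError(f"Timeout '{value}' must be a number.")
    if timeout <= 0:
        raise ArgumentTypeError(f"Timeout '{value}' must be a positive number.")
    return timeout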
def main(): version_string = f"%(prog)s {__version__}\n" + \ f"{requests.__description__}: {requests.__version__}\n" + \ f"Python: {platform.python_version()}" parser = ArgumentParser( formatter_class=RawDescriptionHelpFormatter, description=f"{module_name} (Version {__version__})") parser.add_argument("--version", action="version", version=version_string, help="Display version information and dependencies.") parser.add_argument( "--verbose", "-v", "-d", "--debug", action="store_true", dest="verbose", default=False, help="Display extra debugging information and metrics.") parser.add_argument( "--rank", "-r", action="store_true", dest="rank", default=False, help= "Present websites ordered by their Alexa.com global rank in popularity." ) parser.add_argument( "--folderoutput", "-fo", dest="folderoutput", help= "If using multiple usernames, the output of the results will be saved to this folder." ) parser.add_argument( "--output", "-o", dest="output", help= "If using single username, the output of the result will be saved to this file." ) parser.add_argument( "--tor", "-t", action="store_true", dest="tor", default=False, help= "Make requests over Tor; increases runtime; requires Tor to be installed and in system path." ) parser.add_argument( "--unique-tor", "-u", action="store_true", dest="unique_tor", default=False, help= "Make requests over Tor with new Tor circuit after each request; increases runtime; requires Tor to be installed and in system path." ) parser.add_argument("--csv", action="store_true", dest="csv", default=False, help="Create Comma-Separated Values (CSV) File.") parser.add_argument( "--site", action="append", metavar='SITE_NAME', dest="site_list", default=None, help= "Limit analysis to just the listed sites. Add multiple options to specify more than one site." ) parser.add_argument( "--proxy", "-p", metavar='PROXY_URL', action="store", dest="proxy", default=None, help="Make requests over a proxy. e.g. socks5://127.0.0.1:1080") parser.add_argument( "--json", "-j", metavar="JSON_FILE", dest="json_file", default=None, help="Load data from a JSON file or an online, valid, JSON file.") parser.add_argument( "--timeout", action="store", metavar='TIMEOUT', dest="timeout", type=timeout_check, default=None, help="Time (in seconds) to wait for response to requests. " "Default timeout of 60.0s." "A longer timeout will be more likely to get results from slow sites." "On the other hand, this may cause a long delay to gather all results." ) parser.add_argument( "--print-found", action="store_true", dest="print_found_only", default=False, help="Do not output sites where the username was not found.") parser.add_argument( "--skip-errors", action="store_true", dest="skip_check_errors", default=False, help= "Do not print errors messages: connection, captcha, site country ban, etc." ) parser.add_argument("--no-color", action="store_true", dest="no_color", default=False, help="Don't color terminal output") parser.add_argument("--browse", "-b", action="store_true", dest="browse", default=False, help="Browse to all results on default bowser.") parser.add_argument( "--ids", "-i", action="store_true", dest="ids_search", default=False, help= "Make scan of pages for other usernames and recursive search by them.") parser.add_argument( "--parse", dest="parse_url", default='', help="Parse page by URL and extract username and IDs to use for search." 
) parser.add_argument( "username", nargs='+', metavar='USERNAMES', action="store", help="One or more usernames to check with social networks.") parser.add_argument("--tags", dest="tags", default='', help="Specify tags of sites.") args = parser.parse_args() # Argument check # Usernames initial list usernames = {u: 'username' for u in args.username if u not in ('-')} # TODO regex check on args.proxy if args.tor and (args.proxy is not None): raise Exception("Tor and Proxy cannot be set at the same time.") # Make prompts if args.proxy is not None: print("Using the proxy: " + args.proxy) if args.tor or args.unique_tor: print("Using Tor to make requests") print( "Warning: some websites might refuse connecting over Tor, so note that using this option might increase connection errors." ) # Check if both output methods are entered as input. if args.output is not None and args.folderoutput is not None: print("You can only use one of the output methods.") sys.exit(1) # Check validity for single username output. if args.output is not None and len(args.username) != 1: print("You can only use --output with a single username") sys.exit(1) if args.parse_url: page, _ = parse( args.parse_url, cookies_str= 'collections_gid=213; cph=948; cpw=790; yandexuid=2146767031582893378; yuidss=2146767031582893378; gdpr=0; _ym_uid=1582893380492618461; mda=0; ymex=1898253380.yrts.1582893380#1900850969.yrtsi.1585490969; font_loaded=YSv1; yandex_gid=213; my=YwA=; _ym_uid=1582893380492618461; _ym_d=1593451737; L=XGJfaARJWEAARGILWAQKbXJUUU5NSEJHNAwrIxkaE11SHD4P.1593608730.14282.352228.74f1540484d115d5f534c370a0d54d14; yandex_login=danilovdelta; i=pQT2fDoFQAd1ZkIJW/qOXaKw+KI7LXUGoTQbUy5dPTdftfK7HFAnktwsf4MrRy4aQEk0sqxbZGY18+bnpKkrDgt29/8=; ys=udn.cDpkYW5pbG92ZGVsdGE%3D#wprid.1593608013100941-1715475084842016754100299-production-app-host-man-web-yp-306#ymrefl.DD2F275B69BCF594; zm=m-white_bender.webp.css-https%3As3home-static_KgOlxZDBNvw0efFr5riblj4yPtY%3Al; yp=1908968730.udn.cDpkYW5pbG92ZGVsdGE%3D#1595886694.ygu.1#1609637986.szm.2:1680x1050:1644x948#1596131262.csc.2#1908664615.sad.1593304615:1593304615:1#1908965951.multib.1; _ym_d=1593869990; yc=1594225567.zen.cach%3A1593969966; yabs-frequency=/5/0m0004s7_5u00000/8Y10RG00003uEo7ptt9m00000FWx8KRMFsq00000w3j-/; ys_fp=form-client%3DWeb%26form-page%3Dhttps%253A%252F%252Fyandex.ru%252Fchat%2523%252F%2540%252Fchats%252F1%25252F0%25252F964d3b91-5972-49c2-84d3-ed614622223f%2520%25D0%25AF%25D0%25BD%25D0%25B4%25D0%25B5%25D0%25BA%25D1%2581.%25D0%259C%25D0%25B5%25D1%2581%25D1%2581%25D0%25B5%25D0%25BD%25D0%25B4%25D0%25B6%25D0%25B5%25D1%2580%26form-referrer%3Dhttps%253A%252F%252Fyandex.ru%252Fchat%26form-browser%3DMozilla%252F5.0%2520(Macintosh%253B%2520Intel%2520Mac%2520OS%2520X%252010_15_5)%2520AppleWebKit%252F537.36%2520(KHTML%252C%2520like%2520Gecko)%2520Chrome%252F83.0.4103.116%2520Safari%252F537.36%26form-screen%3D1680%25C3%25971050%25C3%259730%26form-window%3D792%25C3%2597948%26form-app_version%3D2.8.0%26form-reqid%3D1593966167731077-1230441077775610555700303-production-app-host-sas-web-yp-249; skid=8069161091593972389; device_id="a9eb41b4cb3b056e5da4f9a4029a9e7cfea081196"; cycada=xPXy0sesbr5pVmRDiBiYZnAFhHtmn6zZ/YSDpCUU2Gs=; Session_id=3:1594143924.5.1.1593295629841:JeDkBQ:f.1|611645851.-1.0.1:114943352|33600788.310322.2.2:310322|219601.339772.5aiiRX9iIGUU6gzDuKnO4dqTM24; sessionid2=3:1594143924.5.1.1593295629841:JeDkBQ:f.1|611645851.-1.0.1:114943352|33600788.310322.2.2:310322|219601.678091.QGFa-AEA5z46AzNAmKFAL4_4jdM; _ym_isad=2; active-browser-timestamp=1594143926414; 
q-csrf-token=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiIzMzYwMDc4OCIsImV4cCI6MTU5NDIzMDMzMX0.w4FkWaag4t1D7j42MD2ILP0oenqZiIjo4iOZnshCiwY; ar=1594145799547993-792214; _ym_visorc_10630330=w; spravka=dD0xNTk0MTQ1ODMwO2k9NS4yMjguMjI0LjM3O3U9MTU5NDE0NTgzMDI5MTk5NTkwMjtoPWMyZTI1Mjk4NmVmZjFhNGNjMGZhYmIwZWQ3ZDIyMmZk' ) info = extract(page) text = 'Extracted ID data from webpage: ' + ', '.join( [f'{a}: {b}' for a, b in info.items()]) print(text) for k, v in info.items(): if 'username' in k: usernames[v] = 'username' if k in ('yandex_public_id', 'wikimapia_uid', 'gaia_id'): usernames[v] = k if args.tags: args.tags = set(args.tags.split(',')) #Create object with all information about sites we are aware of. try: sites = SitesInformation(args.json_file) except Exception as error: print(f"ERROR: {error}") sys.exit(1) #Create original dictionary from SitesInformation() object. #Eventually, the rest of the code will be updated to use the new object #directly, but this will glue the two pieces together. site_data_all = {} for site in sites: site_data_all[site.name] = site.information if args.site_list is None: # Not desired to look at a sub-set of sites site_data = site_data_all else: # User desires to selectively run queries on a sub-set of the site list. # Make sure that the sites are supported & build up pruned site database. site_data = {} site_missing = [] for site in args.site_list: for existing_site in site_data_all: if site.lower() == existing_site.lower(): site_data[existing_site] = site_data_all[existing_site] if not site_data: # Build up list of sites not supported for future error message. site_missing.append(f"'{site}'") if site_missing: print( f"Error: Desired sites not found: {', '.join(site_missing)}.") sys.exit(1) if args.rank: # Sort data by rank site_dataCpy = dict(site_data) ranked_sites = sorted( site_data, key=lambda k: ("rank" not in k, site_data[k].get("rank", sys.maxsize))) site_data = {} for site in ranked_sites: site_data[site] = site_dataCpy.get(site) #Create notify object for query results. query_notify = QueryNotifyPrint(result=None, verbose=args.verbose, print_found_only=args.print_found_only, skip_check_errors=args.skip_check_errors, color=not args.no_color) already_checked = set() while usernames: username, id_type = list(usernames.items())[0] del usernames[username] if username.lower() in already_checked: continue else: already_checked.add(username.lower()) results = sherlock(username, site_data, query_notify, tor=args.tor, unique_tor=args.unique_tor, proxy=args.proxy, timeout=args.timeout, ids_search=args.ids_search, id_type=id_type, tags=args.tags) if args.output: result_file = args.output elif args.folderoutput: # The usernames results should be stored in a targeted folder. 
            # If the folder doesn't exist, create it first
            os.makedirs(args.folderoutput, exist_ok=True)
            result_file = os.path.join(args.folderoutput, f"{username}.txt")
        else:
            result_file = f"{username}.txt"

        with open(result_file, "w", encoding="utf-8") as file:
            exists_counter = 0
            for website_name in results:
                dictionary = results[website_name]
                new_usernames = dictionary.get('ids_usernames')
                if new_usernames:
                    for u, utype in new_usernames.items():
                        usernames[u] = utype
                if dictionary.get("status").status == QueryStatus.CLAIMED:
                    exists_counter += 1
                    file.write(dictionary["url_user"] + "\n")
            file.write(f"Total Websites Username Detected On : {exists_counter}")

        if args.csv:
            with open(username + ".csv", "w", newline='', encoding="utf-8") as csv_report:
                writer = csv.writer(csv_report)
                writer.writerow([
                    'username', 'name', 'url_main', 'url_user', 'exists',
                    'http_status', 'response_time_s'
                ])
                for site in results:
                    response_time_s = results[site]['status'].query_time
                    if response_time_s is None:
                        response_time_s = ""
                    writer.writerow([
                        username, site, results[site]['url_main'],
                        results[site]['url_user'],
                        str(results[site]['status'].status),
                        results[site]['http_status'], response_time_s
                    ])
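For reference, a short sketch of reading back the per-username CSV report written above. It assumes that str() of the status enum renders as 'QueryStatus.CLAIMED' (the default Enum string form), and the path alice.csv is only an example.

import csv

def read_claimed_urls(csv_path):
    # Yield profile URLs for rows whose 'exists' column marks a claimed account
    with open(csv_path, newline='', encoding='utf-8') as f:
        for row in csv.DictReader(f):
            if row['exists'] == 'QueryStatus.CLAIMED':
                yield row['url_user']

# usage (illustrative):
# for url in read_claimed_urls('alice.csv'):
#     print(url)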
def test_yandex_disk_photos():
    info = extract(parse('https://yadi.sk/a/oiySK_wg3Vv5p4')[0])
    assert info.get('uid') == '38569641'
    assert info.get('username') == 'nikitina-nm'
    assert info.get('name') == 'Вербочка'
def test_reddit():
    info = extract(parse('https://www.reddit.com/user/postvolta/')[0])
    assert info.get('uid') == 't2_dexuehm'
    assert info.get('username') == 'postvolta'
def process_site_result(response, query_notify, logger, results_info, site: MaigretSite):
    if not response:
        return results_info

    fulltags = site.tags

    # Retrieve other site information again
    username = results_info['username']
    is_parsing_enabled = results_info['parsing_enabled']
    url = results_info.get("url_user")
    logger.debug(url)

    status = results_info.get("status")
    if status is not None:
        # We have already determined the user doesn't exist here
        return results_info

    # Get the expected check type
    check_type = site.check_type

    # Get the failure messages and comments
    failure_errors = site.errors

    # TODO: refactor
    if not response:
        logger.error(f'No response for {site.name}')
        return results_info

    html_text, status_code, error_text, expection_text = response
    site_error_text = '?'

    # TODO: add elapsed request time counting
    response_time = None

    if logger.level == logging.DEBUG:
        with open('debug.txt', 'a') as f:
            status = status_code or 'No response'
            f.write(f'url: {url}\nerror: {str(error_text)}\nr: {status}\n')
            if html_text:
                f.write(f'code: {status}\nresponse: {str(html_text)}\n')

    if status_code and not error_text:
        error_text, site_error_text = detect_error_page(
            html_text, status_code, failure_errors, site.ignore_403)

    if site.activation and html_text:
        is_need_activation = any(
            [s for s in site.activation['marks'] if s in html_text])
        if is_need_activation:
            method = site.activation['method']
            try:
                activate_fun = getattr(ParsingActivator(), method)
                # TODO: async call
                activate_fun(site, logger)
            except AttributeError:
                logger.warning(
                    f'Activation method {method} for site {site.name} not found!')

    # presence flags
    # True by default
    presense_flags = site.presense_strs
    is_presense_detected = False
    if html_text:
        if not presense_flags:
            is_presense_detected = True
            site.stats['presense_flag'] = None
        else:
            for presense_flag in presense_flags:
                if presense_flag in html_text:
                    is_presense_detected = True
                    site.stats['presense_flag'] = presense_flag
                    logger.info(presense_flag)
                    break

    if error_text is not None:
        logger.debug(error_text)
        result = QueryResult(username,
                             site.name,
                             url,
                             QueryStatus.UNKNOWN,
                             query_time=response_time,
                             context=f'{error_text}: {site_error_text}',
                             tags=fulltags)
    elif check_type == "message":
        absence_flags = site.absence_strs
        is_absence_flags_list = isinstance(absence_flags, list)
        absence_flags_set = set(absence_flags) if is_absence_flags_list else {absence_flags}
        # Checks if the error message is in the HTML
        is_absence_detected = any(
            [(absence_flag in html_text) for absence_flag in absence_flags_set])
        if not is_absence_detected and is_presense_detected:
            result = QueryResult(username,
                                 site.name,
                                 url,
                                 QueryStatus.CLAIMED,
                                 query_time=response_time,
                                 tags=fulltags)
        else:
            result = QueryResult(username,
                                 site.name,
                                 url,
                                 QueryStatus.AVAILABLE,
                                 query_time=response_time,
                                 tags=fulltags)
    elif check_type == "status_code":
        # Checks if the status code of the response is 2XX
        if 200 <= status_code < 300 and is_presense_detected:
            result = QueryResult(username,
                                 site.name,
                                 url,
                                 QueryStatus.CLAIMED,
                                 query_time=response_time,
                                 tags=fulltags)
        else:
            result = QueryResult(username,
                                 site.name,
                                 url,
                                 QueryStatus.AVAILABLE,
                                 query_time=response_time,
                                 tags=fulltags)
    elif check_type == "response_url":
        # For this detection method, we have turned off the redirect.
        # So, there is no need to check the response URL: it will always
        # match the request. Instead, we will ensure that the response
        # code indicates that the request was successful (i.e. no 404, or
        # forward to some odd redirect).
        if 200 <= status_code < 300 and is_presense_detected:
            result = QueryResult(username,
                                 site.name,
                                 url,
                                 QueryStatus.CLAIMED,
                                 query_time=response_time,
                                 tags=fulltags)
        else:
            result = QueryResult(username,
                                 site.name,
                                 url,
                                 QueryStatus.AVAILABLE,
                                 query_time=response_time,
                                 tags=fulltags)
    else:
        # It should be impossible to ever get here...
        raise ValueError(f"Unknown check type '{check_type}' for "
                         f"site '{site.name}'")

    extracted_ids_data = {}

    if is_parsing_enabled and result.status == QueryStatus.CLAIMED:
        try:
            extracted_ids_data = extract(html_text)
        except Exception as e:
            logger.warning(f'Error while parsing {site.name}: {e}', exc_info=True)

        if extracted_ids_data:
            new_usernames = {}
            for k, v in extracted_ids_data.items():
                if 'username' in k:
                    new_usernames[v] = 'username'
                if k in supported_recursive_search_ids:
                    new_usernames[v] = k

            results_info['ids_usernames'] = new_usernames
            result.ids_data = extracted_ids_data

    # Notify caller about results of query.
    query_notify.update(result, site.similar_search)

    # Save status of request
    results_info['status'] = result

    # Save results from request
    results_info['http_status'] = status_code
    results_info['is_similar'] = site.similar_search
    # results_site['response_text'] = html_text
    results_info['rank'] = site.alexa_rank

    return results_info
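A minimal sketch of consuming the per-site dictionaries that process_site_result fills in; the helper name claimed_profiles is hypothetical and only illustrates the results_info shape used above ('status' holds a QueryResult, 'url_user' the profile URL).

def claimed_profiles(results):
    # Collect user URLs for every site whose saved QueryResult is CLAIMED
    urls = []
    for site_name, info in results.items():
        result = info.get('status')
        if result and result.status == QueryStatus.CLAIMED:
            urls.append(info.get('url_user'))
    return urls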
async def main(): version_string = f"%(prog)s {__version__}\n" + \ f"{requests.__description__}: {requests.__version__}\n" + \ f"Python: {platform.python_version()}" parser = ArgumentParser(formatter_class=RawDescriptionHelpFormatter, description=f"Maigret v{__version__}" ) parser.add_argument("--version", action="version", version=version_string, help="Display version information and dependencies." ) parser.add_argument("--info", action="store_true", dest="info", default=False, help="Display service information." ) parser.add_argument("--verbose", "-v", action="store_true", dest="verbose", default=False, help="Display extra information and metrics." ) parser.add_argument("-d", "--debug", action="store_true", dest="debug", default=False, help="Saving debugging information and sites responses in debug.txt." ) parser.add_argument("--folderoutput", "-fo", dest="folderoutput", default="reports", help="If using multiple usernames, the output of the results will be saved to this folder." ) parser.add_argument("--csv", action="store_true", dest="csv", default=False, help="Create Comma-Separated Values (CSV) File." ) parser.add_argument("--html", action="store_true", dest="html", default=False, help="Create HTML report file." ) parser.add_argument("--site", action="append", metavar='SITE_NAME', dest="site_list", default=[], help="Limit analysis to just the listed sites (use several times to specify more than one)" ) parser.add_argument("--proxy", "-p", metavar='PROXY_URL', action="store", dest="proxy", default=None, help="Make requests over a proxy. e.g. socks5://127.0.0.1:1080" ) parser.add_argument("--json", "-j", metavar="JSON_FILE", dest="json_file", default=None, help="Load data from a JSON file or an online, valid, JSON file.") parser.add_argument("--timeout", action="store", metavar='TIMEOUT', dest="timeout", type=timeout_check, default=10, help="Time (in seconds) to wait for response to requests." "Default timeout of 10.0s." "A longer timeout will be more likely to get results from slow sites." "On the other hand, this may cause a long delay to gather all results." ) parser.add_argument("--top-sites", action="store", default=500, type=int, help="Count of sites for checking ranked by Alexa Top (default: 500)." ) parser.add_argument("--print-not-found", action="store_true", dest="print_not_found", default=False, help="Print sites where the username was not found." ) parser.add_argument("--print-errors", action="store_true", dest="print_check_errors", default=False, help="Print errors messages: connection, captcha, site country ban, etc." ) parser.add_argument("--no-color", action="store_true", dest="no_color", default=False, help="Don't color terminal output" ) parser.add_argument("--browse", "-b", action="store_true", dest="browse", default=False, help="Browse to all results on default bowser." ) parser.add_argument("--no-recursion", action="store_true", dest="disable_recursive_search", default=False, help="Disable parsing pages for other usernames and recursive search by them." ) parser.add_argument("--self-check", action="store_true", default=False, help="Do self check for sites and database and disable non-working ones." ) parser.add_argument("--use-disabled-sites", action="store_true", default=False, help="Use disabled sites to search (may cause many false positives)." ) parser.add_argument("--parse", dest="parse_url", default='', help="Parse page by URL and extract username and IDs to use for search." 
) parser.add_argument("username", nargs='+', metavar='USERNAMES', action="store", help="One or more usernames to check with social networks." ) parser.add_argument("--tags", dest="tags", default='', help="Specify tags of sites." ) parser.add_argument("-x","--xmind", action="store_true", dest="xmind", default=False, help="Generate an xmind 8 mindmap" ) parser.add_argument("-P", "--pdf", action="store_true", dest="pdf", default=False, help="Generate a pdf report" ) args = parser.parse_args() # Logging log_level = logging.ERROR logging.basicConfig( format='[%(filename)s:%(lineno)d] %(levelname)-3s %(asctime)s %(message)s', datefmt='%H:%M:%S', level=log_level ) if args.debug: log_level = logging.DEBUG elif args.info: log_level = logging.INFO elif args.verbose: log_level = logging.WARNING logger = logging.getLogger('maigret') logger.setLevel(log_level) # Usernames initial list usernames = { u: 'username' for u in args.username if u not in ['-'] } recursive_search_enabled = not args.disable_recursive_search # Make prompts if args.proxy is not None: print("Using the proxy: " + args.proxy) if args.parse_url: page, _ = parse(args.parse_url, cookies_str='') info = extract(page) text = 'Extracted ID data from webpage: ' + ', '.join([f'{a}: {b}' for a, b in info.items()]) print(text) for k, v in info.items(): if 'username' in k: usernames[v] = 'username' if k in supported_recursive_search_ids: usernames[v] = k if args.tags: args.tags = list(set(str(args.tags).split(','))) if args.json_file is None: args.json_file = \ os.path.join(os.path.dirname(os.path.realpath(__file__)), "resources/data.json" ) if args.top_sites == 0: args.top_sites = sys.maxsize # Create object with all information about sites we are aware of. try: db = MaigretDatabase().load_from_file(args.json_file) site_data = db.ranked_sites_dict(top=args.top_sites, tags=args.tags, names=args.site_list) except Exception as error: print(f"ERROR: {error}") sys.exit(1) # Database self-checking if args.self_check: print('Maigret sites database self-checking...') await self_check(db, site_data, logger) if input('Do you want to save changes permanently? [yYnN]\n').lower() == 'y': db.save_to_file(args.json_file) print('Database was successfully updated.') else: print('Updates will be applied only for current search session.') # Database consistency enabled_count = len(list(filter(lambda x: not x.disabled, site_data.values()))) print(f'Sites in database, enabled/total: {enabled_count}/{len(site_data)}') if not enabled_count: print('No sites to check, exiting!') sys.exit(2) if usernames == ['-']: # magic params to exit after init print('No usernames to check, exiting.') sys.exit(0) # Create notify object for query results. 
query_notify = QueryNotifyPrint(result=None, verbose=args.verbose, print_found_only=not args.print_not_found, skip_check_errors=not args.print_check_errors, color=not args.no_color) already_checked = set() general_results = [] while usernames: username, id_type = list(usernames.items())[0] del usernames[username] if username.lower() in already_checked: continue else: already_checked.add(username.lower()) # check for characters do not supported by sites generally found_unsupported_chars = set(unsupported_characters).intersection(set(username)) if found_unsupported_chars: pretty_chars_str = ','.join(map(lambda s: f'"{s}"', found_unsupported_chars)) print(f'Found unsupported URL characters: {pretty_chars_str}, skip search by username "{username}"') continue results = await maigret(username, dict(site_data), query_notify, proxy=args.proxy, timeout=args.timeout, recursive_search=recursive_search_enabled, id_type=id_type, tags=args.tags, debug=args.verbose, logger=logger, forced=args.use_disabled_sites, ) general_results.append((username, id_type, results)) if args.folderoutput: # The usernames results should be stored in a targeted folder. # If the folder doesn't exist, create it first os.makedirs(args.folderoutput, exist_ok=True) result_path = os.path.join(args.folderoutput, f"{username}.") else: result_path = os.path.join("reports", f"{username}.") if args.xmind: genxmindfile(result_path+"xmind", username, results) with open(result_path+"txt", "w", encoding="utf-8") as file: exists_counter = 0 for website_name in results: dictionary = results[website_name] # TODO: fix no site data issue if not dictionary: continue new_usernames = dictionary.get('ids_usernames') if new_usernames: for u, utype in new_usernames.items(): usernames[u] = utype if dictionary.get("status").status == QueryStatus.CLAIMED: exists_counter += 1 file.write(dictionary["url_user"] + "\n") file.write(f"Total Websites Username Detected On : {exists_counter}") file.close() if args.csv: save_csv_report(username, results, result_path+"csv") pathPDF = None pathHTML = None if args.html: pathHTML = result_path+"html" if args.pdf: pathPDF = result_path+"pdf" if pathPDF or pathHTML: save_html_pdf_report(general_results,pathHTML,pathPDF) db.save_to_file(args.json_file)
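A minimal entry-point sketch for launching the asynchronous main() coroutines defined above; the run() wrapper name is an assumption for illustration and is not part of the original code.

import asyncio
import sys

def run():
    # Start the async CLI and exit cleanly on Ctrl+C
    try:
        asyncio.run(main())
    except KeyboardInterrupt:
        print('Maigret is interrupted.')
        sys.exit(1)

if __name__ == '__main__':
    run()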