def presentResult(predictions):
    """Print the most frequent Google prediction, or a failure message.

    Args:
        predictions: list of prediction strings gathered from the
            reverse-image searches; may be empty.
    """
    if predictions:
        counts = Counter(predictions)
        console.section("Result")
        # most_common(1) is never empty for a non-empty Counter, so the
        # old 'None' fallback branch was dead code.
        (most_common_str, _) = counts.most_common(1)[0]
        console.task("Google says it could be: {0}".format(most_common_str))
    else:
        console.failure("No predictions found")
def getValidLinksAndImg(self, label):
    """Return the profile URLs and image URLs whose faces match *label*.

    Requires a prior loadKnown(label) call to have populated
    self.known_face_encodings.

    Returns:
        (valid_url, valid_img): parallel lists — the profile link and the
        image link at the same index belong together.
    """
    if not self.known_face_encodings:
        console.failure('No Face Encodings found!')
        console.failure(
            'Did you call `loadKnown(label)` before calling this method?')
        # BUGFIX: callers unpack two values (links, imgs); returning a
        # single [] raised "not enough values to unpack".
        return [], []
    valid_url = []
    valid_img = []
    for index in self.constructIndexes(label):
        valid_url.append(self.profile_list[index])
        valid_img.append(self.profile_img[index])
    return valid_url, valid_img
def downloadCSV(self):
    """Wait for the results page to finish loading, then click the CSV download.

    Polls the page source until the "Loading" marker disappears, clicks the
    '#dltop' element, and closes the browser in every case. A missing or
    unclickable element is treated as "no results".
    """
    console.task('Waiting for page to finish')
    while "Loading" in self.driver.page_source:
        sys.stdout.write(".")
        sys.stdout.flush()
        time.sleep(1)
    print('')
    console.task('Downloading CSV')
    time.sleep(2)  # give the page a moment to settle before clicking
    try:
        dl = self.driver.find_elements_by_xpath('//*[@id="dltop"]')[0]
        dl.click()
    except Exception:
        # BUGFIX: bare `except:` also swallowed KeyboardInterrupt and
        # SystemExit; Exception still covers IndexError (no element found)
        # and WebDriver click errors.
        console.failure('No Results...')
    self.driver.close()
def _printProfiles(profile_links):
    # Print the matched profile links under a Result header.
    console.section('Result')
    console.task('Found the following Profiles:')
    for link in profile_links:
        console.subtask(link)


def main(skipFB=False, skipY=False, FBUrls=None, jsonRep=None):
    """Entry point: find a person on FB, face-match the profiles, reverse
    image search the matches, validate Instagram hits and build a report.

    Args:
        skipFB: skip the interactive Facebook name search and use FBUrls.
        skipY: currently unused — the Yandex search is disabled (TODO below).
        FBUrls: optional list of Facebook profile links (used with skipFB).
        jsonRep: filename for a JSON report; when falsy a PDF report is made.
    """
    # BUGFIX: `FBUrls=[]` was a mutable default argument shared across calls.
    if FBUrls is None:
        FBUrls = []
    if not skipFB:
        # collect user input
        console.prompt('Enter the persons name to find on FB: ')
        name = input('')
        while not name:
            console.prompt('Enter the persons name to find on FB: ')
            name = input('')
    else:
        console.task('Skipping FB Search')
        name = "Unknown"
    console.prompt(
        'How many jitters, higher is better [max 100] (default=70): ')
    num_jitters = input('')
    if not num_jitters:
        console.task('Settings jitters to 70')
        num_jitters = 70
    try:
        num_jitters = int(num_jitters)
    except ValueError:
        # ROBUSTNESS: non-numeric input previously crashed with ValueError.
        console.task('Settings jitters to 70')
        num_jitters = 70
    if num_jitters > 100:
        console.subfailure('Dude wtf?!')
        num_jitters = 100
        console.subfailure('Using 100 jitters...')
    if not skipFB:
        # grab profile urls
        f = FBGrabber(name)
        f.grabData()
        # do face recognition on those profile images
        r = FaceRecog(f.getProfileLinks(), f.getProfileImages(),
                      num_jitters=num_jitters)
        r.loadKnown(name)
        profile_links, profile_imgs = r.getValidLinksAndImg(name)
        _printProfiles(profile_links)
    elif FBUrls:
        f = FBProfileGrabber(FBUrls)
        img_urls = f.grabLinks()
        # FBURLS are our profile links synchron with img_urls
        # so FBURLS[0] <=> img_urls[0]
        r = FaceRecog(FBUrls, img_urls, num_jitters=num_jitters)
        r.loadKnown(name)
        profile_links, profile_imgs = r.getValidLinksAndImg(name)
        _printProfiles(profile_links)
    else:
        profile_links = []
        profile_imgs = []
    # google reverse image search on profile pics
    g = GoogleGrabber()
    for img in profile_imgs:
        g.collectLinks(img)
    # google reverse image search on reference pic
    g.collectLinksLocal()
    rev_links, predictions = g.finish()
    # TODO: Fix yandex search
    # if not skipY:
    if False:
        yandex = YandexGrabber()
        for img in profile_imgs:
            yandex.collectLinks(img)
        yandex.collectLinksLocal()
        # add to rev_links
        for e in yandex.finish():
            rev_links.append(e)
    else:
        console.task('Skipping Yandex Search')
    rev_links = list(set(rev_links))
    instaNames = parseInstaUsername(filterInstaLinks(rev_links))
    validatedInstaNames = []
    console.section("Validating Instagram Profiles")
    for un in instaNames:
        console.task("Validating Profile: '{0}'".format(un))
        if validateInstaUser(un, num_jitters):
            validatedInstaNames.append(un)
    # BUGFIX: copy — the old alias appended Instagram links into profile_imgs.
    raider_img_list = list(profile_imgs)
    for v in validatedInstaNames:
        for li in getInstaLinks(v):
            raider_img_list.append(li)
    if not raider_img_list:
        console.failure('No Links found...')
    else:
        console.task('RIP Imageraider')
    rev_links = list(set(rev_links))
    predictions = list(set(predictions))
    console.section('Links')
    print(rev_links)
    console.section('Predictions')
    try:
        predictions = [x.lower() for x in predictions]
    except AttributeError:
        # non-string predictions cannot be lower-cased; discard them
        predictions = []
    print(predictions)
    presentResult(predictions)
    for pl in profile_links:
        rev_links.append(pl)
    rev_links = list(set(rev_links))
    # estimate age
    ageEstimator = PictrievGrabber()
    if validatedInstaNames:
        for v in validatedInstaNames:
            l = getInstaLinks(v)
            if len(l) >= cfg.instaLimit():
                l = l[:cfg.instaLimit()]
            for li in l:
                ageEstimator.collectAges(li)
        age = ageEstimator.finish()
    else:
        console.failure('No Instagram Images to upload...')
        # ageEstimator.finish()
        age = "Unknown"
    if jsonRep:
        console.section("Dumping JSON Report")
        makeJSONReport(name, rev_links, predictions, validatedInstaNames,
                       age, jsonRep)
    else:
        console.section("Creating PDF Report")
        makeReport(name, rev_links, predictions, validatedInstaNames, age)
    # clear the imageraider scratch directory
    p = os.path.join(tempfile.gettempdir(), 'imageraider')
    if os.path.isdir(p):
        for path in Path(p).glob('**/*'):
            os.remove(str(path))
    console.task("KTHXBYE")
'--json', nargs='?', help='Generates a json report. Specify a Filename') parser.add_argument( '-fbList', '--facebookList', nargs='?', help= "A file which contains Links to Facebook Profiles. '--skipfb' options must be enabled to use this" ) args = parser.parse_args() if args.json: jsonRepFile = args.json if os.path.isfile(jsonRepFile): console.failure("File '{}' already exists".format(jsonRepFile)) sys.exit(-1) else: jsonRepFile = None if args.facebookList and args.skipfb: if os.path.isfile(args.facebookList): with open(args.facebookList, 'r') as f: content = f.readlines() content = [x.strip() for x in content] #TODO: fix yandex #main(skipFB=args.skipfb, skipY=args.skipyandex, FBUrls=content) main(skipFB=args.skipfb, skipY=None, FBUrls=content, jsonRep=jsonRepFile)
help='Generates a json report. Specify a Filename') parser.add_argument( '-fbList', '--facebookList', nargs='?', help= "A file which contains Links to Facebook Profiles. '--skipfb' options must be enabled to use this" ) args = parser.parse_args() if args.docker: aDocker = args.docker if args.name: aName = args.name else: console.failure("Please supply a name using the --name flag") sys.exit(-2) else: aName = None aDocker = False if args.json: jsonRepFile = args.json if os.path.isfile(jsonRepFile): console.failure("File '{}' already exists".format(jsonRepFile)) sys.exit(-1) else: jsonRepFile = None if args.facebookList and args.skipfb: if os.path.isfile(args.facebookList):
help='Skips the ImageRaider Reverse Search') #parser.add_argument('-sY', '--skipyandex', action='store_true', help='Skips the Yandex Reverse Search') parser.add_argument( '-fbList', '--facebookList', nargs='?', help= "A file which contains Links to Facebook Profiles. '--skipfb' options must be enabled to use this" ) args = parser.parse_args() if args.facebookList and args.skipfb: if os.path.isfile(args.facebookList): with open(args.facebookList, 'r') as f: content = f.readlines() content = [x.strip() for x in content] #TODO: fix yandex #main(skipFB=args.skipfb, skipIR=args.skipir, skipY=args.skipyandex, FBUrls=content) main(skipFB=args.skipfb, skipIR=args.skipir, skipY=None, FBUrls=content) else: console.failure("File '{}' does not exist".format( args.facebookList)) sys.exit(-1) else: #TODO: fix yandex #main(skipFB=args.skipfb, skipIR=args.skipir, skipY=args.skipyandex, FBUrls=[]) main(skipFB=args.skipfb, skipIR=args.skipir, skipY=None, FBUrls=[])
def main():
    """Legacy entry point: FB search, face matching, reverse image search,
    ImageRaider upload and PDF report (no CLI flags, no JSON output)."""
    # collect user input
    console.prompt('Enter the persons name to find on FB: ')
    name = input('')
    console.prompt('How many jitters, higher is better [max 100]: ')
    num_jitters = input('')
    try:
        num_jitters = int(num_jitters)
    except ValueError:
        # ROBUSTNESS: empty/non-numeric input previously crashed here;
        # fall back to the default used elsewhere in the project.
        num_jitters = 70
    if num_jitters > 100:
        console.subfailure('Dude wtf?!')
        num_jitters = 100
        console.subfailure('Using 100 jitters...')
    # grab profile urls
    f = FBGrabber(name)
    f.grabData()
    # do face recognition on those profile images
    r = FaceRecog(f.getProfileLinks(), f.getProfileImages(),
                  num_jitters=num_jitters)
    r.loadKnown(name)
    profile_links, profile_imgs = r.getValidLinksAndImg(name)
    console.section('Result')
    console.task('Found the following Profiles:')
    for link in profile_links:
        console.subtask(link)
    # google reverse image search on profile pics
    g = GoogleGrabber()
    for img in profile_imgs:
        g.collectLinks(img)
    # google reverse image search on reference pic
    g.collectLinksLocal()
    rev_links, predictions = g.finish()
    yandex = YandexGrabber()
    for img in profile_imgs:
        yandex.collectLinks(img)
    # add to rev_links
    for e in yandex.finish():
        rev_links.append(e)
    rev_links = list(set(rev_links))
    instaNames = parseInstaUsername(filterInstaLinks(rev_links))
    validatedInstaNames = []
    console.section("Validating Instagram Profiles")
    for un in instaNames:
        console.task("Validating Profile: '{0}'".format(un))
        if validateInstaUser(un, num_jitters):
            validatedInstaNames.append(un)
    # BUGFIX: copy — the old alias appended Instagram links into profile_imgs.
    raider_img_list = list(profile_imgs)
    for v in validatedInstaNames:
        for li in getInstaLinks(v):
            raider_img_list.append(li)
    if not raider_img_list:
        # BUGFIX: message typo "founds" -> "found"
        console.failure('No Links found...')
    else:
        raider = ImageRaiderGrabber()
        raider.insertImageLinks(raider_img_list)
        raider.downloadCSV()
        raider_links = raider.processCSV()
        for raider_link in raider_links:
            rev_links.append(raider_link)
    rev_links = list(set(rev_links))
    predictions = list(set(predictions))
    console.section('Links')
    print(rev_links)
    console.section('Predictions')
    try:
        predictions = [x.lower() for x in predictions]
    except AttributeError:
        # non-string predictions cannot be lower-cased; discard them
        predictions = []
    print(predictions)
    presentResult(predictions)
    console.section("Creating PDF Report")
    makeReport(name, rev_links, predictions, validatedInstaNames)
    # clear the imageraider scratch directory
    p = os.path.join(tempfile.gettempdir(), 'imageraider')
    if os.path.isdir(p):
        for path in Path(p).glob('**/*'):
            os.remove(str(path))
    console.task("KTHXBYE")