def googleit(self, app, t_multiproc, dork, page):
    """Run one page of a Google-dork search via goop and collect the URLs.

    Appends every result URL (optionally URL-decoded) to
    ``self.results[dork]`` and bumps the shared progress counter in
    *t_multiproc* for the caller's progress display.

    Parameters:
        app         -- app object; config read from app.config['googledorks']
        t_multiproc -- dict with 'n_current'/'n_total' progress counters
        dork        -- the dork/query string to search
        page        -- result page number passed through to goop.search
    """
    # Light throttle between queries to avoid hammering the backend.
    time.sleep(0.2)
    sys.stdout.write('progress: %d/%d\r' % (t_multiproc['n_current'], t_multiproc['n_total']))
    t_multiproc['n_current'] = t_multiproc['n_current'] + 1

    search_results = goop.search(dork, app.config['googledorks']['fb_cookie'], page=page)

    for key in search_results:
        link = search_results[key]['url']
        if app.config['googledorks']['urldecode']:
            link = urllib.parse.unquote(link)
        # setdefault replaces the non-idiomatic "if not dork in ..." init dance.
        self.results.setdefault(dork, []).append(link)
def search(url, dork, dato):
    """Print (and optionally persist) every result URL for a dork query.

    Iterates peticiones() pages of goop results for *url*/*dork*/*dato*;
    when the --output option was supplied, each URL is also written out
    via output().
    """
    msgResult(url, dato)
    separador()
    for page in range(peticiones()):
        result = goop.search(url, dork, dato, cookie, page=page, full=True)
        for each in result:
            # Fix: compare to None with "is not", never "!=".
            if args.output is not None:
                output(result[each]['url'])
            print('%s' % (result[each]['url']))
def doMultiSearch(page):
    """Fetch one goop result page and print its URLs, with an early bail-out.

    Counts how many of the preceding pages (page-5 .. page-2) recorded zero
    hits in gg_history; once 3 or more did, the search is considered
    exhausted and every remaining page through end_page is flagged as empty
    instead of being fetched.
    """
    recent_empty = sum(
        1 for idx in range(page - 5, page - 1)
        if idx != page and idx in gg_history and gg_history[idx] == 0
    )
    if recent_empty >= 3:
        # Exhausted: mark all remaining pages as empty and stop.
        for idx in range(page, end_page):
            gg_history[idx] = 0
        return
    hits = goop.search(gg_search, fb_cookie, page=page)
    gg_history[page] = len(hits)
    for key in hits:
        print(hits[key]['url'])
def doMultiSearch( term, numbers_only, urldecode, page ):
    """Search one goop result page for *term* and print the URLs found.

    Gives up early — marking every remaining page through end_page as empty
    in page_history — once 3+ of the preceding pages came back with zero
    results.

    NOTE(review): when numbers_only is true nothing is printed here at all;
    presumably number extraction happens elsewhere — confirm with the caller.
    """
    recent_empty = 0
    for idx in range(page - 5, page - 1):
        if idx != page and idx in page_history and page_history[idx] == 0:
            recent_empty += 1
    if recent_empty >= 3:
        # Search exhausted: flag the rest of the pages as empty.
        for idx in range(page, end_page):
            page_history[idx] = 0
        return
    hits = goop.search(term, fb_cookie, page=page)
    page_history[page] = len(hits)
    if numbers_only:
        return
    for key in hits:
        url = hits[key]['url']
        print(urllib.parse.unquote(url) if urldecode else url)
def doMultiSearch( page ):
    """Fetch one goop result page and collect new pseudos from its URLs.

    Extracts a pseudo from each result URL via mod.extractPseudoFromUrl();
    unseen, non-empty pseudos are recorded in t_history and their result
    entries appended to t_results. Once 3+ of the preceding pages recorded
    zero hits in gg_history, the search is abandoned and all remaining
    pages through end_page are flagged as empty.
    """
    zero_result = 0
    for i in range(page - 5, page - 1):
        if i != page and i in gg_history and gg_history[i] == 0:
            zero_result = zero_result + 1
    if zero_result < 3:
        s_results = goop.search( gg_search, fb_cookie, page=page )
        sys.stdout.write( '[+] grabbing page %d/%d... (%d)\n' % (page,end_page,len(s_results)) )
        gg_history[page] = len(s_results)
        for i in s_results:
            pseudo = mod.extractPseudoFromUrl( s_results[i]['url'] )
            # Idiom fix: truthiness test + "not in" instead of
            # "len(pseudo) and not pseudo in ...".
            # NOTE: t_history is a list, so membership is O(n) per lookup;
            # left as-is since the global is shared with other code.
            if pseudo and pseudo not in t_history:
                t_history.append( pseudo )
                t_results.append( s_results[i] )
    else:
        for i in range(page, end_page):
            gg_history[i] = 0
import sys
import json
from goop import goop

# ANSI colour escape codes for terminal output.
green = '\033[92m'
white = '\033[97m'
yellow = '\033[93m'
end = '\033[0m'

# Replace with a valid Facebook session cookie before running.
cookie = '<your facebook cookie>'

# Robustness fix: fail with a clear usage message instead of an
# IndexError/ValueError traceback on missing or non-integer arguments.
if len(sys.argv) < 3 or not sys.argv[2].isdigit():
    sys.exit('usage: %s <query> <n_pages>' % sys.argv[0])

for page in range(int(sys.argv[2])):
    result = goop.search(sys.argv[1], cookie, page=page, full=True)
    for each in result:
        print('''%s%s\n%s%s\n%s%s%s\n''' % (green, result[each]['text'], yellow, result[each]['url'], white, result[each]['summary'], end))