def sending(self, url):
    """Fetch a cvedetails result page, parse it, and collect results
    across all pagination pages.

    Returns a list of result items, False when the first page parses to
    nothing, or None when the site reports no entry for the query.
    """
    r = sender.send_get(self.options, url, cookies=None)
    results = []
    if r:
        response = r.text
        if 'class="errormsg"' in response:
            utils.print_bad("No entry found for: {0}".format(self.query))
            return
        soup = utils.soup(response)
        result = self.analyze(soup)
        if not result:
            return False
        # BUG FIX: first-page results were never added to the output before
        results.extend(result)
        # checking if we have more than one pages
        pages = self.check_pages(soup)
        if pages:
            utils.print_info("Detect pages {0} for query".format(str(len(pages))))
            for page in pages:
                page_url = self.baseURL + page
                r1 = sender.send_get(self.options, page_url, cookies=None)
                if r1:
                    response1 = r1.text
                    soup1 = utils.soup(response1)
                    # BUG FIX: the per-page analyze() result was discarded
                    # and the FIRST page's result re-appended once per page
                    page_result = self.analyze(soup1)
                    if page_result:
                        results.extend(page_result)
    return results
def optimize(self, query):
    """Expand a Shodan query into per-city sub-queries via the summary page.

    When the query already contains a country filter, only city links are
    collected; otherwise countries are discovered first and a per-country
    summary request is made for each. Returns a list of
    {'url', 'city', 'country'} dicts, or False when the summary page
    cannot be fetched.
    """
    url = 'https://www.shodan.io/search/_summary?query={0}'.format(query)
    utils.print_good("Analyze first page for more result")
    r = sender.send_get(self.options, url, self.cookies)
    if r.status_code == 200:
        soup = utils.soup(r.text)
    else:
        return False
    query_by_cities = []
    # check if query have country filter or not
    if 'country' in query:
        links = soup.find_all("a")
        country = utils.get_country_code(utils.url_decode(query))
        for link in links:
            href = link.get('href')
            # BUG FIX: anchors without an href made "'city' in None" raise
            if href and 'city' in href:
                item = {
                    'url': href,
                    'city': link.text,
                    'country': country
                }
                utils.print_debug(self.options, item)
                query_by_cities.append(item)
    else:
        links = soup.find_all("a")
        countries = []
        for link in links:
            href = link.get('href')
            if href and 'country' in href:
                countries.append(
                    utils.get_country_code(utils.url_decode(href)))
        utils.print_debug(self.options, countries)
        for country in countries:
            # sending request again to get city
            country_query = utils.url_encode(' country:"{0}"'.format(country))
            url = 'https://www.shodan.io/search/_summary?query={0}{1}'.format(
                query, country_query)
            r1 = sender.send_get(self.options, url, self.cookies)
            utils.random_sleep(5, 8)
            utils.print_info(
                "Sleep for couple seconds because Shodan server is really strict")
            if r1.status_code == 200:
                soup1 = utils.soup(r1.text)
                for link in soup1.find_all("a"):
                    href = link.get('href')
                    if href and 'city' in href:
                        item = {
                            'url': href,
                            'city': link.text,
                            'country': country
                        }
                        utils.print_debug(self.options, item)
                        query_by_cities.append(item)
    utils.print_debug(self.options, query_by_cities)
    return query_by_cities
def github(self, url):
    """Parse a GitHub markdown write-up page for links matching the query.

    Returns a list of result dicts (possibly empty).
    """
    result = []
    r = sender.send_get(self.options, url, cookies=None)
    if r.status_code == 200:
        response = r.text
        # store raw json
        raw_file_path = self.options[
            'raw'] + '/write_up_github_{0}.html'.format(
                self.query.replace(' ', '_'))
        if self.options.get('store_content'):
            utils.just_write(raw_file_path, response)
            utils.print_debug(
                self.options,
                "Writing raw response to: {0}".format(raw_file_path))
        soup = utils.soup(response)
        # Custom here
        bodies = soup.find_all('article', 'markdown-body')
        if not bodies:
            # BUG FIX: unconditional [0] raised IndexError when the page
            # layout changed or the article body was missing
            return result
        links = bodies[0].findChildren('a')
        for link in links:
            if self.query.lower() in link.text.lower():
                item = {
                    'Query': self.query,
                    'Title': link.text,
                    'Content': link.text,
                    'External_url': link.get('href'),
                    'Source': url,
                    'Warning': 'Write-Up',
                    'Raw': raw_file_path
                }
                utils.print_debug(self.options, item)
                result.append(item)
    return result
def do_login(self):
    """Log in to Censys with stored credentials and persist the session.

    Returns the session cookie value on success, False otherwise.
    """
    utils.print_info("Reauthen using credentials from: {0}".format(
        self.options.get('config')))
    login_url = 'https://censys.io/login'
    r = sender.send_get(self.options, login_url, cookies=None)
    if r.status_code == 200:
        cookies = r.cookies
        form = utils.soup(r.text).find_all("form")
        csrf_token = None
        if form:
            inputs = form[0].findChildren('input')
            for tag in inputs:
                if tag.get('name') == 'csrf_token':
                    csrf_token = tag.get('value')
        if csrf_token is None:
            # BUG FIX: csrf_token used to be referenced without being
            # initialized -> NameError when the form or token was missing
            return False
        username, password = utils.get_cred(self.options, source='censys')
        data = {"csrf_token": csrf_token,
                "came_from": "/",
                "from_censys_owned_external": "False",
                "login": username,
                "password": password}
        really_login_url = 'https://censys.io/login'
        r1 = sender.send_post(
            self.options, really_login_url, cookies, data, follow=False)
        if r1.status_code == 302:
            # pick the first non-empty cookie from the redirect response
            for item in r1.cookies.items():
                if item[1]:
                    censys_cookies = item[1]
                    utils.set_session(
                        self.options, censys_cookies, source='censys')
                    return censys_cookies
    return False
def do_login(self):
    """Log in to Shodan with stored credentials and persist the session.

    Returns the 'polito' session cookie value on success, False otherwise.
    """
    utils.print_info("Reauthen using credentials from: {0}".format(
        self.options.get('config')))
    login_url = 'https://account.shodan.io/login'
    r = sender.send_get(self.options, login_url, cookies=None)
    if r.status_code == 200:
        cookies = r.cookies
        form = utils.soup(r.text).find_all("form")
        csrf_token = None
        if form:
            inputs = form[0].findChildren('input')
            for tag in inputs:
                if tag.get('name') == 'csrf_token':
                    csrf_token = tag.get('value')
        if csrf_token is None:
            # no CSRF token found -- cannot submit the login form
            return False
        username, password = utils.get_cred(self.options, source='shodan')
        data = {"username": username,
                "password": password,
                "grant_type": "password",
                "continue": "https://www.shodan.io/",
                "csrf_token": csrf_token,
                "login_submit": "Login"}
        really_login_url = 'https://account.shodan.io/login'
        r1 = sender.send_post(
            self.options, really_login_url, cookies, data, follow=False)
        if r1.status_code == 302:
            # BUG FIX: r1.cookies.items() yields (name, value) tuples which
            # have no .get() method -- the old loop raised AttributeError;
            # look the session cookie up by name on the jar instead
            shodan_cookies = r1.cookies.get('polito')
            if shodan_cookies:
                utils.set_session(
                    self.options, shodan_cookies, source='shodan')
                return shodan_cookies
    return False
def sending(self, url):
    """Send a GET request to fofa, parse the page, and follow pagination.

    A sender failure (e.g. session timeout) is treated as an empty
    response instead of crashing the whole run.
    """
    # sending request and return the response
    utils.print_debug(self.options, url)
    # catch error when session timeout
    try:
        r = sender.send_get(self.options, url, self.cookies)
    except Exception:
        # BUG FIX: bare "except:" also swallowed SystemExit and
        # KeyboardInterrupt, making the tool impossible to Ctrl-C here
        r = False
    if r:
        response = r.text
        if self.options['store_content']:
            ts = str(int(time.time()))
            raw_file = self.options['raw'] + \
                "/fofa/{0}_{1}".format(utils.url_encode(
                    url.replace(self.base_url, '')).replace('/', '_'), ts)
            utils.just_write(raw_file, response)
        soup = utils.soup(response)
        self.analyze(soup)
        # checking if there is many pages or not
        page_num = self.check_pages(soup)
        # if you're log in and have many results
        if page_num and self.logged_in and not self.options[
                'disable_pages']:
            utils.print_info("Continue grab more pages")
            self.pages(page_num)
def tweet(self, tag):
    """Scrape Twitter search results for '#<query> #<tag>' tweets.

    Returns a list of result dicts (empty when nothing matches).
    """
    results = []
    query = '#{0} #{1}'.format(self.query, tag)
    # @TODO improve by increase the the position
    url = 'https://twitter.com/search?vertical=default&q={0}&src=unkn'.format(
        utils.url_encode(query))
    r = sender.send_get(self.options, url, cookies=None)
    if r.status_code == 200:
        response = r.text
        # store raw json
        raw_file_path = self.options['raw'] + '/tweets_{1}_{0}.html'.format(
            self.query.replace(' ', '_'), tag)
        if self.options.get('store_content'):
            utils.just_write(raw_file_path, response)
            utils.print_debug(
                self.options,
                "Writing raw response to: {0}".format(raw_file_path))
        soup = utils.soup(response)
        # Custom here
        divs = soup.find_all('div', 'original-tweet')
        for div in divs:
            content = div.findChildren('p', 'TweetTextSize')[0].text.strip()
            # BUG FIX: anchors without an href made "'t.co' in None" raise
            # TypeError; check the href exists before substring-testing it
            links = [
                x.get('data-expanded-url') for x in div.findChildren('a')
                if x.get('href') and 't.co' in x.get('href')
            ]
            if len(links) == 0:
                external_url = 'N/A'
            else:
                external_url = '|'.join([str(x) for x in links])
            item = {
                'Query': self.query,
                'Title': query,
                'Content': content,
                'External_url': external_url,
                'Source': url,
                'Warning': 'Tweet',
                'Raw': raw_file_path
            }
            utils.print_debug(self.options, item)
            results.append(item)
    return results
def get_num_pages(self, url):
    """Estimate how many result pages exist for a Shodan search URL.

    Reads the total-results counter from the summary endpoint and divides
    by Shodan's 10-results-per-page. Returns the page count as a string,
    or False on failure.
    """
    summary_url = 'https://www.shodan.io/search/_summary?{0}'.format(
        utils.get_query(url))
    r = sender.send_get(self.options, summary_url, self.cookies)
    if r.status_code == 200:
        soup = utils.soup(r.text)
        totals = soup.find_all('div', 'bignumber')
        if not totals:
            # layout changed or no results counter present
            return False
        results_total = totals[0].text.replace(',', '')
        # BUG FIX: plain truncating division dropped the final partial page
        # (e.g. 15 results -> 1 page instead of 2); use ceiling division
        page_num = str(-(-int(results_total) // 10))
        utils.print_good("Detect posible {0} pages per {1} result".format(
            page_num, results_total))
        return page_num
    return False
def sending(self, url):
    """Request a Shodan search page, optionally archive the raw HTML,
    parse it, and walk the remaining result pages when authenticated."""
    # sending request and return the response
    resp = sender.send_get(self.options, url, self.cookies)
    if not resp:
        return
    html = resp.text
    if self.options['store_content']:
        stamp = str(int(time.time()))
        slug = utils.url_encode(
            url.replace(self.base_url, '')).replace('/', '_')
        raw_file = self.options['raw'] + "/shodan/{0}_{1}".format(slug, stamp)
        utils.just_write(raw_file, html)
    self.analyze(utils.soup(html))
    # checking if there is many pages or not
    if self.logged_in and not self.options['disable_pages']:
        utils.print_info("Continue grab more pages")
        self.pages(self.get_num_pages(url))
def pages(self, page_num):
    """Walk Censys result pages 2..page_num, parsing each one until the
    rate-limit alert banner appears."""
    last_page = int(page_num)
    for page in range(2, last_page + 1):
        utils.print_info("Get more result from page: {0}".format(str(page)))
        utils.random_sleep(1, 2)
        encoded = utils.url_encode(self.options['censys_query'])
        page_url = 'https://censys.io/ipv4/_search?q={1}&page={0}'.format(
            str(page), encoded)
        resp = sender.send_get(self.options, page_url, self.cookies)
        if resp.status_code != 200:
            continue
        body = resp.text
        if 'class="alert alert-danger"' in body:
            utils.print_bad("Reach to the limit at page {0}".format(
                str(page)))
            return
        self.analyze(utils.soup(body))
def do_login(self):
    """Log in to fofa (via i.nosec.org CAS) with stored credentials and
    persist the session.

    Returns the '_fofapro_ars_session' cookie value on success, False
    otherwise.
    """
    utils.print_info("Reauthen using credentials from: {0}".format(
        self.options.get('config')))
    login_url = 'https://i.nosec.org/login?service=http%3A%2F%2Ffofa.so%2Fusers%2Fservice'
    r = sender.send_get(self.options, login_url, cookies=None)
    if r.status_code == 200:
        cookies = r.cookies
        form = utils.soup(r.text).find(id="login-form")
        inputs = form.findChildren('input')
        # BUG FIX: initialize tokens so missing form fields fail cleanly
        # instead of raising NameError; the original loop also checked
        # 'authenticity_token' twice
        authenticity_token = None
        lt = None
        for tag in inputs:
            name = tag.get('name')
            if name == 'authenticity_token':
                authenticity_token = tag.get('value')
            elif name == 'lt':
                lt = tag.get('value')
        if authenticity_token is None or lt is None:
            return False
        username, password = utils.get_cred(self.options, source='fofa')
        data = {
            "utf8": "\xe2\x9c\x93",
            "authenticity_token": authenticity_token,
            "lt": lt,
            "service": "http://fofa.so/users/service",
            "username": username,
            "password": password,
            "rememberMe": "1",
            "button": ''
        }
        really_login_url = 'https://i.nosec.org/login'
        r1 = sender.send_post(self.options, really_login_url, cookies, data)
        if r1.status_code == 200:
            fofa_cookie = r1.cookies.get('_fofapro_ars_session')
            utils.set_session(self.options, fofa_cookie, source='fofa')
            return fofa_cookie
    return False
def pages(self, page_num):
    """Iterate fofa result pages 2..page_num, stopping as soon as the
    rate-limit error markup shows up."""
    for page in range(2, int(page_num) + 1):
        utils.print_info("Get more result from page: {0}".format(str(page)))
        encoded = utils.url_encode(
            utils.just_b64_encode(self.options['fofa_query']))
        page_url = 'https://fofa.so/result?page={0}&qbase64={1}'.format(
            str(page), encoded)
        utils.print_debug(self.options, page_url)
        resp = sender.send_get(self.options, page_url, self.cookies)
        if resp.status_code != 200:
            continue
        body = resp.text
        if 'class="error"' in body:
            utils.print_bad("Reach to the limit at page {0}".format(
                str(page)))
            return
        self.analyze(utils.soup(body))
def pages(self, page_num):
    """Walk Shodan result pages 2..page_num with polite random delays,
    stopping when the rate-limit alert appears."""
    for page in range(2, int(page_num) + 1):
        utils.print_info("Sleep for couple seconds because Shodan server is really strict")
        utils.random_sleep(3, 6)
        utils.print_info("Get more result from page: {0}".format(str(page)))
        encoded = utils.url_encode(self.options['shodan_query'])
        page_url = 'https://www.shodan.io/search?query={1}&page={0}'.format(
            str(page), encoded)
        resp = sender.send_get(self.options, page_url, self.cookies)
        if resp.status_code != 200:
            continue
        body = resp.text
        if 'class="alert alert-error text-center"' in body:
            utils.print_bad(
                "Reach to the limit at page {0}".format(str(page)))
            return
        self.analyze(utils.soup(body))
def initial(self):
    """Search cvedetails for the product, then fetch and conclude the
    vulnerability listings for every matching product."""
    product = utils.url_encode(self.query)
    url = 'https://www.cvedetails.com/product-search.php?vendor_id=0&search={0}'.format(
        product)
    # get summary table
    products = []
    r = sender.send_get(self.options, url, cookies=None)
    if r.status_code == 200:
        response = r.text
        if 'class="errormsg"' in response:
            utils.print_bad("No entry found for: {0}".format(self.query))
            return
        summary_table = utils.soup(response).find_all("table", "listtable")
        # <table class = "listtable"
        if summary_table:
            trs = summary_table[0].findChildren('tr')
            if len(trs) <= 1:
                utils.print_bad(
                    "No entry found for: {0}".format(self.query))
                return
            # skip the header row, collect 'See all vulnerabilities' links
            for tr in trs[1:]:
                for td in tr.findChildren('td'):
                    if td.a:
                        title = td.a.get('title')
                        # BUG FIX: anchors without a title attribute made
                        # "'...' in None" raise TypeError
                        if title and 'See all vulnerabilities' in title:
                            products.append(td.a.get('href'))
    final = []
    # if found product and have vulnerabilities, go get it
    if products:
        for url in products:
            results = self.sending(self.baseURL + url)
            if results:
                final.extend(results)
    # write final output
    self.conclude(final)
def optimize(self, query):
    """Query fofa's stats endpoint and collect the per-city search links.

    Returns a list of {'url', 'city'} dicts, or False when the stats page
    cannot be fetched."""
    utils.print_good("Analyze result by country and city for more result")
    # custom headers for stats
    custom_headers = {
        "User-Agent": "Mozilla/5.0 (X11; FreeBSD amd64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/40.0.2214.115 Safari/537.36",
        "Accept": "text/html, application/javascript, application/ecmascript, application/x-ecmascript, */*; q=0.01",
        "Accept-Language": "en-US,en;q=0.5",
        "Accept-Encoding": "gzip, deflate",
        "X-Requested-With": "XMLHttpRequest",
        "Connection": "close"
    }
    stats_url = 'https://fofa.so/search/result_stats?qbase64={0}'.format(query)
    resp = sender.send_get(self.options, stats_url, self.cookies,
                           headers=custom_headers)
    if resp.status_code != 200:
        return False
    # the endpoint returns escaped HTML fragments; unescape before parsing
    cleaned = resp.text.replace('\/', '/').replace('\\"', '"').replace("\'", "'")
    soup = utils.soup(cleaned)
    # custom here
    query_by_cities = [
        {
            'url': self.base_url + link.get('href'),
            'city': link.text,
        }
        for div in soup.find_all("div", "class_sf")
        for link in div.findChildren("a")
    ]
    # utils.print_debug(self.options, query_by_cities)
    return query_by_cities
def optimize(self, query):
    """Break a Censys query down by country using the metadata page.

    Strips any existing country filter from the query, fetches the
    metadata breakdown, and returns a list of {'url', 'country'} dicts,
    or False when the metadata page cannot be fetched.
    """
    utils.print_good("Analyze metadata page for more result")
    raw_query = utils.url_decode(query)
    if 'location.country' in raw_query:
        country = utils.get_country_code(raw_query, source='censys')
        query = raw_query.replace(country, '').replace(
            'AND ' + country, '').replace('and ' + country, '')
    url = 'https://censys.io/ipv4/metadata?q={0}'.format(query)
    r = sender.send_get(self.options, url, self.cookies)
    if r.status_code == 200:
        soup = utils.soup(r.text)
    else:
        return False
    query_by_countries = []
    # check if query have country filter or not
    divs = soup.find_all("div", 'left-table')
    country_tables = []
    for div in divs:
        if 'Country Breakdown' in div.h6.text:
            country_tables = div.find_all('tr')
    for row in country_tables:
        item = {
            'url': 'N/A',
            'country': 'N/A'
        }
        # BUG FIX: row.find('td') returns a single Tag; iterating it walked
        # its children (including bare strings with no findChildren) and
        # crashed -- find_all('td') is what was intended
        tds = row.find_all('td')
        for td in tds:
            if td.findChildren('a'):
                item['url'] = self.base_url + td.a.get('href')
                item['country'] = td.a.text
        query_by_countries.append(item)
    utils.print_debug(self.options, query_by_countries)
    return query_by_countries
def sending(self, url):
    """Fetch a Censys result page, archive it when requested, parse it,
    and continue through remaining pages unless pagination is disabled.

    Returns False when Censys rate-limits the request."""
    # sending request and return the response
    resp = sender.send_get(self.options, url, self.cookies)
    if not resp:
        return
    body = resp.text
    if 'ratelimit' in body:
        utils.print_bad('Looks like you get block from Censys. Consider using Proxy')
        return False
    if self.options['store_content']:
        stamp = str(int(time.time()))
        slug = utils.url_encode(
            url.replace(self.base_url, '')).replace('/', '_')
        raw_file = self.options['raw'] + "/censys/{0}_{1}".format(slug, stamp)
        utils.just_write(raw_file, body)
    soup = utils.soup(body)
    self.analyze(soup)
    # checking if there is many pages or not
    if not self.options['disable_pages']:
        utils.print_info("Continue grab more pages")
        self.pages(self.get_num_pages(soup))