Example No. 1
    def sending(self, url):
        # send the request and return the response
        utils.print_debug(self.options, url)
        # catch errors such as session timeouts
        try:
            r = sender.send_get(self.options, url, self.cookies)
        except Exception:
            r = False
        if r:
            response = r.text
            if self.options['store_content']:
                ts = str(int(time.time()))
                raw_file = self.options['raw'] + \
                    "/fofa/{0}_{1}".format(utils.url_encode(
                        url.replace(self.base_url, '')).replace('/', '_'), ts)
                utils.just_write(raw_file, response)

            soup = utils.soup(response)
            self.analyze(soup)
            # check whether there are more pages of results
            page_num = self.check_pages(soup)
            # if logged in and pagination is enabled, grab the remaining pages
            if page_num and self.logged_in and not self.options[
                    'disable_pages']:
                utils.print_info("Continuing to grab more pages")
                self.pages(page_num)
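A minimal standalone sketch of the raw-file naming above, with urllib.parse.quote standing in for utils.url_encode and made-up values for self.base_url and self.options['raw'] (both values are assumptions):

import time
import urllib.parse

base_url = 'https://fofa.so'   # assumed value of self.base_url
raw_dir = '/tmp/raw'           # assumed value of self.options['raw']
url = 'https://fofa.so/result?page=2&qbase64=eHh4'

# strip the base URL, URL-encode the remainder, flatten '/' to '_',
# then append a unix timestamp
ts = str(int(time.time()))
name = urllib.parse.quote(url.replace(base_url, '')).replace('/', '_')
raw_file = raw_dir + "/fofa/{0}_{1}".format(name, ts)
print(raw_file)  # e.g. /tmp/raw/fofa/_result%3Fpage%3D2%26qbase64%3DeHh4_1700000000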
Example No. 2
    def analyze(self, response):
        warns = response.get('data', {}).get('search')
        total = response.get('total')

        if total == 0 or not warns:
            utils.print_info("No exploit found for {0}".format(self.query))
            return False

        # store raw json
        raw_file_path = self.options['raw'] + '/vulners_{0}.json'.format(
            self.query.replace(' ', '_'))
        if self.options.get('store_content'):
            utils.just_write(raw_file_path, response, is_json=True)
            utils.print_debug(
                self.options,
                "Writing raw response to: {0}".format(raw_file_path))

        results = []
        for warn in warns:
            item = {
                'Query': self.query,
                'Title': warn.get('_source').get('title'),
                'Score': warn.get('_source').get('cvss').get('score'),
                'External_url': warn.get('_source').get('href'),
                'CVE': warn.get('_source').get('id'),
                'ID': warn.get('_id'),
                'Published': warn.get('_source').get('published'),
                'Source': "https://vulners.com/cve/" + warn.get('_id'),
                'Warning': 'Info',
                'Raw': raw_file_path,
            }
            utils.print_debug(self.options, item)
            results.append(item)

        return results
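For reference, a self-contained sketch of the response shape this parser expects; the field names are taken from the .get() chains above, while the sample values are invented:

# mock of a Vulners-style search response (values are illustrative only)
mock_response = {
    'total': 1,
    'data': {
        'search': [{
            '_id': 'CVE-2019-0708',
            '_source': {
                'title': 'Remote Desktop Services RCE',
                'cvss': {'score': 9.8},
                'href': 'https://example.com/advisory',
                'id': 'CVE-2019-0708',
                'published': '2019-05-14T00:00:00',
            },
        }],
    },
}

for warn in mock_response['data']['search']:
    source = warn['_source']
    print(warn['_id'], source['title'], source['cvss']['score'])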
Example No. 3
def module_query(options):
    utils.print_debug(options, options)
    query = options.get('target') or options.get('query')
    utils.print_info("Query: {0}".format(query))

    if 'exploit' in options.get('module'):
        if '|' in options.get('target'):
            options['product'] = options.get('target').split('|')[0].strip()

            if options['relatively']:
                utils.print_info("Running with relative version")
                exact_version = options.get('target').split('|')[1].strip()
                if '.' in exact_version:
                    options['version'] = exact_version.split('.')[0] + "."
            else:
                options['version'] = options.get('target').split(
                    '|')[1].strip()
        else:
            options['product'] = options.get('target')

        sploitus.Sploitus(options)
        vulners.Vulners(options)
        writeups.Writeups(options)
        # cvedetails.Cvedetails(options)

    if 'ip' in options.get('module'):
        iposint.IPOsint(options)

    # example usage: -m git -t 'sam'
    if 'git' in options.get('module'):
        gitsearch.GitSearch(options)
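A standalone sketch of the 'product|version' target parsing above; parse_target is a hypothetical helper that mirrors the split logic, including the relative mode that keeps only the major version plus a trailing dot:

def parse_target(target, relatively=False):
    # 'product|version', split on the first '|'
    if '|' not in target:
        return target.strip(), None
    product, exact_version = [p.strip() for p in target.split('|', 1)]
    if relatively and '.' in exact_version:
        # relative mode: keep the major version plus a trailing dot
        return product, exact_version.split('.')[0] + "."
    return product, exact_version

print(parse_target('jira|8.2.1'))                   # ('jira', '8.2.1')
print(parse_target('jira|8.2.1', relatively=True))  # ('jira', '8.')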
Example No. 4
    def analyze(self, soup):
        result = []
        # custom here
        divs = soup.findAll(True, {'class': ['SearchResult', 'result']})
        for div in divs:
            element = {
                'raw_ip': 'N/A',
                'result_title': 'N/A',
                'external_url': 'N/A'
            }

            # get the summary link
            link_sum = div.find_all("a", "SearchResult__title-text")[0]
            element['raw_ip'] = link_sum.get('href').replace('/ipv4/', '')  # ip
            element['external_url'] = link_sum.get(
                'href').replace('/ipv4/', '')

            element['result_title'] = link_sum.span.text.replace(
                '(', '').replace(')', '')

            utils.print_debug(self.options, element)
            result.append(element)

        output = []
        for item in result:
            # prefer the external URL, then the raw IP, then the title
            if item.get('external_url') and item.get('external_url') != 'N/A':
                output.append(item.get('external_url'))
            elif item.get('raw_ip') and item.get('raw_ip') != 'N/A':
                output.append(item.get('raw_ip'))
            elif item.get('result_title') and item.get('result_title') != 'N/A':
                output.append(item.get('result_title'))

        really_data = "\n".join(output)
        print(really_data)
        utils.just_write(self.output, really_data + "\n")
Example No. 5
    def github(self, url):
        result = []
        r = sender.send_get(self.options, url, cookies=None)
        if r.status_code == 200:
            response = r.text
            # store the raw HTML response
            raw_file_path = self.options[
                'raw'] + '/write_up_github_{0}.html'.format(
                    self.query.replace(' ', '_'))
            if self.options.get('store_content'):
                utils.just_write(raw_file_path, response)
                utils.print_debug(
                    self.options,
                    "Writing raw response to: {0}".format(raw_file_path))

            soup = utils.soup(response)

            # Custom here
            body = soup.find_all('article', 'markdown-body')[0]
            links = body.findChildren('a')
            for link in links:
                if self.query.lower() in link.text.lower():
                    item = {
                        'Query': self.query,
                        'Title': link.text,
                        'Content': link.text,
                        'External_url': link.get('href'),
                        'Source': url,
                        'Warning': 'Write-Up',
                        'Raw': raw_file_path
                    }
                    utils.print_debug(self.options, item)
                    result.append(item)

        return result
Example No. 6
    def check_pages(self, soup):
        utils.print_debug(self.options, "Checking for more pages")
        div_pages = soup.find_all('div', 'paging')
        # the page links live in the second 'paging' div, so require two
        if len(div_pages) > 1:
            pages = []
            links = div_pages[1].find_all('a')
            for link in links:
                if '(This Page)' not in link.text:
                    pages.append(link.get('href'))
            return pages
        return False
Example No. 7
    def optimize(self, query):
        url = 'https://www.shodan.io/search/_summary?query={0}'.format(query)
        utils.print_good("Analyze first page for more result")
        r = sender.send_get(self.options, url, self.cookies)
        
        if r.status_code == 200:
            soup = utils.soup(r.text)
        else:
            return False
        
        query_by_cities = []
        # check whether the query already has a country filter
        if 'country' in query:
            links = soup.find_all("a")
            country = utils.get_country_code(utils.url_decode(query))

            for link in links:
                if 'city' in (link.get('href') or ''):
                    item = {
                        'url': link.get('href'),
                        'city': link.text,
                        'country': country
                    }
                    utils.print_debug(self.options, item)
                    query_by_cities.append(item)
        else:
            links = soup.find_all("a")
            countries = []
            for link in links:
                if 'country' in (link.get('href') or ''):
                    countries.append(utils.get_country_code(
                        utils.url_decode(link.get('href'))))
            utils.print_debug(self.options, countries)

            for country in countries:
                # send the request again to get the cities for each country
                country_query = utils.url_encode(' country:"{0}"'.format(country))
                url = 'https://www.shodan.io/search/_summary?query={0}{1}'.format(
                    query, country_query)
                r1 = sender.send_get(self.options, url, self.cookies)
                utils.print_info(
                    "Sleeping for a couple of seconds because the Shodan server is really strict")
                utils.random_sleep(5, 8)
                if r1.status_code == 200:
                    soup1 = utils.soup(r1.text)
                    links = soup1.find_all("a")
                    # countries = []
                    for link in links:
                        if 'city' in link.get('href'):
                            # countries.append(utils.get_city_name(
                            #     utils.url_decode(link.get('href'))))
                            item = {
                                'url': link.get('href'),
                                'city': link.text,
                                'country': country
                            }
                            utils.print_debug(self.options, item)
                            query_by_cities.append(item)

        utils.print_debug(self.options, query_by_cities)
        return query_by_cities
Example No. 8
def single_query(options):
    utils.print_debug(options, options)
    utils.print_info("Query: {0}".format(options.get('query')))
    if not options.get('source'):
        utils.print_bad("You need to specify Search engine")
        return

    if 'fofa' in options.get('source'):
        fofa.Fofa(options)

    if 'shodan' in options.get('source'):
        shodan.Shodan(options)

    if 'censys' in options.get('source'):
        censys.Censys(options)
Example No. 9
    def tweet(self, tag):
        results = []
        query = '#{0} #{1}'.format(self.query, tag)
        # TODO: improve by increasing the position
        url = 'https://twitter.com/search?vertical=default&q={0}&src=unkn'.format(
            utils.url_encode(query))
        r = sender.send_get(self.options, url, cookies=None)
        if r.status_code == 200:
            response = r.text

            # store the raw HTML response
            raw_file_path = self.options['raw'] + '/tweets_{1}_{0}.html'.format(
                self.query.replace(' ', '_'), tag)
            if self.options.get('store_content'):
                utils.just_write(raw_file_path, response)
                utils.print_debug(
                    self.options,
                    "Writing raw response to: {0}".format(raw_file_path))
            soup = utils.soup(response)

            # Custom here
            divs = soup.find_all('div', 'original-tweet')
            for div in divs:
                content = div.findChildren('p',
                                           'TweetTextSize')[0].text.strip()
                links = [
                    x.get('data-expanded-url') for x in div.findChildren('a')
                    if 't.co' in (x.get('href') or '')
                ]
                if len(links) == 0:
                    external_url = 'N/A'
                else:
                    external_url = '|'.join([str(x) for x in links])

                item = {
                    'Query': self.query,
                    'Title': query,
                    'Content': content,
                    'External_url': external_url,
                    'Source': url,
                    'Warning': 'Tweet',
                    'Raw': raw_file_path
                }
                utils.print_debug(self.options, item)
                results.append(item)

        return results
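A self-contained sketch of the t.co link extraction above, run against a made-up tweet fragment; the class names match what the parser looks for, but the HTML itself is invented:

from bs4 import BeautifulSoup

html = '''<div class="original-tweet">
  <p class="TweetTextSize">PoC for CVE-2019-0708</p>
  <a href="https://t.co/abc" data-expanded-url="https://example.com/poc">link</a>
</div>'''

div = BeautifulSoup(html, 'html.parser').find('div', 'original-tweet')
content = div.find('p', 'TweetTextSize').text.strip()
links = [a.get('data-expanded-url') for a in div.find_all('a')
         if 't.co' in (a.get('href') or '')]
print(content, links)  # PoC for CVE-2019-0708 ['https://example.com/poc']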
Example No. 10
    def analyze(self, soup):
        utils.print_debug(self.options, "Analyze response")

        results = []
        vuln_table = soup.find(id="vulnslisttable")
        if vuln_table:
            rows = vuln_table.find_all('tr', 'srrowns')
            full_rows = vuln_table.find_all('td', 'cvesummarylong')

            for i in range(len(rows)):
                row = rows[i]
                row_detail = row.findChildren('td')
                cve = row_detail[1].a.text
                cve_url = self.baseURL + row_detail[1].a.get('href')
                vuln_type = row_detail[4].text.strip()
                score = row_detail[7].div.text
                condition = row_detail[9].text

                desc = full_rows[i].text.strip()

                item = {
                    'Query': self.query,
                    'CVE': cve,
                    'CVE URL': cve_url,
                    'Type': vuln_type,
                    'Score': score,
                    'Condition': condition,
                    'Descriptions': desc,
                }
                results.append(item)
        
        if results:
            return results
        else:
            return False
Example No. 11
    def check_session(self):
        utils.print_debug(self.options, "Checking session for FoFa")
        sess_url = 'https://fofa.so/user/users/info'
        r = sender.send_get(self.options, sess_url, self.cookies)

        if r.status_code == 302 or '/login' in r.text:
            utils.print_bad(
                "Looks like the FoFa session is invalid. You are going to get very few results"
            )
            new_cookie = self.do_login()
            if new_cookie:
                utils.print_good("Re-authentication successful")
                self.cookies = {"_fofapro_ars_session": new_cookie}

            return False
        elif r.status_code == 200:
            utils.print_good("Getting result as authenticated user")
            return True

        return False
Example No. 12
    def check_session(self):
        utils.print_debug(self.options, "Checking session for Censys")
        sess_url = 'https://censys.io/account'

        r = sender.send_get(self.options, sess_url, self.cookies)

        if r.status_code == 302 or '/login' in r.text:
            utils.print_bad("Look like Censys session is invalid.")
            new_cookie = self.do_login()
            if new_cookie:
                utils.print_good("Reauthen success")
                self.cookies = {"auth_tkt": new_cookie}
                return True

            return False
        elif r.status_code == 200:
            utils.print_good("Getting result as authenticated user")
            return True

        return False
Example No. 13
    def get_num_pages(self, soup):
        summary_tag = soup.find_all(
            'span', 'SearchResultSectionHeader__statistic')
        if len(summary_tag) == 0:
            return False

        # initialize both so a missing label can't raise a NameError below
        page_num = None
        results_total = None
        for tag in summary_tag:
            if 'Page:' in tag.text:
                page_num = tag.text.split('Page: ')[1].split(
                    '/')[1].replace(',', '')

            if 'Results:' in tag.text:
                results_total = tag.text.split('Results: ')[1].replace(',', '')
                utils.print_debug(self.options, results_total)

        if not page_num or not results_total:
            return False

        utils.print_good("Detected a possible {0} pages for {1} results".format(
            page_num.strip(), results_total.strip()))
        return page_num
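The split chains above assume summary text shaped like 'Page: 1/5' and 'Results: 1,234'; a quick standalone check with invented sample strings:

page_text = 'Page: 1/5'
results_text = 'Results: 1,234'

page_num = page_text.split('Page: ')[1].split('/')[1].replace(',', '')
results_total = results_text.split('Results: ')[1].replace(',', '')
print(page_num, results_total)  # 5 1234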
Example No. 14
    def pages(self, page_num):
        for i in range(2, int(page_num) + 1):
            utils.print_info("Get more result from page: {0}".format(str(i)))

            query = utils.url_encode(
                utils.just_b64_encode(self.options['fofa_query']))
            url = 'https://fofa.so/result?page={0}&qbase64={1}'.format(
                str(i), query)
            utils.print_debug(self.options, url)
            r = sender.send_get(self.options, url, self.cookies)

            if r.status_code == 200:
                response = r.text
                if 'class="error"' in response:
                    utils.print_bad("Reach to the limit at page {0}".format(
                        str(i)))
                    return
                else:
                    soup = utils.soup(response)
                    self.analyze(soup)
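A standalone sketch of how the page URL above is assembled, using the standard library in place of the tool's helpers (the assumption being that just_b64_encode and url_encode behave like base64.b64encode and urllib.parse.quote):

import base64
import urllib.parse

fofa_query = 'app="Jira"'   # assumed shape of self.options['fofa_query']
qbase64 = urllib.parse.quote(base64.b64encode(fofa_query.encode()).decode())
url = 'https://fofa.so/result?page={0}&qbase64={1}'.format(2, qbase64)
print(url)  # https://fofa.so/result?page=2&qbase64=YXBwPSJKaXJhIg%3D%3D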
Example No. 15
    def pages(self, page_num):
        more_output = []
        for i in range(1, int(page_num) + 1):
            utils.print_debug(self.options, "Sleep for couple seconds")
            utils.random_sleep(1, 3)
            utils.print_info("Get more result from page: {0}".format(str(i)))

            data = {"type": "exploits", "sort": "default",
                    "query": self.query,
                    "title": not self.options.get('relatively'), "offset": i * 10}
            
            r = sender.send_post(
                self.options, self.base_url, data, is_json=True)
            if r.status_code == 200:
                response = json.loads(r.text)
                # analyze once and reuse the result instead of parsing twice
                analyzed = self.analyze(response)
                if analyzed:
                    more_output += analyzed
                else:
                    return False

        return more_output
Example No. 16
    def analyze(self, response):
        exploits = response.get('exploits')
        utils.print_debug(self.options, len(exploits or []))
        if not exploits:
            utils.print_info(
                "No exploit found for {0}".format(self.query))
            return False

        # store raw json
        raw_file_path = self.options['raw'] + '/sploitus_{0}.json'.format(
            self.query.replace(' ', '_'))
        if self.options.get('store_content'):
            utils.just_write(raw_file_path, response, is_json=True)
            utils.print_debug(self.options, "Writing raw response to: {0}".format(raw_file_path))

        results = []
        for exploit in exploits:
            item = {
                'Query': self.query,
                'Title': exploit.get('title'),
                'Score': str(exploit.get('score')),
                'External_url': exploit.get('href'),
                'CVE': str(utils.get_cve(exploit.get('source'))),
                'ID': exploit.get('id'),
                'Published': exploit.get('published'),
                'Source': self.base_url + 'exploit?id=' + exploit.get('id'),
                'Warning': 'High',
                'Raw': raw_file_path,
            }
            utils.print_debug(self.options, item)
            results.append(item)

        return results
Example No. 17
    def optimize(self, query):
        utils.print_good("Analyze metadata page for more result")

        raw_query = utils.url_decode(query)
        if 'location.country' in raw_query:
            country = utils.get_country_code(raw_query, source='censys')
            # strip 'AND <country>' variants first, then any bare remainder
            query = raw_query.replace('AND ' + country, '').replace(
                'and ' + country, '').replace(country, '')

        url = 'https://censys.io/ipv4/metadata?q={0}'.format(query)
        r = sender.send_get(self.options, url, self.cookies)

        if r.status_code == 200:
            soup = utils.soup(r.text)
        else:
            return False

        query_by_countries = []
        # pull the Country Breakdown table from the metadata page
        divs = soup.find_all("div", 'left-table')
        country_tables = []
        for div in divs:
            if 'Country Breakdown' in div.h6.text:
                country_tables = div.find_all('tr')

        for row in country_tables:
            item = {
                'url': 'N/A',
                'country': 'N/A'
            }

            # iterate over every cell in the row; append one item per row
            tds = row.find_all('td')
            for td in tds:
                if td.findChildren('a'):
                    item['url'] = self.base_url + td.a.get('href')
                    item['country'] = td.a.text
            query_by_countries.append(item)

        utils.print_debug(self.options, query_by_countries)
        return query_by_countries
Example No. 18
    def analyze(self, soup):
        result = []
        # custom here
        divs = soup.find_all("div", "search-result")
        for div in divs:
            element = {
                'raw_ip': 'N/A',
                'result_title': 'N/A',
                'external_url': 'N/A'
            }

            # get the summary div
            div_sum = div.find_all("div", "search-result-summary")[0]
            element['raw_ip'] = div_sum.span.text  # ip

            div_detail = div.find_all("div", "ip")[0]
            links = div_detail.find_all("a")
            for link in links:
                if '/host/' in link.get('href'):
                    element['result_title'] = link.text

                if link.get('class') and 'fa-external-link' in link.get('class'):
                    element['external_url'] = link.get('href')

            utils.print_debug(self.options, element)
            result.append(element)

        output = []
        for item in result:
            if item.get('external_url') and item.get('external_url') != 'N/A':
                output.append(item.get('external_url'))
            elif item.get('result_title') and item.get('result_title') != 'N/A':
                output.append(item.get('result_title'))
            elif item.get('raw_ip') and item.get('raw_ip') != 'N/A':
                output.append(item.get('raw_ip'))

        really_data = "\n".join(output)
        print(really_data)
        utils.just_write(self.output, really_data + "\n")
Example No. 19
    def check_session(self):
        utils.print_debug(self.options, "Checking session for ZoomEye")
        sess_url = 'https://www.zoomeye.org/user'

        # get jwt if it was set
        if self.jwt.get('Cube-Authorization'
                        ) and self.jwt.get('Cube-Authorization') != 'None':
            self.headers['Cube-Authorization'] = self.jwt.get(
                'Cube-Authorization')

        r = sender.send_get(self.options, sess_url, headers=self.headers)

        if not r or 'login required' in r.text:
            utils.print_bad("Look like ZoomEye session is invalid.")
            # delete jwt header to prevent getting 401 code
            del self.headers['Cube-Authorization']
            return False
        elif 'uuid' in r.text or 'nickname' in r.text:
            utils.print_good("Getting result as authenticated user")
            return True

        return False
Example No. 20
def module_query(options):
    utils.print_debug(options, options)
    utils.print_info("Query: {0}".format(options.get('target')))

    if 'exploit' in options.get('module'):
        if '|' in options.get('target'):
            options['product'] = options.get('target').split('|')[0].strip()

            if options['relatively']:
                utils.print_info("Running with relative version")
                exact_version = options.get('target').split('|')[1].strip()
                if '.' in exact_version:
                    options['version'] = exact_version.split('.')[0] + "."
            else:
                options['version'] = options.get(
                    'target').split('|')[1].strip()
        else:
            options['product'] = options.get('target')

        sploitus.Sploitus(options)
        vulners.Vulners(options)
        writeups.Writeups(options)
        cvedetails.Cvedetails(options)
Example No. 21
    def analyze(self, json_response):
        result = []
        # custom here
        items = json_response.get('matches')
        if not items:
            return False

        for item in items:
            external_url = item.get('portinfo').get(
                'service') + "://" + item.get('ip') + ":" + str(
                    item.get('portinfo').get('port'))

            element = {
                'raw_ip': item.get('ip'),
                'raw_scheme': item.get('ip') + ":" + str(
                    item.get('portinfo').get('port')),
                'external_url': external_url,
            }

            utils.print_debug(self.options, element)
            result.append(element)

        output = []
        for item in result:
            if item.get('external_url') and item.get('external_url') != 'N/A':
                output.append(item.get('external_url'))
            elif item.get('raw_scheme') and item.get('raw_scheme') != 'N/A':
                output.append(item.get('raw_scheme'))
            elif item.get('raw_ip') and item.get('raw_ip') != 'N/A':
                output.append(item.get('raw_ip'))

        really_data = "\n".join(output)
        print(really_data)
        utils.just_write(self.output, really_data + "\n")
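A minimal sketch of the URL assembly above for a single ZoomEye-style match; the field names come from the .get() calls, and the values are invented:

match = {'ip': '1.2.3.4', 'portinfo': {'service': 'http', 'port': 8080}}

external_url = match['portinfo']['service'] + "://" + match['ip'] \
    + ":" + str(match['portinfo']['port'])
print(external_url)  # http://1.2.3.4:8080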