Code Example #1
File: check_robots.py  Project: dmknght/NGfuzz
import mechanicalsoup

from cores import events  # project logging helper; spider.py imports it the same way


def check(url):
    valid = []
    browser = mechanicalsoup.StatefulBrowser()
    try:
        pathlist = []
        robots_url = url if "robots.txt" in url else "%srobots.txt" % (url)
        for line in browser.open(robots_url).text.split("\n"):
            path = line.split(': /')
            if path[0] == "Disallow":
                pathlist.append(path[1].replace("\n", "").replace("\r", ""))
        # Deduplicate and drop empty or bare "/" entries
        pathlist = [p for p in set(pathlist) if p not in ("", "/")]
        if pathlist:
            events.info(robots_url, info="Robots")
            url = url.replace("robots.txt", "")

            for p in pathlist:
                disurl = url + p
                resp = browser.open(disurl)
                if resp.status_code < 400:  # 2xx/3xx: disallowed path is reachable
                    events.sub_info(disurl, info="Found")
                    valid.append(disurl)
    except Exception:
        pass
    finally:
        browser.close()
    return valid
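check() returns only the Disallow entries that actually responded, so a caller can iterate over the result directly. The following is a minimal usage sketch rather than code from the project: the import path and target URL are assumptions, and the project's cores.events helpers must be importable for check() to run.

# Usage sketch only; the module path and target URL are placeholders.
from check_robots import check

reachable = check("http://127.0.0.1/")
for disallowed_url in reachable:
    print(disallowed_url)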
Code Example #2
def check_section(header, name, value):
    # Compare one response header against its expected hardening value
    try:
        if header[name] == value:
            events.sub_info(header[name], name)
        else:
            events.sub_vuln_low(name, header[name])
    except KeyError:
        events.sub_vuln_low(name, "Missing Header")
Code Example #3
def cross_origin(header):
    # A wildcard Access-Control-Allow-Origin lets any origin read responses
    try:
        if header["Access-Control-Allow-Origin"] == "*":
            events.sub_vuln_med(
                "Cross Origin", "Access-Control-Allow-Origin: %s" %
                header["Access-Control-Allow-Origin"])
        else:
            events.sub_info("Cross Origin",
                            header["Access-Control-Allow-Origin"])
    except KeyError:
        # No CORS policy advertised
        pass
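Because cross_origin() only inspects a response-headers mapping, the same wildcard-CORS check can be reproduced on its own. Below is a minimal standalone sketch, assuming a reachable target (the URL is a placeholder) and relying on the requests-style Response object that mechanicalsoup's StatefulBrowser.open() returns.

# Standalone sketch: prints to stdout instead of using the project's events module.
import mechanicalsoup

browser = mechanicalsoup.StatefulBrowser()
resp = browser.open("http://127.0.0.1/")
acao = resp.headers.get("Access-Control-Allow-Origin")
if acao == "*":
    print("Medium: wildcard CORS policy (Access-Control-Allow-Origin: *)")
elif acao:
    print("Info: Access-Control-Allow-Origin = %s" % acao)
browser.close()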
Code Example #4
def start(url):
    import mechanicalsoup

    events.info(url, info="Checking")
    domain = cores.get_domain(url)

    try:
        import socket  # , GeoIP
        ip_addr = socket.gethostbyname(domain)
        events.sub_info(ip_addr, info="IP Address")
        # ip_info = GeoIP.GeoIP()
        # events.sub_info("%s" %(ip_info.country_name_by_name(ip_addr)), info =
    except Exception:
        pass
    # Create the browser before the try so the finally-close below never
    # references an unbound name if construction fails
    browser = mechanicalsoup.StatefulBrowser()
    try:
        response = browser.open(url)
        try:
            title = str(browser.get_current_page().title.text.replace(
                "\n", ""))
        except UnicodeEncodeError:
            title = str(
                browser.get_current_page().title.text.encode('utf-8')).replace(
                    "\n", "")
        except Exception as error:
            # events.error(error, "Page title")
            title = "No Title"
        events.info(title, info="Title")

        if response.status_code >= 500:
            events.error("Server error: %s" % (response.status_code))
            return False
        elif response.status_code == 404:
            events.error("Link not found: %s" % (response.status_code))
            return False
        elif response.status_code == 403:
            events.error("Forbidden: %s" % (response.status_code))
            return False

        if str(browser.get_url()) != url:
            events.info("%s" % (browser.get_url()), info="MOVED")

        analysis_functions = [
            header_info, header_analysis, http_method, cross_origin
        ]
        for func in analysis_functions:
            func(response.headers)

    except Exception as error:
        events.error(error, "Footprinting")
    finally:
        browser.close()
Code Example #5
def http_method(header):
    # Dangerous HTTP methods advertised by the server (OTG-CONFIG-006)
    try:
        allowed = header.get("Access-Control-Allow-Methods") or header["Allow"]
        if any(m in allowed for m in ("PUT", "DELETE", "TRACE", "CONNECT")):
            events.sub_vuln_med("HTTP Methods", allowed)
        else:
            events.sub_info("HTTP Methods", allowed)
    except KeyError:
        # Neither Access-Control-Allow-Methods nor Allow is present
        pass
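Because the advertised methods arrive as a single comma-separated string, the membership test is run once per risky method with any(). A tiny self-contained check of that test, using made-up header values:

# Hypothetical header values, used only to exercise the membership test.
risky = ("PUT", "DELETE", "TRACE", "CONNECT")
assert any(m in "GET, POST, PUT, OPTIONS" for m in risky)
assert not any(m in "GET, POST, OPTIONS" for m in risky)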
Code Example #6
def header_info(header):
    # print(header)
    # Report basic fingerprinting headers from the response
    events.success(header.get("Date", "No Date header"), info="Header")
    try:
        events.sub_info(header["Server"], "Server")
        events.sub_info(header["X-Powered-By"], "X-Powered-By")
        if header["X-Powered-By"] == "ASP.NET":
            events.sub_info(header["X-AspNet-Version"], "X-AspNet-Version")
    except KeyError:
        # Server did not send one of the fingerprinting headers
        pass
Code Example #7
File: spider.py  Project: dmknght/NGfuzz
def spider(url, branch=True):
    media_exts = ('.7z', '.aac', '.aiff', '.au', '.avi', '.bin', '.bmp',
                  '.cab', '.dll', '.dmp', '.ear', '.exe', '.flv', '.gif',
                  '.gz', '.image', '.iso', '.jar', '.jpeg', '.jpg', '.mkv',
                  '.mov', '.mp3', '.mp4', '.mpeg', '.mpg', '.pdf', '.png',
                  '.ps', '.rar', '.scm', '.so', '.tar', '.tar.gz', '.tif',
                  '.war', '.wav', '.wmv', '.zip')
    # TODO Add more extensions to skip
    # from modules.recon import check_robots
    all_urls = {}

    # all_urls = check_robots.check(url) # TODO edit here
    # link = cores.get_params(url).keys()[0]
    # link, params = link.keys()[0], link.values()[0]

    if not branch:
        scope = cores.check_url(cores.get_domain(url))
    else:
        if url.endswith("/"):
            scope = url
        else:
            scope = cores.check_url("/".join(url.split("/")[2:-1]))
    # cores.get_params() returns a {url: params} mapping for the seed URL
    all_urls.update(cores.get_params(scope))
    visited = []
    import mechanicalsoup
    try:
        # TODO Invalid url protocol or DOM url
        browser = mechanicalsoup.StatefulBrowser()
        i = 0
        # all_urls can grow while crawling, so re-check its size on every pass
        while i < len(all_urls):
            spider_url = list(all_urls)[i]
            if spider_url not in visited:
                browser.open(spider_url)
                visited.append(spider_url)
                current_url = browser.get_url()
                if current_url != spider_url:
                    all_urls.update(cores.get_params(current_url))

                # Collect every link on the current page
                for link in browser.links():
                    link, params = next(iter(
                        cores.get_params(link.attrs['href']).items()))

                    if link:
                        # DONT OPEN DOC FILES
                        # if link.split(".")[-1] not in media_exts and "?" not in link:
                        if link.endswith(media_exts) and "?" not in link:
                            pass
                        elif link:
                            # Remove "/" in last character
                            # if link[-1] == "/":
                            if link.endswith("/"):
                                last_slash = True
                                link = link[:-1]
                            else:
                                last_slash = False
                            # Relative link (no scheme); absolute URLs are handled in the else below
                            if "://" not in link:
                                # if link[:3] == "../":
                                if link.startswith("../"):
                                    # TODO bug in parse url http://192.168.56.103/owaspbricks/config/../login-pages.html,http://192.168.56.103/owaspbricks/config/../config/
                                    # Link with above level
                                    link = "/".join(
                                        spider_url.split("/")
                                        [:-2]) + link.replace("..", "")
                                    link = link + "/" if last_slash else link
                                else:
                                    # Move `/foo/`, `foo/` and `./foo/` to 1 format
                                    # if link[:2] == "./":
                                    if link.startswith("./"):
                                        link = link[2:]
                                    # elif link[:1] == "/":
                                    elif link.startswith("/"):
                                        link = link[1:]
                                    if len(link.split("/")) == 1:
                                        link = "/".join(
                                            spider_url.split("/")
                                            [:-1]) + "/" + link
                                        link = link + "/" if last_slash else link
                                    else:
                                        link = spider_url + link + "/" if last_slash else spider_url + link

                                # Keep only in-scope links and skip javascript:/mailto: schemes
                                if (link and scope in link
                                        and "javascript:" not in link
                                        and "mailto:" not in link):
                                    # If url is not visited
                                    if link not in all_urls.keys():
                                        link = cores.check_url(link)
                                        resp = browser.open(link)
                                        if resp.status_code < 400:
                                            all_urls.update({link: params})

                                            # Check if current url redirect us to other url with parameter
                                            current_url = browser.get_url()
                                            # If link is redirected
                                            if current_url != link:
                                                all_urls.update(
                                                    cores.get_params(
                                                        current_url))

                                    # Known URL: record only parameters we have not seen yet
                                    else:
                                        for key in params:
                                            if key not in all_urls[link]:
                                                all_urls[link][key] = params[key]
                            else:
                                if scope in link:
                                    resp = browser.open(link)
                                    if resp.status_code < 400:
                                        all_urls.update({link: params})
                                        # Check if current url redirect us to other url with parameter
                                        current_url = browser.get_url()
                                        # If link is redirected
                                        if current_url != link:
                                            all_urls.update(
                                                cores.get_params(current_url))

            i += 1
    except AttributeError:
        pass
    except Exception as error:
        from cores import events
        events.error(error, "Spider")
    finally:
        from cores import events
        events.sub_info("Completed", "Spider")
        try:
            browser.close()
        except Exception:
            pass
        return all_urls
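spider() returns the accumulated dictionary that maps each in-scope URL to its query parameters. The snippet below is a hedged usage sketch, not project code: the module path and target URL are placeholders, and the project's cores helpers must be importable.

# Usage sketch only; import path and target URL are placeholders.
from spider import spider

crawl_map = spider("http://127.0.0.1/app/", branch=True)
for page_url, params in crawl_map.items():
    print(page_url, params)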