Example #1
def header():
    MAYOR_VERSION = 1
    MINOR_VERSION = 1
    REVISION = 1
    VERSION = {
        "MAYOR_VERSION": MAYOR_VERSION,
        "MINOR_VERSION": MINOR_VERSION,
        "REVISION": REVISION
    }

    PROGRAM_BANNER = open(FileUtils.buildPath('banner.txt')).read().format(**VERSION)
    message = Style.BRIGHT + Fore.RED + PROGRAM_BANNER + Style.RESET_ALL
    write(message)
Example #2
def header():
    MAYOR_VERSION = 1
    MINOR_VERSION = 5
    REVISION = 0
    VERSION = {
        "MAYOR_VERSION": MAYOR_VERSION,
        "MINOR_VERSION": MINOR_VERSION,
        "REVISION": REVISION
    }

    PROGRAM_BANNER = open(FileUtils.buildPath("banner.txt")).read().format(**VERSION)
    message = Style.BRIGHT + Fore.MAGENTA + PROGRAM_BANNER + Style.RESET_ALL
    write(message)
Example #3
def header():
    MAYOR_VERSION = 1
    MINOR_VERSION = 0
    REVISION = 0
    VERSION = {
        "MAYOR_VERSION": MAYOR_VERSION,
        "MINOR_VERSION": MINOR_VERSION,
        "REVISION": REVISION
    }

    PROGRAM_BANNER = open(FileUtils.buildPath("banner.txt")).read().format(**VERSION)
    message = Style.BRIGHT + Fore.CYAN + PROGRAM_BANNER + Style.RESET_ALL
    write(message)
    __version__ = '1.0'
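All three header() variants rely on the same helpers that the snippets leave out: Fore and Style come from colorama, while write() and FileUtils are utilities from the surrounding project. A minimal sketch of that assumed environment, with bodies that are illustrative guesses rather than the project's actual code (sizeHuman is included because the crawler examples below call it):

import os
import sys

from colorama import Fore, Style, init

init()  # enables ANSI color codes on Windows as well

SCRIPT_PATH = os.path.dirname(os.path.realpath(__file__))


class FileUtils(object):
    @staticmethod
    def buildPath(*path_components):
        # Assumption: resolves a file name relative to the script directory.
        return os.path.join(SCRIPT_PATH, *path_components)

    @staticmethod
    def sizeHuman(num):
        # Assumption: formats a byte count using base-1024 units, e.g. "12KB".
        for unit in ('B', 'KB', 'MB', 'GB'):
            if num < 1024:
                return '%d%s' % (num, unit)
            num /= 1024.0
        return '%d%s' % (num, 'TB')


def write(message):
    # Assumption: prints one line and flushes so output appears immediately.
    sys.stdout.write(message + '\n')
    sys.stdout.flush()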
Example #4
def crowl(dirs, url, args):

    # args strings
    domain = args.url
    wlist = args.wordlist
    delay = args.delay
    random_agent = args.randomAgent
    auth_type = args.authType.lower() if args.authType is not None else ""
    auth_cred = "".join(
        args.authCred).rsplit(':') if args.authCred is not None else ""
    proxy = "".join(args.proxy) if args.proxy is not None else None

    # init count valid url
    count = 0

    # get domain
    extracted = tldextract.extract(url)
    domain = "{}.{}".format(extracted.domain, extracted.suffix)

    if not os.path.exists("reports"):
        os.makedirs("reports")
    logfile = open("reports/" + domain + "_logs.txt", "w+")

    # init user agent
    if random_agent:
        ua = UserAgent()

    # init default user agent
    headers = {'User-Agent': 'CrawlBox'}

    # init default proxy
    proxies = {"http": proxy, "https": proxy}

    for dir in dirs:

        dir = dir.replace("\n", "")

        res = ""
        save = 0
        f_url = url + "/" + dir

        # pick a random user agent for this request
        if random_agent:
            headers = {'User-Agent': ua.random}

        # make request with different type of authentication
        if auth_type == "basic":
            try:
                ress = requests.get(f_url,
                                    headers=headers,
                                    auth=HTTPBasicAuth(auth_cred[0],
                                                       auth_cred[1]),
                                    allow_redirects=False,
                                    proxies=proxies,
                                    verify=False)
            except requests.exceptions.ProxyError:
                exit(write("Check your proxy please! "))

        elif auth_type == "digest":
            try:
                ress = requests.get(f_url,
                                    headers=headers,
                                    auth=HTTPDigestAuth(
                                        auth_cred[0], auth_cred[1]),
                                    allow_redirects=False,
                                    proxies=proxies,
                                    verify=False)
            except requests.exceptions.ProxyError:
                exit(write("Check your proxy please! "))

        elif auth_type == "ntlm":
            try:
                ress = requests.get(f_url,
                                    headers=headers,
                                    auth=HttpNtlmAuth(auth_cred[0],
                                                      auth_cred[1]),
                                    allow_redirects=False,
                                    proxies=proxies,
                                    verify=False)
            except requests.exceptions.ProxyError:
                exit(write("Check your proxy please! "))

        else:
            try:
                ress = requests.get(f_url,
                                    headers=headers,
                                    allow_redirects=False,
                                    proxies=proxies,
                                    verify=False)
            except requests.exceptions.ProxyError:
                exit(write("Check your proxy please! "))

        response = ress.status_code

        # size
        try:
            if (ress.headers['content-length'] is not None):
                size = int(ress.headers['content-length'])
            else:
                size = 0

        except (KeyError, ValueError, TypeError):
            size = len(ress.content)
        finally:
            f_size = FileUtils.sizeHuman(size)

        # check response
        if (response == 200 or response == 302 or response == 304):
            res = "[+] %s - %s : HTTP %s Found" % (f_url, f_size, response)
            res = Fore.GREEN + res + Style.RESET_ALL
            save = 1
            count += 1
        elif (response == 401):
            res = "[-] %s - %s : HTTP %s : Unauthorized" % (f_url, f_size,
                                                            response)
            res = Fore.YELLOW + res + Style.RESET_ALL
        elif (response == 403):
            res = "[-] %s - %s : HTTP %s : Needs authorization" % (
                f_url, f_size, response)
            res = Fore.BLUE + res + Style.RESET_ALL
        elif (response == 404):
            res = "[-] %s - %s : HTTP %s : Not Found" % (f_url, f_size,
                                                         response)
        elif (response == 405):
            res = "[-] %s - %s : HTTP %s : Method Not Allowed" % (
                f_url, f_size, response)
        elif (response == 406):
            res = "[-] %s - %s : HTTP %s : Not Acceptable" % (f_url, f_size,
                                                              response)
        else:
            res = "[-] %s - %s : HTTP %s : Unknown response" % (f_url, f_size,
                                                                response)

        # print result
        if res:
            write(res)

        # save the found URL to the log
        if save == 1:
            logfile.writelines(f_url + "\n")

        if delay > 0:
            print("Sleeping for %s seconds" % delay)
            time.sleep(float(delay))

    write("\n\n[+]Found : %s directory" % (count))
    logfile.close()
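The four request branches in crowl() differ only in the auth argument they pass. As a sketch, the dispatch could be collapsed into a lookup table; the class names match the snippet's imports (requests.auth and requests_ntlm), but the table and helper are illustrative, not part of the original code:

from requests.auth import HTTPBasicAuth, HTTPDigestAuth
from requests_ntlm import HttpNtlmAuth

AUTH_CLASSES = {
    "basic": HTTPBasicAuth,
    "digest": HTTPDigestAuth,
    "ntlm": HttpNtlmAuth,
}


def build_auth(auth_type, auth_cred):
    # Return a requests-compatible auth object, or None when no auth is set.
    auth_class = AUTH_CLASSES.get(auth_type)
    if auth_class is None or not auth_cred:
        return None
    return auth_class(auth_cred[0], auth_cred[1])

With that helper, the loop body needs a single requests.get(f_url, headers=headers, auth=build_auth(auth_type, auth_cred), allow_redirects=False, proxies=proxies, verify=False) wrapped in one try/except.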
Example #5
def crowl(dirs, url, args):

    # args strings
    domain = args.url
    wlist = args.wordlist
    delay = args.delay
    random_agent = args.randomAgent
    auth_type = args.authType.lower() if args.authType is not None else ""
    auth_cred = "".join(args.authCred).rsplit(':') if args.authCred is not None else ""
    proxy = "".join(args.proxy) if args.proxy is not None else None

    # init count valid url
    count = 0

    # get domain
    extracted = tldextract.extract(url)
    domain = "{}.{}".format(extracted.domain, extracted.suffix)

    if not os.path.exists("reports"):
        os.makedirs("reports")
    logfile = open("reports/" + domain + "_logs.txt", "w+")

    # init user agent
    if random_agent:
        ua = UserAgent()

    # init default user agent
    headers = {'User-Agent': 'CyberCrowl'}

    # init default proxy
    proxies = {"http": proxy, "https": proxy}
    
    for dir in dirs:

        dir = dir.replace("\n", "")

        res = ""
        save = 0
        if url.endswith('/'):
            f_url = url + dir
        else:
            f_url = url + "/" + dir
        
        # pick a random user agent for this request
        if random_agent:
            headers = {'User-Agent': ua.random}
                        
        
        # make request with different type of authentication
        if auth_type == "basic":
            try:
                ress = requests.get(f_url, headers=headers, auth=HTTPBasicAuth(auth_cred[0], auth_cred[1]), allow_redirects=False, proxies=proxies, verify=False)
            except requests.exceptions.ConnectionError:
                exit(write("Error Connecting!"))
            except requests.exceptions.ProxyError:
                exit(write("Check your proxy please! "))
                
        elif auth_type == "digest":
            try:
                ress = requests.get(f_url, headers=headers, auth=HTTPDigestAuth(auth_cred[0], auth_cred[1]), allow_redirects=False, proxies=proxies, verify=False)
            except requests.exceptions.ConnectionError:
                exit(write("Error Connecting!"))
            except requests.exceptions.ProxyError:
                exit(write("Check your proxy please! "))
                
        elif auth_type == "ntlm":
            try:
                ress = requests.get(f_url, headers=headers, auth=HttpNtlmAuth(auth_cred[0], auth_cred[1]), allow_redirects=False, proxies=proxies, verify=False)
            except requests.exceptions.ConnectionError:
                exit(write("Error Connecting!"))
            except requests.exceptions.ProxyError:
                exit(write("Check your proxy please! "))

        else:
            try:
                ress = requests.get(f_url, headers=headers, allow_redirects=False, proxies=proxies, verify=False)
            except requests.exceptions.ConnectionError:
                exit(write("Error Connecting!"))
            except requests.exceptions.ProxyError:
                exit(write("Check your proxy please! "))
                
        response = ress.status_code

        # size
        try:
            if (ress.headers['content-length'] is not None):
                size = int(ress.headers['content-length'])
            else:
                size = 0 
                
        except (KeyError, ValueError, TypeError):
            size = len(ress.content)
        finally:
            f_size = FileUtils.sizeHuman(size)

        # check response
        if (response == 200 or response == 302 or response == 304):
            res = "[+] %s - %s : HTTP %s Found" % (f_url, f_size, response)
            res = Fore.GREEN + res + Style.RESET_ALL
            save = 1
            count += 1
        elif (response == 401):
            res = "[-] %s - %s : HTTP %s : Unauthorized" % (f_url, f_size, response)
            res = Fore.YELLOW + res + Style.RESET_ALL
        elif (response == 403):
            res = "[-] %s - %s : HTTP %s : Needs authorization" % (f_url, f_size, response)
            res = Fore.BLUE + res + Style.RESET_ALL
        elif (response == 404):
            res = "[-] %s - %s : HTTP %s : Not Found" % (f_url, f_size, response)
        elif (response == 405):
            res = "[-] %s - %s : HTTP %s : Method Not Allowed" % (f_url, f_size, response)
        elif (response == 406):
            res = "[-] %s - %s : HTTP %s : Not Acceptable" % (f_url, f_size, response)
        else:
            res = "[-] %s - %s : HTTP %s : Unknown response" % (f_url, f_size, response)

        # print result
        if res:
            write(res)

        # save the found URL to the log
        if save == 1:
            logfile.writelines(f_url + "\n")

        if delay > 0:
            print("Sleeping for %s seconds" % delay)
            time.sleep(float(delay))

    write("\n\n[+]Found : %s directory" % (count))
    logfile.close()
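Example #5's only behavioral additions over Example #4 are the ConnectionError handler and the trailing-slash check when building f_url. The slash handling can also be written as one line by normalizing the base URL first (equivalent for both branches, shown only as an alternative):

f_url = url.rstrip("/") + "/" + dir  # same result whether or not url ends with "/"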
Example #6
def crowl(dirs, url, delay):
    count = 0

    # get domain
    extracted = tldextract.extract(url)
    domain = "{}.{}".format(extracted.domain, extracted.suffix)
    if domain.startswith('localhost') or domain.startswith('127.0.0.1'):
        domain = domain.replace(".", "")

    logfile = open(domain + "_logs.txt", "w")

    for d in dirs:

        d = d.replace("\n", "")

        res = ""
        save = 0

        # split url
        url_s = url
        path = '/'
        if url.find('/') != -1:
            url_s = url.rsplit('/', 1)[0]
            path += url.rsplit('/', 1)[1]
        conn = httplib.HTTPConnection(url_s)
        conn.request("GET", path + d)
        ress = conn.getresponse()
        response = ress.status

        # size
        try:
            if (ress.getheader('content-length') is not None):
                size = int(ress.getheader('content-length'))
            else:
                size = 0
        except (KeyError, ValueError, TypeError):
            size = len(ress.read())
        finally:
            f_size = FileUtils.sizeHuman(size)

        # check response
        if (response == 200 or response == 302 or response == 304):
            res = "[+] %s - %s : HTTP %s Found" % (url_s + path + d, f_size,
                                                   response)
            res = Fore.GREEN + res + Style.RESET_ALL
            save = 1
            count += 1
        elif (response == 401):
            res = "[-] %s - %s : HTTP %s : Unauthorized" % (url_s + path + d,
                                                            f_size, response)
            res = Fore.YELLOW + res + Style.RESET_ALL
        elif (response == 403):
            res = "[-] %s - %s : HTTP %s : Needs authorization" % (
                url_s + path + d, f_size, response)
            res = Fore.BLUE + res + Style.RESET_ALL
        elif (response == 404):
            res = "[-] %s - %s : HTTP %s : Not Found" % (url_s + path + d,
                                                         f_size, response)

        # print result
        if res:
            write(res)

        # save the found URL to the log
        if save == 1:
            found = url + '/' + d
            logfile.writelines(found + "\n")

        if delay > 0:
            print("Sleeping for %s seconds" % delay)
            time.sleep(float(delay))

    write("\n\n[+]Found : %s directory" % (count))
    logfile.close()
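Examples #6 and #9 use httplib, which exists only on Python 2; on Python 3 the module is named http.client and the same request looks like the sketch below (host and path are placeholders):

import http.client

conn = http.client.HTTPConnection("example.com")  # placeholder host
conn.request("GET", "/admin/")                    # placeholder path
ress = conn.getresponse()
print(ress.status, ress.getheader("content-length"))
body = ress.read()  # the response object has no .body attribute; read() returns the bytes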
Example #7
def crowl(dirs, url, delay):
    count = 0

    # get domain
    extracted = tldextract.extract(url)
    domain = "{}.{}".format(extracted.domain, extracted.suffix)

    if not os.path.exists("reports"):
        os.makedirs("reports")
    logfile = open("reports/" + domain + "_logs.txt", "w+")

    for d in dirs:

        d = d.replace("\n", "")

        res = ""
        save = 0
        f_url = url + "/" + d
        ress = requests.get(f_url, allow_redirects=False)
        response = ress.status_code

        # size
        try:
            if (ress.headers['content-length'] is not None):
                size = int(ress.headers['content-length'])
            else:
                size = 0
        except (KeyError, ValueError, TypeError):
            size = len(ress.content)
        finally:
            f_size = FileUtils.sizeHuman(size)

        # check response
        if (response == 200 or response == 302 or response == 304):
            res = "[+] %s - %s : HTTP %s Found" % (f_url, f_size, response)
            res = Fore.GREEN + res + Style.RESET_ALL
            save = 1
            count += 1
        elif (response == 401):
            res = "[-] %s - %s : HTTP %s : Unauthorized" % (f_url, f_size,
                                                            response)
            res = Fore.YELLOW + res + Style.RESET_ALL
        elif (response == 403):
            res = "[-] %s - %s : HTTP %s : Needs authorization" % (
                f_url, f_size, response)
            res = Fore.BLUE + res + Style.RESET_ALL
        elif (response == 404):
            res = "[-] %s - %s : HTTP %s : Not Found" % (f_url, f_size,
                                                         response)
        elif (response == 405):
            res = "[-] %s - %s : HTTP %s : Method Not Allowed" % (
                f_url, f_size, response)
        elif (response == 406):
            res = "[-] %s - %s : HTTP %s : Not Acceptable" % (f_url, f_size,
                                                              response)
        else:
            res = "[-] %s - %s : HTTP %s : Unknown response" % (f_url, f_size,
                                                                response)

        # print result
        if res:
            write(res)

        # save the found URL to the log
        if save == 1:
            logfile.writelines(f_url + "\n")

        if delay > 0:
            print("Sleeping for %s seconds" % delay)
            time.sleep(float(delay))

    write("\n\n[+]Found : %s directory" % (count))
    logfile.close()
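The Content-Length fallback is repeated verbatim in every requests-based variant (#4, #5, #7, #8). As a sketch, it factors cleanly into a helper; the function name is invented for illustration:

def response_size(ress):
    # Prefer the declared Content-Length; fall back to the body we downloaded.
    try:
        return int(ress.headers['content-length'])
    except (KeyError, ValueError, TypeError):
        return len(ress.content)

f_size = FileUtils.sizeHuman(response_size(ress)) then replaces the whole try/except/finally block.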
Example #8
def crowl(dirs, url, delay):
    count = 0

    # get domain
    extracted = tldextract.extract(url)
    domain = "{}.{}".format(extracted.domain, extracted.suffix)

    if not os.path.exists("reports"):
        os.makedirs("reports")
    logfile = open("reports/" + domain + "_logs.txt", "w+")

    for d in dirs:

        d = d.replace("\n", "")

        res = ""
        save = 0
        f_url = url + "/" + d
        ress = requests.get(f_url, allow_redirects=False)
        response = ress.status_code

        # size
        try:
            if (ress.headers['content-length'] is not None):
                size = int(ress.headers['content-length'])
            else:
                size = 0 
        except (KeyError, ValueError, TypeError):
            size = len(ress.content)
        finally:
            f_size = FileUtils.sizeHuman(size)

        # check response
        if (response == 200 or response == 302 or response == 304):
            res = "[+] %s - %s : HTTP %s Found" % (f_url, f_size, response)
            res = Fore.GREEN + res + Style.RESET_ALL
            save = 1
            count += 1
        elif (response == 401):
            res = "[-] %s - %s : HTTP %s : Unauthorized" % (f_url, f_size, response)
            res = Fore.YELLOW + res + Style.RESET_ALL
        elif (response == 403):
            res = "[-] %s - %s : HTTP %s : Needs authorization" % (f_url, f_size, response)
            res = Fore.BLUE + res + Style.RESET_ALL
        elif (response == 404):
            res = "[-] %s - %s : HTTP %s : Not Found" % (f_url, f_size, response)
        elif (response == 405):
            res = "[-] %s - %s : HTTP %s : Method Not Allowed" % (f_url, f_size, response)
        elif (response == 406):
            res = "[-] %s - %s : HTTP %s : Not Acceptable" % (f_url, f_size, response)
        else:
            res = "[-] %s - %s : HTTP %s : Unknown response" % (f_url, f_size, response)

        # print result
        if res:
            write(res)

        # save the found URL to the log
        if save == 1:
            logfile.writelines(f_url + "\n")

        if delay > 0:
            print("Sleeping for %s seconds" % delay)
            time.sleep(float(delay))

    write("\n\n[+]Found : %s directory" % (count))
    logfile.close()
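The status-code ladder is another block shared by Examples #4, #5, #7 and #8. A sketch of the same mapping as a lookup table (labels copied from the snippets, table name invented; the per-status colors are omitted for brevity):

STATUS_LABELS = {
    401: "Unauthorized",
    403: "Needs authorization",
    404: "Not Found",
    405: "Method Not Allowed",
    406: "Not Acceptable",
}

if response in (200, 302, 304):
    res = Fore.GREEN + "[+] %s - %s : HTTP %s Found" % (f_url, f_size, response) + Style.RESET_ALL
    save = 1
    count += 1
else:
    label = STATUS_LABELS.get(response, "Unknown response")
    res = "[-] %s - %s : HTTP %s : %s" % (f_url, f_size, response, label)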
Example #9
def crowl(dirs, url):
    count = 0

    # get domain
    extracted = tldextract.extract(url)
    domain = "{}.{}".format(extracted.domain, extracted.suffix)
    if domain.startswith('localhost') or domain.startswith('127.0.0.1'):
        domain = domain.replace(".", "")

    logfile = open(domain+"_logs.txt", "w")

    for d in dirs:

        d = d.replace("\n", "")

        res = ""
        save = 0

        # split url
        url_s = url
        path = '/'
        if url.find('/') != -1:
            url_s = url.rsplit('/', 1)[0]
            path += url.rsplit('/', 1)[1]
        conn = httplib.HTTPConnection(url_s)
        conn.request("GET", path+d)
        ress = conn.getresponse()
        response = ress.status

        # size
        try:
            size = int(ress.getheader('content-length'))
        except (KeyError, ValueError, TypeError):
            size = len(ress.read())
        finally:
            f_size = FileUtils.sizeHuman(size)

        # check response
        if (response == 200 or response == 302 or response == 304):
            res = "[+] %s - %s : HTTP %s Found" % (url_s + path + d, f_size, response)
            res = Fore.GREEN + res + Style.RESET_ALL
            save = 1
            count += 1
        elif (response == 401):
            res = "[-] %s - %s : HTTP %s : Unauthorized" % (url_s + path + d, f_size, response)
            res = Fore.YELLOW + res + Style.RESET_ALL
        elif (response == 403):
            res = "[-] %s - %s : HTTP %s : Needs authorization" % (url_s + path + d, f_size, response)
            res = Fore.BLUE + res + Style.RESET_ALL
        elif (response == 404):
            res = "[-] %s - %s : HTTP %s : Not Found" % (url_s + path + d, f_size, response)

        # print result
        if res:
            write(res)

        # save the found URL to the log
        if save == 1:
            found = url+d
            logfile.writelines(found + "\n")

    write("\n\n[+]Found : %s directory" % (count))
    logfile.close()