Example #1
def crtshQuery(domain):
    found = []
    # Query crt.sh only when no cached result file exists yet.
    if not os.path.isfile(domain + ".sub.crtsh") or os.path.getsize(
            domain + ".sub.crtsh") == 0:
        try:
            rawlist = crtshAPI().search(domain)[0]
        except Exception:
            rawlist = []

        for item in rawlist:
            item = json.loads(item)
            for k, v in item.items():
                if k == "name_value":
                    # Skip e-mail addresses, wildcard entries and invalid FQDNs.
                    if '@' in v:
                        continue
                    if '*' in v:
                        continue
                    if not checkFqdn(v):
                        continue
                    # A single certificate entry may carry several names,
                    # one per line.
                    if '\n' in v:
                        for tok in v.split('\n'):
                            found.append(tok)
                    else:
                        found.append(v)

        found = list(set(found))
        saveFile(domain + ".sub.crtsh", found)
    else:
        # Reuse the cached crt.sh results.
        temp = readFile(domain + ".sub.crtsh")
        for item in temp:
            if len(item) > 2:
                found.append(item.rstrip("\n"))
    return found
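This snippet leans on helpers that are not shown (checkFqdn, saveFile, readFile). A minimal sketch of what they might look like, assuming newline-terminated text files and a loose FQDN regex; only the names come from the code above, the bodies are assumptions:

import re

def checkFqdn(value):
    # Assumed helper: loose FQDN check (dot-separated labels of letters,
    # digits, hyphens and underscores, ending in an alphabetic TLD).
    return bool(re.match(r"^([a-zA-Z0-9_-]+\.)+[a-zA-Z]{2,}$", value))

def saveFile(path, lines):
    # Assumed helper: write one entry per line.
    with open(path, "w") as f:
        for line in lines:
            f.write(line.rstrip("\n") + "\n")

def readFile(path):
    # Assumed helper: return the file's lines with trailing newlines intact
    # (which is why callers strip them with rstrip("\n")).
    with open(path) as f:
        return f.readlines()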
Example #2
def TargetDiscovery(domain, wordlist, skipdiscovery, hostlist):
    print("[*] Subdomain discovery phase")
    ips = []
    hosts = []
    if not skipdiscovery:
        print("  + Running amass")
        execAmass(domain)

        print("  + Running sublist3r")
        execSublist3r(domain)

        print("  + Running WayBack machine query")
        wayback_found_list = WayBackMachine(domain)

        print("  + Running subfinder (bruteforce mode)")
        execSubfinder(domain, wordlist)

        print("  + Parsing subfinder report")
        subfinder_found_list = parseSubfinder(domain)
        for item in subfinder_found_list:
            hosts.append(item.rstrip("\n"))

        print("  + Parsing WayBack machine report")
        for item in wayback_found_list:
            hosts.append(item.rstrip("\n"))

        print("  + Parsing amass report")
        amass_found_list = parseAmass(domain)
        for item in amass_found_list:
            hosts.append(item.rstrip("\n"))

        print("  + Parsing sublist3r report")
        sublist3r_found_list = parseSublist3r(domain)
        for item in sublist3r_found_list:
            hosts.append(item.rstrip("\n"))

    else:
        # Discovery skipped: take hostnames from the user-supplied list.
        hosts = readFile(hostlist)

    # Deduplicate and drop anything outside the target domain.
    uhosts = filterTargetDomainList(list(set(hosts)), domain)

    saveFile(domain + ".hosts", uhosts)
    print("  + Hosts file created: " + domain + ".hosts")

    print("  + Running massdns")
    execMassdns(domain, RESOLVERS)
    print("  + Parsing massdns report")
    massdns_iplist = parseMassdns(domain)
    for nip in massdns_iplist:
        ips.append(nip)

    unique_ips = list(set(ips))
    saveFile(domain + ".ips", unique_ips)
    print("  + IPs file created: " + domain + ".ips")
    print("[*] Done: %d ips and %d hosts discovered" % (len(unique_ips), len(uhosts)))

    return unique_ips, uhosts
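filterTargetDomainList is not shown; judging from its call site it keeps only names that belong to the target domain. A possible sketch (the body is an assumption):

def filterTargetDomainList(hosts, domain):
    # Assumed helper: keep only hostnames inside the target domain,
    # dropping empty strings and unrelated names the tools may emit.
    suffix = "." + domain
    return [h for h in hosts if h and (h == domain or h.endswith(suffix))]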
Example #3
def parseMassdns(domain):
    iplist = []
    content = readFile(domain + ".massdns")
    # Each massdns answer line looks like: "<name>. <type> <answer>"
    for item in content:
        if " CNAME " in item:  # no CNAMEs, please
            continue
        ip = item.split()[2]
        if isGlobalIpv4(ip):
            iplist.append(ip.rstrip("\n"))
    return iplist
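isGlobalIpv4 is another helper that is not shown; the standard-library ipaddress module can provide the same check. A sketch, assuming "global" means publicly routable:

import ipaddress

def isGlobalIpv4(ip):
    # Assumed helper: accept only publicly routable IPv4 addresses,
    # rejecting RFC 1918, loopback, link-local and other reserved ranges.
    try:
        addr = ipaddress.ip_address(ip)
    except ValueError:
        return False
    return addr.version == 4 and addr.is_global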
Example #4
def parseAmassStruct(domain):
    a_file = readFile(domain + ".amass")
    aux = []
    # Each line is expected as "<hostname>,<ipv4>".
    for amass_item in a_file:
        hosts = {}
        host_amass = amass_item.split(',')[0].rstrip('\n')
        ip_amass = amass_item.split(',')[1].rstrip('\n')

        if validators.ipv4(ip_amass):
            hosts['A'] = host_amass
            hosts['ipv4'] = ip_amass
            aux.append(hosts)
    return aux
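For reference, a hypothetical input and the resulting structure (values invented for illustration):

# Hypothetical contents of example.com.amass:
#   www.example.com,93.184.216.34
#   mail.example.com,not-an-ip      <- dropped: validators.ipv4() is falsy
records = parseAmassStruct("example.com")
# records == [{'A': 'www.example.com', 'ipv4': '93.184.216.34'}]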
Example #5
def parseMassdnsStruct(domain):
    m_file = readFile(domain + ".massdns")
    aux = []
    for massdns_item in m_file:
        hosts = {}
        # Turn "www.example.com. A 1.2.3.4" into "www.example.com,A,1.2.3.4".
        line = massdns_item.replace('. ', ',').replace(' ', ',')
        if line.split(',')[1] == "CNAME":
            continue
        host_massdns = line.split(',')[0].rstrip('\n')
        ip_massdns = line.split(',')[2].rstrip('\n')

        if isGlobalIpv4(ip_massdns):
            hosts['vhost'] = host_massdns
            hosts['ipaddr'] = ip_massdns
            aux.append(hosts)

    return aux
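The two replace() calls reshape a massdns answer line into a CSV-like string, which is what makes the index-based splits work. A worked example with an invented record:

line = "www.example.com. A 93.184.216.34\n"
line = line.replace('. ', ',').replace(' ', ',')
# -> "www.example.com,A,93.184.216.34\n"
# split(',')[0] = hostname, [1] = record type, [2] = answer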
Example #6
def WayBackMachine(domain):
    # Query the Wayback Machine CDX API only when no cached file exists.
    if not os.path.isfile(domain + ".sub.wayback") or os.path.getsize(
            domain + ".sub.wayback") == 0:
        url = "http://web.archive.org/cdx/search/cdx?url=*." + domain + "&output=json&fl=original&collapse=urlkey"
        response = requests.get(url)
        jdata = response.json()
        hostnames = list()
        for item in jdata:
            proto, host, port = parseUrlProtoHostPort(item[0])
            if "host" not in host and len(host) > 2:
                hostnames.append(host)
        hostnames = list(set(hostnames))
        saveFile(domain + ".sub.wayback", hostnames)
    else:
        # Reuse the cached Wayback results.
        hostnames = list()
        temp = readFile(domain + ".sub.wayback")
        for item in temp:
            if len(item) > 2:
                hostnames.append(item.rstrip("\n"))
    return hostnames
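parseUrlProtoHostPort is not shown; urllib.parse can supply a plausible version. A sketch, assuming default ports are filled in from the scheme:

from urllib.parse import urlparse

def parseUrlProtoHostPort(url):
    # Assumed helper: split a URL into (scheme, hostname, port),
    # defaulting the port from the scheme when none is given.
    parsed = urlparse(url)
    port = parsed.port or (443 if parsed.scheme == "https" else 80)
    return parsed.scheme, (parsed.hostname or ""), port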
Example #7
def S3Discovery(domain, verbose):
    print("[*] S3 Buckets Discovery phase has started")
    execS3Scanner(domain)
    list_of_buckets = readFile(domain + ".buckets")
    print("  + {} buckets found.".format(len(list_of_buckets)))
    return True
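A typical call site, assuming execS3Scanner writes its findings to domain + ".buckets" (as the function above implies):

# Hypothetical invocation from the main flow:
S3Discovery("example.com", verbose=1)
# prints the phase banner plus the number of lines found in
# example.com.buckets; note the verbose flag is currently unused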
Example #8
def WebDiscovery(nmapObj, domain, verbose):
    print("[*] Web Discovery phase has started")
    if not os.path.isfile(domain + ".web") or os.path.getsize(domain + ".web") == 0:
        webhosts = FindWeb(domain, nmapObj)
        saveFile(domain + ".web", webhosts)
    else:
        webhosts = readFile(domain + ".web")

    print("[*] Web Stack identification via Wappalyzer")
    if not os.path.isfile(domain + ".wapp") or os.path.getsize(domain + ".wapp") == 0:
        list_of_webstack = RetrieveWebContent(webhosts)
        list_of_webstack = wappFormat(domain, list_of_webstack)
        # Assemble a {"data": [...]} JSON document by hand, appending one
        # comma-separated entry per host.
        totalsize = len(list_of_webstack)
        itemcount = 1
        appendFile(domain + ".wapp", '{"data":[')
        for item in list_of_webstack:
            njson = json.dumps(item)
            appendFile(domain + ".wapp", njson)
            if itemcount < totalsize:
                appendFile(domain + ".wapp", ',')
            itemcount += 1
            # Also bucket each URL into a per-HTTP-status text file.
            appendFile(domain + ".web." + str(item['status']) + ".txt", item['url'] + "\n")

        appendFile(domain + ".wapp", ']}')
    else:
        list_of_webstack = readFile(domain + ".wapp")

    print("[*] Javascript files identification")
    if os.path.isfile(domain+".js.allfiles") == False or os.path.getsize(domain+".js.allfiles") == 0:
        if verbose > 0:
            print("  + Compiling a list of all js references found")
        list_of_js_files_all=list()
        all_lists_of_all_js_of_all_nodes=list()
        list_of_webstack = readFile(domain+".wapp")
        for rawdata in list_of_webstack:
            jdata = json.loads(rawdata)['data']
            for jnode in jdata:
                for js in normalize_jsfiles(jnode['url'],jnode['js']):
                    list_of_js_files_all.append(js)
                    if verbose > 0:
                        print("  + Found js file: {}".format(str(js)))
                    appendFile(domain + ".js.allfiles", js+"\n")
        if verbose>0:
            if len(list_of_js_files_all)==0:
                print("  + Could not find any js files on target hosts")
        #list_of_js_files_all = readFile(domain + ".js.allfiles")
        list_of_jsdirs_uri = GetJsCommonDirectoriesURI(domain,list_of_js_files_all)
        list_of_js_dir = list()

        for js_dir_uri_item in list_of_jsdirs_uri:
            js_dir_path = getUrlPath(js_dir_uri_item).replace("//","/")
            if verbose > 0:
                print("  + Found js common directory: {}".format(str(js_dir_path)))
            list_of_js_dir.append(js_dir_path)

        list_of_js_dir = list(set(list_of_js_dir))

            # js_dir_uri holds the full uri of directories with js files:
            # http://target/dir1/dir2/js/
            # 
            # list_of_js_dir holds the path portion of that uri:
            # /dir1/dir2/js/
        for jsdir in list_of_js_dir:
            appendFile(domain + ".js.dirs", jsdir +"\n")
        
        for jsdiruri in list_of_jsdirs_uri:
            appendFile(domain + ".js.dirsuri", jsdiruri  +"\n")

        if len(list_of_js_files_all) > 0:
            print("[*] Extracting more endpoints from js files via LinkFinder")
            if not os.path.isfile(domain + ".js.endpoints") or os.path.getsize(domain + ".js.endpoints") == 0:
                all_js_files = list(set(readFile(domain + ".js.allfiles")))
                all_endpoints_found_inside_js = ExtractJsLinks(domain, all_js_files)
                jsondata = json.dumps(all_endpoints_found_inside_js)
                print("[*] Generating endpoints json file: {}".format(domain + ".js.endpoints"))
                appendFile(domain + ".js.endpoints", jsondata)
            else:
                print("[*] Skipping: " + domain + ".js.endpoints found")
    else:
        print("[*] Skipping: " + domain + ".js.allfiles found")

    return webhosts, list_of_webstack
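normalize_jsfiles and getUrlPath are not shown; urllib.parse covers both. A sketch, assuming jnode['js'] is a list of (possibly relative) script references:

from urllib.parse import urljoin, urlparse

def normalize_jsfiles(page_url, js_refs):
    # Assumed helper: resolve every script reference against the page URL,
    # so "/static/app.js" or "app.js" become absolute URLs.
    return [urljoin(page_url, ref) for ref in js_refs]

def getUrlPath(url):
    # Assumed helper: return only the path portion of a URL.
    return urlparse(url).path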
Example #9
    ips, hosts = TargetDiscovery(user_domain, user_subdomain_wordlist, user_skipdiscover, user_sublist)

    # Skip the port scan entirely when discovery produced no IP addresses.
    user_noscan = len(ips) == 0

    nmapObj = None  # stays None when the scan is skipped
    if not user_noscan:
        print("[*] Port Scanning phase started")
        if not os.path.isfile(user_domain + ".nmap.xml") or os.path.getsize(user_domain + ".nmap.xml") == 0:
            print("  + Running masscan against %s targets" % str(len(ips)))
            out, err = execMasscan(user_domain, ports)
            if "You don't have permission" in err:
                sys.exit("[x] You don't have permission to run portscan. You must run as root.")
            oports = readFile(user_domain + ".masscan")
            if len(oports) > 0:
                print("  + Running nmap fingerprinting and scripts")
                execMton(user_domain)
            else:
                print("[x] No open ports found for this domain. Aborting...")
                sys.exit(1)
        else:
            print("  + Nmap report found, loading data...")
        nmapObj = nmap_LoadXmlObject(user_domain + ".nmap.xml")

    if nmapObj:
        list_of_webservers_found, list_of_webstack = WebDiscovery(nmapObj, user_domain, user_verbose)
    else:
        print("[*] Web discovery skipped (no open ports found)")