Example #1
def creditcards(web):

    print(GR+' [*] Initiating module...')
    time.sleep(0.5)
    #print(R+'\n     ========================')
    #print(R+'      CREDIT CARD DISCLOSURE')
    #print(R+'     ========================\n')
    from core.methods.print import pleak
    pleak("Credit card disclosure")
    credit0x00(web)
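
The banner that the commented-out lines used to print is now produced by the shared pleak helper from core.methods.print. Its body is not shown in these examples; a minimal sketch, assuming it only reproduces the old framed heading, might look like this (hypothetical implementation):

def pleak(title):
    # Hypothetical sketch: frame and upper-case the heading, matching
    # the commented-out banner it replaced.
    bar = '=' * (len(title) + 2)
    print('\n     ' + bar)
    print('      ' + title.upper())
    print('     ' + bar + '\n')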
Example #2
def errors(web):

    #print(R+'\n       =========================')
    #print(R+'        E R R O R   H U N T E R ')
    #print(R+'       =========================')
    from core.methods.print import pleak
    pleak("error hunter")
    print(O + '  [This module covers Full Path Disclosures]\n')
    print(GR + ' [*] Making the request...')
    time.sleep(0.5)
    request(web)
Example #3
def errors(web):
    global module, lvl1, lvl3
    module = "ReconANDOSINT"
    lvl1 = "Information Disclosure"
    lvl3 = ""
    lvl2 = inspect.stack()[0][3]
    from core.methods.print import pleak
    pleak("error hunter")
    print(C+'  [This module covers Full Path Disclosures]\n')
    print(GR+' [*] Making the request...')
    time.sleep(0.5)
    request(web, lvl2)
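
Unlike Example #2, this variant derives lvl2 from the call stack instead of hard-coding it: inspect.stack()[0][3] is the function-name field of the current frame record, so lvl2 evaluates to the string "errors". A self-contained illustration:

import inspect

def errors():
    # Index 3 of a frame record is the name of the function
    # executing in that frame.
    return inspect.stack()[0][3]

print(errors())  # prints: errors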
Example #4
def ssn0x00(url, lvl2):
    # name and lvl2 are required by check0x00/save_data below (cf. mail0x00)
    name = targetname(url)
    requests = session()
    #print(R+'\n    =================================')
    #print(R+'     SOCIAL SECURITY INFO DISCLOSURE')
    #print(R+'    =================================\n')
    from core.methods.print import pleak
    pleak("social security info disclosure")
    time.sleep(0.5)
    links = [url]
    po = url.split('//')[1]
    for w in links:
        print(O + ' [*] Scraping Page: ' + C + color.TR3 + C + G + url + C +
              color.TR2 + C)
        req = requests.get(w).text
        check0x00(req, name)

    soup = BeautifulSoup(req, 'lxml')
    for line in soup.find_all('a', href=True):
        newline = line['href']
        try:
            if newline[:4] == "http":
                if po in newline:
                    urls.append(str(newline))
            elif newline[:1] == "/":
                combline = url + newline
                urls.append(str(combline))
        except Exception:
            print(R + ' [-] Unhandled Exception Occurred!')

    try:
        for uurl in urls:
            print("\n" + O + " [+] Scraping Page: " + C + color.TR3 + C + G +
                  uurl + C + color.TR2 + C)
            req = requests.get(uurl).text
            check0x00(req, name)

    except Exception:
        print(R + ' [-] Outbound Query Exception...')

    if found == 0x00:
        print(
            R +
            '\n [-] No Social Security Numbers found disclosed in plaintext in source code!\n'
        )
        save_data(
            database, module, lvl1, lvl2, lvl3, name,
            "No Social Security Numbers found disclosed in plaintext in source code."
        )

    print(G + ' [+] Scraping Done!' + C + color.TR2 + C)
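
The check0x00 helper that inspects each fetched page is defined elsewhere in the module. A minimal sketch of what an SSN check could look like, assuming found is the module-level hit flag tested above (hypothetical code, not the project's actual helper):

import re

def check0x00(page_text, name):
    # Hypothetical sketch: flag dash-separated 3-2-4 digit groups,
    # the common plaintext form of a US Social Security Number.
    global found
    for ssn in re.findall(r'\b\d{3}-\d{2}-\d{4}\b', page_text):
        found = 0x01
        print(' [+] Possible SSN disclosed: ' + ssn)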
Example #5
def creditcards(web):
    lvl2 = inspect.stack()[0][3]
    global module
    module = "ReconANDOSINT"
    global lvl1
    lvl1 = "Information Disclosure"
    global lvl3
    lvl3 = ""
    time.sleep(0.5)
    #print(R+'\n     ========================')
    #print(R+'      CREDIT CARD DISCLOSURE')
    #print(R+'     ========================\n')
    from core.methods.print import pleak
    pleak("Credit card disclosure")
    credit0x00(web, lvl2)
Example #6
def mail0x00(url, lvl2):
    name = targetname(url)
    requests = session()
    #print(R+'\n    ======================')
    #print(R+'     EMAIl INFO HARVESTER')
    #print(R+'    ======================\n')
    from core.methods.print import pleak
    pleak("email info harvester")
    time.sleep(0.5)
    links = [url]
    po = url.split('//')[1]
    for w in links:
        print(O + ' [*] Scraping Page: ' + C + color.TR3 + C + G + url + C +
              color.TR2 + C)
        req = requests.get(w).text
        check0x00(req, lvl2, name)

    soup = BeautifulSoup(req, 'lxml')
    for line in soup.find_all('a', href=True):
        newline = line['href']
        try:
            if newline[:4] == "http":
                if po in newline:
                    urls.append(str(newline))
            elif newline[:1] == "/":
                combline = url + newline
                urls.append(str(combline))
        except Exception:
            print(R + ' [-] Unhandled Exception Occurred!')

    try:
        for uurl in urls:
            print("\n" + O + " [+] Scraping Page:" + C + color.TR3 + C + G +
                  uurl + C + color.TR2 + C)
            req = requests.get(uurl).text
            check0x00(req, lvl2, name)

    except Exception:
        print(R + ' [-] Outbound Query Exception...')

    if found == 0x00:
        print(
            R +
            '\n [-] No Emails found disclosed in plaintext in source code!\n')
        save_data(database, module, lvl1, lvl2, lvl3, name,
                  "No emails found disclosed in plaintext in source code")

    print(G + ' [+] Scraping Done!' + C + color.TR2 + C)
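
Like the other scrapers, this one rebinds the local name requests to session() before making any calls. session() is defined elsewhere; a minimal sketch, assuming it simply hands back a preconfigured requests.Session (the header value is an assumption):

import requests as requests_lib

def session():
    # Hypothetical sketch: a shared Session gives keep-alive connections
    # and one place to set default headers for every scraped page.
    s = requests_lib.Session()
    s.headers.update({'User-Agent': 'Mozilla/5.0'})  # assumed default UA
    return s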
Example #7
def internalip0x00(url):
    requests = session()
    #print(R+'\n    ========================')
    #print(R+'     INTERNAL IP DISCLOSURE')
    #print(R+'    ========================\n')
    from core.methods.print import pleak
    pleak("internal ip disclosure")
    time.sleep(0.5)
    links = [url]
    po = url.split('//')[1]
    for w in links:
        print(O + ' [*] Scraping Page: ' + C + color.TR3 + C + G + url + C +
              color.TR2 + C)
        req = requests.get(w).text
        check0x00(req)

    soup = BeautifulSoup(req, 'lxml')
    for line in soup.find_all('a', href=True):
        newline = line['href']
        try:
            if newline[:4] == "http":
                if po in newline:
                    urls.append(str(newline))
            elif newline[:1] == "/":
                combline = url + newline
                urls.append(str(combline))
        except Exception:
            print(R + ' [-] Unhandled Exception Occurred!')

    try:
        for uurl in urls:
            print("\n" + O + " [+] Scraping Page: " + C + color.TR3 + C * G +
                  uurl + C + color.TR2 + C)
            req = requests.get(uurl).text
            check0x00(req)

    except Exception:
        print(R + ' [-] Outbound Query Exception...')

    if found == 0x00:
        print(
            R +
            '\n [-] No Internal IPs found disclosed in plaintext in source code!\n'
        )

    print(G + ' [+] Scraping Done!' + C + color.TR2 + C)
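
Here, too, check0x00 lives outside the snippet. For an internal-IP module it would plausibly match the RFC 1918 private ranges (10.0.0.0/8, 172.16.0.0/12, 192.168.0.0/16); a hedged sketch, not the project's actual helper:

import re

def check0x00(page_text):
    # Hypothetical sketch: flag addresses from the RFC 1918 private
    # ranges, which should never appear in public page source.
    global found
    private_ip = re.compile(
        r'\b(?:10\.\d{1,3}|192\.168|172\.(?:1[6-9]|2\d|3[01]))(?:\.\d{1,3}){2}\b')
    for ip in private_ip.findall(page_text):
        found = 0x01
        print(' [+] Internal IP disclosed: ' + ip)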
Example #8
def phone0x00(url):

    #print(R+'\n    ========================')
    #print(R+'     PHON3 NuMBER HARVESTER')
    #print(R+'    ========================\n')
    from core.methods.print import pleak
    pleak("phone number harvester")
    time.sleep(0.5)
    links = [url]
    po = url.split('//')[1]
    for w in links:
        print(GR + ' [*] Scraping Page: ' + O + url)
        req = requests.get(w).text
        check0x00(req)

    soup = BeautifulSoup(req, 'lxml')
    for line in soup.find_all('a', href=True):
        newline = line['href']
        try:
            if newline[:4] == "http":
                if po in newline:
                    urls.append(str(newline))
            elif newline[:1] == "/":
                combline = url + newline
                urls.append(str(combline))
        except Exception:
            print(R + ' [-] Unhandled Exception Occurred!')

    try:
        for uurl in urls:
            print(G + "\n [+] Scraping Page: " + O + uurl)
            req = requests.get(uurl).text
            check0x00(req)

    except Exception:
        print(R + ' [-] Outbound Query Exception...')

    if found == 0x00:
        print(
            R +
            ' [-] No Phone Numbers found disclosed in plaintext in Source Code!\n'
        )

    print(G + ' [+] Scraping Done!')
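
As in the other modules, the actual matching happens in check0x00. A loose phone-number matcher could be sketched as follows (hypothetical pattern; real-world phone formats vary far more than this):

import re

def check0x00(page_text):
    # Hypothetical sketch: match 10-digit numbers with an optional
    # country code and common separators, e.g. 555-123-4567 or (555) 123 4567.
    global found
    pattern = r'\+?\d{0,2}[\s.-]?\(?\d{3}\)?[\s.-]?\d{3}[\s.-]?\d{4}'
    for phone in re.findall(pattern, page_text):
        found = 0x01
        print(' [+] Phone number found: ' + phone)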