Example No. 1
def action_netcraft(domain, myResolver):
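    # Scrape Netcraft's searchdns service for subdomains of the target
    # domain, resolve each hit, and return "host address" result strings.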
    info('NetCraft Search Started')
    netcraft_list = []
    sub_results = []
    print "\nPassive Gatherings From NetCraft\n"
    try:
        link = "http://searchdns.netcraft.com/?restriction=site+contains&host=*.{}&lookup=wait..&position=limited" .format (domain)
        response = requests.get(link)
        soup = BeautifulSoup(response.content, 'lxml')
        pattern = 'rel="nofollow">([a-z\.\-A-Z0-9]+)<FONT COLOR="#ff0000">'
        sub_results = re.findall(pattern, response.content)

    except Exception:
        error('An Unhandled Exception Has Occurred, Please Check The Log For Details\n' + ERROR_LOG_FILE, exc_info=True)

    if sub_results:
        for item in sub_results:
            netcheck = myResolver.query(item + '.' + domain)
            for data in netcheck:
                netcraft_list.append(item + '.' + domain + ' ' + str(data))
                print colored(item + '.' + domain, 'red')
    else:
        print '\tNo Results Found'

    info('NetCraft Completed')
    return netcraft_list
Example No. 2
def action_emailHunter(domain, api, useragent_f, q, prox):
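    # Query the EmailHunter (hunter.io) v1 API for addresses seen on the
    # target domain and queue the results as (email, source_url) tuples.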
    info('Email Hunter Search Started')
    emails = []
    uas = get_user_agents(useragent_f)
    ua = random.choice(uas)
    link = 'https://api.emailhunter.co/v1/search?domain={0}&api_key={1}'.format(domain,api)

    if prox == True:
        proxy = {'http' : 'http://127.0.0.1:8080'}
    else:
        pass
    try:
        headers = {"Connection" : "close",
                   "User-Agent" : ua,
                   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
                   'Accept-Language': 'en-US,en;q=0.5',
                   'Accept-Encoding': 'gzip, deflate'}
        if prox == True:
            response = requests.get(link, headers=headers, proxies=proxy)
        else:
            response = requests.get(link, headers=headers)

        json_data = response.json()
        for value in json_data['emails']:
            for source in value['sources']:
                url = str(source['uri']).replace("u'","")
                email = str(value['value']).replace("u'","")
                emails.append((email,url))
    except ValueError:
        pass
    except Exception:
        error('An Unhandled Exception Has Occurred, Please Check The Log For Details\n' + ERROR_LOG_FILE, exc_info=True)

    info('Email Hunter Search Completed')
    q.put(sorted(emails))
Example No. 3
def action_bing_true(domain, q, user_agents, prox):
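    # Page through Bing results for '@"domain"' and harvest the e-mail
    # local parts Bing highlights in <strong> tags, queueing the addresses.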
    info('Bing Search Started')
    emails = []
    uas = user_agents
    searchfor = '@' + '"' + domain + '"'
    for start in range(0,30):
        ua = random.choice(uas)
        if prox == True:
            proxy = {'http' : 'http://127.0.0.1:8080'}
        else:
            pass
        try:
            headers = {"Connection" : "close",
                       "User-Agent" : ua,
                       'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
                       'Accept-Language': 'en-US,en;q=0.5',
                       'Accept-Encoding': 'gzip, deflate'}
            payload = { 'q': searchfor, 'first': start}
            link = 'https://www.bing.com/search'
            if prox == True:
                response = requests.get(link, headers=headers, params=payload, proxies=proxy, verify=False)
            else:
                response = requests.get(link, headers=headers, params=payload, verify=False)
            reg_emails = re.compile('[a-zA-Z0-9.-]*' + '@' + '<strong>')
            temp = reg_emails.findall(response.text)
            time.sleep(1)
            for item in temp:
                clean = item.replace("<strong>", "")
                emails.append(clean + domain)

        except Exception:
            continue
    info('Bing Search Completed')
    q.put(sorted(emails))
Example No. 4
def action_brute_wild(sub_list, domain, myResolver):
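    # Resolve ten random 15-character labels to learn the wildcard IP
    # set, then discard brute-force hits that resolve to those addresses.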
    info('Bruting Wild Card SubDomains')
    target_results = []
    random_addrs = []
    for i in range(0, 10, 1):
        one = ''.join(
            random.choice(string.ascii_uppercase + string.digits)
            for _ in range(15))
        myAnswers = myResolver.query(str(one) + '.' + str(domain))
        name = myAnswers.canonical_name
        random_addr = socket.gethostbyname(str(name))
        random_addrs.append(random_addr)
    random_addrs = sorted(set(random_addrs))
    for host in sub_list:
        try:
            host_host, host_addr = host.split(' ')
            if host_addr in random_addrs:
                pass
            else:
                target_results.append(host)
        except dns.resolver.NoNameservers:
            pass
        except dns.resolver.NoAnswer:
            pass
        except dns.resolver.NXDOMAIN:
            pass
        except dns.name.EmptyLabel:
            pass
        except Exception:
            continue
    info('Completed Bruting Wild Card SubDomains')
    return target_results
Example No. 5
def action_brute_wild(sub_list, domain, myResolver):
    info('Bruting Wild Card SubDomains\n')
    target_results = []
    one = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(15))
    myAnswers = myResolver.query(str(one) + '.' + str(domain))
    name = myAnswers.canonical_name
    random_addr = socket.gethostbyname(str(name))
    for host in sub_list:
        try:
            host_host, host_addr = host.split(' ')
            if random_addr == host_addr:
                pass
            else:
                target_results.append(host)
        except dns.resolver.NoNameservers:
            pass
        except dns.resolver.NoAnswer:
            pass
        except dns.resolver.NXDOMAIN:
            pass
        except dns.name.EmptyLabel:
            pass
        except Exception:
            continue
    info('Completed Bruting Wild Card SubDomains\n')
    return target_results
Example No. 6
def updateCheck(VERSION):
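	# Ask pip for outdated packages ("pip list -o") and offer an
	# interactive upgrade when a newer Bluto release is listed on PyPI.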
	command_check = (["pip list -o"])
	process_check = subprocess.Popen(command_check, shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT)
	output_check = process_check.communicate()[0]
	line = output_check.splitlines()
	found = False
	for i in line:
		if 'bluto' in str(i).lower():
			new_version = re.match('Bluto\s\(.*\)\s\-\sLatest\:\s(.*?)\s\[sdist\]', i).group(1)
			found = True
			break

	if found:
		info('Update Available')
		print colored('\nUpdate Available!', 'red'), colored('{}'.format(new_version), 'green')
		print colored('Would you like to attempt to update?\n', 'green')
		while True:
			answer = raw_input('Y|N: ').lower()
			if answer in ('y', 'yes'):
				update()
				print '\n'
				break
			elif answer in ('n', 'no'):
				print '\n'
				break
			else:
				print '\nThe Options Are yes|no Or Y|N, Not {}'.format(answer)
	else:
		print colored('You are running the latest version:','green'), colored('{}\n'.format(VERSION),'blue')
Example No. 8
def updateCheck(VERSION):
    command_check = (["pip list -o"])
    process_check = subprocess.Popen(command_check,
                                     shell=True,
                                     stdout=subprocess.PIPE,
                                     stderr=subprocess.STDOUT)
    output_check = process_check.communicate()[0]
    line = output_check.splitlines()
    found = False
    for i in line:
        if 'bluto' in str(i).lower():
            new_version = re.match(
                'Bluto\s\(.*\)\s\-\sLatest\:\s(.*?)\s\[sdist\]', i).group(1)
            found = True
            break

    if found:
        info('Update Available')
        print colored('\nUpdate Available!',
                      'red'), colored('{}'.format(new_version), 'green')
        print colored('Would you like to attempt to update?\n', 'green')
        while True:
            answer = raw_input('Y|N: ').lower()
            if answer in ('y', 'yes'):
                update()
                print '\n'
                break
            elif answer in ('n', 'no'):
                print '\n'
                break
            else:
                print '\nThe Options Are yes|no Or Y|N, Not {}'.format(answer)
    else:
        print colored('You are running the latest version:',
                      'green'), colored('{}\n'.format(VERSION), 'blue')
Example No. 9
def action_emailHunter(domain, api, user_agents, q, prox):
    info('Hunter Search Started')
    emails = []
    uas = user_agents
    ua = random.choice(uas)
    link = 'https://api.emailhunter.co/v1/search?domain={0}&api_key={1}'.format(
        domain, api)

    if prox == True:
        proxy = {'http': 'http://127.0.0.1:8080'}
    else:
        pass
    try:
        headers = {
            "User-Agent": ua,
            'Accept':
            'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
            'Accept-Language': 'en-US,en;q=0.5',
            'Accept-Encoding': 'gzip, deflate'
        }
        if prox == True:
            response = requests.get(link,
                                    headers=headers,
                                    proxies=proxy,
                                    verify=False)
        else:
            response = requests.get(link, headers=headers, verify=False)
        if response.status_code == 200:
            json_data = response.json()
            for value in json_data['emails']:
                for source in value['sources']:
                    url = str(source['uri']).replace("u'", "")
                    email = str(value['value']).replace("u'", "")
                    emails.append((email, url))
        elif response.status_code == 401:
            json_data = response.json()
            if json_data['message'] == 'Too many calls for this period.':
                print colored(
                    "\tError:\tIt seems the Hunter API key being used has reached\n\t\tit's limit for this month.",
                    'red')
                print colored('\tAPI Key: {}\n'.format(api), 'red')
                q.put(None)
                return None
            if json_data['message'] == 'Invalid or missing api key.':
                print colored(
                    "\tError:\tIt seems the Hunter API key being used is no longer valid,\nit was probably deleted.",
                    'red')
                print colored('\tAPI Key: {}\n'.format(api), 'red')
                print colored(
                    '\tWhy don\'t you grab yourself a new one (they are free)',
                    'green')
                print colored('\thttps://hunter.io/api_keys', 'green')
                q.put(None)
                return None
        else:
            info('No Response From Hunter')
            q.put(None)
    except UnboundLocalError, e:
        print e
Example No. 10
def get_line_count(filename):
    info('Gathering SubDomains Count')
    lines = 0
    for line in open(filename):
        lines += 1

    info('Completed Gathering SubDomains Count')
    return lines
Example No. 11
def action_brute_start(subs):
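    # NOTE: "targets" is a module-level list that the action_brute
    # worker threads append their resolved results to.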
    info('Bruting SubDomains\n')
    pool = ThreadPool(12)
    pool.map(action_brute, subs)
    pool.close()
    info('Completed Bruting SubDomains\n')

    return targets
Example No. 12
def action_google(domain, userCountry, userServer, q, useragent_f, prox):
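    # Page through Google results on the user's local server (e.g.
    # google.co.uk) for '@"domain"' hits, deduplicating by source URL.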
    info('Google Search Started')
    uas = get_user_agents(useragent_f)
    searchfor = '@' + '"' + domain + '"'
    entries_tuples = []
    seen = set()
    results = []
    for start in range(1,20,1):
        ua = random.choice(uas)
        try:
            if prox == True:
                proxy = {'http' : 'http://127.0.0.1:8080'}
            else:
                pass
            headers = {"Connection" : "close",
                       "User-Agent" : ua,
                       'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
                       'Accept-Language': 'en-US,en;q=0.5',
                       'Accept-Encoding': 'gzip, deflate',
                       'Referer': 'https://www.google.com'}
            payload = { 'nord':'1', 'q': searchfor, 'start': start*10}

            link = '{0}/search?num=200' .format(userServer)
            if prox == True:
                response = requests.get(link, headers=headers, params=payload, proxies=proxy)
            else:
                response = requests.get(link, headers=headers, params=payload)

            response.raise_for_status()
            response.text.encode('ascii', 'ignore').decode('ascii')
            soup = BeautifulSoup(response.text, "lxml")

            for div in soup.select("div.g"):

                for div in soup.select("div.g"):

                    email_temp = div.find("span", class_="st")
                    clean = re.sub('<em>', '', email_temp.text)
                    clean = re.sub('</em>', '', email_temp.text)
                    match = re.findall('[a-zA-Z0-9.]*' + '@' + domain, clean)
                    try:
                        if match:
                            if match is not '@' + domain:
                                if match is not '@':
                                    url = div.find('cite').text
                                    email = str(match).replace("u'",'').replace('[','').replace(']','').replace("'",'')
                                    entries_tuples.append((email.lower(),str(url).replace("u'",'').replace("'","")))
                    except Exception, e:
                        pass
            time.sleep(3)
            for urls in entries_tuples:
                if urls[1] not in seen:
                    results.append(urls)
                    seen.add(urls[1])
        except requests.exceptions.HTTPError as e:
            if e.response.status_code == 503:
                print colored('Google is responding with a Captcha, other searches will continue\n', 'red')
                break
Example No. 13
def action_bluto_use(countryID):
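    # Anonymous usage ping: POST the user's country and a timestamp to
    # the author's logging endpoint; failures are logged and ignored.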
    now = datetime.datetime.now()
    try:
        link = "http://darryllane.co.uk/bluto/log_use.php"
        payload = {'country': countryID, 'Date': now}
        requests.post(link, data=payload)
    except Exception:
        info('An Unhandled Exception Has Occurred, Please Check The Log For Details' + INFO_LOG_FILE)
        pass
Example No. 14
def doc_bing(domain, user_agents, prox, q):
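    # Bing-dork the target domain for Office and PDF documents
    # (filetype: + site:) and queue the harvested document URLs.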
    document_list = []
    uas = user_agents
    info('Bing Document Search Started')
    for start in range(1, 300, 10):
        ua = random.choice(uas)
        if prox == True:
            proxy = {'http': 'http://127.0.0.1:8080'}
        else:
            pass
        try:
            headers = {
                "Connection": "close",
                "User-Agent": ua,
                'Accept':
                'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
                'Accept-Language': 'en-US,en;q=0.5',
                'Accept-Encoding': 'gzip, deflate'
            }
            payload = {
                'q': 'filetype:(doc dot docx docm dotx dotm docb xls xlt xlm xlsx xlsm xltx xltm xlsb xla xlam xll xlw ppt pot pps pptx pptm potx potm ppam ppsx ppsm sldx sldm pub pdf) site:{}'.format(domain),
                'first': start
            }
            link = 'http://www.bing.com/search'
            if prox == True:
                response = requests.get(link,
                                        headers=headers,
                                        proxies=proxy,
                                        params=payload,
                                        verify=False)
            else:
                response = requests.get(link,
                                        headers=headers,
                                        params=payload,
                                        verify=False)

            soup = BeautifulSoup(response.text, "lxml")

            divs = soup.findAll('li', {'class': 'b_algo'})
            for div in divs:
                h2 = div.find('h2')
                document = h2.find('a', href=True)['href']
                document = urllib2.unquote(document)
                document_list.append(document)
        except TypeError:
            pass
        except requests.exceptions.ChunkedEncodingError:
            continue
        except Exception:
            traceback.print_exc()
            continue
    potential_docs = len(document_list)
    info('Bing Document Search Finished')
    q.put(document_list)
Example No. 16
def action_google(domain, userCountry, userServer, q, user_agents, prox):
    info('Google Search Started')
    uas = user_agents
    searchfor = '@' + '"' + domain + '"'
    entries_tuples = []
    seen = set()
    results = []
    for start in range(1,10,1):
        ua = random.choice(uas)
        try:
            if prox == True:
                proxy = {'http' : 'http://127.0.0.1:8080'}
            else:
                pass
            headers = {"User-Agent" : ua,
                       'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
                       'Accept-Language': 'en-US,en;q=0.5',
                       'Accept-Encoding': 'gzip, deflate',
                       'Referer': 'https://www.google.com'}
            payload = { 'nord':'1', 'q': searchfor, 'start': start*10}

            link = '{0}/search?num=200' .format(userServer)
            if prox == True:
                response = requests.get(link, headers=headers, params=payload, proxies=proxy, verify=False)
            else:
                response = requests.get(link, headers=headers, params=payload, verify=False)

            response.raise_for_status()
            response.text.encode('ascii', 'ignore').decode('ascii')
            soup = BeautifulSoup(response.text, "lxml")

            for div in soup.select("div.g"):

                for div in soup.select("div.g"):

                    email_temp = div.find("span", class_="st")
                    clean = re.sub('<em>', '', email_temp.text)
                    clean = re.sub('</em>', '', email_temp.text)
                    match = re.findall('[a-zA-Z0-9.]*' + '@' + domain, clean)
                    try:
                        if match:
                            if match is not '@' + domain:
                                if match is not '@':
                                    url = div.find('cite').text
                                    email = str(match).replace("u'",'').replace('[','').replace(']','').replace("'",'')
                                    entries_tuples.append((email.lower(),str(url).replace("u'",'').replace("'","")))
                    except Exception, e:
                        pass
            time.sleep(3)
            for urls in entries_tuples:
                if urls[1] not in seen:
                    results.append(urls)
                    seen.add(urls[1])
        except requests.exceptions.HTTPError as e:
            if e.response.status_code == 503:
                info('Google is responding with a Captcha, other searches will continue')
                break
Example No. 17
def get_user_agents(useragent_f):
    info('Gathering UserAgents')
    uas = []
    with open(useragent_f, 'rb') as uaf:
        for ua in uaf.readlines():
            if ua:
                # slice off the wrapping characters (quotes/trailing comma)
                uas.append(ua.strip()[1:-2])
    random.shuffle(uas)
    info('Completed Gathering UserAgents')
    return uas
Example No. 18
def update():
	command_check = (["pip install bluto --upgrade"])
	process_check = subprocess.Popen(command_check, shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT)
	output_check = process_check.communicate()[0]
	lines = output_check.splitlines()
	info(lines)
	if 'Successfully installed' in output_check:
		print colored('\nUpdate Successful!', 'green')
		sys.exit()
	else:
		print colored('\nUpdate Failed, Please Check The Logs For Details', 'red')
Example No. 19
def action_brute_start(subs, myResolver):
    global myResolverG
    myResolverG = myResolver
    info('Bruting SubDomains')
    print '\nBrute Forcing Sub-Domains\n'
    pool = ThreadPool(8)
    pool.map(action_brute, subs)
    pool.close()
    info('Completed Bruting SubDomains')

    return targets
Example No. 21
def get_subs(filename, domain):
    info('Gathering SubDomains')
    full_list = []
    try:
        subs = [line.rstrip('\n') for line in open(filename)]
        for sub in subs:
            full_list.append(str(sub.lower() + "." + domain))
    except Exception:
        error('An Unhandled Exception Has Occurred, Please Check The Log For Details\n' + ERROR_LOG_FILE, exc_info=True)
        sys.exit()

    info('Completed Gathering SubDomains\n')
    return full_list
Example No. 22
def get_dns_details(domain, myResolver):
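    # Enumerate NS and MX records, resolving each server to an address;
    # returns the list of bare name-server host names (zn_list).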
    info('Gathering DNS Details\n')
    ns_list = []
    zn_list =[]
    mx_list = []
    try:
        print "\nName Server:\n"
        myAnswers = myResolver.query(domain, "NS")
        for data in myAnswers.rrset:
            data1 = str(data)
            data2 = (data1.rstrip('.'))
            addr = socket.gethostbyname(data2)
            ns_list.append(data2 + '\t' + addr)
            zn_list.append(data2)
        ns_list = sorted(set(ns_list))
        for i in ns_list:
            print colored(i, 'green')
    except dns.resolver.NoNameservers:
        error('\tNo Name Servers\nConfirm The Domain Name Is Correct.' + ERROR_LOG_FILE, exc_info=True)
        sys.exit()
    except dns.resolver.NoAnswer:
        print "\tNo DNS Servers"
    except dns.resolver.NXDOMAIN:
        error("\tDomain Does Not Exist" + ERROR_LOG_FILE, exc_info=True)
        sys.exit()
    except dns.resolver.Timeout:
        error('\tTimed Out\nConfirm The Domain Name Is Correct.' + ERROR_LOG_FILE, exc_info=True)
        sys.exit()
    except Exception:
        error('An Unhandled Exception Has Occurred, Please Check The Log For Details\n' + ERROR_LOG_FILE, exc_info=True)

    try:
        print "\nMail Server:\n"
        myAnswers = myResolver.query(domain, "MX")
        for data in myAnswers:
            data1 = str(data)
            data2 = (data1.split(' ',1)[1].rstrip('.'))
            addr = socket.gethostbyname(data2)
            mx_list.append(data2 + '\t' + addr)
        mx_list = sorted(set(mx_list))
        for i in mx_list:
            print colored(i, 'green')
    except dns.resolver.NoAnswer:
        print "\tNo Mail Servers"
    except Exception:
        error('An Unhandled Exception Has Occurred, Please Check The Log For Details\n' + ERROR_LOG_FILE, exc_info=True)

    info('Completed Gathering DNS Details\n')
    return zn_list
Example No. 23
def action_netcraft(domain, myResolver):
    info('NetCraft Search Started')
    netcraft_list = []
    sub_results = []
    print "\nPassive Gatherings From NetCraft\n"
    try:
        link = "http://searchdns.netcraft.com/?restriction=site+contains&host=*.{}&lookup=wait..&position=limited" .format (domain)
        response = requests.get(link, verify=False)
        soup = BeautifulSoup(response.content, 'lxml')
        pattern = 'rel="nofollow">([a-z\.\-A-Z0-9]+)<FONT COLOR="#ff0000">'
        sub_results = re.findall(pattern, response.content)
    except dns.exception.Timeout:
        pass
    except Exception:
        info('An Unhandled Exception Has Occurred, Please Check The Log For Details\n' + INFO_LOG_FILE, exc_info=True)

    if sub_results:
        for item in sub_results:
            try:
                netcheck = myResolver.query(item + '.' + domain)
                for data in netcheck:
                    netcraft_list.append(item + '.' + domain + ' ' + str(data))
                    print colored(item + '.' + domain, 'red')
            except dns.exception.Timeout:
                pass
            except dns.resolver.NXDOMAIN:
                pass
            except Exception:
                info('An Unhandled Exception Has Occurred, Please Check The Log For Details\n' + INFO_LOG_FILE, exc_info=True)
    else:
        print '\tNo Results Found'

    info('NetCraft Completed')
    return netcraft_list
Example No. 24
def update():
    command_check = (["pip install bluto --upgrade"])
    process_check = subprocess.Popen(command_check,
                                     shell=True,
                                     stdout=subprocess.PIPE,
                                     stderr=subprocess.STDOUT)
    output_check = process_check.communicate()[0]
    lines = output_check.splitlines()
    info(lines)
    if 'Successfully installed' in output_check:
        print colored('\nUpdate Successful!', 'green')
        sys.exit()
    else:
        print colored('\nUpdate Failed, Please Check The Logs For Details',
                      'red')
Example No. 25
def action_linkedin(domain, userCountry, q, company, user_agents, prox):
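    # Bing-dork LinkedIn profiles ('site:linkedin.com/in "company"') and
    # queue (url, person, description) tuples, deduplicated by person.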
    info('LinkedIn Search Started')
    uas = user_agents
    entries_tuples = []
    seen = set()
    results = []
    who_error = False
    searchfor = 'site:linkedin.com/in ' + '"' + company + '"'
    ua = random.choice(uas)
    for start in range(1,50,1):
        if prox == True:
            proxy = {'http' : 'http://127.0.0.1:8080'}
        else:
            pass
        try:
            headers = {"Connection" : "close",
                       "User-Agent" : ua,
                       'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
                       'Accept-Language': 'en-US,en;q=0.5',
                       'Accept-Encoding': 'gzip, deflate'}
            payload = { 'q': searchfor, 'first': start}
            link = 'http://www.bing.com/search'
            if prox == True:
                response = requests.get(link, headers=headers, params=payload, proxies=proxy, verify=False)
            else:
                response = requests.get(link, headers=headers, params=payload, verify=False)

            response.text.encode('utf-8')
            soup = BeautifulSoup(response.text, "lxml")

            for div in soup.findAll('li', {'class': 'b_algo'}):
                title_temp = div.find('a').text
                url = div.find('cite').text.encode('utf-8')
                person = str((title_temp.split(' | ')[0]))
                description_temp = div.find('div', {'class': 'b_caption'})
                description = description_temp.find('p').text.encode('utf-8')
                # Scrub the boilerplate LinkedIn phrasing out of the snippet.
                for junk in ("View ", person,
                             "professional profile on LinkedIn. ... ",
                             " professional profile on LinkedIn. LinkedIn is the world's largest business network, ...",
                             "professional profile on LinkedIn.",
                             "LinkedIn is the world's largest business network, helping professionals like  discover ...",
                             "LinkedIn is the world's largest business network, helping professionals like  discover inside ...",
                             "professional profile on ... • ",
                             "professional ... ",
                             "...", "’s", "’S", "’"):
                    description = description.replace(junk, '')
                description = description.strip(' .•')
                entries_tuples.append((url, person.title(), description))

        except Exception:
            continue

    for urls in entries_tuples:
        if urls[1] not in seen:
            results.append(urls)
            seen.add(urls[1])

    info('LinkedIn Search Completed')
    q.put(sorted(results))
Example No. 27
def check_dom(domain, myResolver):
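    # Sanity-check the target domain via an NS lookup, exiting early on
    # NXDOMAIN, missing name servers, or a network timeout.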
    try:
        myAnswers = myResolver.query(domain, "NS")
        dom = str(myAnswers.canonical_name).strip('.')
        if dom:
            pass
    except dns.resolver.NoNameservers:
        print '\nError: \nDomain Not Valid, Check You Have Entered It Correctly\n'
        sys.exit()
    except dns.resolver.NXDOMAIN:
        print '\nError: \nDomain Not Valid, Check You Have Entered It Correctly\n'
        sys.exit()
    except dns.exception.Timeout:
        print '\nThe connection hit a timeout. Are you connected to the internet?\n'
        sys.exit()
    except Exception:
        info('An Unhandled Exception Has Occurred, Please Check The Log For Details' + INFO_LOG_FILE)
Example No. 29
def action_emailHunter(domain, api, user_agents, q, prox):
    info('Hunter Search Started')
    emails = []
    uas = user_agents
    ua = random.choice(uas)
    link = 'https://api.emailhunter.co/v1/search?domain={0}&api_key={1}'.format(domain,api)

    if prox == True:
        proxy = {'http' : 'http://127.0.0.1:8080'}
    else:
        pass
    try:
        headers = {"User-Agent" : ua,
                   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
                   'Accept-Language': 'en-US,en;q=0.5',
                   'Accept-Encoding': 'gzip, deflate'}
        if prox == True:
            response = requests.get(link, headers=headers, proxies=proxy, verify=False)
        else:
            response = requests.get(link, headers=headers, verify=False)
        if response.status_code == 200:
            json_data = response.json()
            for value in json_data['emails']:
                for source in value['sources']:
                    url = str(source['uri']).replace("u'","")
                    email = str(value['value']).replace("u'","")
                    emails.append((email,url))
        elif response.status_code == 401:
            json_data = response.json()
            if json_data['message'] =='Too many calls for this period.':
                print colored("\tError:\tIt seems the Hunter API key being used has reached\n\t\tit's limit for this month.", 'red')
                print colored('\tAPI Key: {}\n'.format(api),'red')
                q.put(None)
                return None
            if json_data['message'] == 'Invalid or missing api key.':
                print colored("\tError:\tIt seems the Hunter API key being used is no longer valid,\nit was probably deleted.", 'red')
                print colored('\tAPI Key: {}\n'.format(api),'red')
                print colored('\tWhy don\'t you grab yourself a new one (they are free)','green')
                print colored('\thttps://hunter.io/api_keys','green')
                q.put(None)
                return None
        else:
            info('No Response From Hunter')
            q.put(None)
    except UnboundLocalError,e:
        print e
Example No. 30
def doc_bing(domain, user_agents, prox, q):
    document_list = []
    uas = user_agents
    info('Bing Document Search Started')
    for start in range(1,300,10):
        ua = random.choice(uas)
        if prox == True:
            proxy = {'http' : 'http://127.0.0.1:8080'}
        else:
            pass
        try:
            headers = {"Connection" : "close",
                       "User-Agent" : ua,
                       'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
                       'Accept-Language': 'en-US,en;q=0.5',
                       'Accept-Encoding': 'gzip, deflate'}
            payload = { 'q': 'filetype:(doc dot docx docm dotx dotm docb xls xlt xlm xlsx xlsm xltx xltm xlsb xla xlam xll xlw ppt pot pps pptx pptm potx potm ppam ppsx ppsm sldx sldm pub pdf) site:{}'.format(domain), 'first': start}
            link = 'http://www.bing.com/search'
            if prox == True:
                response = requests.get(link, headers=headers, proxies=proxy, params=payload, verify=False)
            else:
                response = requests.get(link, headers=headers, params=payload, verify=False)

            soup = BeautifulSoup(response.text, "lxml")

            divs = soup.findAll('li', {'class': 'b_algo'})
            for div in divs:
                h2 = div.find('h2')
                document = h2.find('a', href=True)['href']
                document = urllib2.unquote(document)
                document_list.append(document)
        except TypeError:
            pass
        except requests.exceptions.ChunkedEncodingError:
            continue
        except Exception:
            traceback.print_exc()
            continue
    potential_docs = len(document_list)
    info('Bing Document Search Finished')
    q.put(document_list)
Example No. 31
def action_brute(subdomain):
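    # Thread-pool worker: resolve one candidate subdomain with the
    # shared resolver and record any answers in the global targets list.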
    global myResolverG
    try:
        myAnswers = myResolverG.query(subdomain)
        for data in myAnswers:
            targets.append(subdomain + ' ' + str(data))

    except dns.resolver.NoNameservers:
        pass
    except dns.resolver.NXDOMAIN:
        pass
    except dns.resolver.NoAnswer:
        pass
    except dns.exception.SyntaxError:
        pass
    except dns.exception.Timeout:
        info('Timeout: {}'.format(subdomain))
        pass
    except dns.resolver.Timeout:
        pass
    except Exception:
        info(
            'An Unhandled Exception Has Occurred, Please Check The Log For Details'
            + INFO_LOG_FILE)
        info(traceback.format_exc())
Example No. 32
def ms_doc(ms_file_list):
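    # Extract author/company/software metadata from legacy MS Office
    # files via olefile, collapsing spaced-out names like "J o h n".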
    software_list = []
    user_names = []
    info('Extracting MSDOCS MetaData')
    for filename in ms_file_list:
        try:
            data = olefile.OleFileIO(filename)
            meta = data.get_metadata()
            author = re.sub('[^0-9a-zA-Z]+', ' ', meta.author)
            company = re.sub('[^0-9a-zA-Z]+', ' ', meta.company)
            software = re.sub('[^0-9a-zA-Z]+', ' ', meta.creating_application)
            save_by = re.sub('[^0-9a-zA-Z]+', ' ', meta.last_saved_by)
            if author:
                oddity = re.match('(\s\w\s+(\w\s+)+\w)', author)
                if oddity:
                    oddity = str(oddity.group(1)).replace(' ', '')
                    user_names.append(str(oddity).title())
                else:
                    user_names.append(str(author).title())
            if software:
                oddity2 = re.match('(\s\w\s+(\w\s+)+\w)', software)
                if oddity2:
                    oddity2 = str(oddity2.group(1)).replace(' ', '')
                    software_list.append(oddity2)
                else:
                    software_list.append(software)

            if save_by:
                oddity3 = re.match('(\s\w\s+(\w\s+)+\w)', save_by)
                if oddity3:
                    oddity3 = str(oddity3.group(1)).replace(' ', '')
                    user_names.append(str(oddity3).title())
                else:
                    user_names.append(str(save_by).title())

        except Exception:
            pass
    info('Finished Extracting MSDOC MetaData')
    return (user_names, software_list)
Example No. 33
def doc_exalead(domain, user_agents, prox, q):
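    # Search Exalead for xls/doc/pdf/ppt files on the target domain,
    # pausing 10s between pages and stopping if a captcha is served.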
    document_list = []
    uas = user_agents
    info('Exalead Document Search Started')
    for start in range(0, 80, 10):
        ua = random.choice(uas)
        link = 'http://www.exalead.com/search/web/results/?search_language=&q=(filetype:xls+OR+filetype:doc+OR++filetype:pdf+OR+filetype:ppt)+site:{}&search_language=&elements_per_page=10&start_index={}'.format(
            domain, start)
        if prox == True:
            proxy = {'http': 'http://127.0.0.1:8080'}
        else:
            pass
        try:
            headers = {
                "Connection": "close",
                "User-Agent": ua,
                'Accept':
                'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
                'Accept-Language': 'en-US,en;q=0.5',
                'Accept-Encoding': 'gzip, deflate'
            }
            if prox == True:
                response = requests.get(link, headers=headers, proxies=proxy)
            else:
                response = requests.get(link, headers=headers)
            soup = BeautifulSoup(response.text, "lxml")
            if soup.find('label', {
                    'class': 'control-label',
                    'for': 'id_captcha'
            }):
                print colored("\tSo you don't like spinach?", "blue")
                print "\n\tCaptchas are preventing some document searches."
                break
            for div in soup.findAll('li', {'class': 'media'}):
                document = div.find('a', href=True)['href']
                document = urllib2.unquote(document)
                document_list.append(document)

        except Exception:
            info(
                'An Unhandled Exception Has Occurred, Please Check The Log For Details'
                + INFO_LOG_FILE)
            continue

        time.sleep(10)
    potential_docs = len(document_list)
    info('Exalead Document Search Finished')
    info('Potential Exalead Documents Found: {}'.format(potential_docs))
    q.put(document_list)
Example No. 34
def action_pwned(emails):
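    # Look up each harvested address in the HaveIBeenPwned v2 API and
    # collect breach details once per unique e-mail address.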
    info('Compromised Account Enumeration Search Started')
    pwend_data = []
    seen = set()
    for email in emails:
        link = 'https://haveibeenpwned.com/api/v2/breachedaccount/{}'.format(email)
        try:
            headers = {"Connection" : "close",
                       "User-Agent" : "Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident/6.0)",
                       'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
                       'Accept-Language': 'en-US,en;q=0.5',
                       'Accept-Encoding': 'gzip, deflate'}

            response = requests.get(link, headers=headers)
            json_data = response.json()
            if json_data:
                if email in seen:
                    pass
                else:
                    for item in json_data:
                        seen.add(email)
                        email_address = email
                        breach_domain = str(item['Domain']).replace("u'","")
                        breach_data = str(item['DataClasses']).replace("u'","'").replace('"','').replace('[','').replace(']','')
                        breach_date = str(item['BreachDate']).replace("u'","")
                        breach_added = str(item['AddedDate']).replace("u'","").replace('T',' ').replace('Z','')
                        breach_description = str(item['Description']).replace("u'","")
                        pwend_data.append((email_address, breach_domain, breach_data, breach_date, breach_added, breach_description))

        except ValueError:
            pass
        except Exception:
            error('An Unhandled Exception Has Occurred, Please Check The Log For Details\n' + ERROR_LOG_FILE, exc_info=True)

    info('Compromised Account Enumeration Search Completed')
    return pwend_data
Example No. 35
def pdf_read(pdf_file_list):
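    # Pull Creator/Author metadata out of downloaded PDFs with pdfminer,
    # normalising spaced-out names before recording them.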
    info('Extracting PDF MetaData')
    software_list = []
    user_names = []
    for filename in pdf_file_list:
        info(filename)
        try:

            fp = open(filename, 'rb')
            parser = PDFParser(fp)
            doc = PDFDocument(parser)
            software = re.sub('[^0-9a-zA-Z]+', ' ', doc.info[0]['Creator'])
            person = re.sub('[^0-9a-zA-Z]+', ' ', doc.info[0]['Author'])
            if person:
                oddity = re.match('(\s\w\s+(\w\s+)+\w)', person)
                if oddity:
                    oddity = str(oddity.group(1)).replace(' ', '')
                    user_names.append(str(oddity).title())
                else:
                    user_names.append(str(person).title())
            if software:
                oddity2 = re.match('(\s\w\s+(\w\s+)+\w)', software)
                if oddity2:
                    oddity2 = str(oddity2.group(1)).replace(' ', '')
                    software_list.append(oddity2)
                else:
                    software_list.append(software)
        except IndexError:
            continue
        except pdfminer.pdfparser.PDFSyntaxError:
            continue
        except KeyError:
            continue
        except TypeError:
            continue
        except Exception:
            info(
                'An Unhandled Exception Has Occurred, Please Check The Log For Details'
                + INFO_LOG_FILE)
            continue
    info('Finished Extracting PDF MetaData')
    return (user_names, software_list)
Example No. 36
def get_subs(filename, domain):
    info('Gathering SubDomains')
    full_list = []
    try:
        subs = [line.rstrip('\n') for line in open(filename)]
        for sub in subs:
            full_list.append(str(sub.lower() + "." + domain))
    except Exception:
        info('An Unhandled Exception Has Occurred, Please Check The Log For Details' + INFO_LOG_FILE)
        sys.exit()

    info('Completed Gathering SubDomains')
    return full_list
Example No. 37
def action_pwned(emails):
    info('Compromised Account Enumeration Search Started')
    pwend_data = []
    seen = set()
    for email in emails:
        link = 'https://haveibeenpwned.com/api/v2/breachedaccount/{}'.format(
            email)
        try:
            headers = {
                "Connection": "close",
                "User-Agent":
                "Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident/6.0)",
                'Accept':
                'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
                'Accept-Language': 'en-US,en;q=0.5',
                'Accept-Encoding': 'gzip, deflate'
            }

            response = requests.get(link, headers=headers, verify=False)
            json_data = response.json()
            if json_data:
                if email in seen:
                    pass
                else:
                    for item in json_data:
                        seen.add(email)
                        email_address = email
                        breach_domain = str(item['Domain']).replace("u'", "")
                        breach_data = str(item['DataClasses']).replace("u'", "'").replace('"', '').replace('[', '').replace(']', '')
                        breach_date = str(item['BreachDate']).replace("u'", "")
                        breach_added = str(item['AddedDate']).replace("u'", "").replace('T', ' ').replace('Z', '')
                        breach_description = str(item['Description']).replace("u'", "")
                        pwend_data.append((email_address, breach_domain, breach_data, breach_date, breach_added, breach_description))

        except ValueError:
            pass
        except Exception:
            info(
                'An Unhandled Exception Has Occurred, Please Check The Log For Details\n'
                + INFO_LOG_FILE,
                exc_info=True)

    info('Compromised Account Enumeration Search Completed')
    return pwend_data
Example No. 38
def get_sub_interest(filename, domain):
    info('Gathering SubDomains Of Interest')
    full_list = []
    try:
        subs = [line.rstrip('\n') for line in open(filename)]
        for sub in subs:
            full_list.append(str(sub.lower() + "." + domain))

    except Exception:
        info('An Unhandled Exception Has Occurred, Please Check The Log For Details' + INFO_LOG_FILE)
        sys.exit()

    info('Completed Gathering SubDomains Of Interest')
    return full_list
Example No. 39
def action_emailHunter(domain, api, user_agents, q, prox):
    info('Email Hunter Search Started')
    emails = []
    uas = user_agents
    ua = random.choice(uas)
    link = 'https://api.emailhunter.co/v1/search?domain={0}&api_key={1}'.format(
        domain, api)

    if prox == True:
        proxy = {'http': 'http://127.0.0.1:8080'}
    else:
        pass
    try:
        headers = {
            "Connection": "close",
            "User-Agent": ua,
            'Accept':
            'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
            'Accept-Language': 'en-US,en;q=0.5',
            'Accept-Encoding': 'gzip, deflate'
        }
        if prox == True:
            response = requests.get(link, headers=headers, proxies=proxy)
        else:
            response = requests.get(link, headers=headers)

        json_data = response.json()
        if json_data.get('message') == 'Too many calls for this period.':
            print colored(
                "\tError:\tIt seems the EmailHunter API key being used has reached\n\t\tit's limit for this month.",
                'red')
            print colored('\tAPI Key: {}\n'.format(api), 'red')
            q.put(None)
            return None
        for value in json_data['emails']:
            for source in value['sources']:
                url = str(source['uri']).replace("u'", "")
                email = str(value['value']).replace("u'", "")
                emails.append((email, url))
    except ValueError:
        pass
    except Exception:
        info(
            'An Unhandled Exception Has Occurred, Please Check The Log For Details\n'
            + INFO_LOG_FILE,
            exc_info=True)

    info('Email Hunter Search Completed')
    q.put(sorted(emails))
Example No. 40
def action_wild_cards(domain, myResolver):
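    # Detect wildcard DNS by resolving a random 15-character label; an
    # answer means wildcards are enabled (True), NXDOMAIN means not (False).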
    info('Checking Wild Cards\n')
    try:
        one = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(15))
        myAnswers = myResolver.query(str(one) + '.' + str(domain))

    except dns.resolver.NoNameservers:
        pass

    except dns.resolver.NoAnswer:
        pass

    except dns.resolver.NXDOMAIN:
        info('Wild Cards False\n')
        return False
    else:
        info('Wild Cards True\n')
        return True
Example No. 41
def action_wild_cards(domain, myResolver):
    info('Checking Wild Cards')
    try:
        one = ''.join(
            random.choice(string.ascii_uppercase + string.digits)
            for _ in range(15))
        myAnswers = myResolver.query(str(one) + '.' + str(domain))

    except dns.resolver.NoNameservers:
        pass

    except dns.resolver.NoAnswer:
        pass

    except dns.resolver.NXDOMAIN:
        info('Wild Cards False')
        return False
    else:
        info('Wild Cards True')
        return True
Example No. 42
def action_pwned(emails):
    info('Compromised Account Enumeration Search Started')
    pwend_data = []
    seen = set()
    for email in emails:
        time.sleep(3)
        link = 'https://haveibeenpwned.com/api/v3/breachedaccount/{}?truncateResponse=false'.format(email)
        try:
            headers = {"User-Agent" : "BlutoDNS v2.4.16",
                       'hibp-api-key': 'db192f959742455f98106687df692c68'}

            response = requests.get(link, headers=headers, verify=False)
            json_data = response.json()
            if json_data:
                if email in seen:
                    pass
                else:
                    for item in json_data:
                        breach_data = ' '
                        seen.add(email)
                        email_address = email
                        breach_domain = item['Domain'].encode('utf-8')
                        data = item['DataClasses']
                        for value in data:
                            breach_data = breach_data + value.encode('utf-8') + ', '
                        breach_data = breach_data.strip().rstrip(',')
                        breach_date = item['BreachDate'].encode('utf-8')
                        breach_added = item['AddedDate'].encode('utf-8')
                        breach_description = item['Description'].encode('utf-8')
                        pwend_data.append((email_address, breach_domain, breach_data, breach_date, breach_added, breach_description))

        except ValueError as e:
            pass
        except Exception:
            info('An Unhandled Exception Has Occurred, Please Check The Log For Details\n' + INFO_LOG_FILE, exc_info=True)

    info('Compromised Account Enumeration Search Completed')
    return pwend_data
Example No. 43
def action_brute(subdomain):
    global myResolverG
    try:
        myAnswers = myResolverG.query(subdomain)
        for data in myAnswers:
            targets.append(subdomain + ' ' + str(data))

    except dns.resolver.NoNameservers:
        pass
    except dns.resolver.NXDOMAIN:
        pass
    except dns.resolver.NoAnswer:
        pass
    except dns.exception.SyntaxError:
        pass
    except dns.exception.Timeout:
        info('Timeout: {}'.format(subdomain))
        pass
    except dns.resolver.Timeout:
        pass
    except Exception:
        info('An Unhandled Exception Has Occurred, Please Check The Log For Details' + INFO_LOG_FILE)
        info(traceback.format_exc())
Example No. 44
def action_download(doc_list, docs):
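    # Download each discovered document, naming files from the
    # Content-Disposition header when present, and report the totals.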
    info('Document Download Started')
    i = 0
    download_list = []
    initial_count = 0
    print 'Gathering Live Documents For Metadata Mining\n'
    headers = {
        'User-Agent':
        'Mozilla/5.0 (Windows; U; Windows NT 6.0; pl; rv:1.9.1.2) Gecko/20090729 Firefox/3.5.2 GTB7.1 ( .NET CLR 3.5.30729',
        'Referer': 'https://www.google.co.uk/',
        'Accept-Language': 'en-US,en;q=0.5',
        'Cache-Control': 'no-cache'
    }
    for doc in doc_list:
        doc = doc.replace(' ', '%20')
        try:
            r = requests.get(doc.encode('utf-8'),
                             headers=headers,
                             verify=False)
            if r.status_code == 404:
                r.raise_for_status()

            if r.status_code == 200:
                params = cgi.parse_header(
                    r.headers.get('Content-Disposition', ''))[-1]
                if 'filename' not in params:
                    filename = str(doc).replace('%20', ' ').split('/')[-1]
                    with open(docs + filename, "w") as code:
                        i += 1
                        code.write(r.content)
                        code.close()
                        initial_count += 1
                        print('\tDownload Count: {}\r'.format(
                            str(initial_count))),
                        info(str(doc).replace('%20', ' '))
                        download_list.append(str(doc).replace('%20', ' '))

                    continue
                else:
                    filename_t = re.search('filename="(.*)"',
                                           r.headers['content-disposition'])
                    filename = filename_t.group(1)

                    with open(docs + filename, "w") as code:
                        i += 1
                        code.write(r.content)
                        code.close()
                        initial_count += 1
                        print('\tDownload Count: {}\r'.format(
                            str(initial_count))),
                        download_list.append(str(doc).replace('%20', ' '))
                        info(str(doc).replace('%20', ' '))
                    continue

        except ValueError:
            info('No Filename in header')
            pass
        except AttributeError:
            pass
        except IOError:
            info('Not Found: {}'.format(str(doc).replace('%20', ' ')))
            pass
        except requests.exceptions.HTTPError:
            info('Error: File Not Found Server Side: HTTPError')
            pass
        except requests.exceptions.ConnectionError:
            info('Error: File Not Found Server Side: ConnectionError')
            pass
        except KeyError:
            pass
        except UnboundLocalError:
            pass
        except Exception:
            info(
                'An Unhandled Exception Has Occurred, Please Check The Log For Details\n'
                + INFO_LOG_FILE)
            info(str(doc).replace('%20', ' '))
            pass
    if i < 1:
        return download_list
    data_size = get_size(docs)
    print '\tData Downloaded: {}MB'.format(int(math.floor(data_size)))
    info('Documents Downloaded: {}'.format(initial_count))
    return download_list
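The download logic branches on whether the server names the file in its Content-Disposition header. A compact sketch of that filename selection using the Python 2 cgi module; the URLs and header value here are hypothetical:

import cgi

def pick_filename(url, content_disposition):
    # prefer the server-supplied name, fall back to the last URL segment
    _, params = cgi.parse_header(content_disposition or '')
    if 'filename' in params:
        return params['filename']
    return url.replace('%20', ' ').split('/')[-1]

print pick_filename('http://example.com/docs/q1%20report.pdf',
                    'attachment; filename="report.pdf"')    # report.pdf
print pick_filename('http://example.com/docs/q1%20report.pdf', '')  # q1 report.pdf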
Example #45
0
def doc_start(domain, USERAGENT_F, prox, q):
    ms_list_ext = ('.docx', '.pptx', '.xlsx', '.doc', '.xls', '.ppt')
    ms_file_list = []
    pdf_file_list = []
    info('Let The Hunt Begin')
    domain_r = domain.split('.')
    if not os.path.exists(
            os.path.expanduser('~/Bluto/doc/{}'.format(domain_r[0]))):
        os.makedirs(os.path.expanduser('~/Bluto/doc/{}'.format(domain_r[0])))

    location = os.path.expanduser('~/Bluto/doc/{}/'.format(domain_r[0]))
    info('Data Folder Created ' + location)
    docs = os.path.expanduser(location)
    doc_list = doc_search(domain, USERAGENT_F, prox)

    if doc_list == []:
        q.put(None)
        return
    doc_list = sorted(set(doc_list))
    download_list = action_download(doc_list, docs)
    download_count = len(download_list)

    for root, dirs, files in os.walk(docs):
        for filename in files:
            if str(filename).endswith(ms_list_ext):
                ms_file_list.append(os.path.join(root, filename))
            if str(filename).endswith('.pdf'):
                pdf_file_list.append(os.path.join(root, filename))

    if ms_file_list and pdf_file_list:
        user_names_ms, software_list_ms = ms_doc(ms_file_list)
        user_names_pdf, software_list_pdf = pdf_read(pdf_file_list)
        user_names_t = user_names_ms + user_names_pdf
        software_list_t = software_list_ms + software_list_pdf

    elif ms_file_list:
        user_names_ms, software_list_ms = ms_doc(ms_file_list)
        user_names_t = user_names_ms
        software_list_t = software_list_ms

    elif pdf_file_list:
        user_names_pdf, software_list_pdf = pdf_read(pdf_file_list)
        user_names_t = user_names_pdf
        software_list_t = software_list_pdf
    else:
        user_names_t = []
        software_list_t = []

    if user_names_t and software_list_t:
        user_names = sorted(set(user_names_t))
        software_list = sorted(set(software_list_t))
        info('The Hunt Ended')
        q.put((user_names, software_list, download_count, download_list))

    elif software_list_t:
        software_list = sorted(set(software_list_t))
        user_names = []
        info('The Hunt Ended')
        q.put((user_names, software_list, download_count, download_list))

    elif user_names_t:
        user_names = sorted(set(user_names_t))
        software_list = []
        info('The Hunt Ended')
        q.put((user_names, software_list, download_count, download_list))
    else:
        q.put(None)
Example #46
0
def write_html(email_evidence_results, linkedin_evidence_results, pwned_results, report_location, company, data_mine):
    info('Started HTML Report')
    if data_mine is not None:
        user_names = data_mine[0]
        software_list = data_mine[1]
        download_count = data_mine[2]
        download_list = data_mine[3]
        username_count = len(user_names)
        software_count = len(software_list)
    else:
        user_names = []
        software_list = []
        download_list = []
        download_count = 0
        username_count = 0
        software_count = 0
    header = '''
    <!DOCTYPE html>
    <html>
    <head>
    <style>
    table {{
        style="width:75%"
        border-collapse: separate;
        border-spacing: 5px;
        *border-collapse: expression('separate', cellSpacing = '10px');
    }}
    th {{
        text-align: left;
    }}
    header {{
        background-color:black;
        color:white;
        text-align:center;
        padding:5px;
    }}
    section {{
        width:75%;
        float:left;
        padding:10px;
    }}
    footer {{
        background-color:black;
        color:white;
        clear:both;
        text-align:center;
        padding:5px;
    }}
    div {{
        width: 100%;
    }}
    </style>
    </head>
    <body>

    <header>
    <h1>Bluto Evidence Report</h1>
    <h2>{a}</h2>
    </header>
    '''.format(a=company)
    footer = '''

        <footer>
            <p>Bluto</p>
            <p>Author: Darryl Lane</p>
            <p>Twitter: @darryllane101</p>
        </footer>
    </body>
    </html>
    '''

    emailDescription ='''

        <H2>Email Evidence:</H2>
        <th>
            <div>
                <p>
                 Email evidence includes the email address and the location where it was found, which allows for potential remediation.
                 If corporate emails are to be utilised in the public domain, it is recommended that they are generic in nature and are not able to
                 authenticate to any public corporate services such as VPN, or similar remote control services.

                 This data can also be used in further attack vectors such as potential targets for Social Engineering and Phishing attacks.
                </p>
            </div>
        </th>
    '''
    metaDescription ='''

            <H2>MetaData Evidence:</H2>
            <th>
                <div>
                    <p>
                     Various techniques were used to gather potentially useful information on the scoped domain. The consultant
                     identified multiple documents available for download from the scoped domain's website(s). These documents could hold potentially
                     sensitive data such as usernames, email addresses, folder structures, printers, operating system version information and
                     software version information. This information can prove very useful to an attacker pursuing vectors
                     such as Social Engineering and password attacks, and can expose further attack vectors.

                     It is recommended that all document metadata is sanitised before being published into the public domain.
                    </p>
                </div>
            </th>
        '''

    linkedinDescription ='''

            <H2>LinkedIn Evidence:</H2>
            <th>
                <div>
                    <p>
                     Staff names, job roles and associations can be gathered from social media sites such as LinkedIn. This information can be used
                     to attempt further information gathering via vectors such as Social Engineering techniques, phone attacks, and phishing attacks. This data can also be used to try to determine
                     further information such as potential email addresses.
                    </p>
                </div>
            </th>
        '''

    compromisedDescription ='''

                <H2>Compromised Account Evidence:</H2>
                <th>
                    <div>
                        <p>
                         This data was made publicly available due to a breach, meaning that these account passwords and any portals that are utilised by these accounts
                         could be compromised. It is recommended that all account passwords are modified and made to adhere to company policy.
                        </p>
                    </div>
                </th>
            '''

    try:
        with open(report_location, 'w') as myFile:
            myFile.write(header)
            myFile.write('<section>')
            if email_evidence_results:
                myFile.write(emailDescription)
                myFile.write('<table>')
                myFile.write('<tr>')
                myFile.write('<th>Email Address</th>')
                myFile.write('<th>URL Address</th>')
                myFile.write('</tr>')
                for email, url in email_evidence_results:
                    myFile.write('<tr>')
                    myFile.write('<td>{}</td>'.format(email))
                    myFile.write('<td>{}</td>'.format(url))
                    myFile.write('</tr>')
                myFile.write('</table>')
            if linkedin_evidence_results:
                myFile.write(linkedinDescription)
                for url, person, clean in linkedin_evidence_results:
                    myFile.write('<table style="width:60%">')
                    myFile.write('<p><tr><td><b>Person:</b> {}</td>'.format(person))
                    myFile.write('</tr>')
                    myFile.write('<tr><td><b>Role</b>: {}</td>'.format(clean))
                    myFile.write('</tr>')
                    myFile.write('<tr><td><b>Url</b>: {}</td>'.format(url))
                    myFile.write('</tr></p>')
                    myFile.write('</table>')
            if pwned_results:
                myFile.write(compromisedDescription)
                myFile.write('<table>')
                for result in pwned_results:
                    myFile.write('<p><tr><td>Email: {}</td>'.format(result[0]))
                    myFile.write('</tr>')
                    myFile.write('<tr><td>Domain: {}</td>'.format(result[1]))
                    myFile.write('</tr>')
                    myFile.write('<tr><td>Data: {}</td>'.format(result[2]))
                    myFile.write('</tr>')
                    myFile.write('<tr><td>Compromise Date: {}</td>'.format(result[3]))
                    myFile.write('</tr>')
                    myFile.write('<tr><td>Date Added: {}</td>'.format(result[4]))
                    myFile.write('</tr>')
                    myFile.write('<tr><td>Description: <p>{}</p></td>'.format(result[5]))
                    myFile.write('</tr></p>')
                myFile.write('</table>')
            if data_mine:
                myFile.write(metaDescription)
                myFile.write('<table>')
                myFile.write('<tr>')
                if software_count:
                    myFile.write('<th>Software Count</th>')
                if username_count:
                    myFile.write('<th>Username Count</th>')
                if download_count:
                    myFile.write('<th>Download Count</th>')
                myFile.write('</tr>')
                myFile.write('<tr>')
                if software_count:
                    myFile.write('<td>{}</td>'.format(software_count))
                if username_count:
                    myFile.write('<td>{}</td>'.format(username_count))
                if download_count:
                    myFile.write('<td>{}</td>'.format(download_count))
                myFile.write('</tr>')
                myFile.write('</table>')
                myFile.write('<table>')
                myFile.write('<br>')
            if user_names:
                myFile.write('<tr>')
                myFile.write('<th>Usernames</th>')
                myFile.write('</tr>')
                for username in user_names:
                    myFile.write('<tr>')
                    myFile.write('<td>{}</td>'.format(username))
                    myFile.write('</tr>')
                myFile.write('</table>')
                myFile.write('<table style="width:75">')
                myFile.write('<br>')
            if software_list:
                myFile.write('<tr>')
                myFile.write('<th>Software</th>')
                myFile.write('</tr>')
                for software in software_list:
                    myFile.write('<tr>')
                    myFile.write('<td>{}</td>'.format(software))
                    myFile.write('</tr>')
                myFile.write('</table>')
                myFile.write('<table style="width:100%">')
            if download_list:
                myFile.write('<tr>')
                myFile.write('<th>Document</th>')
                myFile.write('</tr>')
                for doc in download_list:
                    myFile.write('<tr>')
                    myFile.write('<td>{}</td>'.format(doc))
                    myFile.write('</tr>')
                myFile.write('</table>')
            myFile.write('</section>')
            myFile.write(footer)
            info('Completed HTML Report')
    except IOError as e:
        info('IOError', exc_info=True)
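write_html interpolates scraped values straight into the report markup. A cautious variant would escape them first; a minimal sketch with the Python 2 cgi module (the sample value is hypothetical):

import cgi

def cell(value):
    # neutralise <, >, & and quotes before the value reaches the report
    return '<td>{}</td>'.format(cgi.escape(str(value), quote=True))

print cell('Alice <alice@example.com>')
# <td>Alice &lt;alice@example.com&gt;</td>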
Example #47
0
                        pass
            time.sleep(3)
            for urls in entries_tuples:
                if urls[1] not in seen:
                    results.append(urls)
                    seen.add(urls[1])
        except requests.exceptions.HTTPError as e:
            if e.response.status_code == 503:
                print colored('Google is responding with a Captcha, other searches will continue\n', 'red')
                break
        except AttributeError:
            pass
        except Exception:
            error('An Unhandled Exception Has Occurred, Please Check The Log For Details\n' + ERROR_LOG_FILE, exc_info=True)

    info('Google Search Completed')
    q.put(sorted(results))


#Takes [list[tuples]]email~url #Returns [list[tuples]]email_address, url_found, breach_domain, breach_data, breach_date, /
#breach_added, breach_description
def action_pwned(emails):
    info('Compromised Account Enumeration Search Started')
    pwned_data = []
    seen = set()
    for email in emails:
        link = 'https://haveibeenpwned.com/api/v2/breachedaccount/{}'.format(email)
        try:
            headers = {"Connection" : "close",
                       "User-Agent" : "Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident/6.0)",
                       'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
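The fragment above opens the haveibeenpwned lookup. A stripped-down sketch of the request it is building, assuming the unauthenticated v2 endpoint that Bluto targeted (since retired in favour of a keyed v3 API; the User-Agent value is illustrative):

import requests

def pwned_lookup(email):
    link = 'https://haveibeenpwned.com/api/v2/breachedaccount/{}'.format(email)
    r = requests.get(link, headers={'User-Agent': 'Bluto'}, verify=False)
    if r.status_code == 200:
        # a list of breach dicts: Title, Domain, BreachDate, AddedDate, Description, ...
        return r.json()
    return []  # 404 means the address appears in no known breach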
Example #48
0
def action_country_id(countries_file, prox):
    info('Identifying Country')
    userCountry = ''
    userServer = ''
    userIP = ''
    userID = False
    o = 0
    tcountries_dic = {}
    country_list = []

    with open(countries_file) as fin:
        for line in fin:
            key, value = line.strip().split(';')
            tcountries_dic.update({key: value})

    countries_dic = dict((k.lower(), v.lower()) for k,v in tcountries_dic.iteritems())

    for country, server in countries_dic.items():
        country_list.append(country)

    country_list = [item.capitalize() for item in country_list]
    country_list.sort()

    while True:
        try:
            if prox == True:
                proxy = {'http' : 'http://127.0.0.1:8080'}
                r = requests.get(r'https://freegeoip.net/json/', proxies=proxy, verify=False)
                ip = r.json()['ip']
                originCountry = r.json()['country_name']

            else:
                r = requests.get(r'https://freegeoip.net/json/', verify=False)
                ip = r.json()['ip']
                originCountry = r.json()['country_name']

        except (ValueError, requests.exceptions.RequestException) as e:
            if o == 0:
                print colored('\nUnable to connect to the CountryID, we will retry.', 'red')
            if o > 0:
                print '\nThis is {} of 3 attempts' .format(o)
            time.sleep(2)
            o += 1
            if o == 4:
                break
            continue
        break

    if o == 4:
        print colored('\nWe have been unable to connect to the CountryID service.\n','red')
        print '\nPlease let Bluto know what country you hail from.\n'
        print colored('Available Countries:\n', 'green')

        if len(country_list) % 2 != 0:
            country_list.append(" ")

        split = len(country_list)/2
        l1 = country_list[0:split]
        l2 = country_list[split:]

        for key, value in zip(l1,l2):
            print "{0:<20s} {1}".format(key, value)

        country_list = [item.lower() for item in country_list]

        while True:
            originCountry = raw_input('\nCountry: ').lower()
            if originCountry in country_list:
                break
            if originCountry == '':
                print '\nYou have not selected a country so the default server will be used'
                originCountry = 'United Kingdom'.lower()
                break
            else:
                print '\nCheck your spelling and try again'

        for country, server in countries_dic.items():
            if country == originCountry:
                userCountry = country
                userServer = server
                userID = True

    else:

        for country, server in countries_dic.items():
            if country == originCountry.lower():
                userCountry = country
                userServer = server
                userID = True
        if userID == False:
            if default_s == True:
                userCountry = 'DEFAULT'
            else:
                print 'Bluto currently doesn\'t have your country\'s Google server available.\nPlease navigate to "https://freegeoip.net/json/" and post an issue to "https://github.com/darryllane/Bluto/issues"\nincluding the country value as shown in the json output.\nYou have been assigned to http://www.google.co.uk for now.'
                userServer = 'http://www.google.co.uk'
                userCountry = 'United Kingdom'

    print '\n\tSearching From: {0}\n\tGoogle Server: {1}\n' .format(userCountry.title(), userServer)
    info('Country Identified: {}'.format(userCountry))
    return (userCountry, userServer)
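The countries file is parsed as one country;server pair per line, lower-cased into a lookup dict. A hypothetical two-line file and the parse it receives:

# countries file (hypothetical contents):
#   United Kingdom;http://www.google.co.uk
#   Germany;http://www.google.de
line = 'United Kingdom;http://www.google.co.uk'
key, value = line.strip().split(';')
print key.lower(), '->', value.lower()   # united kingdom -> http://www.google.co.uk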
Example #49
0
def action_whois(domain):

    try:
        whois_things = pythonwhois.get_whois(domain)
        try:
            company = whois_things['contacts']['registrant']['name']
        except Exception:
            print '\nThere seems to be no Registrar for this domain.'
            company = domain
            pass
        splitup = company.lower().split()
        # escape each token so regex metacharacters in the name cannot break the search
        pattern = re.compile('|'.join(re.escape(word) for word in splitup))
        while True:
            if pattern.search(domain):
                info('Whois Results Are Good ' + company)
                print '\nThe Whois Results Look Promising: ' + colored('{}','green').format(company)
                accept = raw_input(colored('\nIs The Search Term sufficient?: ','green')).lower()
                if accept in ('y', 'yes'):
                    break
                elif accept in ('n', 'no'):
                    temp_company = raw_input(colored('\nRegistered Company Name: ','green'))
                    if temp_company == '':
                        info('User Supplied Blank Company')
                        company = domain
                        break
                    else:
                        company = temp_company
                        info('User Supplied Company ' + company)
                        break
                else:
                    print '\nThe Options Are yes|no Or y|n, Not {}'.format(accept)

            else:
                info('Whois Results Not Good ' + company)
                print colored("\n\tThe Whois Results Don't Look Very Promissing: '{}'","red") .format(company)
                print'\nPlease Supply The Company Name\n\n\tThis Will Be Used To Query LinkedIn'
                temp_company = raw_input(colored('\nRegistered Company Name: ','green'))
                if temp_company == '':
                    info('User Supplied Blank Company')
                    company = domain
                    break
                else:
                    company = temp_company
                    info('User Supplied Company ' + company)
                    break


    except pythonwhois.shared.WhoisException:
        pass
    except socket.error:
        pass
    except KeyError:
        pass
    except pythonwhois.net.socket.errno.ETIMEDOUT:
        print colored('\nWhoisError: You may be behind a proxy or firewall preventing whois lookups. Please supply the registered company name, if left blank the domain name ' + '"' + domain + '"' +' will be used for the Linkedin search. The results may not be as accurate.','red')
        temp_company = raw_input(colored('\nRegistered Company Name: ','green'))
        if temp_company == '':
            company = domain
        else:
            company = temp_company
    except Exception:
        info('An Unhandled Exception Has Occurred, Please Check The Log For Details\n' + INFO_LOG_FILE)
    if 'company' not in locals():
        print 'There is no Whois data for this domain.\n\nPlease supply a company name.'
        while True:
            temp_company = raw_input(colored('\nRegistered Company Name: ','green'))
            if temp_company == '':
                info('User Supplied Blank Company')
                company = domain
                break
            else:
                company = temp_company
                info('User Supplied Company ' + company)
                break

    return company
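At its core the function only needs the registrant's name out of the whois contacts. A guarded sketch of that lookup, assuming pythonwhois is installed; registrant_name is illustrative, not Bluto's own helper:

import pythonwhois

def registrant_name(domain):
    try:
        whois_things = pythonwhois.get_whois(domain)
        return whois_things['contacts']['registrant']['name']
    except (KeyError, TypeError, pythonwhois.shared.WhoisException):
        return domain   # fall back to the domain, as the interactive flow does

print registrant_name('example.com')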
Example #50
0
def action_zone_transfer(zn_list, domain):
    info('Attempting Zone Transfers')
    global clean_dump
    print "\nAttempting Zone Transfers"
    zn_list.sort()
    vuln = True
    vulnerable_listT = []
    vulnerable_listF = []
    dump_list = []
    for ns in zn_list:
        try:
            z = dns.zone.from_xfr(
                dns.query.xfr(ns, domain, timeout=3, lifetime=5))
            names = z.nodes.keys()
            names.sort()
            if vuln == True:
                info('Vuln: {}'.format(ns))
                vulnerable_listT.append(ns)

        except Exception as e:
            # bind str(e) to a name that does not shadow the error() log helper,
            # and match each message explicitly (a bare-string 'or' is always true)
            err_msg = str(e)
            if '[Errno -2] Name or service not known' in err_msg:
                pass
            elif ('[Errno 54] Connection reset by peer' in err_msg
                  or 'No answer or RRset not for qname' in err_msg):
                info('Not Vuln: {}'.format(ns))
                vuln = False
                vulnerable_listF.append(ns)
            else:
                info(
                    'An Unhandled Exception Has Occurred, Please Check The Log For Details\n'
                    + INFO_LOG_FILE,
                    exc_info=True)

    if vulnerable_listF:
        print "\nNot Vulnerable:\n"
        for ns in vulnerable_listF:
            print colored(ns, 'green')

    if vulnerable_listT:
        info('Vulnerable To Zone Transfers')
        print "\nVulnerable:\n"
        for ns in vulnerable_listT:
            print colored(ns, 'red'), colored("\t" + "TCP/53", 'red')

        z = dns.zone.from_xfr(
            dns.query.xfr(vulnerable_listT[0], domain, timeout=3, lifetime=5))
        names = z.nodes.keys()
        names.sort()
        print "\nRaw Zone Dump\n"
        for n in names:
            data1 = "{}.{}".format(n, domain)
            try:
                addr = socket.gethostbyname(data1)
                dump_list.append("{}.{} {}".format(n, domain, addr))

            except Exception as e:
                err_msg = str(e)
                if ('[Errno -5] No address associated with hostname' in err_msg
                        or '[Errno -2] Name or service not known' in err_msg):
                    pass
                else:
                    info(
                        'An Unhandled Exception Has Occurred, Please Check The Log For Details\n'
                        + INFO_LOG_FILE,
                        exc_info=True)

            print z[n].to_text(n)

    info('Completed Attempting Zone Transfers')
    clean_dump = sorted(set(dump_list))
    return (vulnerable_listT, clean_dump)
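The transfer test itself reduces to one call pair: stream an AXFR and try to build a zone from it. A minimal sketch, assuming dnspython 1.x; the host names are hypothetical:

import dns.query
import dns.zone

def can_zone_transfer(ns, domain):
    try:
        # a populated zone object means the server answered the AXFR
        z = dns.zone.from_xfr(dns.query.xfr(ns, domain, timeout=3, lifetime=5))
        return z is not None
    except Exception:
        return False   # refused, reset, or timed out: treat as not vulnerable

print can_zone_transfer('ns1.example.com', 'example.com')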
Example #51
0
def get_dns_details(domain, myResolver):
    info('Gathering DNS Details')
    ns_list = []
    zn_list = []
    mx_list = []
    try:
        print "\nName Server:\n"
        myAnswers = myResolver.query(domain, "NS")
        for data in myAnswers.rrset:
            data1 = str(data)
            data2 = data1.rstrip('.')
            addr = socket.gethostbyname(data2)
            ns_list.append(data2 + '\t' + addr)
            zn_list.append(data2)
        # de-duplicate and sort the name servers once the loop completes
        ns_list = sorted(set(ns_list))
        for i in ns_list:
            print colored(i, 'green')
    except dns.resolver.NoNameservers:
        info('\tNo Name Servers\nConfirm The Domain Name Is Correct.' +
             INFO_LOG_FILE,
             exc_info=True)
        sys.exit()
    except dns.resolver.NoAnswer:
        print "\tNo DNS Servers"
    except dns.resolver.NXDOMAIN:
        info("\tDomain Does Not Exist" + INFO_LOG_FILE, exc_info=True)
        sys.exit()
    except dns.resolver.Timeout:
        info('\tTimed Out\nConfirm The Domain Name Is Correct.' +
             INFO_LOG_FILE,
             exc_info=True)
        sys.exit()
    except Exception:
        info(
            'An Unhandled Exception Has Occurred, Please Check The Log For Details\n'
            + INFO_LOG_FILE,
            exc_info=True)

    try:
        print "\nMail Server:\n"
        myAnswers = myResolver.query(domain, "MX")
        for data in myAnswers:
            data1 = str(data)
            data2 = data1.split(' ', 1)[1].rstrip('.')
            addr = socket.gethostbyname(data2)
            mx_list.append(data2 + '\t' + addr)
        # de-duplicate and sort the mail servers once the loop completes
        mx_list = sorted(set(mx_list))
        for i in mx_list:
            print colored(i, 'green')
    except dns.resolver.NoAnswer:
        print "\tNo Mail Servers"
    except Exception:
        info(
            'An Unhandled Exception Has Occurred, Please Check The Log For Details\n'
            + INFO_LOG_FILE)

    info('Completed Gathering DNS Details')
    return zn_list
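get_dns_details expects an already-configured dnspython resolver. A hedged usage sketch; the nameserver choice and timeouts are illustrative:

import dns.resolver

myResolver = dns.resolver.Resolver()
myResolver.timeout = 3
myResolver.lifetime = 5
myResolver.nameservers = ['8.8.8.8']
zn_list = get_dns_details('example.com', myResolver)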
Example #52
0
def action_zone_transfer(zn_list, domain):
    info('Attempting Zone Transfers\n')
    global clean_dump
    print "\nAttempting Zone Transfers"
    zn_list.sort()
    vuln = True
    vulnerable_listT = []
    vulnerable_listF = []
    dump_list = []
    for ns in zn_list:
        try:
            z = dns.zone.from_xfr(dns.query.xfr(ns, domain))
            names = z.nodes.keys()
            names.sort()
            if vuln == True:
                vulnerable_listT.append(ns)

        except Exception as e:
            # bind str(e) to a name that does not shadow the error() logger,
            # which is still needed in the else branch below
            err_msg = str(e)
            if '[Errno -2] Name or service not known' in err_msg:
                pass
            elif ('[Errno 54] Connection reset by peer' in err_msg
                  or 'No answer or RRset not for qname' in err_msg):
                vuln = False
                vulnerable_listF.append(ns)
            else:
                error('An Unhandled Exception Has Occurred, Please Check The Log For Details\n' + ERROR_LOG_FILE, exc_info=True)


    if vulnerable_listF:
        print "\nNot Vulnerable:\n"
        for ns in vulnerable_listF:
            print colored(ns, 'green')

    if vulnerable_listT:
        print "\nVulnerable:\n"
        for ns in vulnerable_listT:
            print colored(ns,'red'), colored("\t" + "TCP/53", 'red')


        z = dns.zone.from_xfr(dns.query.xfr(vulnerable_listT[0], domain))
        names = z.nodes.keys()
        names.sort()
        print "\nRaw Zone Dump\n"
        for n in names:
            data1 = "{}.{}" .format(n,domain)
            try:
                addr = socket.gethostbyname(data1)
                dump_list.append("{}.{} {}" .format(n, domain, addr))

            except Exception as e:
                err_msg = str(e)
                if ('[Errno -5] No address associated with hostname' in err_msg
                        or '[Errno -2] Name or service not known' in err_msg):
                    pass
                else:
                    error('An Unhandled Exception Has Occurred, Please Check The Log For Details\n' + ERROR_LOG_FILE, exc_info=True)

            print z[n].to_text(n)

    info('Completed Attempting Zone Transfers\n')
    clean_dump = sorted(set(dump_list))
    return (vulnerable_listT, clean_dump)
Example #53
0
def action_country_id(countries_file, prox):
    info('Identifying Country')
    userCountry = ''
    userServer = ''
    userIP = ''
    userID = False
    o = 0
    tcountries_dic = {}
    country_list = []

    with open(countries_file) as fin:
        for line in fin:
            key, value = line.strip().split(';')
            tcountries_dic.update({key: value})

    countries_dic = dict((k.lower(), v.lower()) for k,v in tcountries_dic.iteritems())

    for country, server in countries_dic.items():
        country_list.append(country)

    country_list = [item.capitalize() for item in country_list]
    country_list.sort()

    while True:
        try:
            if prox == True:
                proxy = {'https' : 'http://127.0.0.1:8080'}
                r = requests.get(r'http://api.ipstack.com/check?access_key=dd763372274e9ae8aed34a55a7a4b36a', proxies=proxy, verify=False)
                ip = r.json()['ip']
                originCountry = r.json()['country_name']

            else:
                r = requests.get(r'http://api.ipstack.com/check?access_key=dd763372274e9ae8aed34a55a7a4b36a', verify=False)
                ip = r.json()['ip']
                originCountry = r.json()['country_name']

        except (ValueError, requests.exceptions.RequestException) as e:
            if o == 0:
                print colored('\nUnable to connect to the CountryID, we will retry.', 'red')
            if o > 0:
                print '\nThis is {} of 3 attempts' .format(o)
            time.sleep(2)
            o += 1
            if o == 4:
                break
            continue
        break

    if o == 4:
        print colored('\nWe have been unable to connect to the CountryID service.\n','red')
        print '\nPlease let Bluto know what country you hail from.\n'
        print colored('Available Countries:\n', 'green')

        if len(country_list) % 2 != 0:
            country_list.append(" ")

        split = len(country_list)/2
        l1 = country_list[0:split]
        l2 = country_list[split:]

        for key, value in zip(l1,l2):
            print "{0:<20s} {1}".format(key, value)

        country_list = [item.lower() for item in country_list]

        while True:
            originCountry = raw_input('\nCountry: ').lower()
            if originCountry in country_list:
                break
            if originCountry == '':
                print '\nYou have not selected a country so the default server will be used'
                originCountry = 'United Kingdom'.lower()
                break
            else:
                print '\nCheck your spelling and try again'

        for country, server in countries_dic.items():
            if country == originCountry:
                userCountry = country
                userServer = server
                userID = True

    else:

        for country, server in countries_dic.items():
            if country == originCountry.lower():
                userCountry = country
                userServer = server
                userID = True
        if userID == False:
            if default_s == True:
                userCountry = 'DEFAULT'
            else:
                print 'Bluto currently doesn\'t have your country\'s Google server available.\nPlease navigate to "https://freegeoip.net/json/" and post an issue to "https://github.com/darryllane/Bluto/issues"\nincluding the country value as shown in the json output.\nYou have been assigned to http://www.google.co.uk for now.'
                userServer = 'http://www.google.co.uk'
                userCountry = 'United Kingdom'

    print '\n\tSearching From: {0}\n\tGoogle Server: {1}\n' .format(userCountry.title(), userServer)
    info('Country Identified: {}'.format(userCountry))
    return (userCountry, userServer)
Example #54
0
def doc_exalead(domain, user_agents, prox, q):
    document_list = []
    uas = user_agents
    info('Exalead Document Search Started')
    for start in range(0,80,10):
        ua = random.choice(uas)
        link = 'http://www.exalead.com/search/web/results/?search_language=&q=(filetype:xls+OR+filetype:doc+OR++filetype:pdf+OR+filetype:ppt)+site:{}&search_language=&elements_per_page=10&start_index={}'.format(domain, start)
        if prox == True:
            proxy = {'http' : 'http://127.0.0.1:8080'}
        else:
            pass
        try:
            headers = {"Connection" : "close",
                       "User-Agent" : ua,
                       'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
                       'Accept-Language': 'en-US,en;q=0.5',
                       'Accept-Encoding': 'gzip, deflate'}
            if prox == True:
                response = requests.get(link, headers=headers, proxies=proxy, verify=False)
            else:
                response = requests.get(link, headers=headers, verify=False)
            soup = BeautifulSoup(response.text, "lxml")
            if soup.find('label', {'class': 'control-label', 'for': 'id_captcha'}):
                info("So you don't like spinach?")
                info("Captchas are preventing some document searches.")
                break
            for div in soup.findAll('li', {'class': 'media'}):
                document = div.find('a', href=True)['href']
                document = urllib2.unquote(document)
                document_list.append(document)

        except Exception:
            info('An Unhandled Exception Has Occurred, Please Check The Log For Details\n' + INFO_LOG_FILE)
            continue

        time.sleep(10)
    potential_docs = len(document_list)
    info('Exalead Document Search Finished')
    info('Potential Exalead Documents Found: {}'.format(potential_docs))
    q.put(document_list)
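The captcha check keys on a label tied to id_captcha in Exalead's interstitial page. A self-contained sketch of that detection against a hypothetical minimal page:

from bs4 import BeautifulSoup

html = '<label class="control-label" for="id_captcha">Type the words</label>'
soup = BeautifulSoup(html, 'lxml')
# truthy when the captcha form is present, so the scrape can stop early
print bool(soup.find('label', {'class': 'control-label', 'for': 'id_captcha'}))   # True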
Example #55
0
def action_output_wild_false_hunter(brute_results_dict, sub_intrest, google_results, bing_true_results, linkedin_results, check_count, domain, time_spent_email, time_spent_brute, time_spent_total, emailHunter_results, args, report_location, company, data_mine):
    info('Output action_output_wild_false_hunter: Start')
    linkedin_evidence_results = []
    email_evidence_results = []
    email_results = []
    email_seen = []
    url_seen = []
    person_seen = []
    final_emails = []

    if emailHunter_results is not None:
        for email in emailHunter_results:
            email_results.append(email[0])
            email_evidence_results.append((email[0],email[1]))

    for email, url in google_results:
        try:
            e1, e2 = email.split(',')
            if url not in email_seen:
                email_seen.append(url)
                email_evidence_results.append((str(e2).replace(' ',''),url))
                email_evidence_results.append((str(e1).replace(' ',''),url))
                email_results.append((str(e2).replace(' ','')))
                email_results.append((str(e1).replace(' ','')))

        except ValueError:
            if url not in email_seen:
                email_seen.append(url)
                email_evidence_results.append((str(email).replace(' ',''),url))
                email_results.append(str(email).replace(' ',''))

    for e, u in bing_true_results:
        email_results.append(e)
        if u not in url_seen:
            url_seen.append(u)
            email_evidence_results.append((e, u))

    for url, person, description in linkedin_results:
        if person not in person_seen:
            person_seen.append(person)
            linkedin_evidence_results.append((url, person, description))

    linkedin_evidence_results.sort(key=lambda tup: tup[1])
    sorted_email = sorted(set(email_results))
    for email in sorted_email:
        if email == '[]':
            pass
        elif email == '@' + domain:
            pass
        else:
            final_emails.append(email)
    email_count = len(final_emails)
    staff_count = len(person_seen)
    f_emails = sorted(final_emails)
    pwned_results = action_pwned(f_emails)
    c_accounts = len(pwned_results)

    print '\n\nEmail Addresses:\n'
    write_html(email_evidence_results, linkedin_evidence_results, pwned_results, report_location, company, data_mine)
    if f_emails:

        for email in f_emails:

            print '\t' + str(email).replace("u'","").replace("'","").replace('[','').replace(']','')
    else:
        print '\tNo Data To Be Found'

    print '\nCompromised Accounts:\n'
    if pwned_results:
        sorted_pwned = sorted(pwned_results)
        for account in sorted_pwned:
            print 'Account: \t{}'.format(account[0])
            print ' Domain: \t{}'.format(account[1])
            print '   Date: \t{}\n'.format(account[3])
    else:
        print '\tNo Data To Be Found'

    print '\nLinkedIn Results:\n'

    sorted_person = sorted(person_seen)
    if sorted_person:
        for person in sorted_person:
            print person
    else:
        print '\tNo Data To Be Found'

    if data_mine is not None:
        user_names = data_mine[0]
        software_list = data_mine[1]
        download_count = data_mine[2]
        download_list = data_mine[3]
        username_count = len(user_names)
        software_count = len(software_list)

        print '\nData Found In Document MetaData'
        print '\nPotential Usernames:\n'
        if user_names:
            for user in user_names:
                print '\t' + colored(user, 'red')
        else:
            print '\tNo Data To Be Found'

        print '\nSoftware And Versions Found:\n'
        if software_list:
            for software in software_list:
                print '\t' + colored(software, 'red')
        else:
            print '\tNo Data To Be Found'
    else:
        user_names = []
        software_list = []
        download_count = 0
        username_count = len(user_names)
        software_count = len(software_list)

    sorted_dict = collections.OrderedDict(sorted(brute_results_dict.items()))
    bruted_count = len(sorted_dict)
    print "\nBluto Results: \n"
    for item in sorted_dict:
        if item != '*.' + domain:
            if item != '@.' + domain:
                if item in sub_intrest:
                    print colored(item + "\t", 'red'), colored(sorted_dict[item], 'red')
                else:
                    print item + "\t",sorted_dict[item]

    time_spent_email_f = str(datetime.timedelta(seconds=(time_spent_email))).split('.')[0]
    time_spent_brute_f = str(datetime.timedelta(seconds=(time_spent_brute))).split('.')[0]
    time_spent_total_f = str(datetime.timedelta(seconds=(time_spent_total))).split('.')[0]

    print '\nHosts Identified: {}' .format(str(bruted_count))
    print 'Potential Emails Found: {}' .format(str(email_count))
    print 'Potential Staff Members Found: {}' .format(str(staff_count))
    print 'Compromised Accounts: {}' .format(str(c_accounts))
    print 'Potential Usernames Found: {}'.format(username_count)
    print 'Potential Software Found: {}'.format(software_count)
    print 'Documents Downloaded: {}'.format(download_count)
    print "Email Enumeration:", time_spent_email_f
    print "Requests executed:", str(check_count) + " in ", time_spent_brute_f
    print "Total Time:", time_spent_total_f

    info('Hosts Identified: {}' .format(str(bruted_count)))
    info("Email Enumeration: {}" .format(str(time_spent_email_f)))
    info('Compromised Accounts: {}' .format(str(c_accounts)))
    info('Potential Staff Members Found: {}' .format(str(staff_count)))
    info('Potential Emails Found: {}' .format(str(email_count)))
    info("Total Time:" .format(str(time_spent_total_f)))
    info('Documents Downloaded: {}'.format(download_count))
    info('DNS No Wild Cards + Email Hunter Run completed')
    info('Output action_output_wild_false_hunter: Completed')

    domain_r = domain.split('.')
    docs = os.path.expanduser('~/Bluto/doc/{}/'.format(domain_r[0]))
    answers = ['no','n','y','yes']
    while True:
        print colored("\nWould you like to keep all local data?\n(Local Logs, Downloded Documents, HTML Evidence Report)\n\nYes|No:", "red")
        answer = raw_input("").lower()
        if answer in answers:
            if answer == 'y' or answer == 'yes':
                print '\nThe documents are located here: {}'.format(docs)
                print 'The logs are located here: {}.'.format(LOG_DIR)
                print "\nAn evidence report has been written to {}\n".format(report_location)
                while True:
                    answer = raw_input("Would you like to open this report now? ").lower()
                    if answer in answers:
                        if answer == 'y' or answer == 'yes':
                            print '\nOpening {}' .format(report_location)
                            webbrowser.open('file://' + str(report_location))
                            break
                        else:
                            break
                    else:
                        print 'Your answer needs to be either yes|y|no|n rather than {}' .format(answer)
                break
            else:
                shutil.rmtree(docs)
                shutil.rmtree(LOG_DIR)
                os.remove(report_location)
                break
        else:
            print '\tYour answer needs to be either yes|y|no|n rather than {}' .format(answer)
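The human-readable durations in the summary come from feeding raw seconds to datetime.timedelta and trimming the microseconds at the first dot; a worked example with a hypothetical elapsed time:

import datetime

time_spent = 4000.25   # seconds, hypothetical
print str(datetime.timedelta(seconds=time_spent)).split('.')[0]   # 1:06:40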
Example #56
0
                                    entries_tuples.append((email.lower(),str(url).replace("u'",'').replace("'","")))
                    except Exception:
                        pass
            time.sleep(3)
            for urls in entries_tuples:
                if urls[1] not in seen:
                    results.append(urls)
                    seen.add(urls[1])
        except requests.exceptions.HTTPError as e:
            if e.response.status_code == 503:
                info('Google is responding with a Captcha, other searches will continue')
                break
        except AttributeError:
            pass
        except Exception:
            info('An Unhandled Exception Has Occurred, Please Check The Log For Details\n' + INFO_LOG_FILE)

    info('Google Search Completed')
    q.put(sorted(results))


#Takes [list[tuples]]email~url #Returns [list[tuples]]email_address, url_found, breach_domain, breach_data, breach_date, /
#breach_added, breach_description
def action_pwned(emails):
    info('Compromised Account Enumeration Search Started')
    pwned_data = []
    seen = set()
    for email in emails:
        link = 'https://haveibeenpwned.com/api/v2/breachedaccount/{}'.format(email)
        try:
            headers = {"Connection" : "close",
Example #57
0
def action_whois(domain):
    try:
        whois_things = pythonwhois.get_whois(domain)
        try:
            company = whois_things['contacts']['registrant']['name']
        except Exception:
            print '\nThere seems to be no Registrar for this domain.'
            company = domain
            pass
        splitup = company.lower().split()
        # escape each token so regex metacharacters in the name cannot break the search
        pattern = re.compile('|'.join(re.escape(word) for word in splitup))
        while True:
            if pattern.search(domain):
                info('Whois Results Are Good ' + company)
                print '\nThe Whois Results Look Promising: ' + colored('{}','green').format(company)
                accept = raw_input(colored('\nIs The Search Term sufficient?: ','green')).lower()
                if accept in ('y', 'yes'):
                    break
                elif accept in ('n', 'no'):
                    temp_company = raw_input(colored('\nRegistered Company Name: ','green'))
                    if temp_company == '':
                        info('User Supplied Blank Company')
                        company = domain
                        break
                    else:
                        company = temp_company
                        info('User Supplied Company ' + company)
                        break
                else:
                    print '\nThe Options Are yes|no Or y|n, Not {}'.format(accept)

            else:
                info('Whois Results Not Good ' + company)
                print colored("\n\tThe Whois Results Don't Look Very Promissing: '{}'","red") .format(company)
                print'\nPlease Supply The Company Name\n\n\tThis Will Be Used To Query LinkedIn'
                temp_company = raw_input(colored('\nRegistered Company Name: ','green'))
                if temp_company == '':
                    info('User Supplied Blank Company')
                    company = domain
                    break
                else:
                    company = temp_company
                    info('User Supplied Company ' + company)
                    break

    except pythonwhois.shared.WhoisException:
        pass
    except socket.error:
        pass
    except KeyError:
        pass
    except pythonwhois.net.socket.errno.ETIMEDOUT:
        print colored('\nWhoisError: You may be behind a proxy or firewall preventing whois lookups. Please supply the registered company name, if left blank the domain name ' + '"' + domain + '"' +' will be used for the Linkedin search. The results may not be as accurate.','red')
        temp_company = raw_input(colored('\nRegistered Company Name: ','green'))
        if temp_company == '':
            company = domain
        else:
            company = temp_company
    except Exception:
        info('An Unhandled Exception Has Occurred, Please Check The Log For Details\n' + INFO_LOG_FILE)
    if 'company' not in locals():
        print 'There is no Whois data for this domain.\n\nPlease supply a company name.'
        while True:
            temp_company = raw_input(colored('\nRegistered Company Name: ','green'))
            if temp_company == '':
                info('User Supplied Blank Company')
                company = domain
                break
            else:
                company = temp_company
                info('User Supplied Company ' + company)
                break

    return company
Example #58
0
            time.sleep(3)
            for urls in entries_tuples:
                if urls[1] not in seen:
                    results.append(urls)
                    seen.add(urls[1])
        except requests.exceptions.HTTPError as e:
            if e.response.status_code == 503:
                info(
                    'Google is responding with a Captcha, other searches will continue'
                )
                break
        except AttributeError:
            pass
        except Exception:
            info(
                'An Unhandled Exception Has Occurred, Please Check The Log For Details\n'
                + INFO_LOG_FILE)

    info('Google Search Completed')
    q.put(sorted(results))


#Takes [list[tuples]]email~url #Returns [list[tuples]]email_address, url_found, breach_domain, breach_data, breach_date, /
#breach_added, breach_description
def action_pwned(emails):
    info('Compromised Account Enumeration Search Started')
    pwned_data = []
    seen = set()
    for email in emails:
        link = 'https://haveibeenpwned.com/api/v2/breachedaccount/{}'.format(
            email)