Example #1
def get_bufferoverrun_data(domain_data):
	log_time = strftime("%d/%m/%y %H:%M:%S", gmtime())
	subdomains = []
	source = domain_data[0]
	json_blob = domain_data[1]

	try:
		error = json_blob['Meta']['Errors'][1]
		if 'output limit reached!' in error:
			logger.red('Maximum request reached for bufferover.run')
			return None
	except (KeyError, IndexError):
		pass

	list_blob = list(json_blob['FDNS_A'])
	for i in list_blob:
		if ',' in i:
			# Each FDNS_A entry is 'ip,hostname'; keep both halves if unseen.
			for part in i.split(',', 1):
				if part not in subdomains:
					subdomains.append(part)
		else:
			if i not in subdomains and args.domain in i:
				subdomains.append(i)

	domain = domain_structure.Domain(get_uid(), source, log_time, subdomains)
	return domain
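The list-membership checks above make the dedupe quadratic on large FDNS dumps. A minimal sketch of the same split-and-collect logic built on a set (parse_fdns is a hypothetical helper; unlike the original, it applies the target-domain filter to the comma-separated entries too):

def parse_fdns(json_blob, target_domain):
    # Hypothetical helper: each FDNS_A entry is 'ip,hostname'.
    seen = set()  # set membership is O(1), unlike list membership
    for entry in json_blob.get('FDNS_A', []):
        for part in entry.split(',', 1):
            if target_domain in part:
                seen.add(part)
    return sorted(seen)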
Example #2
    def search(self, domain, wildcard=True):
        base_url = "https://crt.sh/?q={}&output=json"
        if wildcard:
            domain = "%25.{}".format(domain)
        url = base_url.format(domain)

        user_agent = 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:40.0) Gecko/20100101 Firefox/40.1'
        try:
            response = requests.get(url, headers={'User-Agent': user_agent})
        except Exception as e:
            logger.red('Got [%s] whilst requesting %s' %
                       (logger.RED(str(e)), logger.RED(url)))
            return None

        if response.ok:
            logger.green(
                'Got [%s] from %s' %
                (logger.GREEN(response.status_code), logger.GREEN(url)))
            content = response.content.decode('utf-8')
            try:
                data = json.loads(content)
                return ('crtsh', data)
            except Exception as e:
                logger.red('Got [%s] whilst loading data from %s' %
                           (logger.RED(str(e)), logger.RED(url)))
                return None
        else:
            return None
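On success the method returns a ('crtsh', data) tuple, and None on any failure, so a caller unpacks the source label and the JSON together. A hypothetical usage sketch (crtsh.api() is the wrapper class seen in do_crtsh() further below):

result = crtsh.api().search('example.com')
if result is not None:
    source, data = result  # 'crtsh' and the parsed JSON records
    print('%s returned %d certificate records' % (source, len(data)))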
Example #3
def emails(name_data, email_format, domain):
    firstname = name_data[0]
    middlename = name_data[1]
    surname = name_data[2]

    if surname == '':
        # Without a surname none of the schemes below can apply, so fall back early.
        return firstname + '@' + domain

    if 'firstname.surname' in email_format:
        email = firstname_dot_surname(firstname, surname, domain)
    elif 'f.surname' in email_format:
        email = f_dot_surname(firstname, surname, domain)
    elif 'firstnamesurname' in email_format:
        email = firstnamesurname(firstname, surname, domain)
    elif 'fsurname' in email_format:
        email = fsurname(firstname, surname, domain)

    elif 'surname.firstname' in email_format:
        email = surname_dot_firstname(firstname, surname, domain)
    elif 's.firstname' in email_format:
        email = s_dot_firstname(firstname, surname, domain)
    elif 'surnamefirstname' in email_format:
        email = surnamefirstname(firstname, surname, domain)
    elif 'sfirstname' in email_format:
        email = sfirstname(firstname, surname, domain)
    elif 'firstname.msurname' in email_format:
        email = firstname_mdotsurname(firstname, middlename, surname, domain)
    else:
        logger.red('Unknown email scheme specified.')
        quit()

    return email
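Every branch maps a scheme substring to a helper with the same (firstname, surname, domain) signature, so a dict dispatch could replace the elif ladder; only the firstname.msurname case differs, since it also takes the middle name. A sketch assuming the same helper functions exist:

SCHEMES = {
    'firstname.surname': firstname_dot_surname,
    'f.surname': f_dot_surname,
    'firstnamesurname': firstnamesurname,
    'fsurname': fsurname,
    'surname.firstname': surname_dot_firstname,
    's.firstname': s_dot_firstname,
    'surnamefirstname': surnamefirstname,
    'sfirstname': sfirstname,
}

def build_email(firstname, surname, domain, email_format):
    # Return the first scheme whose key appears in email_format.
    for key, func in SCHEMES.items():
        if key in email_format:
            return func(firstname, surname, domain)
    logger.red('Unknown email scheme specified.')
    quit()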
Example #4
def results_parse(host_object, scan_type):
    data = {}
    # ip, name, shares, null_sessions
    if scan_type is None:
        for host in host_object:
            append_me = []
            append_me.extend((host.name, host.domain, host.shares, host.null_sessions))
            data[host.ip] = append_me
            logger.results_parse(host.ip, host.name, host.domain, host.shares, host.null_sessions, scan_type)
    elif scan_type.lower() == 'null':
        for host in host_object:
            append_me = []
            append_me.extend((host.name, host.domain, host.null_sessions))
            data[host.ip] = append_me
            logger.results_parse(host.ip, host.name, host.domain, None, host.null_sessions, scan_type)
    elif scan_type.lower() == 'shares':
        for host in host_object:
            append_me = []
            append_me.extend((host.name, host.domain, host.shares))
            data[host.ip] = append_me
            logger.results_parse(host.ip, host.name, host.domain, host.shares, None, scan_type)

    # This data might be useful at some point. It's a dictionary of all the data collected.
    if not data:
        logger.red('Dang, no data was found. Make sure the targets are correct!')
Example #5
def get_company_profile(cookie, company_id, keyword):
	if keyword is None:
		url = 'https://www.linkedin.com/voyager/api/search/cluster?count=40&guides=List(v->PEOPLE,facetCurrentCompany->%s)&origin=OTHER&q=guided&start=0' % company_id
	else:
		url = "https://www.linkedin.com/voyager/api/search/cluster?count=40&guides=List(v->PEOPLE,facetCurrentCompany->%s)&keywords=%s&origin=OTHER&q=guided&start=0" % (company_id, keyword)
	data = http.connect(url, cookie)
	if data is None:
		logger.red('Unable to authenticate to LinkedIn')
		quit()
	return data.text
Example #6
def run(data):
    cookie = data.cookie
    company_id = data.company_id
    email_format = data.email_format
    keyword = data.keyword
    domain = data.domain
    validation = data.validation
    api_key = data.api_key
    filename = data.filename
    valid_emails_only = data.valid_emails_only

    logger.debug(str(vars(data)))

    profiles = linkedin_scraper.company_profile(cookie, company_id, keyword)
    if profiles is None:
        logger.red('Unable to extract data from LinkedIn')
        quit()
    company_profile_json = json.loads(profiles)

    try:
        total_employees = company_profile_json['elements'][0]['total']
    except (KeyError, IndexError):
        logger.red('Failed to extract users, try generalising the keywords')
        logger.red('If this problem persists, create an issue on GitHub!')
        quit()
    per_page = 40  # Each response contains 40 profiles per page.
    pages = int(
        total_employees / per_page
    )  # Divide the number of users by 40 to get the number of pages.
    logger.debug('Per page: %s' % per_page)
    if total_employees < per_page:
        logger.debug('Setting pages to 1')
        pages = 1
    logger.blue('Identified %s page(s)' % logger.BLUE(pages))
    logger.blue('Identified %s result(s)' % logger.BLUE(total_employees))

    if pages == 0:
        logger.red('Could not identify pages')
        quit()

    if total_employees > 1000:
        logger.red('This method of enumeration can only extract 1000 users')
        sleep(3)

    users = linkedin_scraper.get_users(data, pages, total_employees, keyword)
    job_role_count = role_occurrence.count(users, total_employees)

    if valid_emails_only:
        logger.valid_emails_only(users, filename)

    else:
        logger.dump(users, validation)
        logger.write_out(users, data, job_role_count, filename)

    return users
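int(total_employees / per_page) truncates, which undercounts whenever the last page is partial (95 employees at 40 per page is three pages, not two), and the < per_page special case only patches the smallest inputs. Ceiling division covers both in one expression; a sketch:

import math

def page_count(total_employees, per_page=40):
    # 95 -> 3 pages, 40 -> 1 page, 0 -> floor of 1 page
    return max(1, math.ceil(total_employees / per_page))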
Example #7
def insert(domain_data):
    db = init()
    data = vars(domain_data)
    logger.yellow('Adding %s to %s' %
                  (logger.YELLOW(str(data)), logger.YELLOW(db_name)))
    try:
        db.insert(data)
    except Exception as e:
        logger.red('Got [%s] whilst adding %s to %s' %
                   (logger.RED(str(e)), logger.RED(data), logger.RED(db_name)))
        return None  # this return code won't be checked anywhere, I just don't like leaving unclosed functions :)
Example #8
def company_profile(cookie, company_id, keyword):

	# This function requests the company's profile and returns the data
	if keyword is None:
		url = 'https://www.linkedin.com/voyager/api/search/cluster?count=40&guides=List(v->PEOPLE,facetCurrentCompany->%s)&origin=OTHER&q=guided&start=0' % company_id
	else:
		url = "https://www.linkedin.com/voyager/api/search/cluster?count=40&guides=List(v->PEOPLE,facetCurrentCompany->%s)&keywords=%s&origin=OTHER&q=guided&start=0" % (company_id, keyword)
	logger.debug('Requesting %s from company_profile()' % url)
	data = http.connect(url, cookie)
	if data is None:
		logger.red('Unable to authenticate to LinkedIn')
		quit()
	return data.text
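The keyword is interpolated into the query string raw, so a multi-word keyword (or one containing '&') would corrupt the request. If that matters, the standard library can percent-encode it first; a sketch with a placeholder company id:

from urllib.parse import quote

keyword = 'security engineer'
url = ('https://www.linkedin.com/voyager/api/search/cluster?count=40'
       '&guides=List(v->PEOPLE,facetCurrentCompany->%s)'
       '&keywords=%s&origin=OTHER&q=guided&start=0' % ('1337', quote(keyword)))
# quote() turns 'security engineer' into 'security%20engineer'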
Example #9
def validate(email, api_key):
    url = 'https://api.hunter.io/v2/email-verifier?email=%s&api_key=%s' % (
        email, api_key)
    try:
        r = requests.get(url)
        status_code = r.status_code
    except Exception as e:
        logger.red('Unable to get %s' % url)
        quit()

    try:
        data = json.loads(r.content)
    except Exception as e:
        logger.red('Failed to load JSON from requests')
        quit()

    if status_code in (429, 401):
        try:
            result = data['errors'][0]['details']
        except Exception as e:
            logger.red('Failed to load JSON from errors')
            quit()

        if 'exceeded' in result:
            return 429

        elif 'No user found for the API key supplied' in result:
            return 401
    elif status_code == 200:
        try:
            result = data['data']['result']
            score = data['data']['score']
        except Exception as e:
            logger.red('Unable to extract json for %s' % email)
            quit()

        percent = str(score) + '%'

        if score > 68:
            logger.green('Validated %s at %s' %
                         (logger.GREEN(email), logger.GREEN(percent)))
            return True
        else:
            return False
    else:
        logger.red('Got unexpected HTTP response [%s]' %
                   logger.RED(str(status_code)))
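The return contract mixes types: 429 and 401 for quota and auth failures, True/False for the verdict, and an implicit None for anything else, so a caller has to test the integer sentinels before treating the result as a boolean (429 is itself truthy). A hypothetical caller:

result = validate('alice@example.com', api_key)  # placeholder address
if result == 429:
    logger.red('Hunter quota exceeded, stopping validation')
elif result == 401:
    logger.red('Hunter rejected the API key')
elif result is True:
    logger.green('Email validated')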
Example #10
def get_targets(targets):
    # parses an input of targets to get a list of all possible ips
    target_list = []

    try:
        with open(targets, 'r') as file:
            contents = file.readlines()
            for i in (contents):
                target = i.rstrip()
                target_list.append(target)
            logger.verbose('Amount of targets from input: {}'.format(logger.BLUE(str(len(target_list)))))
            return target_list
    except Exception:
        try:
            if "/" in targets:
                try:
                    subnet = IPNetwork(targets)
                except Exception:
                    logger.red('failed to parse')
                    quit()

                for i in subnet:
                    tmp_str = str(i)
                    last_octet = tmp_str.split('.')[3]
                    if last_octet not in ('0', '255'):
                        target_list.append(tmp_str)
                logger.verbose('Amount of targets from input: {}'.format(logger.BLUE(str(len(target_list)))))
                return target_list
            elif "," in targets:
                ips = targets.split(',')
                for ip in ips:
                    target_list.append(ip)
                logger.verbose('Amount of targets from input: {}'.format(logger.BLUE(str(len(target_list)))))
                return target_list

            else:
                target_list.append(targets)
                logger.verbose('Amount of targets from input: {}'.format(logger.BLUE(str(len(target_list)))))
                return target_list
        except Exception:
            logger.red('Failed to parse targets.')
            quit()
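The manual last-octet filter assumes every range is a /24 and would also drop legitimate hosts such as 10.0.1.0 inside a larger subnet. netaddr can do this itself: IPNetwork.iter_hosts() yields only usable host addresses, excluding the network and broadcast. A sketch:

from netaddr import IPNetwork

def cidr_targets(cidr):
    # iter_hosts() omits the network and broadcast addresses for us.
    return [str(ip) for ip in IPNetwork(cidr).iter_hosts()]

# cidr_targets('192.168.1.0/30') -> ['192.168.1.1', '192.168.1.2']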
Example #11
def user_data(results, pages, cookie, company_id, domain, email_format):
    # Every page returns a dictionary of data, each dictionary is added to this list.

    users_per_page = []
    for page in range(0, pages + 1):

        if page + 1 == 25:
            break

        if results < 40:
            # This method pulls 40 results per page. If the available results are fewer than 40,
            # set results_per_page to whatever that number is.
            results_per_page = results
        else:
            # However, if the amount of available results is higher than the per page limit, set the per page limit to the max (40).
            results_per_page = 40

        # Every time this is hit, the start point in the api is incremented. First, it gets 0 - 40, then 40 - 80 and so on.
        # This can be dynamically figured out by multiplying the page number by results_per_page (40).
        results_to_fetch = results_per_page * page

        # In order to stop this loop from requesting more than is available, and then breaking it, this if statement limits that:
        if results_to_fetch >= results:
            break

        url = "https://www.linkedin.com/voyager/api/search/cluster?count=40&guides=List(v->PEOPLE,facetCurrentCompany->%s)&origin=OTHER&q=guided&start=%s" % (
            company_id, results_to_fetch)
        logger.blue('Pulling from page %s' % logger.BLUE(page))
        data = http.connect(url, cookie)
        result = data.text.encode('UTF-8')

        try:
            result = json.loads(result)
        except Exception as e:
            logger.red(str(e))
            quit()

        users = extract_data(result, domain, email_format)

        users_per_page.append(users)

    return users_per_page
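The start offsets this loop derives (0, 40, 80, ...) can come straight from range(), which also makes the stop condition explicit. A sketch of the same offset arithmetic:

results = 95
per_page = 40
for start in range(0, results, per_page):
    # yields 0, 40, 80 and never requests past the available results
    print('fetching results %d-%d' % (start, min(start + per_page, results)))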
Example #12
def do_certspotter():
    certspotter_api = certspotter.api()

    domain_data = certspotter_api.search(domain)

    if not verify(domain_data):
        logger.red('Failed to obtain data from %s' % logger.RED('certspotter'))
        return False
    else:
        logger.green('Successfully validated %s response' %
                     logger.GREEN('certspotter'))

    crunched_data = crunch.get_certspotter_data(domain_data)

    if verify(crunched_data):
        db.insert(crunched_data)
        return crunched_data
    else:
        return False
Example #13
def do_bufferoverrun():
    bufferoverrun_api = bufferoverrun.api()

    domain_data = bufferoverrun_api.search(domain)

    if not verify(domain_data):
        logger.red('Failed to obtain data from %s' %
                   logger.RED('bufferover.run'))
        return False
    else:
        logger.green('Successfully validated %s response' %
                     logger.GREEN('bufferover.run'))

    crunched_data = crunch.get_bufferoverrun_data(domain_data)

    if verify(crunched_data):
        db.insert(crunched_data)
        return crunched_data
    else:
        return False
Example #14
def connect(url, cookie):
    cookies = {'li_at': cookie, 'JSESSIONID': 'ajax:0397788525211216808'}
    headers = {
        'Csrf-Token': 'ajax:0397788525211216808',
        'X-RestLi-Protocol-Version': '2.0.0'
    }

    logger.debug(str(cookies))
    logger.debug(str(headers))

    try:
        r = requests.get(url, headers=headers, cookies=cookies)
        data = r.text
        if 'CSRF check failed.' in data:
            logger.red('Failed to authenticate to LinkedIn')
            return None
        return r
    except Exception as e:
        print(e)
        logger.red("Check the cookie and make sure it's correct!")
        return None
Example #15
def run(data, domain, filename, keyword, validation, api_key):
	cookie = data[0]
	company_id = data[1]
	email_format = data[2]
	profiles = get_company_profile(cookie, company_id, keyword)
	if profiles is None:
		logger.red('Unable to extract data from LinkedIn')
		quit()
	profiles_data = json.loads(profiles)
	results = profiles_data['elements'][0]['total']
	per_page = 40
	pages = int(results / per_page)
	if results < per_page:
		pages = 1
	logger.blue('Identified %s page(s)' % logger.BLUE(pages))
	logger.blue('Identified %s result(s)' % logger.BLUE(results))

	if pages == 0:
		logger.red('Could not identify pages')
		quit()

	if results > 1000:
		logger.red('This method of enumeration can only extract 1000 users')

	# sleep(3)

	users = user_data(results, pages, cookie, company_id, domain, email_format, validation, api_key)
	job_role_count = word_occurrence.count(users)

	logger.write_out(users, domain, job_role_count, filename, validation)

	return users
Example #16
    def search(self, domain):
        url = "http://dns.bufferover.run/dns?q=%s" % domain
        user_agent = 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:40.0) Gecko/20100101 Firefox/40.1'
        try:
            response = requests.get(url, headers={'User-Agent': user_agent})
        except Exception as e:
            logger.red('Got [%s] whilst requesting %s' %
                       (logger.RED(str(e)), logger.RED(url)))
            return None
        if response.ok:
            logger.green(
                'Got [%s] from %s' %
                (logger.GREEN(response.status_code), logger.GREEN(url)))
            content = response.content.decode('utf-8')
            try:
                data = json.loads(content)
                return ('dns.bufferover.run', data)
            except Exception as e:
                logger.red('Got [%s] whilst loading data from %s' %
                           (logger.RED(str(e)), logger.RED(url)))
                return None
        else:
            logger.red('Got [%s] from %s' %
                       (logger.RED(response.status_code), logger.RED(url)))
            return None
Example #17
def do_crtsh():
    crtsh_api = crtsh.api(
    )  # create an instance of the crtsh class. It isn't really required, but it was added in case multiple domains were going to be supported.

    domain_data = crtsh_api.search(
        domain
    )  # go to crt.sh and return a tuple: index 0 is the 'source' string, index 1 is the json blob.

    if not verify(domain_data):
        logger.red('Failed to obtain data from %s' % logger.RED('crt.sh'))
        return False
    else:
        logger.green('Successfully validated %s response' %
                     logger.GREEN('crt.sh'))

    crunched_data = crunch.get_crtsh_data(domain_data)

    if verify(crunched_data):
        db.insert(crunched_data)
        return crunched_data
    else:
        return False
Example #18
    def search(self, domain):
        url = "https://api.certspotter.com/v1/issuances?domain=%s&expand=dns_names&expand=issuer&expand=cert" % domain
        user_agent = 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:40.0) Gecko/20100101 Firefox/40.1'
        try:
            response = requests.get(url, headers={'User-Agent': user_agent})
        except Exception as e:
            logger.red('Got [%s] whilst requesting %s' %
                       (logger.RED(str(e)), logger.RED(url)))
            return None
        if response.ok:
            logger.green(
                'Got [%s] from %s' %
                (logger.GREEN(response.status_code), logger.GREEN(url)))
            content = response.content.decode('utf-8')
            try:
                data = json.loads(content)
                return ('certspotter', data)
            except Exception as e:
                logger.red('Got [%s] whilst loading data from %s' %
                           (logger.RED(str(e)), logger.RED(url)))
                return None
        else:
            return None
Example #19
def check(urls):
    probed = []
    for url in urls:
        try:
            response = requests.get(url,
                                    allow_redirects=False,
                                    timeout=5,
                                    verify=False)
            logger.green('%s [%s]' % (url, logger.GREEN(response.status_code)))
            probed.append(url)
        except requests.exceptions.Timeout:
            logger.red('%s [%s]' % (url, logger.RED('Timed out')))
        except requests.exceptions.TooManyRedirects:
            logger.red('%s [%s]' % (url, logger.RED('Too many redirects')))
        except requests.exceptions.RequestException:
            logger.red('%s [%s]' % (url, logger.RED('Connection refused')))
    return probed
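Because every probe passes verify=False, requests will emit an InsecureRequestWarning per URL; urllib3 can silence that once at startup if the noise is unwanted. A sketch:

import urllib3

# verify=False skips TLS validation, so suppress the per-request warning
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)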
Example #20
def log_results(results_data, probed):
    subdomains = results_data[0]
    wildcards = results_data[1]
    uid = get_uid()

    if probed is not None:
        try:
            filename = '%s_probed_%s.txt' % (args.domain, uid)
            with open(filename, 'w') as f:
                logger.green('Writing probed URLs to %s' %
                             logger.GREEN(filename))
                for subdomain in probed:
                    f.write(subdomain + '\n')
        except Exception as e:
            logger.red('Got [%s] whilst logging to %s' %
                       (logger.RED(str(e)), logger.RED(filename)))
            return False
    try:
        filename = '%s_subdomains_%s.txt' % (args.domain, uid)
        with open(filename, 'w') as f:
            logger.green('Writing subdomains to %s' % logger.GREEN(filename))
            for subdomain in subdomains:
                f.write(subdomain + '\n')
    except Exception as e:
        logger.red('Got [%s] whilst logging to %s' %
                   (logger.RED(str(e)), logger.RED(filename)))
        return False

    try:
        filename = '%s_wildcards_%s.txt' % (args.domain, uid)
        logger.green('Writing wildcards to %s' % logger.GREEN(filename))
        with open(filename, 'w') as f:
            for wildcard in wildcards:
                f.write(wildcard + '\n')
    except Exception as e:
        logger.red('Got [%s] whilst logging to %s' %
                   (logger.RED(str(e)), logger.RED(filename)))
        return False
    return subdomains, wildcards
Example #21
def validate(email):
    try:
        password = '******'
        url = 'https://outlook.office365.com/Microsoft-Server-ActiveSync'
        headers = {"MS-ASProtocolVersion": "14.0"}
        auth = (email, password)

        try:
            r = requests.options(url, headers=headers, auth=auth)
            status = r.status_code
        except requests.exceptions.RequestException:
            logger.red('Unable to connect to [%s]' % logger.RED(url))
            quit()

        if status == 401:
            logger.green('Successfully validated %s' % logger.GREEN(email))
            return True

        elif status == 404:
            if r.headers.get("X-CasErrorCode") == "emailNotFound":
                logger.red('Could not validate %s' % logger.RED(email))
                return False

        elif status == 403:
            logger.green('Found credentials: %s:%s (2FA)' %
                         (logger.GREEN(email), logger.GREEN(password)))
            return [True, password]

        elif status == 200:
            logger.green('Found credentials: %s:%s' %
                         (logger.GREEN(email), logger.GREEN(password)))
            return [True, password]
        else:
            logger.red('Got HTTP Status Response %s. Unexpected, skipping.' % logger.RED(str(status)))
            return None

    except KeyboardInterrupt:
        logger.yellow('Keyboard interrupt detected!')
        quit()
Example #22
def main():

    # We don't need if conditions, since we already have True/False stored in variables

    logger.QUIET = args.quiet
    logger.VERBOSE = args.verbose

    if args.ports:
        p = []
        ports = args.ports
        if "-" in ports:
            try:
                start = int(ports.split('-')[0])
                end = int(ports.split('-')[1])
                for port in range(start, end + 1):
                    p.append(port)
            except (IndexError, ValueError):
                print('failed to split on "-"')
                quit()
        elif "," in args.ports:
            ports = [int(n) for n in args.ports.split(",")]
            p = ports

        elif len(args.ports) > 0 and "-" not in args.ports and "," not in args.ports:
            try:
                p.append(int(args.ports))
            except ValueError:
                print('Please specify a port number')
                quit()
    else:
        p = [53, 88, 139, 445, 464]

    if args.ports:
        logger.verbose('Ports configuration: '+str(p))


    username, password = cred_split(args.credentials)
    logger.verbose('Username: ' + logger.YELLOW(username))

    if args.domain:
        domain = args.domain
    else:
        domain = 'WORKGROUP'

    logger.verbose('Domain: ' + logger.YELLOW(domain))

    logging.debug('Trying to get targets..')

    hosts = get_targets(args.target)  # all possible hosts

    logging.debug('Got targets..')

    logging.debug('Proceeding to hosts discovery..')
    if args.mode is not None:
        if args.mode.upper() == 'ICMP':
            logger.verbose('Discovery mode set to ICMP')
            alive_hosts = thread_pool.map(icmp_scan, hosts)  # all hosts that respond to icmp
            logging.debug('Hosts list: {}'.format(alive_hosts))
        elif args.mode.upper() == 'PORTS':
            logger.verbose('Discovery mode set to ports')
            port_scan_fixed = partial(port_scan, ports=p)
            alive_hosts = thread_pool.map(port_scan_fixed, hosts)
            logging.debug('Hosts list: {}'.format(alive_hosts))
        elif args.mode.upper() == 'SKIP':
            logger.verbose('Discovery mode set to skip, scanning all {} hosts'.format(
                logger.YELLOW(str(len(hosts)))))
            alive_hosts = hosts
        else:
            logger.red(
                'Unknown option for -m! Only PORTS, SKIP and ICMP can be used!')
            quit()
    else:
        logger.verbose('No discovery mode set, defaulting to ICMP')
        alive_hosts = icmp_scan(hosts)  # all hosts that respond to icmp
    
    logging.debug('All alive hosts are discovered..')

    # create an empty list that will store all the Host objects
    enumerated_hosts = []

    logging.debug('Processing hosts enumeration...')
    # for every host, do some enum
    enumerated_hosts = thread_pool.map(hosts_enumeration, alive_hosts)    
    logging.debug('Enumeration finished..')

    if args.output:
        outfile_name = args.output
        clean_output(outfile_name)
        for host in enumerated_hosts:  # for every host object, pass the attributes to output()
            output(outfile_name, host.ip, host.name,
                   host.null_sessions, host.shares)
Example #23
def extract_data(data, domain, email_format):
    domain = '@' + domain
    collected_data = {}
    for d in data['elements'][0]['elements']:
        if 'com.linkedin.voyager.search.SearchProfile' in d['hitInfo'] and d[
                'hitInfo']['com.linkedin.voyager.search.SearchProfile'][
                    'headless'] == False:
            try:
                industry = d['hitInfo'][
                    'com.linkedin.voyager.search.SearchProfile']['industry']
            except KeyError:
                industry = ""

            raw_firstname = d['hitInfo'][
                'com.linkedin.voyager.search.SearchProfile']['miniProfile'][
                    'firstName']
            raw_surname = d['hitInfo'][
                'com.linkedin.voyager.search.SearchProfile']['miniProfile'][
                    'lastName']

            profile_url = "https://www.linkedin.com/in/%s" % d['hitInfo'][
                'com.linkedin.voyager.search.SearchProfile']['miniProfile'][
                    'publicIdentifier']
            occupation = d['hitInfo'][
                'com.linkedin.voyager.search.SearchProfile']['miniProfile'][
                    'occupation']
            location = d['hitInfo'][
                'com.linkedin.voyager.search.SearchProfile']['location']
            try:
                role_data = d['hitInfo'][
                    'com.linkedin.voyager.search.SearchProfile']['snippets'][
                        0]['heading']['text']
                try:
                    current_role = role_data.split(' at ')[0]
                    current_company = role_data.split(' at ')[1]
                except IndexError:
                    current_company = None, 'Error'
                    current_role = occupation
            except (KeyError, IndexError):
                try:
                    current_company = occupation.split(' at ')[1]
                    current_role = occupation.split(' at ')[0]
                except IndexError:
                    current_company = None, 'Error'
                    current_role = occupation

            name_data = [raw_firstname, raw_surname]

            name_scheme = naming_scheme.names(name_data)
            firstname = name_scheme[0]
            middlename = name_scheme[1]
            surname = name_scheme[2]
            fullname = name_scheme[3]

            name_data = [firstname, middlename, surname]
            email_scheme = naming_scheme.emails(name_data, email_format,
                                                domain)

            email = email_scheme

            try:
                datapoint_1 = d['hitInfo'][
                    'com.linkedin.voyager.search.SearchProfile'][
                        'miniProfile']['picture'][
                            'com.linkedin.common.VectorImage']['rootUrl']
                datapoint_2 = d['hitInfo'][
                    'com.linkedin.voyager.search.SearchProfile'][
                        'miniProfile']['picture'][
                            'com.linkedin.common.VectorImage']['artifacts'][2][
                                'fileIdentifyingUrlPathSegment']
                picture = datapoint_1 + datapoint_2

                if current_company[0] is not None:
                    logger.green('Successfully obtained image for %s [%s]' %
                                 (logger.GREEN(fullname),
                                  logger.GREEN(current_company)))
                else:
                    logger.green('Successfully obtained image for %s' %
                                 (logger.GREEN(fullname)))
            except (KeyError, IndexError):
                if current_company[0] is not None:
                    logger.red(
                        'Unable to obtain image for %s [%s]' %
                        (logger.RED(fullname), logger.RED(current_company)))
                else:
                    logger.red('Unable to obtain image for %s' %
                               (logger.RED(fullname)))
                picture = None

            if current_company[0] is not None:
                logger.green('Found %s [%s] at %s' %
                             (logger.GREEN(fullname), logger.GREEN(email),
                              logger.GREEN(current_company)))
                userinfo = [
                    profile_url, picture, firstname, middlename, surname,
                    email, current_role, current_company
                ]
            else:
                logger.green('Found %s [%s]' %
                             (logger.GREEN(fullname), logger.GREEN(email)))
                userinfo = [
                    profile_url, picture, firstname, middlename, surname,
                    email, current_role, 'Error'
                ]

            collected_data[fullname] = userinfo
    return collected_data
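Every field is reached through d['hitInfo']['com.linkedin.voyager.search.SearchProfile'], so hoisting that lookup once per result removes most of the repetition. A sketch of the same extraction under that refactor:

SEARCH_PROFILE = 'com.linkedin.voyager.search.SearchProfile'

for d in data['elements'][0]['elements']:
    profile = d['hitInfo'].get(SEARCH_PROFILE)
    if profile is None or profile['headless']:
        continue
    mini = profile['miniProfile']
    raw_firstname = mini['firstName']
    raw_surname = mini['lastName']
    profile_url = 'https://www.linkedin.com/in/%s' % mini['publicIdentifier']
    occupation = mini['occupation']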
Example #24
def extract_data(data, domain, email_format, validation, api_key):

	if not domain.startswith('@'):
		domain = '@' + domain

	collected_data = {}
	for d in data['elements'][0]['elements']:
		if 'com.linkedin.voyager.search.SearchProfile' in d['hitInfo'] and d['hitInfo']['com.linkedin.voyager.search.SearchProfile']['headless'] == False:
			try:
				industry = d['hitInfo']['com.linkedin.voyager.search.SearchProfile']['industry']
			except KeyError:
				industry = ""

			raw_firstname = d['hitInfo']['com.linkedin.voyager.search.SearchProfile']['miniProfile']['firstName']
			raw_surname = d['hitInfo']['com.linkedin.voyager.search.SearchProfile']['miniProfile']['lastName']
			
			profile_url = "https://www.linkedin.com/in/%s" % d['hitInfo']['com.linkedin.voyager.search.SearchProfile']['miniProfile']['publicIdentifier']
			occupation = d['hitInfo']['com.linkedin.voyager.search.SearchProfile']['miniProfile']['occupation']
			location = d['hitInfo']['com.linkedin.voyager.search.SearchProfile']['location']
			try:
				role_data = d['hitInfo']['com.linkedin.voyager.search.SearchProfile']['snippets'][0]['heading']['text']
				try:
					current_role = role_data.split(' at ')[0]
					current_company = role_data.split(' at ')[1]
				except IndexError:
					current_company = None, 'Error'
					current_role = occupation
			except (KeyError, IndexError):
				try:
					current_company = occupation.split(' at ')[1]
					current_role = occupation.split(' at ')[0]
				except IndexError:
					current_company = None, 'Error'
					current_role = occupation

			name_data = [raw_firstname, raw_surname]

			name_scheme = naming_scheme.names(name_data)
			firstname = name_scheme[0]
			middlename = name_scheme[1]
			surname = name_scheme[2]
			fullname = name_scheme[3]

			name_data = [firstname, middlename, surname]
			email_scheme = naming_scheme.emails(name_data, email_format, domain)

			email = email_scheme

			try:
				datapoint_1 = d['hitInfo']['com.linkedin.voyager.search.SearchProfile']['miniProfile']['picture']['com.linkedin.common.VectorImage']['rootUrl']
				datapoint_2 = d['hitInfo']['com.linkedin.voyager.search.SearchProfile']['miniProfile']['picture']['com.linkedin.common.VectorImage']['artifacts'][2]['fileIdentifyingUrlPathSegment']
				picture = datapoint_1 + datapoint_2

				if current_company[0] is not None:
					logger.green('Successfully obtained image for %s [%s]' % (logger.GREEN(fullname), logger.GREEN(current_company)))
				else:
					logger.green('Successfully obtained image for %s' % (logger.GREEN(fullname)))
			except (KeyError, IndexError):
				if current_company[0] is not None:
					logger.red('Unable to obtain image for %s [%s]' % (logger.RED(fullname), logger.RED(current_company)))
				else:
					logger.red('Unable to obtain image for %s' % (logger.RED(fullname)))
				picture = None

			if current_company[0] is not None:
				logger.green('Found %s [%s] at %s' % (logger.GREEN(fullname), logger.GREEN(email), logger.GREEN(current_company)))
				userinfo = [profile_url, picture, firstname, middlename, surname, email, current_role, current_company]
			else:
				logger.green('Found %s [%s]' % (logger.GREEN(fullname), logger.GREEN(email)))
				userinfo = [profile_url, picture, firstname, middlename, surname, email, current_role, 'Error']

			if validation is not None:
				if validation == 'o365':
					validated = o365_validation.validate(email)
					userinfo.append(validated)
				elif validation == 'hunter':
					validated = hunter_api.validate(email, api_key)
					if validated == 429:
						logger.red('You have exceeded your hunter API Requests.')
						quit()
					elif validated == 401:
						logger.red('The API Key specified received an %s error.' % 'authentication')
						quit()
					else:
						userinfo.append(validated)

			collected_data[fullname] = userinfo

	return collected_data
Example #25
from lib import runner
from lib import db
from lib import logger
from lib import banner
from lib import probe
from lib import arguments
from time import sleep

VERSION = '0.5'

args = arguments.get_args()  # this function returns the args object

if not args.silent:
    banner.header(VERSION)

if args.domain is None:
    logger.red('Please supply a domain')
    quit()
else:
    domain = args.domain
    if args.name is not None:
        db.db_name = args.name
    db.init()  # create the database file

interval = args.interval
counter = interval

if not args.query:
    if not args.single:
        try:
            while True:
                sleep(1)
Example #26
from lib import logger
from lib import arguments
from time import gmtime, strftime
import json, os, hashlib

try:
    from tinydb import TinyDB, Query
except Exception as e:
    logger.red('Got [%s] whilst importing %s' %
               (logger.RED(str(e)), logger.RED('tinydb')))
    quit()

db_name = 'subdomains.db'

args = arguments.get_args()


def init():
    if not os.path.isfile(db_name):
        logger.yellow('Creating database [%s]' % logger.YELLOW(db_name))
    db = TinyDB(db_name)
    return db


def insert(domain_data):
    db = init()
    data = vars(domain_data)
    logger.yellow('Adding %s to %s' %
                  (logger.YELLOW(str(data)), logger.YELLOW(db_name)))
    try:
        db.insert(data)
    except Exception as e:
        logger.red('Got [%s] whilst adding %s to %s' %
                   (logger.RED(str(e)), logger.RED(data), logger.RED(db_name)))
        return None
Example #27
def main():
    pool = ThreadPool(processes=args.threads)
    logger.VERBOSE = args.verbose
    logger.LIVE = args.live
    start_time = strftime("%H:%M:%S", gmtime())
    filetypes = ['txt', 'csv', 'html', 'all']
    if args.format:
        if args.format.lower() not in filetypes:
            logger.red('Did not understand the format supplied: [{}]'.format(logger.RED(args.format)))
            quit()



    if args.ports:
        p = []
        ports = args.ports
        if "-" in ports:
            try:
                start = int(ports.split('-')[0])
                end = int(ports.split('-')[1])
                for port in range(start, end + 1):
                    p.append(port)
            except (IndexError, ValueError):
                print('failed to split on "-"')
                quit()
        elif "," in args.ports:
            ports = [int(n) for n in args.ports.split(",")]
            p = ports

        elif len(args.ports) > 0 and "-" not in args.ports and "," not in args.ports:
            try:
                p.append(int(args.ports))
            except ValueError:
                print('Please specify a port number')
                quit()
    else:
        p = [53, 88, 139, 445, 464]

    if args.ports:
        logger.verbose('Ports configuration: '+str(p))


    target = args.target
    hosts = get_targets(target)  # all possible hosts
    scan_type = args.enumerate

    logger.blue('Target: [{}]'.format(logger.BLUE(target)))

    logger.blue('Found {} target(s)'.format(logger.BLUE(str(len(hosts)))))

    if scan_type is None:
        logger.blue('Scan type: [{}]'.format(logger.BLUE('default')))
    else:
        logger.blue('Scan type: [{}]'.format(logger.BLUE(scan_type)))


    if args.ports:
        logger.blue('Ports given: [{}]'.format(logger.BLUE(args.ports)))
    logger.blue('Port count: [{}]'.format(logger.BLUE(str(len(p)))))


    username, password = cred_split(args.credentials)

    if username and password:
        logger.blue('Username: [{}]'.format(logger.BLUE(username)))
        logger.blue('Password: [{}]'.format(logger.BLUE(password)))


    if args.domain:
        domain = args.domain
    else:
        domain = 'WORKGROUP'

    logger.blue('Domain: [{}]'.format(logger.BLUE(domain)))
    logger.header('SCANNING')
    logger.blue('Start time: '+logger.BLUE(start_time))

    if args.mode is not None:
        if args.mode.upper() == 'ICMP':
            logger.verbose('Discovery mode set to ICMP')
            # alive_hosts = icmp_scan(hosts)  # all hosts that respond to icmp
            alive_hosts = pool.map(icmp_scan, hosts)

        elif args.mode.upper() == 'PORTS':
            logger.verbose('Discovery mode set to ports')
            # alive_hosts = port_scan(hosts, p)
            alive_hosts = pool.map(partial(port_scan, hosts), p)
        elif args.mode.upper() == 'SKIP':
            logger.verbose('Discovery mode set to skip, scanning all {} hosts'.format(logger.YELLOW(str(len(hosts)))))
            alive_hosts = hosts
        else:
            logger.red('Unknown option for -m! Only skip, port and icmp can be used!')
            quit()
    else:
        logger.verbose('No discovery mode set, skipping')
        alive_hosts = hosts  # all hosts that respond to icmp

    # Before enumeration, this just fixes some weird errors. Somehow the ports function returns a list nested in a list, like [[]].
    # The next three lines flatten that, remove any empties, and drop duplicates.

    alive_hosts = [''.join(x) for x in alive_hosts]  # join into one list
    alive_hosts = list(filter(None, alive_hosts))  # remove empties
    alive_hosts = list(set(alive_hosts))  # remove duplicates

    # create an empty list that will store all the Host objects
    enumerated_hosts = []

    # for every host, do some enum; this could probably be done with multiprocessing

    if args.enumerate is not None:
        if args.enumerate.lower() == 'null':
            pass
        elif args.enumerate.lower() == 'shares':
            pass
        else:
            logger.red('Unknown option for -e! Only null and shares can be used!')
            quit()

    enumerated_hosts = pool.map(hosts_enumeration, alive_hosts)

    end_time = strftime("%H:%M:%S", gmtime())

    logger.blue('End time: '+logger.BLUE(end_time))

    logger.header('RESULTS')

    results_parse(results_cache, scan_type)

    if args.output:
        outfile_name = args.output
        if args.format:
            outfo = args.format.lower()
            if outfo == 'txt':
                clean_output(outfile_name)
                output(results_cache, outfile_name, scan_type)
            elif outfo == 'csv':
                clean_output(outfile_name)
                csv_output(results_cache, outfile_name, scan_type)
            elif outfo == 'html':
                clean_output(outfile_name)
                html_output(results_cache, outfile_name, scan_type)
            elif outfo == 'all':
                outfile_name = outfile_name.split('.')[0]

                clean_output(outfile_name)
                output(results_cache, outfile_name + '.txt', scan_type)
                csv_output(results_cache, outfile_name + '.csv', scan_type)
                html_output(results_cache, outfile_name + '.html', scan_type)

        else:
            clean_output(outfile_name)
            output(results_cache, outfile_name, scan_type)
Example #28
def parse_users(data, userdata_per_page, total_employees):
	cookie = data.cookie
	company_id = data.company_id
	email_format = data.email_format
	keyword = data.keyword
	domain = data.domain
	validation = data.validation
	api_key = data.api_key
	validation_count = 0

	logger.debug(str(vars(data)))

	# For every page, do some parsing.

	if not domain.startswith('@'):
		domain = '@' + domain

	users = []

	if validation:
		print()
		logger.yellow('Starting Validation')
		for user_data in userdata_per_page:
			for d in user_data['elements'][0]['elements']: #This goes one user at a time
				validation_count += 1

	for user_data in userdata_per_page:
		for d in user_data['elements'][0]['elements']: #This goes one user at a time
			if 'com.linkedin.voyager.search.SearchProfile' in d['hitInfo'] and d['hitInfo']['com.linkedin.voyager.search.SearchProfile']['headless'] == False:
				try:
					industry = d['hitInfo']['com.linkedin.voyager.search.SearchProfile']['industry']
					logger.debug(industry)
				except KeyError:
					industry = ""

				raw_firstname = d['hitInfo']['com.linkedin.voyager.search.SearchProfile']['miniProfile']['firstName']
				raw_surname = d['hitInfo']['com.linkedin.voyager.search.SearchProfile']['miniProfile']['lastName']
				
				profile_url = "https://www.linkedin.com/in/%s" % d['hitInfo']['com.linkedin.voyager.search.SearchProfile']['miniProfile']['publicIdentifier']
				occupation = d['hitInfo']['com.linkedin.voyager.search.SearchProfile']['miniProfile']['occupation']
				location = d['hitInfo']['com.linkedin.voyager.search.SearchProfile']['location']
				try:
					role_data = d['hitInfo']['com.linkedin.voyager.search.SearchProfile']['snippets'][0]['heading']['text']
					try:
						current_role = role_data.split(' at ')[0]
						current_company = role_data.split(' at ')[1]
					except IndexError:
						current_company = None
						current_role = occupation
				except (KeyError, IndexError):
					try:
						current_company = occupation.split(' at ')[1]
						current_role = occupation.split(' at ')[0]
					except IndexError:
						current_company = None
						current_role = occupation

				name_data = [raw_firstname, raw_surname]

				logger.debug(str(name_data))

				name_scheme = naming_scheme.names(name_data)
				firstname = name_scheme[0]
				middlename = name_scheme[1]
				surname = name_scheme[2]
				fullname = name_scheme[3]
				name_data = [firstname, middlename, surname]
				email_scheme = naming_scheme.emails(name_data, email_format, domain)

				email = email_scheme

				try:
					datapoint_1 = d['hitInfo']['com.linkedin.voyager.search.SearchProfile']['miniProfile']['picture']['com.linkedin.common.VectorImage']['rootUrl']
					datapoint_2 = d['hitInfo']['com.linkedin.voyager.search.SearchProfile']['miniProfile']['picture']['com.linkedin.common.VectorImage']['artifacts'][2]['fileIdentifyingUrlPathSegment']
					picture = datapoint_1 + datapoint_2
					logger.debug(picture)
				except (KeyError, IndexError):
					picture = None

				if validation is not None:
					validation_count -= 1
					if validation == 'o365':
						validated = o365_validation.validate(email)
					elif validation == 'hunter':
						validated = hunter_validation.validate(email, api_key)
						if validated == 429:
							logger.red('You have exceeded your hunter API Requests.')
							quit()
						elif validated == 401:
							logger.red('The API Key specified received an %s error.' % 'authentication')
							quit()
					else:
						validated = False
				else:
					validated = False

				if validation:
					logger.verbose('%s emails remaining...' % logger.YELLOW(validation_count))


				user = user_structure.User(profile_url, picture, firstname, middlename, surname, fullname, email, validated, current_role, current_company)
				users.append(user)
	if validation:
		logger.yellow('Validation finished!')
		print()
	return users
Example #29
        print('%s:%s' % (scheme, logger.BLUE(example)))
    quit()

# The most important part...
banner.banner()

if args.verbose:
    logger.verbose_switch = True
    logger.debug_switch = False

if args.debug:
    logger.debug_switch = True
    logger.verbose_switch = True

if args.cookie is None:
    logger.red('Please specify a file containing the %s cookie.' %
               logger.RED('li_at'))
    quit()

try:
    with open(args.cookie, 'r') as f:
        cookie = f.readline().rstrip()
except OSError:
    logger.red('Please add the cookie to a file')
    logger.debug('%s not valid' % args.cookie)
    quit()

company_id = args.company_id

domain = args.domain

if args.output:
Example #30
if args.output:
	filename = args.output
else:
	filename = None

keyword = args.keyword

if args.format:
	email_schemes = ['firstname.surname', 'firstnamesurname', 'f.surname', 'fsurname', 'surname.firstname', 'surnamefirstname', 's.firstname', 'sfirstname']
	email_format = args.format.lower()
	if email_format not in email_schemes:
		logger.red('Unknown email scheme specified, please see the available below:')
		for i in email_schemes:
			logger.blue(i)
		quit()

if args.company_id is None:
	logger.red('Please specify a company id with the %s flag' % logger.RED('-id'))
	quit()
if args.domain is None:
	logger.red('Please specify a domain with the %s flag' % logger.RED('-d'))
	quit()
connection_data = [cookie, company_id, email_format]
try:
	sleep(2)