def get_nullsessions(target):
    """Test SMB null-session authentication against *target* via rpcclient.

    Runs an anonymous ``lsaquery`` and returns True when it succeeds,
    False when error_handle() finds a known error in the output.
    """
    logger.blue('Testing null sessions on {}'.format(logger.BLUE(target)))
    rpc_command_lsaquery = 'rpcclient -U "" -N {} -c "lsaquery"'.format(target)
    result = run(rpc_command_lsaquery, stdout=PIPE, stderr=PIPE,
                 universal_newlines=False, shell=True)
    # Prefer whichever stream has data; concatenate both as a fallback so
    # command_output is always bound (the original left it unassigned when
    # both streams were empty or both were non-empty).
    if len(result.stdout) > 0 and len(result.stderr) == 0:
        command_output = result.stdout
    elif len(result.stderr) > 0 and len(result.stdout) == 0:
        command_output = result.stderr
    else:
        command_output = result.stdout + result.stderr
    decoded = command_output.decode('utf-8')
    has_error = error_handle(decoded)
    try:
        output = decoded.rstrip().replace('\n', ' ')
        logger.verbose('Output from rpcclient: ' + logger.YELLOW(str(output)))
    except Exception:
        logger.verbose('Failed to get output from rpcclient')
    # has_error is either False or a truthy error string
    if has_error:
        logger.red_indent(
            'Failed to authenticate with null sessions to {}'.format(logger.RED(target)))
        return False
    logger.green_indent(
        'Successfully authenticated with null sessions to {}'.format(logger.GREEN(target)))
    return True
def writeToInflux(parmInfluxUrl, parmInfluxDb, parmTargetUser, parmTargetPwd, perfList, whitelistDict):
    '''Write the formatted performance records to InfluxDB via its /write REST API.

    Each line produced by outputFormatter() is POSTed individually; InfluxDB
    answers 204 (NO_CONTENT) on success. Raises Exception on any failure.
    '''
    # Credentials are masked here: the original interpolated the password in
    # clear text AND passed more arguments than the format string had
    # placeholders, which raised TypeError at runtime.
    l.debug("writeToInflux with the following parameters: \nparmInfluxUrl: '%s'\n parmInfluxDb: '%s'\n parmTargetUser: '******'\n parmTargetPwd: '******'\n len(perfList): : '%s'"
            % (parmInfluxUrl, parmInfluxDb, len(perfList)))
    try:
        (urlSchema, urlHost, urlPort) = o.splitHttpUrlString(parmInfluxUrl)
    except Exception:
        # raise Exception(...) works on both Python 2 and 3; the original used
        # the Python-2-only "raise Exception, value" form.
        raise Exception(sys.exc_info()[1])
    ##
    ## influxdb write end-point with query string
    tmpUri = "/write"
    tmpUri += o.buildQueryString(db=parmInfluxDb, precision="ms", p=parmTargetPwd, u=parmTargetUser)
    l.debug("Uri to /write Influx: '%s'" % (tmpUri))
    postHeaders = {"Content-type": "text/plain; charset=utf-8", "Accept": "text/plain"}
    ##
    ## Number of rows inserted
    rowCount = 0
    ##
    ## Format the output as a string
    data = outputFormatter(perfList, outFormat="INFLUX", whitelistDict=whitelistDict)
    l.verbose("formatted influx data: \n%s", data)
    ##
    ## outputFormatter returns a string of the data separated by \n per line
    postDataDict = data.split("\n")
    ##
    ## iterate over the perflist and build the REST API string.
    ## The "tags" is string of tags separated by NODE_SEPARATOR and the
    ## counters will be the fields
    for postData in postDataDict:
        l.debug("POST data for write end-point: '%s'" % (postData))
        httpConn = None  # so the except path can't hit an unbound name
        try:
            ## Get the HTTP Connection
            httpConn = o.getHttpConnection(urlSchema, urlHost, urlPort)
            httpConn.request("POST", tmpUri, postData, postHeaders)
            httpResponse = httpConn.getresponse()
            responseData = httpResponse.read()
            httpConn.close()
            rowCount += 1
        except Exception as e2:
            if httpConn is not None:
                httpConn.close()
            # str(e2) instead of e2.strerror: generic exceptions have no
            # .strerror attribute, which raised AttributeError in the handler
            errorString = "Failed to write data to influx, '%s'" % (e2)
            raise Exception(errorString)
        ##
        ## influxDb write returns code 204
        if (httpResponse.status != httplib.NO_CONTENT):
            l.error("Error response data: '%s'" % (responseData))
            # %-format the status in (the original accidentally built a tuple
            # here instead of a string)
            errorString = "Write to influx db failed with status code: '%d'" % (httpResponse.status)
            l.error(errorString)
            httpConn.close()
            raise Exception(errorString)
        else:
            l.debug("influx URL ping returned status code: '%d'", httpResponse.status)
    ##
    ## Finished - close the connection
    httpConn.close()
    l.info("writeToInflux: Number of rows inserted: '%d'" % (rowCount))
def icmp_scan(targets):
    """Send one ICMP echo per target; return the list of hosts that answered."""
    targets = [targets]  # callers pass a single target; wrap so the loop works
    logger.verbose('Amount of targets for ICMP Scan: {}'.format(len(targets)))
    alive_hosts = []
    timeout = 2
    logger.verbose('ICMP Timeout set to: ' + logger.YELLOW(str(timeout)))
    for target in targets:
        logger.live_info('[{}]:\tSENDING ICMP'.format(logger.BLUE(target)))
        # pre-bind so a failed/raising sr1() can't leave these names unbound
        resp = None
        icmp_type = None
        resp_parse = None
        try:
            resp = sr1(IP(dst=str(target)) / ICMP(), timeout=timeout, verbose=0)
        except Exception:
            logger.live_bad('[{}]:\tGOT NO RESPONSE'.format(logger.RED(target)))
        try:
            icmp_type = str(resp.getlayer(ICMP).code)
            resp_parse = icmp_response_parse(icmp_type)
            logger.live_info('[{}]:\tGOT ICMP TYPE: [{}] {}'.format(
                logger.BLUE(target), logger.BLUE(icmp_type), logger.BLUE(resp_parse)))
        except Exception:
            # resp is None or has no ICMP layer
            logger.live_info('[{}]:\tCOULD NOT GET ICMP TYPE'.format(logger.BLUE(target)))
        if resp is None:
            logger.live_bad('[{}]:\tGOT NO RESPONSE'.format(logger.RED(target)))
            logger.live_bad('[{}]:\tDOWN'.format(logger.RED(target)))
        elif (int(resp.getlayer(ICMP).type) == 3 and
              int(resp.getlayer(ICMP).code) in [1, 2, 3, 9, 10, 13]):
            # destination-unreachable codes that mean filtered/absent
            logger.live_bad('[{}]:\tDOWN'.format(logger.RED(target)))
        else:
            logger.green('[{}]:\tUP [{}] {}'.format(
                logger.GREEN(target), logger.GREEN(icmp_type), logger.GREEN(resp_parse)))
            if target not in alive_hosts:
                alive_hosts.append(target)
    return alive_hosts
def get_nullsessions(target):
    """Attempt SMB null-session auth on *target* via an anonymous rpcclient lsaquery.

    Returns True on success, False when rpcclient output contains a known error.
    """
    logger.live_info('[{}]:\tATTEMPTING NULL SESSIONS'.format(logger.BLUE(target)))
    rpc_command_lsaquery = 'rpcclient -U "" -N {} -c "lsaquery"'.format(target)
    result = run(rpc_command_lsaquery, stdout=PIPE, stderr=PIPE,
                 universal_newlines=False, shell=True)
    # Prefer whichever stream has data; concatenate both as a fallback so
    # command_output can never be referenced before assignment.
    if len(result.stdout) > 0 and len(result.stderr) == 0:
        command_output = result.stdout
    elif len(result.stderr) > 0 and len(result.stdout) == 0:
        command_output = result.stderr
    else:
        command_output = result.stdout + result.stderr
    decoded = command_output.decode('utf-8')
    has_error = error_handle(target, decoded)
    try:
        output = decoded.rstrip().replace('\n', ' ')
        logger.verbose('[{}]:\tOUTPUT FROM RPCCLIENT: {}'.format(
            logger.YELLOW(target), logger.YELLOW(str(output))))
    except Exception:
        logger.verbose('[{}]:\tOUTPUT FROM RPCCLIENT: FAILED TO GET DATA'.format(logger.YELLOW(target)))
    # error_handle returns the error STRING or False; the original compared
    # "has_error == True", which never matched a string, so real failures
    # fell through both branches and the function silently returned None.
    if has_error:
        logger.live_bad('[{}]:\tNULL SESSIONS: {}'.format(logger.RED(target), logger.RED('FAILED')))
        return False
    logger.green('[{}]:\tNULL SESSIONS: {}'.format(logger.GREEN(target), logger.GREEN('SUCCESS')))
    return True
def getNodes(wasCellName, root):
    '''Walk the PerformanceMonitor XML tree and return a list of performance records.

    Descends cell -> Node -> Server -> Stat and delegates each Stat element
    to getStatsData(), concatenating all returned records.
    '''
    l.logEntryExit("Entering: root: '%s'; wasCellName: '%s'" % (str(root), wasCellName))
    nodeName = None
    serverName = None
    statName = None
    parentNodeNames = ""
    statRtnList = []
    ##
    ## Check for a valid file via the root node tag (PerformanceMonitor)
    l.debug("XML tag of the root node is: '%s'" % (root.tag))
    if (root.tag == "PerformanceMonitor"):
        responseStatus = root.get('responseStatus')
        if (responseStatus != 'success'):
            l.error("PerformanceMonitor responseStatus indicates an invalid file!")
        else:
            l.verbose("PerformanceMonitor responseStatus indicates a valid file!")
        ##
        ## process sub-nodes
        for nodeNode in root.findall("./Node"):
            l.verbose("Processing xmlNode.tag: '%s'" % (nodeNode.tag))
            # NOTE(review): parentNodeNames is never reset between iterations,
            # so names from earlier Node/Server elements keep accumulating in
            # the prefix passed to getStatsData — confirm this is intended.
            parentNodeNames += wasCellName
            ##
            ## Node nodes
            if (nodeNode.tag == "Node"):
                nodeName = nodeNode.get('name')
                parentNodeNames += NODE_SEPARATOR
                parentNodeNames += nodeName
                l.debug("nodeName set to: '%s'" % (nodeName))
                ##
                ## Server nodes
                for serverNode in nodeNode.findall("./Server"):
                    if (serverNode.tag == "Server"):
                        serverName = serverNode.get('name')
                        parentNodeNames += NODE_SEPARATOR
                        parentNodeNames += serverName
                        l.debug("serverName set to: '%s'" % (serverName))
                        ##
                        ## Get all Stats nodes of the server node
                        for statNode in serverNode.findall("./Stat"):
                            if (statNode.tag == "Stat"):
                                statName = statNode.get('name')
                                l.debug("Found child node with name: '%s'" % (statNode.get("name")))
                                debugList = getStatsData(parentNodeNames, statNode)
                                statRtnList += debugList
                                l.debug("JSON-0 debugList: '%s'" % (str(json.dumps(debugList))))
                                l.debug("JSON-1 statRtnList: '%s'" % (str(json.dumps(statRtnList))))
            else:
                l.debug("Expected nodeNode.tag to be \"Node\" but got: '%s'" % (nodeNode.tag))
    return statRtnList
def start(self):
    """Start the scan: run the directory scan, report findings, then the XSS scan."""
    status("Starting directory scan")
    self.intresting_urls = self._dirscan()
    # != instead of "is not 0": identity comparison against an int literal is
    # implementation-dependent and a SyntaxWarning on modern CPython
    if self.intresting_urls != 0:
        success("found %d urls to take a look at..." % (len(self.intresting_urls)))
        for urldic in self.intresting_urls:
            for key in urldic:
                verbose(self, "[%d] %s" % (urldic[key], key))
    self._xss_scan()
def error_handle(target, cmd_out):
    """Scan rpcclient output for known NT_STATUS / connection errors.

    Returns the first matching error string (truthy), or False when the
    output contains none of the known errors.
    """
    ERRORS = ["NT_STATUS_CONNECTION_REFUSED", "NT_STATUS_INVALID_NETWORK_RESPONSE",
              "NT_STATUS_INVALID_PARAMETER", "NT_STATUS_UNSUCCESSFUL",
              "NT_STATUS_IO_TIMEOUT", "NT_STATUS_ACCESS_DENIED",
              "NT_STATUS_LOGON_FAILURE", "NT_STATUS_REVISION_MISMATCH",
              "COULD NOT CONNECT", "NT_STATUS_HOST_UNREACHABLE",
              "no servers could be reached"]
    # first known error present in the output, or False
    matched = next((err for err in ERRORS if err in cmd_out), False)
    if matched:
        logger.verbose('[{}]:\tERROR FROM RPCCLIENT: {}'.format(
            logger.YELLOW(target), logger.YELLOW(matched)))
    return matched
def icmp_scan(target):
    """Send one ICMP echo to *target*; return the target when it appears up, else None."""
    timeout = 2
    logger.verbose('ICMP Timeout set to: ' + str(timeout))
    logger.blue('Pinging: {}'.format(logger.BLUE(target)))
    resp = sr1(IP(dst=str(target)) / ICMP(), timeout=timeout, verbose=0)
    try:
        icmp_type = str(resp.getlayer(ICMP).code)
        resp_parse = icmp_response_parse(icmp_type)
        logger.verbose('Got ICMP Type: [{}] {}'.format(
            logger.YELLOW(icmp_type), logger.YELLOW(resp_parse)))
    except Exception:
        # narrow to Exception: a bare except also swallowed KeyboardInterrupt.
        # resp may be None (timeout) or lack an ICMP layer.
        logger.verbose('Could not get ICMP Type code for: ' + logger.YELLOW(target))
    if resp is None:
        logger.verbose('Got no response from: ' + logger.YELLOW(target))
        logger.red_indent('{}: Down'.format(logger.RED(target)))
        result = None
    elif (int(resp.getlayer(ICMP).type) == 3 and
          int(resp.getlayer(ICMP).code) in [1, 2, 3, 9, 10, 13]):
        # destination-unreachable codes that mean filtered/absent
        logger.red_indent('{}: Down'.format(logger.RED(target)))
        result = None
    else:
        logger.green_indent('{}: Up'.format(logger.GREEN(target)))
        result = target
    return result
def get_name(target, timeout=5):
    """Resolve *target*'s NetBIOS name; return it as a string, or None on failure."""
    logger.blue('Getting NetBIOS Name for {}'.format(logger.BLUE(target)))
    logger.verbose('Timeout for NetBIOS resolution: ' + str(timeout))
    bios = NetBIOS()
    try:
        tmpnetbios_name = bios.queryIPForName(target, timeout=timeout)
        netbios_name = str(tmpnetbios_name[0])
    except Exception:
        # queryIPForName returns None/empty on failure, making the index raise
        netbios_name = None
    bios.close()
    if netbios_name is None:
        logger.red_indent('Failed to get NetBIOS Name')
        return None
    logger.green_indent('Got NetBIOS Name: {}'.format(logger.GREEN(netbios_name)))
    return str(netbios_name)
def validate(email, api_key):
    """Validate *email* through the hunter.io email-verifier API.

    Returns True/False for a definitive verdict (score threshold 68),
    429 when the API quota is exceeded, 401 for a bad API key;
    exits the program on request/parse errors.
    """
    url = 'https://api.hunter.io/v2/email-verifier?email=%s&api_key=%s' % (email, api_key)
    try:
        r = requests.get(url)
        status_code = r.status_code
    except Exception as e:
        print(e)
        quit()
    try:
        data = json.loads(r.content)
    except Exception as e:
        print(e)
        quit()
    if status_code == 429 or status_code == 401:
        try:
            result = data['errors'][0]['details']
        except Exception as e:
            print(e)
            quit()
        if 'exceeded' in result:
            return 429
        elif 'No user found for the API key supplied' in result:
            return 401
    elif status_code == 200:
        try:
            result = data['data']['result']
            score = data['data']['score']
        except Exception as e:
            print(e)
            quit()
        percent = str(score) + '%'
        if score > 68:
            logger.verbose('Validated %s at %s' % (logger.GREEN(email), logger.GREEN(percent)))
            return True
        else:
            return False
    else:
        # the original format string had no %s placeholder, so this line
        # raised TypeError instead of logging the status code
        logger.verbose('Got unexpected HTTP response %s' % logger.RED(str(status_code)))
def port_scan(target, ports):
    """SYN-scan *ports* on *target*.

    Returns the target on the first OPEN port found (and resets the
    half-open connection), or None when no port answers with SYN/ACK.
    """
    src_port = RandShort()
    # TCP flag values actually used by the scan (unused flag constants removed)
    SYN = 0x02
    RST = 0x04
    SYNACK = 0x12
    RSTACK = 0x14
    logger.blue('Checking TCP ports: {}'.format(logger.BLUE(target)))
    for port in ports:
        send_syn = sr1(IP(dst=target) / TCP(sport=src_port, dport=port, flags=SYN),
                       verbose=0, timeout=2)
        if send_syn is None:
            logger.verbose('Recieved no TCP response from: ' + logger.YELLOW(target))
            logger.red_indent('{}:{} [{}]'.format(
                logger.RED(target), logger.RED(str(port)), logger.RED('CLOSED')))
        elif send_syn.haslayer(TCP):
            tcp_flags = send_syn.getlayer(TCP).flags
            if tcp_flags == SYNACK:
                # tear the half-open connection down with a RST (reply unused)
                sr(IP(dst=target) / TCP(sport=src_port, dport=port, flags=RST),
                   verbose=0, timeout=2)
                logger.verbose('Recieved SYNACK from {}, responding with RST'.format(
                    logger.YELLOW(target)))
                logger.green_indent('{}:{} [{}]'.format(
                    logger.GREEN(target), logger.GREEN(str(port)), logger.GREEN('OPEN')))
                logger.verbose('Found alive host: ' + logger.YELLOW(target))
                return target
            elif tcp_flags == RSTACK:
                logger.verbose('Recieved RSTACK from: ' + logger.YELLOW(target))
                logger.red_indent('{}:{} [{}]'.format(
                    logger.RED(target), logger.RED(str(port)), logger.RED('CLOSED')))
            elif tcp_flags == RST:
                logger.verbose('Recieved RST from: ' + logger.YELLOW(target))
                logger.red_indent('{}:{} [{}]'.format(
                    logger.RED(target), logger.RED(str(port)), logger.RED('CLOSED')))
    return None
def get_name(target, timeout=2):
    """Resolve *target*'s NetBIOS name; return it as a string, or None on failure."""
    logger.live_info('[{}]:\tATTEMPTING NETBIOS NAME'.format(logger.BLUE(target)))
    logger.verbose('[{}]:\tNETBIOS TIMEOUT: {}'.format(
        logger.YELLOW(target), logger.YELLOW(str(timeout))))
    bios = NetBIOS()
    try:
        tmpnetbios_name = bios.queryIPForName(target, timeout=timeout)
        netbios_name = str(tmpnetbios_name[0])
    except Exception:
        # queryIPForName returns None/empty on failure, making the index raise
        netbios_name = None
    bios.close()
    if netbios_name is None:
        logger.live_bad('[{}]:\tNETBIOS NAME: {}'.format(
            logger.RED(target), logger.RED('FAILED')))
        return None
    logger.green('[{}]:\tNETBIOS NAME: {}'.format(
        logger.GREEN(target), logger.GREEN(netbios_name)))
    return str(netbios_name)
def validate(email):
    """Validate *email* against the o365 ActiveSync endpoint.

    Returns True (valid, 401), False (invalid, 404), [True, password] when
    the probe password actually authenticates (200/403), or None on an
    unexpected status. Exits on connection failure or Ctrl-C.
    """
    try:
        # The password here doesnt really matter as the o365 link just requires
        # anything, but its worthwhile having a common password in order to
        # check for access at the same time
        password = '******'
        url = 'https://outlook.office365.com/Microsoft-Server-ActiveSync'
        headers = {"MS-ASProtocolVersion": "14.0"}
        auth = (email, password)
        try:
            logger.verbose('Attempting to validate %s' % logger.YELLOW(email))
            r = requests.options(url, headers=headers, auth=auth)
            status = r.status_code
        except Exception:
            # except Exception (not bare except): the original bare except
            # also caught KeyboardInterrupt, defeating the outer handler
            logger.verbose('Unable to connect to [%s]' % logger.RED(url))
            quit()
        if status == 401:
            logger.verbose('Successfully validated %s' % logger.GREEN(email))
            return True
        elif status == 404:
            logger.verbose('Could not validate %s' % logger.RED(email))
            return False
        elif status == 403:
            logger.green('Found credentials: %s:%s (2FA)' % (logger.GREEN(email), logger.GREEN(password)))
            return [True, password]
        elif status == 200:
            logger.green('Found credentials: %s:%s' % (logger.GREEN(email), logger.GREEN(password)))
            return [True, password]
        else:
            logger.verbose(
                'Got HTTP Status Response %s. Unexpected, skipping.' % logger.RED(str(status)))
            return None
    except KeyboardInterrupt:
        logger.yellow('Keyboard interrupt detected!')
        quit()
def get_targets(targets):
    """Expand a target spec into a list of IP/host strings.

    Accepts a file path (one target per line), a CIDR range, a comma-separated
    list, or a single host. Exits on unparseable input.
    """
    target_list = []

    def _log_count():
        # shared verbose line emitted by every branch
        logger.verbose('Amount of targets from input: {}'.format(
            logger.BLUE(str(len(target_list)))))

    try:
        with open(targets, 'r') as file:
            contents = file.readlines()
        for line in contents:
            target_list.append(line.rstrip())
        _log_count()
        return target_list
    except Exception:
        # not a readable file -> treat the string itself as the target spec
        try:
            if "/" in targets:
                try:
                    subnet = IPNetwork(targets)
                except Exception:
                    logger.red('failed to parse')
                    quit()
                for ip in subnet:
                    ip_str = str(ip)
                    # skip network (.0) and broadcast (.255) addresses
                    if ip_str.split('.')[3] not in ('0', '255'):
                        target_list.append(ip_str)
                _log_count()
                return target_list
            elif "," in targets:
                for ip in targets.split(','):
                    target_list.append(ip)
                _log_count()
                return target_list
            else:
                target_list.append(targets)
                _log_count()
                return target_list
        except Exception:
            logger.red('Failed to parse targets.')
            quit()
def main():
    """Poll the WebSphere performance servlet (or parse a given XML file) and
    push the extracted records to InfluxDB and/or output files until the
    configured stop time (TERMINATE_AT) is reached."""
    ##
    ## General prologue
    ##
    l.info("%s" % (SEPARATOR_LINE))
    l.info("Script: %s/%s" % (WCV_SCRIPT_PATH, WCV_SCRIPTNAME))
    l.info("Date = %s / Time = %s" % (wcvGetDate("/"), wcvGetTime(":")))
    l.info("%s" % (SEPARATOR_LINE))
    ##
    ## parse CLI command line arguments
    l.debug("sys.argv=%s" % (sys.argv))
    sysArgv = sys.argv[1:]
    ## Copy the parameter values to the variables
    (parmServletXmlFile, parmPerfServletUrl, parmJsonOutFileName, parmWasCellName,
     parmNoEmpty, parmReplace, parmOmitSummary, parmInfluxUrl, parmInfluxDb,
     parmSeconds, parmTargetUser, parmTargetPwd, parmWasUser, parmWasPwd,
     parmOutFileName, parmOutFormat, parmOutConfigFile) = o.parseArguments(WCV_SCRIPTNAME, sysArgv)
    ##
    ## Get the output configuration file if provided
    whitelistDict = {}
    if parmOutConfigFile:
        whitelistDict = cr.readConfig(parmOutConfigFile)
        whitelistDict = whitelistDict["WHITELIST"]
    curDate = str(datetime.datetime.now().date())
    curTime = datetime.datetime.now().strftime("%H:%M:%S")
    ##
    ## If we start AFTER the endTime, we run until the next day's stopTime!!
    ## otherwise we end today
    endDate = datetime.datetime.now()
    if (curTime >= TERMINATE_AT):
        endDate = endDate + datetime.timedelta(days=1)
    endDate = str(endDate.date())
    l.debug("curDate: '%s', curTime: '%s', endDate: '%s'" % (curDate, curTime, endDate))
    # ISO date strings compare correctly as plain strings
    while ((endDate > curDate) or ((endDate == curDate) and (curTime < TERMINATE_AT))):
        ##
        ## if we get an xml file we can parse directly
        if parmServletXmlFile is not None:
            tree1 = ET.parse(parmServletXmlFile)
            root1 = tree1.getroot()
            l.debug("Processing XMl data from file: '%s'" % (parmServletXmlFile))
        else:
            ##
            ## Get the xml from the performance servlet URL
            pmiXmlDataString = getPerfServletData(parmPerfServletUrl, parmWasUser, parmWasPwd)
            if pmiXmlDataString is not None:
                root1 = ET.fromstring(pmiXmlDataString)
                l.debug("Processing XMl data from URL: '%s'" % (parmPerfServletUrl))
            else:
                root1 = None
                l.debug("No PMI data received from: '%s'" % (parmPerfServletUrl))
        ##
        ## Only if we got an XML root node
        if root1 is not None:
            perfList = getNodes(parmWasCellName, root1)
            ##
            ## Should we remove empty "perfdata" lists in the dictionaries
            if (parmNoEmpty == True):
                emptyList = [x for x in perfList if len(x["perfdata"]) == 0]
                numEmptyEntries = len(emptyList)
                l.debug("Number of entries in the emptyList: '%d'" % numEmptyEntries)
                oldLenght = len(perfList)
                perfList = [x for x in perfList if len(x["perfdata"]) != 0]
                newLenght = len(perfList)
                l.debug("Removed empty entries. Old # of entries: '%d'; new # of entries: '%d'"
                        % (oldLenght, newLenght))
            if (parmOmitSummary == True):
                perfList = removeSummaryData(perfList)
            ##
            ## Write data to the outfile if selected
            l.debug("FINALLY: '%s'" % (str(json.dumps(perfList))))
            if parmJsonOutFileName is not None:
                # with-statement guarantees the file is closed on error
                with open(parmJsonOutFileName, "w") as outFile:
                    outFile.write(str(json.dumps(perfList)))
            ##
            ## If we have an influx Db to write to ...
            if ((parmInfluxUrl != None) and (parmInfluxUrl != "")):
                writeToInflux(parmInfluxUrl, parmInfluxDb, parmTargetUser,
                              parmTargetPwd, perfList, whitelistDict)
                l.info("Data pushed to influx DB")
            ##
            ## append to log file in Splunk-like format "TS key1=value1 key2=value2 ..."
            if parmOutFileName is not None:
                data = outputFormatter(perfList, outFormat=parmOutFormat,
                                       whitelistDict=whitelistDict)
                l.verbose("formatted data: \n%s", data)
                with open(parmOutFileName, "w") as outFile:
                    outFile.write(data)
        ##
        ## If input was a file we break the loop otherwise we sleep
        if parmServletXmlFile is not None:
            break
        else:
            if ((parmSeconds != None) and (parmSeconds != 0)):
                l.debug("sleeping '%d' seconds" % (parmSeconds))
                time.sleep(parmSeconds)
            else:
                l.info("No valid sleep time (--seconds|-s) provided --> exiting")
                break
        ##
        ## Get the current date / time for the check if we've reached the
        ## stoptime .. i.e. time to exit
        curDate = str(datetime.datetime.now().date())
        curTime = datetime.datetime.now().strftime("%H:%M:%S")
        l.debug("curDate: '%s', curTime: '%s', endDate: '%s'" % (curDate, curTime, endDate))
def get_shares(target, domain_name, remote_name, username, password):
    """Connect to *target* over SMB (direct TCP/445) and list its share names.

    Returns a list of share names (possibly empty on connection failure),
    or None when the remote NetBIOS name is unknown.
    """
    my_name = 'WIN-2003'  # client NetBIOS name presented to the server
    logger.verbose('Client name configured to: ' + logger.YELLOW(my_name))
    logger.blue('Looking up shares on {}'.format(logger.BLUE(target)))
    server_ip = target
    if remote_name is not None:
        logger.verbose('Connection status: [{} | {} | {}]'.format(
            logger.YELLOW(server_ip), logger.YELLOW(remote_name), logger.YELLOW(domain_name)))
    else:
        try:
            logger.verbose('Connection status: [{} | {} | {}]'.format(
                logger.YELLOW(server_ip), logger.YELLOW('Could not resolve name'),
                logger.YELLOW(domain_name)))
        except Exception:
            pass
    open_shares = []
    if remote_name is None:
        logger.red_indent('Could not get remote hosts name, skipping...')
        return None
    conn = SMBConnection(username, password, my_name, remote_name,
                         domain=domain_name, use_ntlm_v2=True, is_direct_tcp=True)
    logger.verbose('SMB configuration:')
    logger.verbose('\tConnecting with: {}'.format(logger.YELLOW(username)))
    for k, v in vars(conn).items():
        attribute = str(k)
        value = str(v)
        # only show plain config values, not internal objects/bound methods
        if '<class' not in value and 'bound method' not in value and 'object' not in value and "b''" not in value:
            logger.verbose('\t' + attribute + ': ' + value)
    try:
        conn.connect(server_ip, 445)
        logger.green('Successfully connected to {} on {}'.format(
            logger.GREEN('smb'), logger.GREEN(server_ip)))
        try:
            shares = conn.listShares(timeout=15)
            for share in shares:
                share_name = str(share.name)
                logger.green_indent_list(logger.GREEN(share_name))
                open_shares.append(share_name)
        except Exception as e:
            logger.red_indent('Got error: {}'.format(logger.RED(e)))
    except Exception:
        logger.red_indent('Failed to obtain shares from {}'.format(logger.RED(server_ip)))
    return open_shares
def main():
    """Entry point: parse options, discover live hosts, enumerate and report them."""
    # We don't need if conditions, since we already have True/False stored in variables
    logger.QUIET = args.quiet
    logger.VERBOSE = args.verbose
    if args.ports:
        p = []
        ports = args.ports
        if "-" in ports:
            try:
                start = int(ports.split('-')[0])
                end = int(ports.split('-')[1])
                for port in range(start, end + 1):
                    p.append(port)
            except Exception:
                print('failed to split on "-"')
                quit()
        elif "," in args.ports:
            p = [int(n) for n in args.ports.split(",")]
        elif len(args.ports) > 0 and "-" not in args.ports and "," not in args.ports:
            try:
                p.append(int(args.ports))
            except ValueError:
                print('Please specify an port number')
                quit()
    else:
        p = [53, 88, 139, 445, 464]
    if args.ports:
        logger.verbose('Ports configuration: ' + str(p))
    # NOTE(review): the original username/password verbose-logging statements
    # were redacted ("******") in this source and could not be recovered
    # exactly — restore them from version control if needed.
    if args.domain:
        domain = args.domain
    else:
        domain = 'WORKGROUP'
    logger.verbose('Domain: ' + logger.YELLOW(domain))
    logging.debug('Trying to get targets..')
    hosts = get_targets(args.target)  # all possible hosts
    logging.debug('Got targets..')
    logging.debug('Proceeding to hosts discovery..')
    if args.mode is not None:
        if args.mode.upper() == 'ICMP':
            logger.verbose('Discovery mode set to ICMP')
            alive_hosts = thread_pool.map(icmp_scan, hosts)  # hosts answering icmp
            logging.debug('Hosts list: {}'.format(alive_hosts))
        elif args.mode.upper() == 'PORTS':
            logger.verbose('Discovery mode set to ports')
            # bind the port list so each worker gets port_scan(host, ports=p)
            port_scan_fixed = partial(port_scan, ports=p)
            alive_hosts = thread_pool.map(port_scan_fixed, hosts)
            logging.debug('Hosts list: {}'.format(alive_hosts))
        elif args.mode.upper() == 'SKIP':
            logger.verbose('Discovery mode set to skip, scanning all {} hosts'.format(
                logger.YELLOW(str(len(hosts)))))
            alive_hosts = hosts
        else:
            logger.red('Unknown option for -m! Only PORTS, SKIP and ICMP can be used!')
            quit()
    else:
        logger.verbose('No discovery mode set, defaulting to ICMP')
        alive_hosts = icmp_scan(hosts)  # all hosts that respond to icmp
    logging.debug('All alive hosts are discovered..')
    # create an empty list that will store all the Host objects
    enumerated_hosts = []
    logging.debug('Processing hosts enumeration...')
    # for every host, do some enum
    enumerated_hosts = thread_pool.map(hosts_enumeration, alive_hosts)
    logging.debug('Enumeration finished..')
    if args.output:
        outfile_name = args.output
        clean_output(outfile_name)
        for host in enumerated_hosts:
            # for every host object, pass the attributes to output()
            output(outfile_name, host.ip, host.name, host.null_sessions, host.shares)
def parse_users(data, userdata_per_page, total_employees):
    """Turn raw LinkedIn search pages into a list of User objects.

    Extracts names/role/company/picture from each SearchProfile hit, derives
    an email address from the configured naming scheme and domain, and
    (optionally) validates each address via o365 or hunter.
    """
    cookie = data.cookie
    company_id = data.company_id
    email_format = data.email_format
    keyword = data.keyword
    domain = data.domain
    validation = data.validation
    api_key = data.api_key
    validation_count = 0
    logger.debug(str(vars(data)))
    # normalise the domain so it always carries a leading '@'
    if not domain.startswith('@'):
        domain = '@' + domain
    users = []
    if validation:
        print()
        logger.yellow('Starting Validation')
        # pre-count candidates so a countdown can be shown while validating
        for user_data in userdata_per_page:
            for d in user_data['elements'][0]['elements']:
                validation_count += 1
    for user_data in userdata_per_page:
        for d in user_data['elements'][0]['elements']:  # one user at a time
            if 'com.linkedin.voyager.search.SearchProfile' in d['hitInfo'] and \
                    d['hitInfo']['com.linkedin.voyager.search.SearchProfile']['headless'] == False:
                # aliases for the deeply nested payload sections
                profile = d['hitInfo']['com.linkedin.voyager.search.SearchProfile']
                mini = profile['miniProfile']
                try:
                    industry = profile['industry']
                    logger.debug(industry)
                except Exception:
                    industry = ""
                raw_firstname = mini['firstName']
                raw_surname = mini['lastName']
                profile_url = "https://www.linkedin.com/in/%s" % mini['publicIdentifier']
                occupation = mini['occupation']
                location = profile['location']
                # best-effort split of "role at company"
                try:
                    role_data = profile['snippets'][0]['heading']['text']
                    try:
                        current_role = role_data.split(' at ')[0]
                        current_company = role_data.split(' at ')[1]
                    except Exception:
                        current_company = None
                        current_role = occupation
                except Exception:
                    try:
                        current_company = occupation.split(' at ')[1]
                        current_role = occupation.split(' at ')[0]
                    except Exception:
                        current_company = None
                        current_role = occupation
                name_data = [raw_firstname, raw_surname]
                logger.debug(str(name_data))
                name_scheme = naming_scheme.names(name_data)
                firstname = name_scheme[0]
                middlename = name_scheme[1]
                surname = name_scheme[2]
                fullname = name_scheme[3]
                name_data = [firstname, middlename, surname]
                email = naming_scheme.emails(name_data, email_format, domain)
                try:
                    root_url = mini['picture']['com.linkedin.common.VectorImage']['rootUrl']
                    segment = mini['picture']['com.linkedin.common.VectorImage']['artifacts'][2]['fileIdentifyingUrlPathSegment']
                    picture = root_url + segment
                    logger.debug(picture)
                except Exception:
                    picture = None
                if validation != None:
                    validation_count -= 1
                    if validation == 'o365':
                        validated = o365_validation.validate(email)
                    elif validation == 'hunter':
                        validated = hunter_validation.validate(email, api_key)
                        if validated == 429:
                            logger.red('You have exceeded your hunter API Requests.')
                            quit()
                        elif validated == 401:
                            logger.red('The API Key specified recieved an %s error.' % 'authentication')
                            quit()
                    else:
                        validated = False
                else:
                    validated = False
                if validation:
                    logger.verbose('%s emails remaining...' % logger.YELLOW(validation_count))
                user = user_structure.User(profile_url, picture, firstname, middlename,
                                           surname, fullname, email, validated,
                                           current_role, current_company)
                users.append(user)
    if validation:
        logger.yellow('Validation finished!')
        print()
    return users
def main():
    """Entry point: parse options, discover live hosts, enumerate them, and
    write txt/csv/html reports as requested."""
    pool = ThreadPool(processes=args.threads)
    logger.VERBOSE = args.verbose
    logger.LIVE = args.live
    start_time = strftime("%H:%M:%S", gmtime())
    filetypes = ['txt', 'csv', 'html', 'all']
    if args.format:
        if args.format.lower() not in filetypes:
            logger.red('Did not understand the format supplied: [{}]'.format(
                logger.RED(args.format)))
            quit()
    if args.ports:
        p = []
        ports = args.ports
        if "-" in ports:
            try:
                start = int(ports.split('-')[0])
                end = int(ports.split('-')[1])
                for port in range(start, end + 1):
                    p.append(port)
            except Exception:
                print('failed to split on "-"')
                quit()
        elif "," in args.ports:
            p = [int(n) for n in args.ports.split(",")]
        elif len(args.ports) > 0 and "-" not in args.ports and "," not in args.ports:
            try:
                p.append(int(args.ports))
            except ValueError:
                print('Please specify an port number')
                quit()
    else:
        p = [53, 88, 139, 445, 464]
    if args.ports:
        logger.verbose('Ports configuration: ' + str(p))
    target = args.target
    hosts = get_targets(target)  # all possible hosts
    scan_type = args.enumerate
    logger.blue('Target: [{}]'.format(logger.BLUE(target)))
    logger.blue('Found {} target(s)'.format(logger.BLUE(str(len(hosts)))))
    if scan_type is None:
        logger.blue('Scan type: [{}]'.format(logger.BLUE('default')))
    else:
        logger.blue('Scan type: [{}]'.format(logger.BLUE(scan_type)))
    if args.ports:
        logger.blue('Ports given: [{}]'.format(logger.BLUE(args.ports)))
        logger.blue('Port count: [{}]'.format(logger.BLUE(str(len(p)))))
    username, password = cred_split(args.credentials)
    if username and password:
        logger.blue('Username: [{}]'.format(logger.BLUE(username)))
        logger.blue('Password: [{}]'.format(logger.BLUE(password)))
    if args.domain:
        domain = args.domain
    else:
        domain = 'WORKGROUP'
    logger.blue('Domain: [{}]'.format(logger.BLUE(domain)))
    logger.header('SCANNING')
    logger.blue('Start time: ' + logger.BLUE(start_time))
    if args.mode is not None:
        if args.mode.upper() == 'ICMP':
            logger.verbose('Discovery mode set to ICMP')
            alive_hosts = pool.map(icmp_scan, hosts)
        elif args.mode.upper() == 'PORTS':
            logger.verbose('Discovery mode set to ports')
            # BUGFIX: map over the hosts with the port list bound, matching
            # port_scan(target, ports). The original mapped
            # partial(port_scan, hosts) over p, i.e. called
            # port_scan(hosts, port) once per PORT with the whole host list
            # as the target.
            alive_hosts = pool.map(partial(port_scan, ports=p), hosts)
        elif args.mode.upper() == 'SKIP':
            logger.verbose('Discovery mode set to skip, scanning all {} hosts'.format(
                logger.YELLOW(str(len(hosts)))))
            alive_hosts = hosts
        else:
            logger.red('Unknown option for -m! Only skip, port and icmp can be used!')
            quit()
    else:
        logger.verbose('No discovery mode set, skipping')
        alive_hosts = hosts
    # The scanners can return nested lists ([[]]) and None entries; flatten
    # list entries, drop empties/None (the original ''.join(x) raised
    # TypeError on None results), then de-duplicate.
    alive_hosts = [x if isinstance(x, str) else ''.join(x) for x in alive_hosts if x]
    alive_hosts = list(filter(None, alive_hosts))  # remove empties
    alive_hosts = list(set(alive_hosts))  # removes duplicates
    # create an empty list that will store all the Host objects
    enumerated_hosts = []
    if args.enumerate is not None:
        if args.enumerate.lower() == 'null':
            pass
        elif args.enumerate.lower() == 'shares':
            pass
        else:
            logger.red('Unknown option for -e! Only null and shares can be used!')
            quit()
    enumerated_hosts = pool.map(hosts_enumeration, alive_hosts)
    end_time = strftime("%H:%M:%S", gmtime())
    logger.blue('End time: ' + logger.BLUE(end_time))
    logger.header('RESULTS')
    results_parse(results_cache, scan_type)
    if args.output:
        outfile_name = args.output
        if args.format:
            outfo = args.format.lower()
            if outfo == 'txt':
                clean_output(outfile_name)
                output(results_cache, outfile_name, scan_type)
            elif outfo == 'csv':
                clean_output(outfile_name)
                csv_output(results_cache, outfile_name, scan_type)
            elif outfo == 'html':
                clean_output(outfile_name)
                html_output(results_cache, outfile_name, scan_type)
            elif outfo == 'all':
                # strip any extension; each writer appends its own
                try:
                    outfile_name = outfile_name.split('.')[0]
                except Exception:
                    pass
                clean_output(outfile_name)
                output(results_cache, outfile_name + '.txt', scan_type)
                csv_output(results_cache, outfile_name + '.csv', scan_type)
                html_output(results_cache, outfile_name + '.html', scan_type)
        else:
            clean_output(outfile_name)
            output(results_cache, outfile_name, scan_type)
def get_shares(target, domain_name, remote_name, username, password):
    """Connect to *target* over SMB (direct TCP/445) and list its share names.

    Returns a list of share names (possibly empty on connection failure),
    or None when the remote NetBIOS name is unknown.
    """
    my_name = 'WIN-2003'  # client NetBIOS name presented to the server
    logger.verbose('[{}]:\tCLIENT NAME CONFIGURED TO: {}'.format(
        logger.YELLOW(target), logger.YELLOW(my_name)))
    logger.live_info('[{}]:\tATTEMPTING SHARES'.format(logger.BLUE(target)))
    server_ip = target
    if remote_name is not None:
        logger.verbose('[{}]:\tCONNECTION STATUS: [{} | {} | {}]'.format(
            logger.YELLOW(target), logger.YELLOW(server_ip),
            logger.YELLOW(remote_name), logger.YELLOW(domain_name)))
    else:
        logger.verbose('[{}]:\tCONNECTION STATUS: [{} | {} | {}]'.format(
            logger.YELLOW(target), logger.YELLOW(server_ip),
            logger.YELLOW('COULD NOT RESOLVE'), logger.YELLOW(domain_name)))
    open_shares = []
    if remote_name is None:
        logger.live_bad('[{}]:\tSMB CONNECTION: {}'.format(
            logger.RED(server_ip), logger.RED('COULD NOT GET REMOTE HOST NAME')))
        return None
    conn = SMBConnection(username, password, my_name, remote_name,
                         domain=domain_name, use_ntlm_v2=True, is_direct_tcp=True)
    logger.verbose('SMB configuration:')
    logger.verbose('\tConnecting with: {}'.format(logger.YELLOW(username)))
    for k, v in vars(conn).items():
        attribute = str(k)
        value = str(v)
        # only show plain config values, not internal objects/bound methods
        if '<class' not in value and 'bound method' not in value and 'object' not in value and "b''" not in value:
            logger.verbose('\t' + attribute + ': ' + value)
    try:
        conn.connect(server_ip, 445)
        logger.green('[{}]:\tSMB CONNECTION: {}'.format(
            logger.GREEN(server_ip), logger.GREEN('SUCCESS')))
        try:
            shares = conn.listShares(timeout=15)
            for share in shares:
                open_shares.append(str(share.name))
        except Exception as e:
            logger.live_bad('Got error: {}'.format(logger.RED(e)))
    except Exception:
        logger.live_bad('[{}]:\tSMB CONNECTION: {}'.format(
            logger.RED(server_ip), logger.RED('FAILED')))
    logger.green('[{}]:\tSHARES: {}'.format(
        logger.GREEN(target), logger.GREEN(', '.join(open_shares))))
    return open_shares