def do_host_status(records=[], query=None, asset_group=None, hosts=[]):
    """
    Runs through the t_hosts table and updates the *_count entries.
    Can also run through a specific list of record IDs instead.
    """
    from skaldship.hosts import do_host_status
    do_host_status(records=records, query=query, asset_group=asset_group, hosts=hosts)
    return True
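# A minimal sketch of queueing the wrapper above through the web2py scheduler,
# mirroring the queue_task() call in update_dynamic_fields() below. The asset
# group, group name and timeout values here are placeholders, not project
# configuration.
task = scheduler.queue_task(
    do_host_status,                        # the task wrapper defined above
    pvars=dict(asset_group='DMZ Hosts'),   # hypothetical asset group
    group_name='kvasir_scheduler',         # assumed scheduler group name
    timeout=300,                           # timeout in seconds (placeholder)
)
if task.id:
    redirect(URL('tasks', 'status', args=task.id))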
def update_dynamic_fields():
    """
    Executes the following functions that update dynamic field entries:

        skaldship.hosts.do_host_status
        skaldship.exploits.connect_exploits
    """
    response.title = "%s :: Update Dynamic Fields" % (settings.title)
    users = db(db.auth_user).select()
    userlist = []
    for user in users:
        userlist.append([user.id, user.username])

    ag = db(db.t_hosts).select(db.t_hosts.f_asset_group, distinct=True).as_list()
    asset_groups = map((lambda x: x['f_asset_group']), ag)

    form = SQLFORM.factory(
        Field('f_exploit_link', type='boolean', default=True, label=T('Exploit linking')),
        Field('f_host_status', type='boolean', default=True, label=T('Host Service/Vuln counts')),
        Field('f_asset_group', type='list:string', label=T('Asset Group'),
              requires=IS_EMPTY_OR(IS_IN_SET(asset_groups, multiple=False))),
        Field('f_taskit', type='boolean', default=auth.user.f_scheduler_tasks,
              label=T('Run in background task')),
    )

    from skaldship.hosts import do_host_status
    from skaldship.exploits import connect_exploits

    if form.accepts(request.vars, session):
        if form.vars.f_exploit_link:
            connect_exploits()
        if form.vars.f_host_status:
            if form.vars.f_taskit:
                task = scheduler.queue_task(
                    do_host_status,
                    pvars=dict(asset_group=form.vars.f_asset_group),
                    group_name=settings.scheduler_group_name,
                    sync_output=5,
                    timeout=settings.scheduler_timeout,
                )
                if task.id:
                    redirect(URL('tasks', 'status', args=task.id))
                else:
                    resp_text = "Error submitting job: %s" % (task.errors)
            else:
                do_host_status(asset_group=form.vars.f_asset_group)
                response.flash = "Task completed!"
    elif form.errors:
        response.flash = 'Error in form'

    return dict(
        form=form,
        err404=get_oreally_404(request.folder),
    )
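# The asset-group choices above come from a distinct select over t_hosts. A
# minimal equivalent is sketched below against web2py's DAL (table and field
# names are the ones used above). On Python 3, map() returns a lazy iterator
# that can only be consumed once, so a list comprehension keeps the choices
# around as a plain, reusable list.
rows = db(db.t_hosts).select(db.t_hosts.f_asset_group, distinct=True).as_list()
asset_groups = [row['f_asset_group'] for row in rows]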
def process_file(filename=None, asset_group=None, engineer=None):
    # Upload and process hping Scan file
    from skaldship.hosts import get_host_record, do_host_status, add_or_update

    log(" [*] Processing hping scan file %s" % filename)

    hoststats = 0
    nodefields = {'f_engineer': engineer, 'f_asset_group': asset_group, 'f_confirmed': False}
    svc_db = db.t_services
    host_ip = None
    ICMP_type = ''
    answer_ip = ''

    with open(filename) as f:
        for line in f:
            if "IP: " in line:
                host_ip = line.split()[1]
                if IS_IPADDRESS()(host_ip)[1] == None:
                    nodefields['f_ipaddr'] = host_ip
                    host_rec = add_or_update(nodefields, update=True)
                    hoststats += 1
                else:
                    log(" [!] ERROR: Not a valid IP Address (%s)" % host_ip, logging.ERROR)
            if "[*] " in line:
                ICMP_type = line.split()[1]
            if "ip=" in line:
                ip = line.split('=')[2]
                answer_ip = ip.split()[0]
            if "transmitted" in line:
                packets = line.split()
                if packets[0] == packets[3]:
                    if answer_ip != host_ip:
                        response = T("No")
                    else:
                        response = T("Yes")
                else:
                    response = T("No")
                get_id = get_host_record(host_ip)
                svc_db.update_or_insert(
                    f_hosts_id=get_id.id,
                    f_proto='ICMP',
                    f_number='0',
                    f_status=response,
                    f_name=ICMP_type
                )
                db.commit()
        f.close()

    do_host_status(asset_group=asset_group)
    log(" [*] Import complete, %s hosts added/updated" % hoststats)
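# The parser above works on plain hping-style text output. The sample lines
# below are invented to match what each branch of the loop expects (real
# hping output can vary by version); they show which token each branch pulls
# out of the line.
sample = [
    "IP: 10.0.0.5",                                        # "IP: " branch
    "[*] ICMP echo-reply",                                 # "[*] " branch
    "len=46 ip=10.0.0.5 ttl=64 id=1234",                   # "ip=" branch
    "3 packets transmitted, 3 packets received, 0% loss",  # "transmitted" branch
]
host_ip = sample[0].split()[1]                  # '10.0.0.5'
icmp_type = sample[1].split()[1]                # 'ICMP'
answer_ip = sample[2].split('=')[2].split()[0]  # '10.0.0.5'
packets = sample[3].split()
responded = packets[0] == packets[3] and answer_ip == host_ip  # True -> status "Yes"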
def process_xml( filename=None, addnoports=False, asset_group=None, engineer=None, msf_workspace=False, ip_ignore_list=None, ip_include_list=None, update_hosts=False, ): # Upload and process Qualys XML Scan file import os, time, re, html.parser from io import StringIO from MetasploitProAPI import MetasploitProAPI from skaldship.hosts import html_to_markmin, get_host_record, do_host_status from skaldship.cpe import lookup_cpe parser = html.parser.HTMLParser() # output regexes RE_NETBIOS_NAME = re.compile('NetBIOS name: (?P<d>.*),') RE_NETBIOS_MAC = re.compile( 'NetBIOS MAC: (?P<d>([0-9A-Fa-f]{2}[:-]){5}([0-9A-Fa-f]{2}))') RE_IPV4 = re.compile('^(?:[0-9]{1,3}\.){3}[0-9]{1,3}$') if msf_workspace: msf = MetasploitProAPI(host=user_id.f_msf_pro_url, apikey=user_id.f_msf_pro_key) if msf.login(): logger.info(" [-] Authenticated to Metasploit PRO") else: logger.error( " [!] Unable to login to Metasploit PRO, check your API key") msf = None else: logger.warn(" [-] No Metasploit workspace provided!") msf = None try: from lxml import etree except ImportError: try: import xml.etree.cElementTree as etree except ImportError: try: import xml.etree.ElementTree as etree except: raise Exception("Unable to find valid ElementTree module.") # build the hosts only/exclude list ip_exclude = [] if ip_ignore_list: ip_exclude = ip_ignore_list.split('\r\n') # TODO: check for ip subnet/range and break it out to individuals ip_only = [] if ip_include_list: ip_only = ip_include_list.split('\r\n') # TODO: check for ip subnet/range and break it out to individuals print((" [*] Processing Qualys scan file %s" % (filename))) try: nmap_xml = etree.parse(filename) except etree.ParseError as e: print((" [!] Invalid XML file (%s): %s " % (filename, e))) return root = nmap_xml.getroot() db = current.globalenv['db'] cache = current.globalenv['cache'] existing_vulnids = db(db.t_vulndata()).select( db.t_vulndata.id, db.t_vulndata.f_vulnid).as_dict(key='f_vulnid') #print(" [*] Found %d vulnerabilities in the database already." % (len(existing_vulnids))) # check for any CPE OS data if db(db.t_cpe_os).count() > 0: have_cpe = True else: have_cpe = False user_id = db.auth_user(engineer) or auth.user.id # parse the hosts, where all the goodies are nodes = root.findall('IP') print((" [-] Parsing %d hosts" % (len(nodes)))) hoststats = {} hoststats['added'] = 0 hoststats['skipped'] = 0 hoststats['updated'] = 0 hoststats['errored'] = 0 hosts = [] # array of host_id fields vulns_added = 0 vulns_skipped = 0 for node in nodes: nodefields = {} ipaddr = node.get('value') nodefields['f_ipaddr'] = ipaddr nodefields['f_hostname'] = node.get('hostname') nodefields['f_netbios_name'] = node.findtext('NETBIOS_HOSTNAME') # nodefields['f_macaddr'] = address.get('addr') """ status = node.find('status').get('state') print(" [-] Host %s status is: %s" % (ipaddr, status)) if status != "up": hoststats['skipped'] += 1 continue """ if ipaddr in ip_exclude: print(" [-] Host is in exclude list... skipping") hoststats['skipped'] += 1 continue if len(ip_only) > 0 and ipaddr not in ip_only: print(" [-] Host is not in the only list... skipping") hoststats['skipped'] += 1 continue ports = node.findall('INFOS') if len(ports) < 1 and not addnoports: print( " [-] No ports open and not asked to add those kind... 
skipping" ) hoststats['skipped'] += 1 continue nodefields['f_engineer'] = user_id nodefields['f_asset_group'] = asset_group nodefields['f_confirmed'] = False # check to see if IPv4/IPv6 exists in DB already if 'f_ipaddr' in nodefields: host_rec = db(db.t_hosts.f_ipaddr == nodefields['f_ipaddr']).select().first() else: logging.warn("No IP Address found in record. Skipping") continue if host_rec is None: host_id = db.t_hosts.insert(**nodefields) db.commit() hoststats['added'] += 1 print((" [-] Adding %s" % (ipaddr))) elif host_rec is not None and update_hosts: db.commit() host_id = db(db.t_hosts.f_ipaddr == nodefields['f_ipaddr']).update( **nodefields) db.commit() host_id = get_host_record(ipaddr) host_id = host_id.id hoststats['updated'] += 1 print((" [-] Updating %s" % (ipaddr))) else: hoststats['skipped'] += 1 db.commit() print((" [-] Skipped %s" % (ipaddr))) continue hosts.append(host_id) # : for hostscripts in node.findall('hostscript/script'): svc_id = db.t_services.update_or_insert(f_proto='info', f_number=0, f_status='open', f_hosts_id=host_id) db.commit() for script in hostscripts: script_id = script.get('id') output = script.get('output') svc_info = db.t_service_info.update_or_insert( f_services_id=svc_id, f_name=script_id, f_text=output) db.commit() # add ports and resulting vulndata for port in node.findall("ports/port"): f_proto = port.get('protocol') f_number = port.get('portid') f_status = port.find('state').get('state') port_svc = port.find('service') if port_svc: f_name = port_svc.get('name') f_product = port_svc.get('product') svc_fp = port_svc.get('servicefp') else: f_name = None f_product = None svc_fp = None print( (" [-] Adding port: %s/%s (%s)" % (f_proto, f_number, f_name))) svc_id = db.t_services.update_or_insert(f_proto=f_proto, f_number=f_number, f_status=f_status, f_hosts_id=host_id, f_name=f_name) if f_product: version = port.find('service').get('version', None) if version: f_product += " (%s)" % (version) svc_info = db.t_service_info.update_or_insert( f_services_id=svc_id, f_name=f_name, f_text=f_product) db.commit() if svc_fp: svc_info = db.t_service_info.update_or_insert( f_services_id=svc_id, f_name=svc_fp, f_text=svc_fp) db.commit() # Process <script> service entries for script in port.findall('service/script'): svc_info = db.t_service_info.update_or_insert( f_services_id=svc_id, f_name=script.get('id'), f_text=script.get('output')) db.commit() # Process <cpe> service entries for port_cpe in port.findall('service/cpe'): cpe_id = port_cpe.text.replace('cpe:/', '') if cpe_id[0] == "a": # process CPE Applications print((" [-] Found Application CPE data: %s" % (cpe_id))) svc_info = db.t_service_info.update_or_insert( f_services_id=svc_id, f_name='CPE ID', f_text="cpe:/%s" % (cpe_id)) db.commit() elif cpe_id[0] == "o": # process CPE Operating System os_id = lookup_cpe(cpe_id[2:]) if os_id is not None: db.t_host_os_refs.insert(f_certainty='0.9', f_family='Unknown', f_class='Other', f_hosts_id=host_id, f_os_id=os_id) db.commit() else: # So no CPE or existing OS data, lets split up the CPE data and make our own print(" [!] 
No os_id found, this is odd !!!") for config in port.findall("configuration/config"): cfg_id = db.t_service_info.update_or_insert( f_services_id=svc_id, f_name=config.attrib['name'], f_text=config.text) db.commit() if re.match('\w+.banner$', config.attrib['name']): db.t_services[svc_id] = dict(f_banner=config.text) db.commit() if config.attrib['name'] == 'mac-address': # update the mac address of the host db.t_hosts[host_id] = dict(f_macaddr=config.text) db.commit() if "advertised-name" in config.attrib['name']: # netbios computer name d = config.text.split(" ")[0] if "Computer Name" in config.text: data = {} data['f_netbios_name'] = d # if hostname isn't defined then lowercase netbios name and put it in if db.t_hosts[host_id].f_hostname is None: data['f_hostname'] = d.lower() db(db.t_hosts.id == host_id).update(**data) elif "Domain Name" in config.text: db(db.t_netbios.f_hosts_id == host_id).update( f_domain=d) or db.t_netbios.insert( f_hosts_id=host_id, f_domain=d) db.commit() for script in port.findall("script"): # process <script> results. This data contains both info # and vulnerability data. For now we'll take a list of # known nmap vuln checks from private/nmap_vulns.csv and # use that to separate between service_info and vulndata. pass if msf is not None: # send the downloaded nexpose file to MSF for importing try: res = msf.pro_import_file( msf_workspace, filename, { 'DS_REMOVE_FILE': False, 'tag': asset_group, }, ) print((" [*] Added file to MSF Pro: %s" % (res))) except MSFAPIError as e: logging.error("MSFAPI Error: %s" % (e)) pass # any new nexpose vulns need to be checked against exploits table and connected print(" [*] Connecting exploits to vulns and performing do_host_status") do_host_status(asset_group=asset_group) print(( " [*] Import complete: hosts: %s added, %s skipped, %s errors - vulns: %s added, %s skipped" % (hoststats['added'], hoststats['skipped'], hoststats['errored'], vulns_added, vulns_skipped)))
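# The CPE handling above keys off the first letter after the 'cpe:/' prefix:
# 'a' entries describe applications and become t_service_info rows, while 'o'
# entries describe operating systems and go through lookup_cpe(). A compact
# sketch of that branching is shown below; the identifiers are hypothetical,
# and lookup_cpe is the project's own helper from skaldship.cpe, imported as
# in the function above.
for raw in ('cpe:/a:apache:http_server:2.4', 'cpe:/o:microsoft:windows_7'):
    cpe_id = raw.replace('cpe:/', '')
    if cpe_id[0] == 'a':
        service_info_text = "cpe:/%s" % cpe_id  # stored under the name 'CPE ID'
    elif cpe_id[0] == 'o':
        os_id = lookup_cpe(cpe_id[2:])          # strip the leading 'o:' before lookup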
def process_xml( filename=None, addnoports=False, asset_group=None, engineer=None, msf_settings={}, ip_ignore_list=None, ip_include_list=None, update_hosts=False, ): # Upload and process Nmap XML Scan file import re import os from skaldship.hosts import get_host_record, do_host_status from skaldship.cpe import lookup_cpe from zenmapCore_Kvasir.NmapParser import NmapParser from gluon import current T = current.T # output regexes RE_NETBIOS_NAME = re.compile('NetBIOS computer name: (?P<d>.*),') RE_NETBIOS_WORKGROUP = re.compile('Workgroup: (?P<d>.*),') RE_NETBIOS_MAC = re.compile( 'NetBIOS MAC: (?P<d>([0-9A-Fa-f]{2}[:-]){5}([0-9A-Fa-f]{2}))') # build the hosts only/exclude list ip_exclude = [] if ip_ignore_list: ip_exclude = ip_ignore_list.split('\r\n') # TODO: check for ip subnet/range and break it out to individuals ip_only = [] if ip_include_list: ip_only = ip_include_list.split('\r\n') # TODO: check for ip subnet/range and break it out to individuals log(" [*] Processing Nmap scan file %s" % (filename)) nmap_parsed = NmapParser() nmap_parsed.parse_file(filename) #existing_vulnids = db(db.t_vulndata()).select(db.t_vulndata.id, db.t_vulndata.f_vulnid).as_dict(key='f_vulnid') # parse the hosts, where all the goodies are log(" [-] Parsing %d hosts" % (len(nmap_parsed.hosts))) hoststats = {} hoststats['added'] = 0 hoststats['skipped'] = 0 hoststats['updated'] = 0 hoststats['errored'] = 0 hosts = [] # array of host_id fields svc_db = db.t_services for node in nmap_parsed.hosts: nodefields = {} if node.ipv6: ipaddr = node.ipv6 nodefields['f_ipaddr'] = ipaddr elif node.ip.get('type') == 'ipv4': ipaddr = node.ip.get('addr') nodefields['f_ipaddr'] = ipaddr else: log(" [!] No IPv4/IPv6 address, skipping") continue try: nodefields['f_macaddr'] = node.mac['addr'] except TypeError: nodefields['f_macaddr'] = None status = node.state log(" [-] Host %s status is: %s" % (ipaddr, status)) if status != "up": hoststats['skipped'] += 1 continue if ipaddr in ip_exclude: log(" [-] Host is in exclude list... skipping") hoststats['skipped'] += 1 continue if len(ip_only) > 0 and ipaddr not in ip_only: log(" [-] Host is not in the only list... skipping") hoststats['skipped'] += 1 continue if not node.ports and not addnoports: log(" [-] No ports open and not asked to add those kind... skipping" ) hoststats['skipped'] += 1 continue # we'll just take the last hostname in the names list since it'll usually be the full dns name for name in node.hostnames: nodefields['f_hostname'] = name['hostname'] nodefields['f_engineer'] = engineer nodefields['f_asset_group'] = asset_group nodefields['f_confirmed'] = False # see if host exists, if so update. if not, insert! 
query = (db.t_hosts.f_ipaddr == ipaddr) host_rec = db(query).select().first() if host_rec is None: host_id = db.t_hosts.insert(**nodefields) db.commit() hoststats['added'] += 1 log(" [-] Adding %s" % (ipaddr)) elif host_rec is not None and update_hosts: db.commit() host_id = db(db.t_hosts.f_ipaddr == nodefields['f_ipaddr']).update( **nodefields) db.commit() host_id = get_host_record(ipaddr) host_id = host_id.id hoststats['updated'] += 1 log(" [-] Updating %s" % (ipaddr)) else: hoststats['skipped'] += 1 db.commit() log(" [-] Skipped %s" % (ipaddr)) continue hosts.append(host_id) # process OS related info for os in node.osmatches: os_id = None host_id = None f_title = os['name'] #title for k in os['osclasses']: if k.get('cpe') != None: #fixes error on loading cpe:/os f_cpename = k['cpe'].replace('cpe:/o:', '') f_vendor = k['vendor'] f_product = k['osfamily'] f_version = k['osgen'] f_class = k['type'] f_family = k['osfamily'] f_certainty = k['accuracy'] cpe_res = db((db.t_os.f_cpename == f_cpename) & ( db.t_os.f_title == f_title)).select().first() if cpe_res is not None: os_id = cpe_res.id else: try: os_id = db.t_os.insert( f_cpename=f_cpename, f_title=f_title, f_vendor=f_vendor, f_product=f_product, f_version=f_version, ) except Exception as e: logger.error("Error inserting OS: %s" % (e)) db.commit() if os_id and (f_class or f_family or f_certainty): ipaddr = node.ip.get('addr') host_id = get_host_record(ipaddr) host_id = host_id.id try: db.t_host_os_refs.insert(f_certainty=f_certainty, f_family=f_family, f_class=f_class, f_hosts_id=host_id, f_os_id=os_id) except Exception as e: logger.error("Error inserting OS: %s" % (e)) db.commit() # process non-port <hostscript> entries. Add to info/0: for hostscripts in node.hostscripts: query = (svc_db.f_proto == 'info') & (svc_db.f_number == 0) & ( svc_db.f_hosts_id == host_id) svc_id = db.t_services.update_or_insert(query, f_proto='info', f_number=0, f_status='open', f_hosts_id=host_id) if not svc_id: svc_rec = db(query).select(cache=(cache.ram, 180)).first() if svc_rec: svc_id = svc_rec.id else: log(" [!] 
Service record wasn't created", logging.ERROR) continue db.commit() for script in hostscripts: script_id = script.id output = script.output db.t_service_info.update_or_insert(f_services_id=svc_id, f_name=script_id, f_text=output) db.commit() if script_id == 'nbstat': # pull out NetBIOS info from nbstat output result = RE_NETBIOS_MAC.search(output) if 'd' in result.groupdict(): host_rec.update(f_macaddr=result.group('d')) db.commit() result = RE_NETBIOS_NAME.search(output) if 'd' in result.groupdict(): host_rec.update(f_netbios_name=result.group('d')) db.commit() result = RE_NETBIOS_WORKGROUP.search(output) if 'd' in result.groupdict(): db( db.t_netbios.update_or_insert( f_hosts_id=host_id, f_domain=result.group('d'))) db.commit() # add ports and resulting vulndata for port in node.ports: f_proto = port.get('protocol') f_number = port.get('portid') f_status = port.get('port_state') f_name = port.get('service_name') f_product = port.get('service_product') query = (svc_db.f_proto == f_proto) & ( svc_db.f_number == f_number) & (svc_db.f_hosts_id == host_id) svc = db(query).select().first() # if record does not exist, query returns None so add the record: if svc is None: log(" [-] Adding port: %s/%s (%s)" % (f_proto, f_number, f_name)) svc_id = db.t_services.update_or_insert(f_proto=f_proto, f_number=f_number, f_status=f_status, f_name=f_name, f_hosts_id=host_id) if not (svc is None): log(" [-] Updating port: %s/%s (%s)" % (f_proto, f_number, f_name)) if svc.f_name != f_name and f_name not in svc.f_name: svc_id = db.t_services.validate_and_update( _key=svc.id, f_name=(f_name + ' | ' + svc.f_name)) else: svc_id = db.t_services.validate_and_update( _key=svc.id, f_name=(svc.f_name)) svc_id = svc_id.id if f_product: version = port.get('service_version') if version: f_product += " (%s)" % (version) db.t_service_info.update_or_insert(f_services_id=svc_id, f_name=f_name, f_text=f_product) db.commit() # Process <script> service entries for script in port.get('scripts'): try: if script.get('id') == 'ssl-cert': text = script.get('output') sslcert = text.replace("/", "\n") db.t_service_info.update_or_insert( f_services_id=svc_id, f_name=script.get('id'), f_text=sslcert) else: db.t_service_info.update_or_insert( f_services_id=svc_id, f_name=script.get('id'), f_text=script.get('output')) except Exception as e: logger.error("Error inserting Script: %s" % (e)) db.commit() # check for banner id and update t_services banner field with the output if script.get('id') == "banner": try: db.t_services.update_or_insert( (db.t_services.id == svc_id), f_banner=script.get('output')) except Exception as e: logger.error("Error inserting Banner: %s" % (e)) db.commit() # Process <cpe> service entries port_cpe = port.get('service_cpe') if port_cpe: cpe_id = port_cpe.replace('cpe:/', '') if cpe_id.startswith('a'): # process CPE Applications #log(" [-] Found Application CPE data: %s" % (cpe_id)) db.t_service_info.update_or_insert(f_services_id=svc_id, f_name='cpe.app', f_text="cpe:/%s" % (cpe_id)) db.commit() elif cpe_id.startswith('o'): # process CPE Operating System os_id = lookup_cpe(cpe_id[2:]) if os_id is not None: db.t_host_os_refs.insert(f_certainty='0.9', f_family='Unknown', f_class='Other', f_hosts_id=host_id, f_os_id=os_id) db.commit() else: # So no CPE or existing OS data, lets split up the CPE data and make our own log(" [!] No os_id found, this is odd !!!") # Adding uptime. Needed to add a table "f_uptime" in t_hosts db! 
if node.uptime['lastboot']: db.t_hosts.update_or_insert((db.t_hosts.f_ipaddr == ipaddr), f_uptime=node.uptime['lastboot']) if not node.uptime['lastboot']: db.t_hosts.update_or_insert((db.t_hosts.f_ipaddr == ipaddr), f_uptime=T("no entry found")) if msf_settings.get('workspace'): try: # check to see if we have a Metasploit RPC instance configured and talking from MetasploitProAPI import MetasploitProAPI msf_api = MetasploitProAPI(host=msf_settings.get('url'), apikey=msf_settings.get('key')) working_msf_api = msf_api.login() except Exception as error: log(" [!] Unable to authenticate to MSF API: %s" % str(error), logging.ERROR) working_msf_api = False try: scan_data = open(filename, "r+").readlines() except Exception as error: log( " [!] Error loading scan data to send to Metasploit: %s" % str(error), logging.ERROR) scan_data = None if scan_data and working_msf_api: task = msf_api.pro_import_data( msf_settings.get('workspace'), "".join(scan_data), { #'preserve_hosts': form.vars.preserve_hosts, 'blacklist_hosts': "\n".join(ip_ignore_list) }, ) msf_workspace_num = session.msf_workspace_num or 'unknown' msfurl = os.path.join(msf_settings.get('url'), 'workspaces', msf_workspace_num, 'tasks', task['task_id']) log(" [*] Added file to MSF Pro: %s" % msfurl) # any new nexpose vulns need to be checked against exploits table and connected log(" [*] Connecting exploits to vulns and performing do_host_status") do_host_status(asset_group=asset_group) log(" [*] Import complete: hosts: %s added, %s updated, %s skipped" % ( hoststats['added'], hoststats['updated'], hoststats['skipped'], ))
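# The Metasploit Pro hand-off above (also used by the Nexpose and Nessus
# importers) follows one pattern: authenticate, then push the raw scan file
# contents along with a blacklist of ignored hosts. A stripped-down sketch is
# shown below; the URL, API key, workspace name, file path and blacklist are
# placeholders, and only calls already used in the code above appear here.
from MetasploitProAPI import MetasploitProAPI

msf_api = MetasploitProAPI(host='https://127.0.0.1:3790/', apikey='<api-key>')
if msf_api.login():
    with open('/tmp/scan.xml', 'r') as fh:
        scan_data = fh.read()
    task = msf_api.pro_import_data(
        'default',                                     # workspace name (placeholder)
        scan_data,
        {'blacklist_hosts': '\n'.join(['10.0.0.9'])},  # hosts to skip (placeholder)
    )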
def process_xml( filename=None, asset_group=None, engineer=None, msf_settings={}, ip_ignore_list=None, ip_include_list=None, update_hosts=False, ): # Upload and process Nexpose XML Scan file from skaldship.cpe import lookup_cpe from skaldship.hosts import get_host_record from gluon.validators import IS_IPADDRESS import os db = current.globalenv['db'] session = current.globalenv['session'] parser = html.parser.HTMLParser() user_id = db.auth_user(engineer) # build the hosts only/exclude list ip_exclude = [] if ip_ignore_list: ip_exclude = ip_ignore_list.split('\r\n') # TODO: check for ip subnet/range and break it out to individuals ip_only = [] if ip_include_list: ip_only = ip_include_list.split('\r\n') # TODO: check for ip subnet/range and break it out to individuals log(" [*] Processing Nexpose scan file %s" % filename) try: nexpose_xml = etree.parse(filename) except etree.ParseError as e: msg = " [!] Invalid Nexpose XML file (%s): %s " % (filename, e) log(msg, logging.ERROR) return msg root = nexpose_xml.getroot() existing_vulnids = db(db.t_vulndata()).select( db.t_vulndata.id, db.t_vulndata.f_vulnid).as_dict(key='f_vulnid') log(" [*] Found %d vulnerabilities in the database already." % len(existing_vulnids)) # start with the vulnerability details vulns_added = 0 vulns_skipped = 0 vulns = root.findall("VulnerabilityDefinitions/vulnerability") log(" [*] Parsing %d vulnerabilities" % len(vulns)) for vuln in vulns: # nexpose identifiers are always lower case in kvasir. UPPER CASE IS FOR SHOUTING!!! vulnid = vuln.attrib['id'].lower() if vulnid in existing_vulnids: #log(" [-] Skipping %s - It's in the db already" % vulnid) vulns_skipped += 1 else: # add the vulnerability to t_vulndata - any duplicates are errored out (vulnfields, references) = vuln_parse(vuln, fromapi=False) try: vulnid = db.t_vulndata.update_or_insert(**vulnfields) if not vulnid: vulnid = db(db.t_vulndata.f_vulnid == vulnfields['f_vulnid']).select().first().id vulns_added += 1 db.commit() except Exception as e: log( " [!] Error inserting %s to vulndata: %s" % (vulnfields['f_vulnid'], e), logging.ERROR) vulnid = None db.commit() continue # add the references if vulnid is not None: for reference in references: # check to see if reference exists first ref_id = db(db.t_vuln_refs.f_text == reference[1]) if ref_id.count() == 0: # add because it doesn't ref_id = db.t_vuln_refs.insert(f_source=reference[0], f_text=reference[1]) db.commit() else: # pick the first reference as the ID ref_id = ref_id.select()[0].id # make many-to-many relationship with t_vuln_data res = db.t_vuln_references.insert(f_vuln_ref_id=ref_id, f_vulndata_id=vulnid) db.commit() log(" [*] %d Vulnerabilities added, %d skipped" % (vulns_added, vulns_skipped)) # re-make the existing_vulnids dict() since we've updated the system existing_vulnids = db(db.t_vulndata()).select( db.t_vulndata.id, db.t_vulndata.f_vulnid).as_dict(key='f_vulnid') # parse the nodes now nodes = root.findall("nodes/node") log(" [-] Parsing %d nodes" % len(nodes)) hoststats = {'added': 0, 'skipped': 0, 'updated': 0, 'errored': 0} hosts = [] # array of host_id fields for node in nodes: log(" [-] Node %s status is: %s" % (node.attrib['address'], node.attrib['status'])) #sys.stderr.write(msg) if node.attrib['status'] != "alive": hoststats['skipped'] += 1 continue if node.attrib['address'] in ip_exclude: log(" [-] Node is in exclude list... 
skipping") hoststats['skipped'] += 1 continue nodefields = {} if len(ip_only) > 0 and node.attrib['address'] not in ip_only: log(" [-] Node is not in the only list... skipping") hoststats['skipped'] += 1 continue # we'll just take the last hostname in the names list since it'll usually be the full dns name names = node.findall("names/name") for name in names: nodefields['f_hostname'] = name.text ip = node.attrib['address'] if IS_IPADDRESS()(ip): nodefields['f_ipaddr'] = ip else: log(" [!] Invalid IP Address: %s" % ip, logging.ERROR) nodefields['f_engineer'] = user_id nodefields['f_asset_group'] = asset_group nodefields['f_confirmed'] = False if 'hardware-address' in node.attrib: nodefields['f_macaddr'] = node.attrib['hardware-address'] if node.find('names/name') is not None: # XXX: for now just take the first hostname nodefields['f_hostname'] = node.find('names/name').text # check to see if IP exists in DB already query = (db.t_hosts.f_ipaddr == ip) host_rec = db(query).select().first() if host_rec is None: host_id = db.t_hosts.insert(**nodefields) db.commit() hoststats['added'] += 1 log(" [-] Adding IP: %s" % ip) elif update_hosts: db.commit() db(db.t_hosts.f_ipaddr == nodefields['f_ipaddr']).update( **nodefields) db.commit() host_id = get_host_record(nodefields['f_ipaddr']) host_id = host_id.id hoststats['updated'] += 1 log(" [-] Updating IP: %s" % ip) else: hoststats['skipped'] += 1 db.commit() log(" [-] Skipped IP: %s" % ip) continue hosts.append(host_id) # tests that aren't specific to any port we wrap up into a meta service # called "INFO" tests = node.findall("tests/test") if len(tests) > 0: svc_id = db.t_services.update_or_insert(f_proto="info", f_number="0", f_status="info", f_hosts_id=host_id) db.commit() for test in tests: d = {} vulnid = test.get('id').lower() # we may have valid username. if "cifs-acct-" in vulnid: username = test.get('key') if username is not None: d['f_services_id'] = svc_id d['f_username'] = username d['f_active'] = True d['f_source'] = vulnid query = (db.t_accounts.f_services_id == d['f_services_id']) &\ (db.t_accounts.f_username == d['f_username']) db.t_accounts.update_or_insert(query, **d) db.commit() if test.attrib['status'] == 'vulnerable-exploited' or \ test.attrib['status'] == 'potential' or \ test.attrib['status'] == 'exception-vulnerable-exploited' or \ test.attrib['status'] == 'exception-vulnerable-version' or \ test.attrib['status'] == 'exception-vulnerable-potential' or \ test.attrib['status'] == 'vulnerable-version': if vulnid in existing_vulnids: vuln_id = existing_vulnids[vulnid]['id'] else: continue if vulnid == 'cifs-nt-0001': # Windows users, local groups, and global groups infotext = nx_xml_to_html( StringIO(etree.tostring(test, xml_declaration=False))) try: unames = re.search( "Found user\(s\): (?P<unames>.+?) 
</li>", infotext).group('unames') except AttributeError as e: # regex not found continue for uname in unames.split(): # add account d['f_username'] = uname d['f_services_id'] = svc_id d['f_source'] = 'cifs-nt-0001' db.t_accounts.update_or_insert(**d) db.commit() test_str = etree.tostring(test, xml_declaration=False, encoding=str) test_str = test_str.encode('ascii', 'xmlcharrefreplace') proof = nx_xml_to_html(StringIO(test_str)) proof = html_to_markmin(proof) if vulnid == 'cifs-insecure-acct-lockout-limit': d['f_hosts_id'] = host_id try: d['f_lockout_limit'] = re.search( "contains: (?P<l>\d+)", proof).group('l') except AttributeError: d['f_lockout_limit'] = 0 query = (db.t_netbios.f_hosts_id == host_id) db.t_netbios.update_or_insert(query, **d) db.commit() # Check for CIFS uid/pw if "cifs-" in vulnid: try: uid = re.search("uid\[(?P<u>.*?)\]", proof).group('u') pw = re.search("pw\[(?P<p>.*?)\]", proof).group('p') realm = re.search("realm\[(?P<r>.*?)\]", proof).group('r') d = { 'f_services_id': svc_id, 'f_username': uid, 'f_password': pw, 'f_description': realm, 'f_active': True, 'f_compromised': True, 'f_source': vulnid } query = (db.t_accounts.f_services_id == svc_id) & (db.t_accounts.f_username == uid) db.t_accounts.update_or_insert(query, **d) db.commit() except AttributeError: db.commit() except Exception as e: log("Error inserting account (%s): %s" % (uid, e), logging.ERROR) db.commit() # solaris-kcms-readfile shadow file if vulnid.lower() == "rpc-solaris-kcms-readfile": # funky chicken stuff, if they mess with this output then we've got to # change this around as well. thems the breaks, maynard! shadow = parser.unescape(proof) for line in shadow.split("<br />")[1:-1]: user, pw, uid = line.split(':')[0:3] d['f_services_id'] = svc_id d['f_username'] = user d['f_hash1'] = pw d['f_hash1_type'] = "crypt" d['f_uid'] = uid d['f_source'] = "shadow" d['f_active'] = True d['f_source'] = "rpc-solaris-kcms-readfile" query = (db.t_accounts.f_services_id == svc_id) & ( db.t_accounts.f_username == user) db.t_accounts.update_or_insert(query, **d) db.commit() db.t_service_vulns.update_or_insert( f_services_id=svc_id, f_status=test.attrib['status'], f_proof=proof, f_vulndata_id=vuln_id) if "cisco-default-http-account" in vulnid.lower(): d['f_services_id'] = svc_id d['f_username'] = vulnid.split('-')[4] d['f_password'] = vulnid.split('-')[6] d['f_source'] = "cisco-default-http-account" query = (db.t_accounts.f_services_id == svc_id) & ( db.t_accounts.f_username == d['f_username']) db.t_accounts.update_or_insert(query, **d) db.commit() # add services (ports) and resulting vulndata for endpoint in node.findall("endpoints/endpoint"): f_proto = endpoint.attrib['protocol'] f_number = endpoint.attrib['port'] f_status = endpoint.attrib['status'] query = (db.t_services.f_hosts_id == host_id) \ & (db.t_services.f_proto == f_proto) \ & (db.t_services.f_number == f_number) svc_id = db.t_services.update_or_insert(query, f_proto=f_proto, f_number=f_number, f_status=f_status, f_hosts_id=host_id) if not svc_id: svc_id = db(query).select().first().id for service in endpoint.findall("services/service"): d = {} if 'name' in service.attrib: db.t_services[svc_id] = dict(f_name=service.attrib['name']) for test in service.findall("tests/test"): vulnid = test.get('id').lower() if test.attrib['status'] == 'vulnerable-exploited' or \ test.attrib['status'] == 'potential' or \ test.attrib['status'] == 'exception-vulnerable-exploited' or \ test.attrib['status'] == 'exception-vulnerable-version' or \ test.attrib['status'] == 
'exception-vulnerable-potential' or \ test.attrib['status'] == 'vulnerable-version': if vulnid in existing_vulnids: vuln_id = existing_vulnids[vulnid]['id'] else: log( " [!] Unknown vulnid, Skipping! (id: %s)" % vulnid, logging.ERROR) continue test_str = etree.tostring(test, xml_declaration=False, encoding=str) test_str = test_str.encode('ascii', 'xmlcharrefreplace') proof = nx_xml_to_html(StringIO(test_str)) proof = html_to_markmin(proof) # Check for SNMP strings if "snmp-read-" in vulnid: snmpstring = re.search("pw\[(?P<pw>.*?)\]", proof).group('pw') db.t_snmp.update_or_insert(f_hosts_id=host_id, f_community=snmpstring, f_access="READ", f_version="v1") db.commit() if "snmp-write" in vulnid: snmpstring = re.search("pw\[(?P<pw>.*?)\]", proof).group('pw') db.t_snmp.update_or_insert(f_hosts_id=host_id, f_community=snmpstring, f_access="WRITE", f_version="v1") db.commit() # TODO: account names # Dell DRAC root/calvin if vulnid == "http-drac-default-login": d['f_services_id'] = svc_id d['f_username'] = '******' d['f_password'] = '******' d['f_active'] = True d['f_compromised'] = True d['f_source'] = vulnid query = (db.t_accounts.f_services_id == svc_id) & ( db.t_accounts.f_username == 'root') db.t_accounts.update_or_insert(query, **d) db.commit() # Check for uid/pw if "ftp-iis-" in vulnid or \ "telnet-" in vulnid or \ "cifs-" in vulnid or \ "tds-" in vulnid or \ "oracle-" in vulnid or \ "-default-" in vulnid or \ "ftp-generic-" in vulnid: try: uid = re.search("uid\[(?P<u>.*?)\]", proof).group('u') pw = re.search("pw\[(?P<p>.*?)\]", proof).group('p') realm = re.search("realm\[(?P<r>.*?)\]", proof).group('r') d['f_services_id'] = svc_id d['f_username'] = uid d['f_password'] = pw d['f_description'] = realm d['f_active'] = True d['f_compromised'] = True d['f_source'] = vulnid query = (db.t_accounts.f_services_id == svc_id) & (db.t_accounts.f_username == uid) db.t_accounts.update_or_insert(query, **d) db.commit() except AttributeError: db.commit() except Exception as e: log( "Error inserting account (%s): %s" % (uid, e), logging.ERROR) db.commit() # cisco default http login accounts if "cisco-default-http-account" in vulnid.lower(): d['f_services_id'] = svc_id d['f_username'] = vulnid.split('-')[4] d['f_password'] = vulnid.split('-')[6] d['f_source'] = "cisco-default-http-account" query = (db.t_accounts.f_services_id == svc_id) \ & (db.t_accounts.f_username == d['f_username']) db.t_accounts.update_or_insert(query, **d) db.commit() db.t_service_vulns.update_or_insert( f_services_id=svc_id, f_status=test.attrib['status'], f_proof=proof, f_vulndata_id=vuln_id) db.commit() for config in service.findall("configuration/config"): db.t_service_info.update_or_insert( f_services_id=svc_id, f_name=config.attrib['name'], f_text=config.text) db.commit() if re.match('\w+.banner$', config.attrib['name']): db.t_services[svc_id] = dict(f_banner=config.text) db.commit() if config.attrib['name'] == 'mac-address': # update the mac address of the host db.t_hosts[host_id] = dict(f_macaddr=config.text) db.commit() if "advertised-name" in config.attrib['name']: # netbios computer name d = config.text.split(" ")[0] if "Computer Name" in config.text: data = {'f_netbios_name': d} # if hostname isn't defined then lowercase netbios name and put it in if db.t_hosts[host_id].f_hostname is None: data['f_hostname'] = d.lower() db(db.t_hosts.id == host_id).update(**data) db.commit() elif "Domain Name" in config.text: query = (db.t_netbios.f_hosts_id == host_id) db.t_netbios.update_or_insert(query, f_hosts_id=host_id, f_domain=d) 
db.commit() for os_rec in node.findall('fingerprints/os'): """ <os certainty="1.00" device-class="Workstation" vendor="Microsoft" family="Windows" product="Windows 2000 Professional" version="SP4" arch="x86"/> if using SCAP output the os line looks like: <os certainty="0.66" device-class="General" vendor="Microsoft" family="Windows" product="Windows XP" arch="x86" cpe="cpe:/o:microsoft:windows_xp::sp3"/> """ if 'cpe' in os_rec.attrib: # we have a cpe entry from xml! hooray! cpe_name = os_rec.attrib['cpe'].replace('cpe:/o:', '') os_id = lookup_cpe(cpe_name) else: # no cpe attribute in xml, go through our messy lookup os_id = guess_cpe_os(os_rec) if os_id is not None: db.t_host_os_refs.update_or_insert( f_certainty=os_rec.attrib['certainty'], f_family=os_rec.get('family', 'Unknown'), f_class=os_rec.get('device-class', 'Other'), f_hosts_id=host_id, f_os_id=os_id) db.commit() else: log( " [!] os_rec could not be parsed: %s" % etree.tostring(os_rec), logging.ERROR) db.commit() if msf_settings.get('workspace'): try: # check to see if we have a Metasploit RPC instance configured and talking from MetasploitProAPI import MetasploitProAPI msf_api = MetasploitProAPI(host=msf_settings.get('url'), apikey=msf_settings.get('key')) working_msf_api = msf_api.login() except Exception as error: log(" [!] Unable to authenticate to MSF API: %s" % str(error), logging.ERROR) working_msf_api = False try: scan_data = open(filename, "r+").readlines() except Exception as error: log( " [!] Error loading scan data to send to Metasploit: %s" % str(error), logging.ERROR) scan_data = None if scan_data and working_msf_api: task = msf_api.pro_import_data( msf_settings.get('workspace'), "".join(scan_data), { #'preserve_hosts': form.vars.preserve_hosts, 'blacklist_hosts': "\n".join(ip_ignore_list) }, ) msf_workspace_num = session.msf_workspace_num or 'unknown' msfurl = os.path.join(msf_settings.get('url'), 'workspaces', msf_workspace_num, 'tasks', task['task_id']) log(" [*] Added file to MSF Pro: %s" % msfurl) # any new nexpose vulns need to be checked against exploits table and connected log(" [*] Connecting exploits to vulns and performing do_host_status") connect_exploits() do_host_status(asset_group=asset_group) msg = " [*] Import complete: hosts: %s added, %s skipped, %s errors - vulns: %s added, %s skipped" % ( hoststats['added'], hoststats['skipped'], hoststats['errored'], vulns_added, vulns_skipped) log(msg) return msg
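# The chained status comparisons above decide whether a Nexpose <test> entry
# counts as a finding. The same check, written once as a set lookup, makes the
# intent easier to see; the status strings are exactly the ones tested in the
# importer above, and this helper is only a sketch, not part of the project.
VULNERABLE_STATUSES = {
    'vulnerable-exploited',
    'vulnerable-version',
    'potential',
    'exception-vulnerable-exploited',
    'exception-vulnerable-version',
    'exception-vulnerable-potential',
}

def is_vulnerable(test):
    """Return True when a Nexpose <test> element reports a vulnerable status."""
    return test.attrib.get('status') in VULNERABLE_STATUSES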
def process_scanfile( filename=None, asset_group=None, engineer=None, msf_settings={}, ip_ignore_list=None, ip_include_list=None, update_hosts=False, ): """ Process a Nessus XML or CSV Report file. There are two types of CSV output, the first is very basic and is generated by a single Nessus instance. The second comes from the centralized manager. I forget what it's called but it packs more data. If you have a standalone scanner, always export/save as .nessus. :param filename: A local filename to process :param asset_group: Asset group to assign hosts to :param engineer: Engineer record number to assign hosts to :param msf_workspace: If set a Metasploit workspace to send the scanfile to via the API :param ip_ignore_list: List of IP addresses to ignore :param ip_include_list: List of IP addresses to ONLY import (skip all others) :param update_hosts: Boolean to update/append to hosts, otherwise hosts are skipped :returns msg: A string status message """ from skaldship.cpe import lookup_cpe nessus_config = nessus_get_config() db = current.globalenv['db'] # build the hosts only/exclude list ip_exclude = [] if ip_ignore_list: ip_exclude = ip_ignore_list.split('\r\n') # TODO: check for ip subnet/range and break it out to individuals ip_only = [] if ip_include_list: ip_only = ip_include_list.split('\r\n') # TODO: check for ip subnet/range and break it out to individuals log(" [*] Processing Nessus scan file %s" % filename) fIN = open(filename, "rb") # check to see if file is a CSV file, if so set nessus_csv to True line = fIN.readline() fIN.seek(0) if line.startswith('Plugin'): import csv csv.field_size_limit(sys.maxsize) # field size must be increased nessus_iterator = csv.DictReader(fIN) nessus_csv_type = 'Standalone' log(" [*] CSV file is from Standalone scanner") elif line.startswith('"Plugin"'): import csv csv.field_size_limit(sys.maxsize) # field size must be increased nessus_iterator = csv.DictReader(fIN) nessus_csv_type = 'SecurityCenter' log(" [*] CSV file is from SecurityCenter") else: nessus_csv_type = False try: nessus_xml = etree.parse(filename) log(" [*] XML file identified") except etree.ParseError as e: msg = " [!] Invalid Nessus scan file (%s): %s " % (filename, e) log(msg, logging.ERROR) return msg root = nessus_xml.getroot() nessus_iterator = root.findall("Report/ReportHost") nessus_hosts = NessusHosts(engineer, asset_group, ip_include_list, ip_ignore_list, update_hosts) nessus_vulns = NessusVulns() services = Services() svcs = db.t_services def _plugin_parse(host_id, vuln_id, vulndata, vulnextradata): # Parse the plugin data. This is where CSV and XML diverge. port = vulnextradata['port'] proto = vulnextradata['proto'] svcname = vulnextradata['svcname'] plugin_output = vulnextradata['plugin_output'] pluginID = vulnextradata['pluginID'] # check to see if we are to ignore this plugin ID or not. if int(pluginID) in nessus_config.get('ignored_plugins'): return svc_fields = { 'f_proto': proto, 'f_number': port, 'f_name': svcname, 'f_hosts_id': host_id } svc_rec = services.get_record(**svc_fields) # Nessus only guesses the services (and appends a ? 
at the end) splited = svc_fields['f_name'].split("?") if svc_rec is not None: if splited[0] != svc_rec.f_name and svc_rec.f_name not in splited[ 0]: svc_fields['f_name'] = "%s | %s" % (svc_rec.f_name, splited[0]) svc_id = svcs.update_or_insert(_key=svc_rec.id, **svc_fields) else: svc_fields['f_name'] = splited[0] svc_rec = services.get_record(create_or_update=True, **svc_fields) # create t_service_vulns entry for this pluginID svc_vuln = { 'f_services_id': svc_rec.id, 'f_vulndata_id': vuln_id, 'f_proof': plugin_output } # you may be a vulnerability if... if vulnextradata['exploit_available'] == 'true': # if you have exploits available you may be an extra special vulnerability svc_vuln['f_status'] = 'vulnerable-exploited' elif svcname == 'general': # if general service then you may not be a vulnerability svc_vuln['f_status'] = 'general' elif vulndata['f_severity'] == 0: # if there is no severity then you may not be a vulnerability svc_vuln['f_status'] = 'general' else: # you're a vulnerability svc_vuln['f_status'] = 'vulnerable' db.t_service_vulns.update_or_insert(**svc_vuln) ###################################################################################################### ## Let the parsing of Nessus Plugin Output commence! ## ## Many Plugins provide useful data in plugin_output. We'll go through the list here and extract ## out the good bits and add them to Kvasir's database. Some Plugins will not be added as vulnerabilities ## because they're truly informational. This list will change if somebody keeps it up to date. ## ## TODO: This should be moved into a separate function so we can also process csv data ## TODO: Add t_service_info key/value records (standardize on Nexpose-like keys?) ## ###################################################################################################### d = {} nessus_vulns.stats['added'] += 1 #### SNMP if pluginID == '10264': # snmp community strings for snmp in re.findall(' - (.*)', plugin_output): db.t_snmp.update_or_insert(f_hosts_id=host_id, f_community=snmp) db.commit() #### SMB/NetBIOS if pluginID in ['10860', '10399']: # SMB Use Host SID (10860) or Domain SID (10399) to enumerate users for user in re.findall(' - (.*)', plugin_output): username = user[0:user.find('(') - 1] try: gid = re.findall('\(id (\d+)', user)[0] except: gid = '0' # Windows users, local groups, and global groups d['f_username'] = username d['f_gid'] = gid d['f_services_id'] = svc_rec.id d['f_source'] = '10860' db.t_accounts.update_or_insert(**d) db.commit() if pluginID == '17651': # Microsoft Windows SMB : Obtains the Password Policy d['f_hosts_id'] = host_id try: d['f_lockout_duration'] = re.findall( 'Locked account time \(s\): (\d+)', plugin_output)[0] d['f_lockout_limit'] = re.findall( 'Number of invalid logon before locked out \(s\): (\d+)', plugin_output)[0] except IndexError: d['f_lockout_duration'] = 1800 d['f_lockout_limit'] = 0 db.t_netbios.update_or_insert(**d) db.commit() if pluginID == '10395': # Microsoft Windows SMB Shares Enumeration d['f_hosts_id'] = host_id d['f_shares'] = re.findall(' - (.*)', plugin_output) db.t_netbios.update_or_insert(**d) if pluginID == '10150': # Windows NetBIOS / SMB Remote Host Information Disclosure try: d['f_hosts_id'] = host_id d['f_domain'] = re.findall('(\w+).*= Workgroup / Domain name', plugin_output)[0] db.t_netbios.update_or_insert(**d) except IndexError: pass #### FTP if pluginID == '10092': # FTP Server Detection RE_10092 = re.compile('The remote FTP banner is :\n\n(.*)', re.DOTALL) try: d['f_banner'] = 
RE_10092.findall(plugin_output)[0] svc_rec.update(**d) db.commit() db(db.t_service_info) db.t_service_info.update_or_insert(f_services_id=svc_rec.id, f_name='ftp.banner', f_text=d['f_banner']) db.commit() except Exception as e: log("Error parsing FTP banner (id 10092): %s" % str(e), logging.ERROR) #### SSH if pluginID == '10267': # SSH Server Type and Version Information try: ssh_banner, ssh_supported_auth = re.findall( 'SSH version : (.*)\nSSH supported authentication : (.*)', plugin_output)[0] d['f_banner'] = ssh_banner svc_rec.update_record(**d) db.commit() db.t_service_info.update_or_insert(f_services_id=svc_rec.id, f_name='ssh.banner', f_text=d['f_banner']) db.t_service_info.update_or_insert(f_services_id=svc_rec.id, f_name='ssh.authentication', f_text=ssh_supported_auth) db.commit() except Exception as e: log("Error parsing SSH banner (id 10267): %s" % str(e), logging.ERROR) if pluginID == '10881': # SSH Protocol Versions Supported try: ssh_versions = re.findall(' - (.*)', plugin_output) db.t_service_info.update_or_insert( f_services_id=svc_rec.id, f_name='ssh.versions', f_text=', '.join(ssh_versions)) db.commit() except Exception as e: log("Error parsing SSH versions (id 10881): %s" % str(e), logging.ERROR) try: fingerprint = re.findall('SSHv2 host key fingerprint : (.*)', plugin_output) db.t_service_info.update_or_insert(f_services_id=svc_rec.id, f_name='ssh.fingerprint', f_text=fingerprint[0]) db.commit() except Exception as e: log("Error parsing SSH fingerprint (id 10881): %s" % str(e), logging.ERROR) ### Telnet if pluginID in ['10281', '42263']: # Telnet banner try: snip_start = plugin_output.find( 'snip ------------------------------\n') snip_end = plugin_output.rfind( 'snip ------------------------------\n') if snip_start > 0 and snip_end > snip_start: d['f_banner'] = plugin_output[snip_start + 36:snip_end - 36] svc_rec.update(**d) db.commit() else: log( "Error finding Telnet banner: (st: %s, end: %s, banner: %s)" % (snip_start, snip_end, plugin_output), logging.ERROR) except Exception as e: log("Error parsing Telnet banner: %s" % str(e), logging.ERROR) ### HTTP Server Info if pluginID == '10107': # HTTP Server Type and Version RE_10107 = re.compile('The remote web server type is :\n\n(.*)', re.DOTALL) try: d['f_banner'] = RE_10107.findall(plugin_output)[0] svc_rec.update(**d) db.commit() db.t_service_info.update_or_insert(f_services_id=svc_rec.id, f_name='http.banner.server', f_text=d['f_banner']) db.commit() except Exception as e: log("Error parsing HTTP banner (id 10107): %s" % str(e), logging.ERROR) ### Operating Systems and CPE if pluginID == '45590': # Common Platform Enumeration (CPE) for cpe_os in re.findall('(cpe:/o:.*)', plugin_output): os_id = lookup_cpe(cpe_os.replace('cpe:/o:', '').rstrip(' ')) if os_id: db.t_host_os_refs.update_or_insert( f_certainty='0.90', # just a stab f_family='Unknown', # not given in Nessus f_class=hostdata.get('system-type'), f_hosts_id=host_id, f_os_id=os_id) db.commit() for host in nessus_iterator: if not nessus_csv_type: (host_id, hostdata, hostextradata) = nessus_hosts.parse(host.find('HostProperties')) else: (host_id, hostdata, hostextradata) = nessus_hosts.parse(host) if not host_id: # no host_id returned, it was either skipped or errored out continue if not nessus_csv_type: # Parse the XML <ReportItem> sections where plugins, ports and output are all in for rpt_item in host.iterfind('ReportItem'): (vuln_id, vulndata, extradata) = nessus_vulns.parse(rpt_item) if not vuln_id: # no vulnerability id continue _plugin_parse(host_id, 
vuln_id, vulndata, extradata) else: (vuln_id, vulndata, extradata) = nessus_vulns.parse(host) _plugin_parse(host_id, vuln_id, vulndata, extradata) if msf_settings.get('workspace'): try: # check to see if we have a Metasploit RPC instance configured and talking from MetasploitProAPI import MetasploitProAPI msf_api = MetasploitProAPI(host=msf_settings.get('url'), apikey=msf_settings.get('key')) working_msf_api = msf_api.login() except Exception as error: log(" [!] Unable to authenticate to MSF API: %s" % str(error), logging.ERROR) working_msf_api = False try: scan_data = open(filename, "r+").readlines() except Exception as error: log( " [!] Error loading scan data to send to Metasploit: %s" % str(error), logging.ERROR) scan_data = None if scan_data and working_msf_api: task = msf_api.pro_import_data( msf_settings.get('workspace'), "".join(scan_data), { #'preserve_hosts': form.vars.preserve_hosts, 'blacklist_hosts': "\n".join(ip_ignore_list) }, ) msf_workspace_num = session.msf_workspace_num or 'unknown' msfurl = os.path.join(msf_settings.get('url'), 'workspaces', msf_workspace_num, 'tasks', task['task_id']) log(" [*] Added file to MSF Pro: %s" % msfurl) # any new Nessus vulns need to be checked against exploits table and connected log(" [*] Connecting exploits to vulns and performing do_host_status") connect_exploits() do_host_status(asset_group=asset_group) msg = (' [*] Import complete: hosts: %s added, %s updated, %s skipped ' '- %s vulns processed, %s added' % (nessus_hosts.stats['added'], nessus_hosts.stats['updated'], nessus_hosts.stats['skipped'], nessus_vulns.stats['processed'], nessus_vulns.stats['added'])) log(msg) return msg
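# Most of the plugin handling in _plugin_parse() above boils down to pulling
# fields out of plugin_output with small regular expressions. The snippet
# below runs the same pattern used for plugin 10264 (SNMP default community
# strings) against a made-up output block; the output text is hypothetical.
import re

plugin_output = (
    "The remote SNMP server replies to the following default community strings :\n"
    "\n"
    " - public\n"
    " - private\n"
)
communities = re.findall(' - (.*)', plugin_output)
# communities == ['public', 'private']; each would become a t_snmp row above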
def process_report_xml(
    filename=None,
    ip_ignore_list=None,
    ip_include_list=None,
    engineer=1,
    asset_group="Metasploit Import",
    update_hosts=True,
):
    """
    Processes a Metasploit XML Export for the following data and adds it to the db:

    - Hosts and services
    - Credentials

    Generate the XML report with `db_export -t xml filename.xml` or through the WebUI.

    TODO: Auto-exploit successful exploit attempts if a matching CVE/VulnDB entry is found
    """
    from gluon.validators import IS_IPADDRESS
    from skaldship.passwords.utils import lookup_hash
    from skaldship.hosts import get_host_record, get_or_create_record
    from skaldship.services import Services
    services = Services()

    db = current.globalenv['db']
    #cache = current.globalenv['cache']

    try:
        from lxml import etree
    except ImportError:
        try:
            import xml.etree.cElementTree as etree
        except ImportError:
            try:
                import xml.etree.ElementTree as etree
            except ImportError:
                raise Exception("Unable to find valid ElementTree module.")

    # build the hosts only/exclude lists
    ip_exclude = []
    if ip_ignore_list:
        ip_exclude = ip_ignore_list.split('\r\n')
        # TODO: check for ip subnet/range and break it out to individuals
    ip_only = []
    if ip_include_list:
        ip_only = ip_include_list.split('\r\n')
        # TODO: check for ip subnet/range and break it out to individuals

    log(" [*] Processing Metasploit Pro report file: %s" % (filename))

    try:
        xml = etree.parse(filename)
    except etree.ParseError as e:
        raise Exception(" [!] Invalid XML file (%s): %s " % (filename, e))

    root = xml.getroot()

    # parse the hosts now
    hosts = root.findall("hosts/host")
    log(" [-] Parsing %d hosts" % (len(hosts)))
    stats = {
        'hosts_added': 0,
        'hosts_skipped': 0,
        'hosts_updated': 0,
        'services_added': 0,
        'services_updated': 0,
        'accounts_added': 0,
        'accounts_updated': 0,
    }

    for host in hosts:
        didwhat = "Unknown"
        if host.findtext('state') != "alive":
            stats['hosts_skipped'] += 1
            continue

        hostfields = {}
        ipaddr = host.findtext('address')

        if len(ip_only) > 0 and ipaddr not in ip_only:
            log(" [-] Node is not in the only list... skipping")
            stats['hosts_skipped'] += 1
            continue

        # honor the ignore list so ip_ignore_list is actually applied
        if ipaddr in ip_exclude:
            log(" [-] Node is in the exclude list... skipping")
            stats['hosts_skipped'] += 1
            continue

        if IS_IPADDRESS()(ipaddr)[1] is not None:
            logger.error("Invalid IP Address in report: %s" % ipaddr)
            log(" [!] Invalid IP Address in report: %s" % ipaddr)
            continue

        macaddr = host.findtext('mac')
        if macaddr:
            hostfields['f_macaddr'] = macaddr

        hostname = host.findtext('name')
        if hostname:
            hostfields['f_hostname'] = hostname

        # check to see if the IP exists in the DB already
        hostfields['f_asset_group'] = asset_group
        hostfields['f_engineer'] = engineer
        if update_hosts:
            # update or add, doesn't matter which
            host_rec = get_or_create_record(ipaddr, **hostfields)
            stats['hosts_added'] += 1
        else:
            # get a host record; if it doesn't exist create it, otherwise skip
            # because update_hosts=False
            host_rec = get_host_record(ipaddr)
            if not host_rec:
                host_rec = get_or_create_record(ipaddr, **hostfields)
                stats['hosts_added'] += 1
                log(" [-] Adding IP: %s" % (ipaddr))
            else:
                stats['hosts_skipped'] += 1
                log(" [-] Skipped IP: %s" % (ipaddr))
                continue

        # add the <info> and <comments> as a note to the host
        info_note = host.findtext('info') or None
        if info_note and info_note.startswith('Domain controller for '):
            db.t_netbios.update_or_insert(
                f_hosts_id=host_rec.id,
                f_type="PDC",
                f_domain=info_note[22:].upper(),
            )
        elif info_note:
            db.t_host_notes.update_or_insert(
                f_hosts_id=host_rec.id,
                f_note=info_note,
            )
        db.commit()

        for comment in host.findall('comments/comment'):
            db.t_host_notes.update_or_insert(
                f_hosts_id=host_rec.id,
                f_note=comment.text,
            )

        # process the services, adding any new ones
        for svc in host.findall('services/service'):
            svc_fields = {
                'f_number': svc.findtext('port'),
                'f_proto': svc.findtext('proto'),
                'f_status': svc.findtext('state'),
                'f_name': svc.findtext('name') or '',
                'f_banner': svc.findtext('info') or '',
                'f_hosts_id': host_rec.id,
            }
            if svc_fields['f_name'] in ['http', 'https']:
                svc_fields['f_name'] = svc_fields['f_name'].upper()
            svc_rec = services.get_record(create_or_update=True, **svc_fields)

        for cred in host.findall('creds/cred'):
            # handle credential data
            f_password = None
            f_compromised = False
            cred_type = cred.findtext('ptype')
            if cred_type == "smb_hash":
                # add smb hashes to the info/0 service
                svc_fields = {
                    'f_number': '0',
                    'f_proto': 'info',
                    'f_hosts_id': host_rec.id,
                }
                svc_rec = services.get_record(create_or_update=True, **svc_fields)

                pwhash = cred.findtext('pass')
                f_password = lookup_hash(pwhash)
                (lm, nt) = pwhash.split(':')
                user = cred.findtext('user')
                query = (db.t_accounts.f_services_id == svc_rec.id) & \
                        (db.t_accounts.f_username.upper() == user.upper())
                acct_row = db(query).select().first()
                if acct_row:
                    # we have an account already, lets see if the hashes are in there
                    h1 = acct_row.f_hash1
                    if isinstance(h1, str):
                        if acct_row.f_hash1.upper() != lm.upper():
                            acct_row.f_hash1 = lm.upper()
                            acct_row.f_hash1_type = "LM"
                            acct_row.f_hash2 = nt.upper()
                            acct_row.f_hash2_type = "NT"
                            if f_password:
                                acct_row.f_compromised = True
                                acct_row.f_password = f_password
                            if not acct_row.f_source:
                                acct_row.f_source = "Metasploit Import"
                            acct_row.update_record()
                            db.commit()
                            stats['accounts_updated'] += 1
                            didwhat = "Updated"
                else:
                    # add a new account record
                    if f_password:
                        f_compromised = True
                    else:
                        f_compromised = False
                    acct_data = dict(
                        f_services_id=svc_rec.id,
                        f_username=user,
                        f_password=f_password,
                        f_compromised=f_compromised,
                        f_hash1=lm.upper(),
                        f_hash1_type='LM',
                        f_hash2=nt.upper(),
                        f_hash2_type='NT',
                        f_source="Metasploit Import",
                    )
                    acct_id = db.t_accounts.insert(**acct_data)
                    db.commit()
                    stats['accounts_added'] += 1
                    didwhat = "Added"

            elif cred_type == 'smb_challenge':
                # add smb challenge hashes to the info/0 service
                svc_fields = {
                    'f_number': '0',
                    'f_proto': 'info',
                    'f_hosts_id': host_rec.id,
                }
                svc_rec = services.get_record(create_or_update=True, **svc_fields)

                # challenge/response hash for this credential and any cracked password
                pwhash = cred.findtext('pass')
                f_password = lookup_hash(pwhash)
                user = cred.findtext('user')
                query = (db.t_accounts.f_services_id == svc_rec.id) & \
                        (db.t_accounts.f_username.upper() == user.upper())
                acct_row = db(query).select().first()
                if acct_row:
                    # we have an account already, lets see if the hashes are in there
                    h1 = acct_row.f_hash1
                    if isinstance(h1, str):
                        if acct_row.f_hash1.upper() != pwhash.upper():
                            acct_row.f_password = f_password
                            acct_row.f_hash1 = pwhash.upper()
                            acct_row.f_hash1_type = 'NTCHALLENGE'
                            acct_row.f_domain = cred.findtext('proof')
                            if not acct_row.f_source:
                                acct_row.f_source = "Metasploit Capture"
                            acct_row.update_record()
                            db.commit()
                            stats['accounts_updated'] += 1
                            didwhat = "Updated"
                else:
                    # new account record
                    if f_password:
                        f_compromised = True
                    else:
                        f_compromised = False
                    acct_data = dict(
                        f_services_id=svc_rec.id,
                        f_username=user,
                        f_password=f_password,
                        f_compromised=f_compromised,
                        f_hash1=pwhash.upper(),
                        f_hash1_type='NTCHALLENGE',
                        f_source="Metasploit Capture",
                    )
                    acct_id = db.t_accounts.insert(**acct_data)
                    db.commit()
                    stats['accounts_added'] += 1
                    didwhat = "Added"

            elif cred_type == 'rakp_hmac_sha1_hash':
                # IPMI 2.0 RAKP Remote SHA1 Hashes
                # NOTE: uses the most recently seen service record (svc_rec) for this host
                f_hash1 = cred.findtext('pass')
                f_hash1_type = cred.findtext('ptype')
                user = cred.findtext('user')
                svcname = cred.findtext('sname')
                query = (db.t_accounts.f_services_id == svc_rec.id) & \
                        (db.t_accounts.f_username.upper() == user.upper())
                acct_row = db(query).select().first()
                f_source = "Metasploit Import"
                if acct_row:
                    # we have an account already, lets see if the hash is in there
                    if acct_row.f_hash1 != f_hash1:
                        acct_row.f_hash1 = f_hash1
                        acct_row.f_hash1_type = f_hash1_type
                        if not acct_row.f_source:
                            acct_row.f_source = f_source
                        acct_row.update_record()
                        db.commit()
                        stats['accounts_updated'] += 1
                        didwhat = "Updated"
                else:
                    # new account record
                    acct_data = dict(
                        f_services_id=svc_rec.id,
                        f_username=user,
                        f_hash1=f_hash1,
                        f_hash1_type=f_hash1_type,
                        f_source=f_source,
                        f_compromised=True,
                    )
                    acct_id = db.t_accounts.insert(**acct_data)
                    db.commit()
                    stats['accounts_added'] += 1
                    didwhat = "Added"

            else:
                # cred_type == 'password' or 'exploit': add a regular password
                if svc_fields['f_number'] == '445':
                    svc_fields['f_proto'] = 'info'
                    svc_fields['f_number'] = '0'
                svc_rec = services.get_record(create_or_update=True, **svc_fields)

                f_password = cred.findtext('pass')
                if f_password == "*BLANK PASSWORD*":
                    f_password = ''

                user = cred.findtext('user')
                svcname = cred.findtext('sname')

                # VNC credentials carry no username, so use a placeholder
                if svcname == "vnc":
                    user = "******"

                query = (db.t_accounts.f_services_id == svc_rec.id) & \
                        (db.t_accounts.f_username.upper() == user.upper())
                acct_row = db(query).select().first()
                f_source = cred.findtext('type')
                if f_source == 'captured':
                    f_source = "Metasploit Capture"
                else:
                    f_source = "Metasploit Import"
                if acct_row:
                    # we have an account already, lets see if the password changed
                    if acct_row.f_password != f_password:
                        acct_row.f_password = f_password
                        acct_row.f_compromised = True
                        if not acct_row.f_source:
                            acct_row.f_source = f_source
                        acct_row.update_record()
                        db.commit()
                        stats['accounts_updated'] += 1
                        didwhat = "Updated"
                else:
                    # new account record
                    acct_data = dict(
                        f_services_id=svc_rec.id,
                        f_username=user,
                        f_password=f_password,
                        f_source=f_source,
                        f_compromised=True,
                    )
                    acct_id = db.t_accounts.insert(**acct_data)
                    db.commit()
                    stats['accounts_added'] += 1
                    didwhat = "Added"

            log(" [-] Account %s: (%s) %s" % (didwhat, ipaddr, user))

    do_host_status()

    msg = " [*] Import complete: hosts: (%s/A, %s/U, %s/S) - services: (%s/A, %s/U), creds: (%s/A, %s/U)" % (
        stats['hosts_added'],
        stats['hosts_updated'],
        stats['hosts_skipped'],
        stats['services_added'],
        stats['services_updated'],
        stats['accounts_added'],
        stats['accounts_updated'],
    )
    log(msg)
    return msg
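
# Minimal usage sketch for process_report_xml(). Assumptions (not confirmed by this
# file): the function lives in skaldship/metasploit.py as in upstream Kvasir, it runs
# inside a web2py request where current.globalenv is populated, and the report path
# below is a hypothetical example. Export the data from msfconsole first with:
#   db_export -t xml /tmp/msf_export.xml
#
#   from skaldship.metasploit import process_report_xml   # assumed module path
#   msg = process_report_xml(
#       filename="/tmp/msf_export.xml",        # hypothetical export path
#       ip_ignore_list="10.0.0.1\r\n10.0.0.2", # CRLF-separated, as the parser expects
#       engineer=1,
#       asset_group="Metasploit Import",
#       update_hosts=True,
#   )
#   log(msg)   # " [*] Import complete: hosts: (...)"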
def process_report(
    filename=None,
    host_list=[],
    query=None,
    ip_ignore_list=None,
    ip_include_list=None,
    engineer=1,
    asset_group="ShodanHQ Import",
):
    """
    Processes a ShodanHQ XML Report, adding records to the db
    """
    settings = current.globalenv['settings']

    #try:
    #    from shodan import WebAPI
    #    from shodan.api import WebAPIError
    #    webapi = WebAPI(settings.shodanhq_apikey)
    #except ImportError:
    #    webapi = None

    sd = ShodanData()
    sd.engineer = engineer
    sd.asset_group = asset_group

    # build the hosts only/exclude lists
    if ip_ignore_list:
        sd.ip_exclude = ip_ignore_list.split('\r\n')
        # TODO: check for ip subnet/range and break it out to individuals
    if ip_include_list:
        sd.ip_only = ip_include_list.split('\r\n')
        # TODO: check for ip subnet/range and break it out to individuals

    hosts = []
    if filename:
        log(" [*] Processing ShodanHQ report file: %s" % (filename))
        try:
            from lxml import etree
        except ImportError:
            try:
                import xml.etree.cElementTree as etree
            except ImportError:
                try:
                    import xml.etree.ElementTree as etree
                except ImportError:
                    raise Exception("Unable to find valid ElementTree module.")

        try:
            xml = etree.parse(filename)
        except etree.ParseError as e:
            raise Exception(" [!] Invalid XML file (%s): %s " % (filename, e))

        root = xml.getroot()
        hosts = root.findall("host")

    """
    elif host_list and webapi:
        if not isinstance(host_list, list):
            host_list = [host_list]
        log(" [!] Searching for %s hosts from ShodanHQ" % (len(host_list)), level=logging.DEBUG)
        for h in host_list:
            try:
                host_result = webapi.host(h)
                if host_result.get('ip'):
                    hosts.append(host_result)
            except WebAPIError, e:
                log(" [!] (%s) ShodanHQ error response: %s" % (h, str(e)), level=logging.ERROR)
            except Exception, e:
                log(" [!] (%s) No response from ShodanHQ: %s" % (h, str(e)), level=logging.ERROR)

    elif query and webapi:
        log(" [!] Sending ShodanHQ WebAPI query: %s" % (str(query)), level=logging.DEBUG)
        try:
            query_result = webapi.search(query[0], limit=query[1])
            if query_result.get('total') > 0:
                hosts.append(query_result.get('matches'))
        except WebAPIError, e:
            log(" [!] (%s) ShodanHQ error response: %s" % (query, str(e)), level=logging.ERROR)
        except Exception, e:
            log(" [!] (%s) No response from ShodanHQ: %s" % (query, str(e)), level=logging.ERROR)
    """

    log(" [-] Parsing %d hosts" % (len(hosts)))
    for host in hosts:
        sd.parse_host(host)

    do_host_status()

    msg = " [*] Import complete: hosts: (%s/A, %s/U, %s/S) - services: (%s/A, %s/U)" % (
        sd.stats['hosts_added'],
        sd.stats['hosts_updated'],
        sd.stats['hosts_skipped'],
        sd.stats['services_added'],
        sd.stats['services_updated'],
    )
    log(msg)
    return msg
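
# Minimal usage sketch for the ShodanHQ process_report(). Assumptions (not confirmed
# by this file): the function lives in skaldship/shodanhq.py alongside ShodanData,
# current.globalenv is populated by the web2py environment, and the report path is a
# hypothetical example. Only the filename path is exercised here; the WebAPI branches
# above are commented out in the source.
#
#   from skaldship.shodanhq import process_report   # assumed module path
#   msg = process_report(
#       filename="/tmp/shodan_report.xml",           # hypothetical export path
#       ip_include_list="203.0.113.10\r\n203.0.113.11",
#       engineer=1,
#       asset_group="ShodanHQ Import",
#   )
#   log(msg)   # " [*] Import complete: hosts: (...)"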