def _disp_recs_tailf(flt, field):
    """Display passive records as they arrive ("tail -f" style).

    `field` is the date field used for ordering ("firstseen" or
    "lastseen"). Runs until interrupted (Ctrl-C).
    """
    # 1. init: display the 10 most recent matching records, oldest first
    firstrecs = list(db.passive.get(flt, sort=[(field, -1)], limit=10))
    firstrecs.reverse()
    # in case we don't have (yet) records matching our criteria
    r = {'firstseen': 0, 'lastseen': 0}
    for r in firstrecs:
        if 'addr' in r:
            print(utils.force_int2ip(r['addr']), end=' ')
        else:
            print(r['targetval'], end=' ')
        disp_rec(r)
    sys.stdout.flush()
    # 2. loop: poll every second for records newer than the last one seen
    try:
        while True:
            prevtime = r[field]
            time.sleep(1)
            # BUG FIX: the original referenced an undefined name
            # `baseflt` (NameError on first iteration; the parameter is
            # `flt`) and tested `field == 'lastseen'`, whereas the
            # sibling implementations of this function use
            # `field == 'firstseen'` for the `new` flag of
            # searchnewer().
            for r in db.passive.get(
                    db.passive.flt_and(
                        flt,
                        db.passive.searchnewer(prevtime,
                                               new=field == 'firstseen'),
                    ),
                    sort=[(field, 1)]):
                if 'addr' in r:
                    print(utils.force_int2ip(r['addr']), end=' ')
                else:
                    print(r['targetval'], end=' ')
                disp_rec(r)
            sys.stdout.flush()
    except KeyboardInterrupt:
        pass
def _disp_recs_tailf(flt, field):
    """Display passive records as they arrive ("tail -f" style),
    sorted on the date field `field` ("firstseen" or "lastseen").

    Runs until interrupted (Ctrl-C).
    """
    # 1. init: show the 10 most recent matching records, oldest first
    firstrecs = list(db.passive.get(flt, sort=[(field, -1)], limit=10))
    firstrecs.reverse()
    # in case we don't have (yet) records matching our criteria
    r = {'firstseen': 0, 'lastseen': 0}
    for r in firstrecs:
        if 'addr' in r:
            print(utils.force_int2ip(r['addr']), end=' ')
        else:
            print(r['targetval'], end=' ')
        disp_rec(r)
    sys.stdout.flush()
    # 2. loop: poll every second for records newer than the last seen
    try:
        while True:
            prevtime = r[field]
            time.sleep(1)
            # BUG FIX: use the `flt` parameter; the original referenced
            # an undefined name `baseflt` (NameError at runtime).
            for r in db.passive.get(db.passive.flt_and(
                    flt,
                    db.passive.searchnewer(prevtime,
                                           new=field == 'firstseen'),
            ), sort=[(field, 1)]):
                if 'addr' in r:
                    print(utils.force_int2ip(r['addr']), end=' ')
                else:
                    print(r['targetval'], end=' ')
                disp_rec(r)
            sys.stdout.flush()
    except KeyboardInterrupt:
        pass
def _disp_recs_tailf(dbase: DBPassive, flt: Filter, field: str) -> None:
    """Follow passive records as they are inserted ("tail -f" style).

    `field` ("firstseen" or "lastseen") is the date field used for
    ordering. Runs until interrupted (Ctrl-C).
    """

    def _show(rec) -> None:
        # Each record is prefixed with its address (or target value).
        if "addr" in rec:
            print(utils.force_int2ip(rec["addr"]), end=" ")
        else:
            print(rec["targetval"], end=" ")
        disp_rec(rec)

    # Start with the ten most recent matching records, oldest first;
    # the placeholder keeps r[field] valid when no record matches yet.
    r = {"firstseen": 0, "lastseen": 0}
    for r in reversed(list(dbase.get(flt, sort=[(field, -1)], limit=10))):
        _show(r)
    sys.stdout.flush()
    # Then poll once per second for anything newer than the last record.
    try:
        while True:
            prevtime = r[field]
            time.sleep(1)
            newflt = dbase.flt_and(
                flt,
                dbase.searchnewer(prevtime, new=field == "firstseen"),
            )
            for r in dbase.get(newflt, sort=[(field, 1)]):
                _show(r)
            sys.stdout.flush()
    except KeyboardInterrupt:
        pass
def disp_rec(r: Record) -> None:
    """Print a one-line, human-readable summary of passive record `r`.

    Handles DNS-related records: PTR, A/AAAA (with or without an
    answer), and CNAME/NS/MX. Any other record triggers a warning.
    """
    firstseen = r["firstseen"]
    lastseen = r["lastseen"]
    if "addr" in r and r["addr"]:
        if r["source"].startswith("PTR-"):
            # source is "PTR-<info>": reverse DNS record
            print("%s PTR %s (%s, %s time%s, %s - %s)" % (
                utils.force_int2ip(r["addr"]),
                r["value"],
                r["source"][4:],
                r["count"],
                r["count"] > 1 and "s" or "",
                firstseen,
                lastseen,
            ))
        elif r["source"].startswith("A-") or r["source"].startswith("AAAA-"):
            # forward DNS record with an answer address
            print("%s %s %s (%s, %s time%s, %s - %s)" % (
                r["value"],
                r["source"].split("-", 1)[0],
                utils.force_int2ip(r["addr"]),
                ":".join(r["source"].split("-")[1:]),
                r["count"],
                r["count"] > 1 and "s" or "",
                firstseen,
                lastseen,
            ))
        else:
            utils.LOGGER.warning("Cannot display record %r", r)
    else:
        if r["source"].split("-")[0] in ["CNAME", "NS", "MX"]:
            print("%s %s %s (%s, %s time%s, %s - %s)" % (
                r["value"],
                r["source"].split("-", 1)[0],
                r["targetval"],
                ":".join(r["source"].split("-")[1:]),
                r["count"],
                r["count"] > 1 and "s" or "",
                firstseen,
                lastseen,
            ))
        # Case of a A or AAAA request with no answer (i.e., no addr in r)
        elif r["source"].startswith("A-") or r["source"].startswith("AAAA-"):
            print("%s %s %s (%s, %s time%s, %s - %s)" % (
                r["value"],
                r["source"].split("-", 1)[0],
                None,
                ":".join(r["source"].split("-")[1:]),
                r["count"],
                r["count"] > 1 and "s" or "",
                firstseen,
                lastseen,
            ))
        else:
            utils.LOGGER.warning("Cannot display record %r", r)
def disp_rec(r):
    """Print a one-line, human-readable summary of passive record `r`.

    Knows how to display DNS records (PTR, A/AAAA with or without an
    answer, CNAME/NS/MX); anything else is logged as a warning.
    """
    firstseen = r['firstseen']
    lastseen = r['lastseen']
    source = r['source']
    rectype = source.split('-', 1)[0]
    plural = 's' if r['count'] > 1 else ''
    if r.get('addr'):
        if source.startswith('PTR-'):
            # "PTR-<info>": reverse DNS record
            print('%s PTR %s (%s, %s time%s, %s - %s)' % (
                utils.force_int2ip(r['addr']), r['value'], source[4:],
                r['count'], plural, firstseen, lastseen,
            ))
        elif source.startswith('A-') or source.startswith('AAAA-'):
            # Forward DNS record with an answer address
            print('%s %s %s (%s, %s time%s, %s - %s)' % (
                r['value'], rectype, utils.force_int2ip(r['addr']),
                ':'.join(source.split('-')[1:]), r['count'], plural,
                firstseen, lastseen,
            ))
        else:
            utils.LOGGER.warning("Cannot display record %r", r)
    elif rectype in ['CNAME', 'NS', 'MX']:
        print('%s %s %s (%s, %s time%s, %s - %s)' % (
            r['value'], rectype, r['targetval'],
            ':'.join(source.split('-')[1:]), r['count'], plural,
            firstseen, lastseen,
        ))
    elif source.startswith('A-') or source.startswith('AAAA-'):
        # Case of a A or AAAA request with no answer (no addr in r)
        print('%s %s %s (%s, %s time%s, %s - %s)' % (
            r['value'], rectype, None,
            ':'.join(source.split('-')[1:]), r['count'], plural,
            firstseen, lastseen,
        ))
    else:
        utils.LOGGER.warning("Cannot display record %r", r)
def _insert_or_update(self, timestamp, values, lastseen=None):
    """Insert the passive record `values`; if the record already
    exists, update the existing row instead (keep the earliest
    firstseen, the latest lastseen, and add up the counts).
    """
    # Addresses are stored in their force_int2ip() (string) form.
    addr = utils.force_int2ip(values['addr'])
    stmt = insert(self.tables.passive).values(dict(values, addr=addr))
    try:
        self.db.execute(stmt)
    except IntegrityError:
        # The record already exists: merge it. BUG FIX: match on the
        # converted address -- the conflicting row was inserted with
        # the force_int2ip() form, so comparing against the raw
        # values['addr'] (possibly an integer) could never match.
        whereclause = and_(
            self.tables.passive.addr == addr,
            self.tables.passive.sensor == values['sensor'],
            self.tables.passive.recontype == values['recontype'],
            self.tables.passive.source == values['source'],
            self.tables.passive.value == values['value'],
            self.tables.passive.targetval == values['targetval'],
            self.tables.passive.info == values['info'],
            self.tables.passive.port == values['port']
        )
        upsert = {
            # Extend the stored [firstseen, lastseen] interval.
            'firstseen': func.least(
                self.tables.passive.firstseen,
                timestamp,
            ),
            'lastseen': func.greatest(
                self.tables.passive.lastseen,
                lastseen or timestamp,
            ),
            'count': self.tables.passive.count + values['count'],
        }
        updt = update(
            self.tables.passive
        ).where(whereclause).values(upsert)
        self.db.execute(updt)
def getkeys(self, host):
    """Yield a Key for each relevant SSL script result on `host`."""
    for script in self.getscripts(host):
        # All the key material lives under this script's structured
        # output for self.scriptid.
        output = script["script"][self.scriptid]
        pubkey = output['pubkey']
        yield Key(
            utils.force_int2ip(host['addr']),
            script["port"],
            "ssl",
            pubkey['type'],
            pubkey['bits'],
            self.pem2key(output['pem']),
            utils.decode_hex(output['md5']),
        )
def main():
    """Entry point: display IP addresses/networks/ranges given on the
    command line, converted between their representations.
    """
    parser = argparse.ArgumentParser(
        description='Tool for ip addresses manipulation.',
    )
    parser.add_argument(
        'ips', nargs='*',
        help='Display results for specified IP addresses or ranges.',
    )
    args = parser.parse_args()
    # The shell splits "A - B" into three arguments; glue every lone
    # "-" back together with its neighbours into a single "A-B".
    while '-' in args.ips:
        pos = args.ips.index('-')
        joined = '%s-%s' % (args.ips[pos - 1], args.ips[pos + 1])
        args.ips[pos - 1:pos + 2] = [joined]
    for spec in args.ips:
        if '/' in spec:
            # Network in CIDR form -> address range
            rng = utils.net2range(spec)
            print("%s-%s" % (rng[0], rng[1]))
        elif '-' in spec:
            # Address range -> network(s) in CIDR form
            bounds = spec.split('-', 1)
            if bounds[0].isdigit():
                bounds[0] = int(bounds[0])
            if bounds[1].isdigit():
                bounds[1] = int(bounds[1])
            for net in utils.range2nets((bounds[0], bounds[1])):
                print(net)
        elif spec.isdigit():
            # Integer form -> printable address
            print(utils.force_int2ip(int(spec)))
        else:
            # Printable address -> integer form
            print(utils.force_ip2int(spec))
def _insert_or_update(self, timestamp, values, lastseen=None):
    """Insert the passive record `values`, or update the existing row
    on conflict (earliest firstseen, latest lastseen, summed counts).
    """
    # Addresses are stored in their force_int2ip() (string) form.
    addr = utils.force_int2ip(values['addr'])
    stmt = insert(self.tables.passive).values(dict(values, addr=addr))
    try:
        self.db.execute(stmt)
    except IntegrityError:
        # BUG FIX: match on the converted address; the conflicting row
        # was inserted with the force_int2ip() form, so comparing
        # against the raw values['addr'] (possibly an integer) could
        # never match, making the UPDATE a silent no-op.
        whereclause = and_(
            self.tables.passive.addr == addr,
            self.tables.passive.sensor == values['sensor'],
            self.tables.passive.recontype == values['recontype'],
            self.tables.passive.source == values['source'],
            self.tables.passive.value == values['value'],
            self.tables.passive.targetval == values['targetval'],
            self.tables.passive.info == values['info'],
            self.tables.passive.port == values['port'])
        upsert = {
            # Extend the stored [firstseen, lastseen] interval.
            'firstseen': func.least(
                self.tables.passive.firstseen,
                timestamp,
            ),
            'lastseen': func.greatest(
                self.tables.passive.lastseen,
                lastseen or timestamp,
            ),
            'count': self.tables.passive.count + values['count'],
        }
        updt = update(
            self.tables.passive).where(whereclause).values(upsert)
        self.db.execute(updt)
def main() -> None:
    """Entry point: display IP addresses/networks/ranges given on the
    command line, converted between their representations.
    """
    parser = argparse.ArgumentParser(
        description="Tool for ip addresses manipulation.",
    )
    parser.add_argument(
        "ips",
        nargs="*",
        help="Display results for specified IP addresses or ranges.",
    )
    args = parser.parse_args()
    # The shell splits "A - B" into three arguments; re-join every
    # lone "-" with its two neighbours into a single "A-B" argument.
    while "-" in args.ips:
        pos = args.ips.index("-")
        args.ips[pos - 1:pos + 2] = [
            "%s-%s" % (args.ips[pos - 1], args.ips[pos + 1])
        ]
    for spec in args.ips:
        if "/" in spec:
            # Network in CIDR form -> address range
            rng = utils.net2range(spec)
            print("%s-%s" % (rng[0], rng[1]))
        elif "-" in spec:
            # Address range -> network(s) in CIDR form
            bounds = spec.split("-", 1)
            start = int(bounds[0]) if bounds[0].isdigit() else bounds[0]
            stop = int(bounds[1]) if bounds[1].isdigit() else bounds[1]
            for net in utils.range2nets((start, stop)):
                print(net)
        elif spec.isdigit():
            # Integer form -> printable address
            print(utils.force_int2ip(int(spec)))
        else:
            # Printable address -> integer form
            print(utils.force_ip2int(spec))
def getkeys(self, record):
    """Yield the RSA host key described by the passive `record`."""
    infos = record['infos']
    # Rebuild the RSA public key from its stored components.
    pubkey = RSA.construct((long(infos['modulus']),
                            long(infos['exponent'])))
    yield Key(
        utils.force_int2ip(record['addr']),
        record["port"],
        "ssh",
        infos['algo'][4:],  # strip the "ssh-" prefix
        infos['bits'],
        pubkey,
        utils.decode_hex(infos['md5hash']),
    )
def _disp_recs_tail(flt, field, nbr):
    """Print the `nbr` latest records matching `flt`, oldest first
    (latest = highest `field` value).
    """
    recs = list(db.passive.get(flt, sort=[(field, -1)], limit=nbr))
    # Records come back newest first; walk them in reverse.
    for rec in recs[::-1]:
        if 'addr' in rec:
            print(utils.force_int2ip(rec['addr']), end=' ')
        else:
            print(rec['targetval'], end=' ')
        disp_rec(rec)
def _disp_recs_tail(flt: Filter, field: str, nbr: Optional[int]) -> None:
    """Display the `nbr` most recent records matching `flt`, oldest
    first (most recent = highest `field` value).
    """
    for rec in reversed(list(db.passive.get(flt, sort=[(field, -1)],
                                            limit=nbr))):
        # Prefix each record with its address (or target value).
        prefix = (utils.force_int2ip(rec["addr"]) if "addr" in rec
                  else rec["targetval"])
        print(prefix, end=" ")
        disp_rec(rec)
def disp_rec(r):
    """Print a one-line, human-readable summary of passive record `r`.

    Timestamps may be stored either as datetime objects or as raw
    epoch values; both are normalized to datetime for display. Handles
    PTR, A and CNAME/NS/MX DNS records; anything else is logged as a
    warning.
    """
    firstseen = r['firstseen']
    if not isinstance(firstseen, datetime):
        firstseen = datetime.fromtimestamp(firstseen)
    lastseen = r['lastseen']
    if not isinstance(lastseen, datetime):
        lastseen = datetime.fromtimestamp(lastseen)
    if 'addr' in r and r['addr']:
        if r['source'].startswith('PTR-'):
            # "PTR-<info>": reverse DNS record
            print('%s PTR %s (%s, %s time%s, %s - %s)' % (
                utils.force_int2ip(r['addr']),
                r['value'],
                r['source'][4:],
                r['count'],
                r['count'] > 1 and 's' or '',
                firstseen,
                lastseen,
            ))
        elif r['source'].startswith('A-'):
            # "A-<info>": forward DNS record with an answer address
            print('%s A %s (%s, %s time%s, %s - %s)' % (
                r['value'],
                utils.force_int2ip(r['addr']),
                r['source'][2:],
                r['count'],
                r['count'] > 1 and 's' or '',
                firstseen,
                lastseen,
            ))
        else:
            utils.LOGGER.warning("Cannot display record %r", r)
    else:
        if r['source'].split('-')[0] in ['CNAME', 'NS', 'MX']:
            print('%s %s %s (%s, %s time%s, %s - %s)' % (
                r['value'],
                r['source'].split('-')[0],
                r['targetval'],
                ':'.join(r['source'].split('-')[1:]),
                r['count'],
                r['count'] > 1 and 's' or '',
                firstseen,
                lastseen,
            ))
        else:
            utils.LOGGER.warning("Cannot display record %r", r)
def _disp_recs_tail(flt, field, nbr):
    """Show the `nbr` latest records matching `flt`, oldest first."""
    recs = list(db.passive.get(flt, sort=[(field, -1)], limit=nbr))
    # Records come back newest first: popping from the end yields
    # them oldest first.
    while recs:
        rec = recs.pop()
        if 'addr' in rec:
            print(utils.force_int2ip(rec['addr']), end=' ')
        else:
            print(rec['targetval'], end=' ')
        disp_rec(rec)
def getkeys(self, record):
    """Yield the RSA public key carried by the SSL certificate in the
    passive `record`, as a Key named tuple.

    Yields nothing when the certificate cannot be parsed.
    """
    # Prefer the full (untruncated) value when it is stored separately.
    certtext = self._pem2key(record['fullvalue']
                             if 'fullvalue' in record
                             else record['value'])
    if certtext is None:
        # Certificate could not be parsed
        return
    yield Key(
        utils.force_int2ip(record['addr']),
        record["port"],
        "ssl",
        certtext['type'],
        int(certtext['len']),
        # Rebuild the key: strip formatting characters from the
        # modulus dump, parse it as hex, exponent as an integer.
        RSA.construct(
            (long(self.modulus_badchars.sub(b"", certtext['modulus']),
                  16),
             long(certtext['exponent']))),
        utils.decode_hex(record['infos']['md5hash']))
def disp_rec(r):
    """Print a one-line, human-readable summary of passive record `r`.

    Handles PTR, A and CNAME/NS/MX DNS records; anything else is
    logged as a warning.
    """
    firstseen = r['firstseen']
    lastseen = r['lastseen']
    if 'addr' in r and r['addr']:
        if r['source'].startswith('PTR-'):
            # "PTR-<info>": reverse DNS record
            print('%s PTR %s (%s, %s time%s, %s - %s)' % (
                utils.force_int2ip(r['addr']),
                r['value'],
                r['source'][4:],
                r['count'],
                r['count'] > 1 and 's' or '',
                firstseen,
                lastseen,
            ))
        elif r['source'].startswith('A-'):
            # "A-<info>": forward DNS record with an answer address
            print('%s A %s (%s, %s time%s, %s - %s)' % (
                r['value'],
                utils.force_int2ip(r['addr']),
                r['source'][2:],
                r['count'],
                r['count'] > 1 and 's' or '',
                firstseen,
                lastseen,
            ))
        else:
            utils.LOGGER.warning("Cannot display record %r", r)
    else:
        if r['source'].split('-')[0] in ['CNAME', 'NS', 'MX']:
            print('%s %s %s (%s, %s time%s, %s - %s)' % (
                r['value'],
                r['source'].split('-')[0],
                r['targetval'],
                ':'.join(r['source'].split('-')[1:]),
                r['count'],
                r['count'] > 1 and 's' or '',
                firstseen,
                lastseen,
            ))
        else:
            utils.LOGGER.warning("Cannot display record %r", r)
def getkeys(self, host):
    """Yield the SSH host keys of type `self.keytype` found in
    `host`'s script results, as Key named tuples.
    """
    for script in self.getscripts(host):
        for key in script['script'][self.scriptid]:
            # key['type'] is "ssh-<type>"; compare without the prefix.
            if key['type'][4:] == self.keytype:
                data = utils.decode_b64(key['key'].encode())
                # Handle bug (in Nmap?) where data gets encoded
                # twice.
                if data[:1] != b'\x00':
                    data = utils.decode_b64(data)
                yield Key(
                    utils.force_int2ip(host['addr']),
                    script["port"],
                    "ssh",
                    key['type'][4:],
                    int(float(key['bits'])),  # for some reason,
                                              # Nmap sometimes
                                              # outputs 1024.0
                    self.data2key(data),
                    utils.decode_hex(key['fingerprint']),
                )
def disp_recs_std(
    dbase: DBPassive, flt: Filter, sort: Sort, limit: Optional[int],
    skip: Optional[int]
) -> None:
    """Display passive records matching `flt`, grouped by address.

    Each time a new address is reached, a header with the address and
    its geolocation / AS information (from db.data) is printed, then
    one line per record via disp_rec().
    """
    old_addr = None
    # Default ordering groups all records of an address together.
    sort = sort or [("addr", 1), ("port", 1), ("recontype", 1),
                    ("source", 1)]
    for rec in dbase.get(flt, sort=sort, limit=limit, skip=skip):
        if "addr" not in rec or not rec["addr"]:
            # Records without an address cannot be grouped; skip them.
            continue
        if old_addr != rec["addr"]:
            # New address: print the header with IP information.
            if old_addr is not None:
                print()
            old_addr = rec["addr"]
            print(utils.force_int2ip(old_addr))
            ipinfo = db.data.infos_byip(old_addr)
            if ipinfo:
                if "address_type" in ipinfo:
                    print("\t", end=" ")
                    print(ipinfo["address_type"], end=" ")
                    print()
                if "country_code" in ipinfo:
                    print("\t", end=" ")
                    print(ipinfo["country_code"], end=" ")
                    if "country_name" in ipinfo:
                        cname = ipinfo["country_name"]
                    else:
                        # Some backends cannot resolve country names.
                        try:
                            cname = db.data.country_name_by_code(
                                ipinfo["country_code"])
                        except AttributeError:
                            cname = None
                    if cname:
                        print("[%s]" % cname, end=" ")
                    print()
                if "as_num" in ipinfo:
                    print("\t", end=" ")
                    print("AS%d" % ipinfo["as_num"], end=" ")
                    if "as_name" in ipinfo:
                        print("[%s]" % ipinfo["as_name"], end=" ")
                    print()
                elif "as_name" in ipinfo:
                    # AS name known but not its number.
                    print("\t", end=" ")
                    print("AS????? [%s]" % ipinfo["as_name"], end=" ")
                    print()
        disp_rec(rec)
def disp_recs_std(flt, sort, limit, skip):
    """Display passive records matching `flt`, grouped by address.

    Each time a new address is reached, a header with the address and
    its geolocation / AS information (from db.data) is printed, then
    one line per record via disp_rec().
    """
    old_addr = None
    # Default ordering groups all records of an address together.
    sort = sort or [('addr', 1), ('port', 1), ('recontype', 1),
                    ('source', 1)]
    for rec in db.passive.get(flt, sort=sort, limit=limit, skip=skip):
        if 'addr' not in rec or not rec['addr']:
            # Records without an address cannot be grouped; skip them.
            continue
        if old_addr != rec['addr']:
            # New address: print the header with IP information.
            if old_addr is not None:
                print()
            old_addr = rec['addr']
            print(utils.force_int2ip(old_addr))
            ipinfo = db.data.infos_byip(old_addr)
            if ipinfo:
                if 'address_type' in ipinfo:
                    print('\t', end=' ')
                    print(ipinfo['address_type'], end=' ')
                    print()
                if 'country_code' in ipinfo:
                    print('\t', end=' ')
                    print(ipinfo['country_code'], end=' ')
                    if 'country_name' in ipinfo:
                        cname = ipinfo['country_name']
                    else:
                        # Some backends cannot resolve country names.
                        try:
                            cname = db.data.country_name_by_code(
                                ipinfo['country_code']
                            )
                        except AttributeError:
                            cname = None
                    if cname:
                        print('[%s]' % cname, end=' ')
                    print()
                if 'as_num' in ipinfo:
                    print('\t', end=' ')
                    print('AS%d' % ipinfo['as_num'], end=' ')
                    if 'as_name' in ipinfo:
                        print('[%s]' % ipinfo['as_name'], end=' ')
                    print()
                elif 'as_name' in ipinfo:
                    # AS name known but not its number.
                    print('\t', end=' ')
                    print('AS????? [%s]' % ipinfo['as_name'], end=' ')
                    print()
        disp_rec(rec)
def main():
    """Entry point: parse IP addresses / networks / ranges given on
    the command line and display them converted between their
    representations.

    Works with argparse and, as a fallback (presumably for old Python
    versions, judging by the module-level USING_ARGPASE flag), with
    optparse.
    """
    if USING_ARGPARSE:
        parser = argparse.ArgumentParser(
            description='Tool for ip addresses manipulation.',
        )
    else:
        parser = optparse.OptionParser(
            description='Tool for ip addresses manipulation.',
        )
        # Emulate argparse's API on top of optparse: expose the
        # positional arguments (second element of parse_args()'s
        # result) as the `ips` attribute of the returned object.
        parser.parse_args_orig = parser.parse_args

        def my_parse_args():
            res = parser.parse_args_orig()
            res[0].ensure_value('ips', res[1])
            return res[0]

        parser.parse_args = my_parse_args
        parser.add_argument = parser.add_option
    if USING_ARGPARSE:
        parser.add_argument('ips', nargs='*',
                            help='Display results for specified IP addresses'
                            ' or ranges.')
    args = parser.parse_args()
    for a in args.ips:
        if '/' in a:
            # Network in CIDR form -> address range
            a = utils.net2range(a)
            print("%s-%s" % (a[0], a[1]))
        elif '-' in a:
            # Address range -> network(s) in CIDR form
            a = a.split('-', 1)
            if a[0].isdigit():
                a[0] = int(a[0])
            if a[1].isdigit():
                a[1] = int(a[1])
            for n in utils.range2nets((a[0], a[1])):
                print(n)
        else:
            # Single address: convert between integer and dotted forms
            if a.isdigit():
                a = utils.force_int2ip(int(a))
            else:
                a = utils.force_ip2int(a)
            print(a)
def disp_recs_std(flt):
    """Display passive records matching `flt`, grouped by address,
    with a geolocation / AS header for each new address.
    """
    oa = None  # previously displayed address
    c = db.passive.get(flt, sort=[('addr', 1), ('recontype', 1),
                                  ('source', 1), ('port', 1)])
    # NOTE: `c` is rebound to the per-IP info dict inside the loop;
    # this is safe because the for loop holds its own reference to
    # the original iterator.
    for rec in c:
        if 'addr' not in rec or not rec['addr']:
            # Records without an address cannot be grouped; skip them.
            continue
        if oa != rec['addr']:
            # New address: print the header with IP information.
            if oa is not None:
                print()
            oa = rec['addr']
            print(utils.force_int2ip(oa))
            c = db.data.infos_byip(oa)
            if c:
                if 'country_code' in c:
                    print('\t', end=' ')
                    print(c['country_code'], end=' ')
                    if 'country_name' in c:
                        cname = c['country_name']
                    else:
                        # Some backends cannot resolve country names.
                        try:
                            cname = db.data.country_name_by_code(
                                c['country_code'])
                        except AttributeError:
                            cname = None
                    if cname:
                        print('[%s]' % cname, end=' ')
                    print()
                if 'as_num' in c:
                    print('\t', end=' ')
                    print('AS%d' % c['as_num'], end=' ')
                    if 'as_name' in c:
                        print('[%s]' % c['as_name'], end=' ')
                    print()
                elif 'as_name' in c:
                    # AS name known but not its number.
                    print('\t', end=' ')
                    print('AS???? [%s]' % c['as_name'], end=' ')
        disp_rec(rec)
def disp_recs_std(flt, sort, limit, skip):
    """Display passive records matching `flt`, grouped by address,
    with a geolocation / AS header for each new address.
    """
    old_addr = None
    # Default ordering groups all records of an address together.
    sort = sort or [('addr', 1), ('port', 1), ('recontype', 1),
                    ('source', 1)]
    for rec in db.passive.get(flt, sort=sort, limit=limit, skip=skip):
        if 'addr' not in rec or not rec['addr']:
            # Records without an address cannot be grouped; skip them.
            continue
        if old_addr != rec['addr']:
            # New address: print the header with IP information.
            if old_addr is not None:
                print()
            old_addr = rec['addr']
            print(utils.force_int2ip(old_addr))
            ipinfo = db.data.infos_byip(old_addr)
            if ipinfo:
                if 'country_code' in ipinfo:
                    print('\t', end=' ')
                    print(ipinfo['country_code'], end=' ')
                    if 'country_name' in ipinfo:
                        cname = ipinfo['country_name']
                    else:
                        # Some backends cannot resolve country names.
                        try:
                            cname = db.data.country_name_by_code(
                                ipinfo['country_code']
                            )
                        except AttributeError:
                            cname = None
                    if cname:
                        print('[%s]' % cname, end=' ')
                    print()
                if 'as_num' in ipinfo:
                    print('\t', end=' ')
                    print('AS%d' % ipinfo['as_num'], end=' ')
                    if 'as_name' in ipinfo:
                        print('[%s]' % ipinfo['as_name'], end=' ')
                    print()
                elif 'as_name' in ipinfo:
                    # AS name known but not its number.
                    print('\t', end=' ')
                    print('AS???? [%s]' % ipinfo['as_name'], end=' ')
        disp_rec(rec)
def _insert_or_update(self, timestamp, values, lastseen=None,
                      replacecount=False):
    """Insert the passive record `values`; if the record already
    exists, update the existing row instead.

    `timestamp` (and `lastseen`, when provided) extend the stored
    [firstseen, lastseen] interval; when `replacecount` is true the
    stored hit count is replaced rather than incremented.
    """
    # Addresses are stored in their force_int2ip() (string) form.
    addr = utils.force_int2ip(values["addr"])
    stmt = insert(self.tables.passive).values(dict(values, addr=addr))
    try:
        self.db.execute(stmt)
    except IntegrityError:
        # Record already exists: merge it. BUG FIX: match on the
        # converted address -- the conflicting row was inserted with
        # the force_int2ip() form, so comparing against the raw
        # values["addr"] (possibly an integer) could never match,
        # making the UPDATE a silent no-op.
        whereclause = and_(
            self.tables.passive.addr == addr,
            self.tables.passive.sensor == values["sensor"],
            self.tables.passive.recontype == values["recontype"],
            self.tables.passive.source == values["source"],
            self.tables.passive.value == values["value"],
            self.tables.passive.targetval == values["targetval"],
            self.tables.passive.info == values["info"],
            self.tables.passive.port == values["port"],
        )
        upsert = {
            # Extend the stored [firstseen, lastseen] interval.
            "firstseen": func.least(
                self.tables.passive.firstseen,
                timestamp,
            ),
            "lastseen": func.greatest(
                self.tables.passive.lastseen,
                lastseen or timestamp,
            ),
            "count": (values["count"] if replacecount
                      else self.tables.passive.count + values["count"]),
        }
        updt = update(
            self.tables.passive).where(whereclause).values(upsert)
        self.db.execute(updt)
def get_nmap(subdb):
    """Get records from Nmap & View databases

    :param str subdb: database to query (must be "scans" or "view")
    :query str q: query (including limit/skip and sort)
    :query str f: filter
    :query str callback: callback to use for JSONP results
    :query bool ipsasnumbers: to get IP addresses as numbers rather
                              than as strings
    :query bool datesasstrings: to get dates as strings rather than
                                as timestamps
    :query str format: "json" (the default) or "ndjson"
    :status 200: no error
    :status 400: invalid referer
    :>jsonarr object: results

    """
    subdb_tool = "view" if subdb == "view" else "scancli"
    subdb = db.view if subdb == "view" else db.nmap
    flt_params = get_base(subdb)
    # PostgreSQL: the query plan if affected by the limit and gives
    # really poor results. This is a temporary workaround (look for
    # XXX-WORKAROUND-PGSQL).
    # result = subdb.get(flt_params.flt, limit=flt_params.limit,
    #                    skip=flt_params.skip, sort=flt_params.sortby)
    result = subdb.get(
        flt_params.flt,
        skip=flt_params.skip,
        sort=flt_params.sortby,
        fields=flt_params.fields,
    )
    if flt_params.unused:
        # Warn (JS alert when JSONP) about unrecognized query options.
        msg = "Option%s not understood: %s" % (
            "s" if len(flt_params.unused) > 1 else "",
            ", ".join(flt_params.unused),
        )
        if flt_params.callback is not None:
            yield webutils.js_alert("param-unused", "warning", msg)
        utils.LOGGER.warning(msg)
    elif flt_params.callback is not None:
        yield webutils.js_del_alert("param-unused")
    if config.DEBUG:
        msg1 = "filter: %r" % subdb.flt2str(flt_params.flt)
        msg2 = "user: %r" % webutils.get_user()
        utils.LOGGER.debug(msg1)
        utils.LOGGER.debug(msg2)
        if flt_params.callback is not None:
            yield webutils.js_alert("filter", "info", msg1)
            yield webutils.js_alert("user", "info", msg2)
    version_mismatch = {}
    # Opening of the output: JSON array, ndjson (nothing), or JSONP
    # callback call.
    if flt_params.callback is None:
        if flt_params.fmt == "json":
            yield "[\n"
    else:
        yield "%s([\n" % flt_params.callback
    # XXX-WORKAROUND-PGSQL
    # for rec in result:
    for i, rec in enumerate(result):
        # Strip the backend-specific fields.
        for fld in ["_id", "scanid"]:
            try:
                del rec[fld]
            except KeyError:
                pass
        if flt_params.ipsasnumbers:
            rec["addr"] = utils.force_ip2int(rec["addr"])
        if not flt_params.datesasstrings:
            for field in subdb.datetime_fields:
                _set_datetime_field(subdb, rec, field)
        for port in rec.get("ports", []):
            # Binary screenshot data cannot go to JSON as-is.
            if "screendata" in port:
                port["screendata"] = utils.encode_b64(port["screendata"])
            for script in port.get("scripts", []):
                if "masscan" in script:
                    try:
                        del script["masscan"]["raw"]
                    except KeyError:
                        pass
        if not flt_params.ipsasnumbers:
            if "traces" in rec:
                for trace in rec["traces"]:
                    trace["hops"].sort(key=lambda x: x["ttl"])
                    for hop in trace["hops"]:
                        hop["ipaddr"] = utils.force_int2ip(hop["ipaddr"])
        # Enrich MAC addresses with their manufacturer, when known.
        addresses = rec.get("addresses", {}).get("mac")
        if addresses:
            newaddresses = []
            for addr in addresses:
                manuf = utils.mac2manuf(addr)
                if manuf and manuf[0]:
                    newaddresses.append({"addr": addr,
                                         "manuf": manuf[0]})
                else:
                    newaddresses.append({"addr": addr})
            rec["addresses"]["mac"] = newaddresses
        if flt_params.fmt == "ndjson":
            yield "%s\n" % json.dumps(rec, default=utils.serialize)
        else:
            # JSON array: comma-separate the elements.
            yield "%s\t%s" % (
                "" if i == 0 else ",\n",
                json.dumps(rec, default=utils.serialize),
            )
        check = subdb.cmp_schema_version_host(rec)
        if check:
            version_mismatch[check] = version_mismatch.get(check, 0) + 1
        # XXX-WORKAROUND-PGSQL
        if flt_params.limit and i + 1 >= flt_params.limit:
            break
    # Closing of the output.
    if flt_params.callback is None:
        if flt_params.fmt == "json":
            yield "\n]\n"
    else:
        yield "\n]);\n"
    # Report schema version mismatches: 1 means out-of-date records,
    # -1 records written by a more recent IVRE.
    messages = {
        1: lambda count: ("%d document%s displayed %s out-of-date. "
                          "Please run "
                          "the following command: 'ivre %s "
                          "--update-schema;" % (
                              count,
                              "s" if count > 1 else "",
                              "are" if count > 1 else "is",
                              subdb_tool,
                          )),
        -1: lambda count: ("%d document%s displayed ha%s been inserted by "
                           "a more recent version of IVRE. Please update "
                           "IVRE!" % (count,
                                      "s" if count > 1 else "",
                                      "ve" if count > 1 else "s")),
    }
    for mismatch, count in version_mismatch.items():
        message = messages[mismatch](count)
        if flt_params.callback is not None:
            yield webutils.js_alert(
                "version-mismatch-%d" % ((mismatch + 1) // 2),
                "warning", message)
        utils.LOGGER.warning(message)
def displayhost(record, showscripts=True, showtraceroute=True, showos=True,
                out=sys.stdout):
    """Displays (on `out`, by default `sys.stdout`) the Nmap scan
    result contained in `record`.

    `showscripts`, `showtraceroute` and `showos` toggle the
    corresponding sections of the output.
    """
    # Header line: address, hostnames, source, categories, state.
    line = "Host %s" % utils.force_int2ip(record['addr'])
    if record.get('hostnames'):
        line += " (%s)" % '/'.join(x['name'] for x in record['hostnames'])
    if 'source' in record:
        line += ' from %s' % record['source']
    if record.get('categories'):
        line += ' (%s)' % ', '.join(record['categories'])
    if 'state' in record:
        line += ' (%s' % record['state']
        if 'state_reason' in record:
            line += ': %s' % record['state_reason']
        # NOTE(review): the newline is only appended in the 'state'
        # branch, so hosts without a state get no trailing newline --
        # TODO confirm this is intended.
        line += ')\n'
    out.write(line)
    if 'infos' in record:
        # Geolocation / AS information.
        infos = record['infos']
        if 'country_code' in infos or 'country_name' in infos:
            out.write("\t%s - %s" % (infos.get('country_code', '?'),
                                     infos.get('country_name', '?')))
            if 'city' in infos:
                out.write(' - %s' % infos['city'])
            out.write('\n')
        if 'as_num' in infos or 'as_name' in infos:
            out.write("\tAS%s - %s\n" % (infos.get('as_num', '?'),
                                         infos.get('as_name', '?')))
    if 'starttime' in record and 'endtime' in record:
        out.write("\tscan %s - %s\n" % (record['starttime'],
                                        record['endtime']))
    # Summary of the ports not listed individually.
    for state, counts in viewitems(record.get('extraports', {})):
        out.write("\t%d ports %s (%s)\n" % (
            counts["total"], state,
            ", ".join("%d %s" % (count, reason)
                      for reason, count in viewitems(counts["reasons"])
                      if reason != "total")))
    ports = record.get('ports', [])
    ports.sort(key=lambda x: (x.get('protocol') or '', x['port']))
    for port in ports:
        if port.get('port') == -1:
            # Pseudo-port -1 carries the host-level scripts; stash
            # them on the record for the "Host scripts" section below.
            record['scripts'] = port['scripts']
            continue
        if 'state_reason' in port:
            # Gather the reason and every "state_reason_*" detail.
            reason = " (%s)" % ', '.join(
                [port['state_reason']] +
                ["%s=%s" % (field[13:], value)
                 for field, value in viewitems(port)
                 if field.startswith('state_reason_')]
            )
        else:
            reason = ""
        if 'service_name' in port:
            # Service description: name (method) + detected details.
            srv = port['service_name']
            if 'service_method' in port:
                srv += ' (%s)' % port['service_method']
            for field in ['service_product', 'service_version',
                          'service_extrainfo', 'service_ostype',
                          'service_hostname']:
                if field in port:
                    srv += ' %s' % port[field]
        else:
            srv = ""
        out.write("\t%-10s%-8s%-22s%s\n" % (
            '%s/%d' % (port.get('protocol'), port['port']),
            port['state_state'], reason, srv))
        if showscripts:
            out.writelines(_scriptoutput(port))
    if showscripts:
        # Host-level scripts (collected from pseudo-port -1 above or
        # already present on the record).
        scripts = _scriptoutput(record)
        if scripts:
            out.write('\tHost scripts:\n')
            out.writelines(scripts)
    if showtraceroute and record.get('traces'):
        for trace in record['traces']:
            proto = trace['protocol']
            if proto in ['tcp', 'udp']:
                proto += '/%d' % trace['port']
            out.write('\tTraceroute (using %s)\n' % proto)
            hops = trace['hops']
            hops.sort(key=lambda hop: hop['ttl'])
            for hop in hops:
                out.write(
                    '\t\t%3s %15s %7s\n' % (
                        hop['ttl'],
                        utils.force_int2ip(hop['ipaddr']),
                        hop['rtt'],
                    )
                )
    if showos and record.get('os', {}).get('osclass'):
        # Only display the OS classes with the best accuracy.
        osclasses = record['os']['osclass']
        maxacc = str(max(int(x['accuracy']) for x in osclasses))
        osclasses = [osclass for osclass in osclasses
                     if osclass['accuracy'] == maxacc]
        out.write('\tOS fingerprint\n')
        for osclass in osclasses:
            out.write(
                '\t\t%(osfamily)s / %(type)s / %(vendor)s / '
                'accuracy = %(accuracy)s\n' % osclass)
def get_nmap():
    """Stream records from the View database as a JSON array (or a
    JSONP callback call when a callback is requested).

    Output options (IPs as numbers, dates as strings, limit/skip/sort)
    come from the query parameters parsed by get_nmap_base().
    """
    flt_params = get_nmap_base()
    # PostgreSQL: the query plan if affected by the limit and gives
    # really poor results. This is a temporary workaround (look for
    # XXX-WORKAROUND-PGSQL).
    # result = db.view.get(flt_params.flt, limit=flt_params.limit,
    #                      skip=flt_params.skip, sort=flt_params.sortby)
    result = db.view.get(flt_params.flt, skip=flt_params.skip,
                         sort=flt_params.sortby)
    if flt_params.unused:
        # Warn (JS alert when JSONP) about unrecognized query options.
        msg = 'Option%s not understood: %s' % (
            's' if len(flt_params.unused) > 1 else '',
            ', '.join(flt_params.unused),
        )
        if flt_params.callback is not None:
            yield webutils.js_alert("param-unused", "warning", msg)
        utils.LOGGER.warning(msg)
    elif flt_params.callback is not None:
        yield webutils.js_del_alert("param-unused")
    if config.DEBUG:
        msg1 = "filter: %s" % db.view.flt2str(flt_params.flt)
        msg2 = "user: %r" % webutils.get_user()
        utils.LOGGER.debug(msg1)
        utils.LOGGER.debug(msg2)
        if flt_params.callback is not None:
            yield webutils.js_alert("filter", "info", msg1)
            yield webutils.js_alert("user", "info", msg2)
    version_mismatch = {}
    # Opening of the output: plain JSON array or JSONP callback call.
    if flt_params.callback is None:
        yield "[\n"
    else:
        yield "%s([\n" % flt_params.callback
    # XXX-WORKAROUND-PGSQL
    # for rec in result:
    for i, rec in enumerate(result):
        # Strip the backend-specific fields.
        for fld in ['_id', 'scanid']:
            try:
                del rec[fld]
            except KeyError:
                pass
        if not flt_params.ipsasnumbers:
            rec['addr'] = utils.force_int2ip(rec['addr'])
        for field in ['starttime', 'endtime']:
            if field in rec:
                if not flt_params.datesasstrings:
                    rec[field] = int(utils.datetime2timestamp(rec[field]))
        for port in rec.get('ports', []):
            # Binary screenshot data cannot go to JSON as-is.
            if 'screendata' in port:
                port['screendata'] = utils.encode_b64(port['screendata'])
            for script in port.get('scripts', []):
                if "masscan" in script:
                    try:
                        del script['masscan']['raw']
                    except KeyError:
                        pass
        if not flt_params.ipsasnumbers:
            if 'traces' in rec:
                for trace in rec['traces']:
                    trace['hops'].sort(key=lambda x: x['ttl'])
                    for hop in trace['hops']:
                        hop['ipaddr'] = utils.force_int2ip(hop['ipaddr'])
        # JSON array: comma-separate the elements.
        yield "%s\t%s" % ('' if i == 0 else ',\n',
                          json.dumps(rec, default=utils.serialize))
        check = db.view.cmp_schema_version_host(rec)
        if check:
            version_mismatch[check] = version_mismatch.get(check, 0) + 1
        # XXX-WORKAROUND-PGSQL
        if i + 1 >= flt_params.limit:
            break
    # Closing of the output.
    if flt_params.callback is None:
        yield "\n]\n"
    else:
        yield "\n]);\n"
    # Report schema version mismatches: 1 means out-of-date records,
    # -1 records written by a more recent IVRE.
    messages = {
        1: lambda count: ("%d document%s displayed %s out-of-date. "
                          "Please run "
                          "the following command: 'ivre scancli "
                          "--update-schema;" % (
                              count,
                              's' if count > 1 else '',
                              'are' if count > 1 else 'is')),
        -1: lambda count: ('%d document%s displayed ha%s been inserted by '
                           'a more recent version of IVRE. Please update '
                           'IVRE!' % (count,
                                      's' if count > 1 else '',
                                      've' if count > 1 else 's')),
    }
    for mismatch, count in viewitems(version_mismatch):
        message = messages[mismatch](count)
        if flt_params.callback is not None:
            yield webutils.js_alert(
                "version-mismatch-%d" % ((mismatch + 1) // 2),
                "warning", message
            )
        utils.LOGGER.warning(message)
def get_nmap(subdb):
    """Get records from Nmap & View databases

    :param str subdb: database to query (must be "scans" or "view")
    :query str q: query (including limit/skip and sort)
    :query str callback: callback to use for JSONP results
    :query bool ipsasnumbers: to get IP addresses as numbers rather
                              than as strings
    :query bool datesasstrings: to get dates as strings rather than
                                as timestamps
    :status 200: no error
    :status 400: invalid referer
    :>jsonarr object: results

    """
    subdb_tool = "view" if subdb == 'view' else "scancli"
    subdb = db.view if subdb == 'view' else db.nmap
    flt_params = get_nmap_base(subdb)
    # PostgreSQL: the query plan if affected by the limit and gives
    # really poor results. This is a temporary workaround (look for
    # XXX-WORKAROUND-PGSQL).
    # result = subdb.get(flt_params.flt, limit=flt_params.limit,
    #                    skip=flt_params.skip, sort=flt_params.sortby)
    result = subdb.get(flt_params.flt, skip=flt_params.skip,
                       sort=flt_params.sortby)
    if flt_params.unused:
        # Warn (JS alert when JSONP) about unrecognized query options.
        msg = 'Option%s not understood: %s' % (
            's' if len(flt_params.unused) > 1 else '',
            ', '.join(flt_params.unused),
        )
        if flt_params.callback is not None:
            yield webutils.js_alert("param-unused", "warning", msg)
        utils.LOGGER.warning(msg)
    elif flt_params.callback is not None:
        yield webutils.js_del_alert("param-unused")
    if config.DEBUG:
        msg1 = "filter: %s" % subdb.flt2str(flt_params.flt)
        msg2 = "user: %r" % webutils.get_user()
        utils.LOGGER.debug(msg1)
        utils.LOGGER.debug(msg2)
        if flt_params.callback is not None:
            yield webutils.js_alert("filter", "info", msg1)
            yield webutils.js_alert("user", "info", msg2)
    version_mismatch = {}
    # Opening of the output: plain JSON array or JSONP callback call.
    if flt_params.callback is None:
        yield "[\n"
    else:
        yield "%s([\n" % flt_params.callback
    # XXX-WORKAROUND-PGSQL
    # for rec in result:
    for i, rec in enumerate(result):
        # Strip the backend-specific fields.
        for fld in ['_id', 'scanid']:
            try:
                del rec[fld]
            except KeyError:
                pass
        if not flt_params.ipsasnumbers:
            rec['addr'] = utils.force_int2ip(rec['addr'])
        for field in ['starttime', 'endtime']:
            if field in rec:
                if not flt_params.datesasstrings:
                    rec[field] = int(utils.datetime2timestamp(rec[field]))
        for port in rec.get('ports', []):
            # Binary screenshot data cannot go to JSON as-is.
            if 'screendata' in port:
                port['screendata'] = utils.encode_b64(port['screendata'])
            for script in port.get('scripts', []):
                if "masscan" in script:
                    try:
                        del script['masscan']['raw']
                    except KeyError:
                        pass
        if not flt_params.ipsasnumbers:
            if 'traces' in rec:
                for trace in rec['traces']:
                    trace['hops'].sort(key=lambda x: x['ttl'])
                    for hop in trace['hops']:
                        hop['ipaddr'] = utils.force_int2ip(hop['ipaddr'])
        # Enrich MAC addresses with their manufacturer, when known.
        addresses = rec.get('addresses', {}).get('mac')
        if addresses:
            newaddresses = []
            for addr in addresses:
                manuf = utils.mac2manuf(addr)
                if manuf and manuf[0]:
                    newaddresses.append({'addr': addr,
                                         'manuf': manuf[0]})
                else:
                    newaddresses.append({'addr': addr})
            rec['addresses']['mac'] = newaddresses
        # JSON array: comma-separate the elements.
        yield "%s\t%s" % ('' if i == 0 else ',\n',
                          json.dumps(rec, default=utils.serialize))
        check = subdb.cmp_schema_version_host(rec)
        if check:
            version_mismatch[check] = version_mismatch.get(check, 0) + 1
        # XXX-WORKAROUND-PGSQL
        if i + 1 >= flt_params.limit:
            break
    # Closing of the output.
    if flt_params.callback is None:
        yield "\n]\n"
    else:
        yield "\n]);\n"
    # Report schema version mismatches: 1 means out-of-date records,
    # -1 records written by a more recent IVRE.
    messages = {
        1: lambda count: ("%d document%s displayed %s out-of-date. "
                          "Please run "
                          "the following command: 'ivre %s "
                          "--update-schema;" % (
                              count,
                              's' if count > 1 else '',
                              'are' if count > 1 else 'is',
                              subdb_tool)),
        -1: lambda count: ('%d document%s displayed ha%s been inserted by '
                           'a more recent version of IVRE. Please update '
                           'IVRE!' % (count,
                                      's' if count > 1 else '',
                                      've' if count > 1 else 's')),
    }
    for mismatch, count in viewitems(version_mismatch):
        message = messages[mismatch](count)
        if flt_params.callback is not None:
            yield webutils.js_alert(
                "version-mismatch-%d" % ((mismatch + 1) // 2),
                "warning", message)
        utils.LOGGER.warning(message)
def get_nmap():
    """Stream the view database records matching the request as a JSON
    array (or a JSONP callback call when a ``callback`` parameter is
    given).

    Older variant of :func:`get_nmap` hard-wired to ``db.view``;
    generator yielding response-body chunks.

    """
    flt_params = get_nmap_base()
    ## PostgreSQL: the query plan if affected by the limit and gives
    ## really poor results. This is a temporary workaround (look for
    ## XXX-WORKAROUND-PGSQL)
    # result = db.view.get(flt_params.flt, limit=flt_params.limit,
    #                      skip=flt_params.skip, sort=flt_params.sortby)
    result = db.view.get(flt_params.flt, skip=flt_params.skip,
                         sort=flt_params.sortby)
    if flt_params.unused:
        # Tell the user about query options that were not understood.
        msg = 'Option%s not understood: %s' % (
            's' if len(flt_params.unused) > 1 else '',
            ', '.join(flt_params.unused),
        )
        if flt_params.callback is not None:
            yield webutils.js_alert("param-unused", "warning", msg)
        utils.LOGGER.warning(msg)
    elif flt_params.callback is not None:
        yield webutils.js_del_alert("param-unused")
    if config.DEBUG:
        msg1 = "filter: %s" % db.view.flt2str(flt_params.flt)
        msg2 = "user: %r" % webutils.get_user()
        utils.LOGGER.debug(msg1)
        utils.LOGGER.debug(msg2)
        if flt_params.callback is not None:
            yield webutils.js_alert("filter", "info", msg1)
            yield webutils.js_alert("user", "info", msg2)
    version_mismatch = {}
    if flt_params.callback is None:
        yield "[\n"
    else:
        yield "%s([\n" % flt_params.callback
    ## XXX-WORKAROUND-PGSQL
    # for rec in result:
    for i, rec in enumerate(result):
        # Remove internal fields before serialization.
        for fld in ['_id', 'scanid']:
            try:
                del rec[fld]
            except KeyError:
                pass
        if not flt_params.ipsasnumbers:
            rec['addr'] = utils.force_int2ip(rec['addr'])
        for field in ['starttime', 'endtime']:
            if field in rec:
                if not flt_params.datesasstrings:
                    rec[field] = int(utils.datetime2timestamp(rec[field]))
        for port in rec.get('ports', []):
            if 'screendata' in port:
                # Binary data must be base64-encoded for JSON output.
                port['screendata'] = utils.encode_b64(port['screendata'])
            for script in port.get('scripts', []):
                if "masscan" in script:
                    try:
                        del script['masscan']['raw']
                    except KeyError:
                        pass
        if not flt_params.ipsasnumbers:
            if 'traces' in rec:
                for trace in rec['traces']:
                    trace['hops'].sort(key=lambda x: x['ttl'])
                    for hop in trace['hops']:
                        hop['ipaddr'] = utils.force_int2ip(hop['ipaddr'])
        # Comma before every record except the first: no trailing comma.
        yield "%s\t%s" % ('' if i == 0 else ',\n',
                          json.dumps(rec, default=utils.serialize))
        check = db.view.cmp_schema_version_host(rec)
        if check:
            version_mismatch[check] = version_mismatch.get(check, 0) + 1
        # XXX-WORKAROUND-PGSQL
        if i + 1 >= flt_params.limit:
            break
    if flt_params.callback is None:
        yield "\n]\n"
    else:
        yield "\n]);\n"
    # 1: records older than the current schema; -1: records newer.
    messages = {
        1: lambda count: ("%d document%s displayed %s out-of-date. Please run "
                          "the following command: 'ivre scancli "
                          "--update-schema;" % (count,
                                                's' if count > 1 else '',
                                                'are' if count > 1 else 'is')),
        -1: lambda count: ('%d document%s displayed ha%s been inserted by '
                           'a more recent version of IVRE. Please update '
                           'IVRE!' % (count, 's' if count > 1 else '',
                                      've' if count > 1 else 's')),
    }
    for mismatch, count in viewitems(version_mismatch):
        message = messages[mismatch](count)
        if flt_params.callback is not None:
            yield webutils.js_alert(
                "version-mismatch-%d" % ((mismatch + 1) // 2),
                "warning", message)
        utils.LOGGER.warning(message)
def main():
    """Command-line entry point for the passive database tool.

    Builds the option parser (argparse, with an optparse fallback for
    old interpreters), handles administrative actions (--init,
    --ensure-indexes, --update-schema, --dnsbl-update), selects a
    display function among the module-level ``disp_recs_*`` helpers,
    then runs it on the whole filter or once per requested address.

    """
    global baseflt
    if USING_ARGPARSE:
        parser = argparse.ArgumentParser(
            description='Access and query the passive database.',
            parents=[db.passive.argparser, utils.CLI_ARGPARSER],
        )
    else:
        # optparse fallback: emulate the argparse API used below.
        parser = optparse.OptionParser(
            description='Access and query the passive database.',
        )
        for args, kargs in chain(db.passive.argparser.args,
                                 utils.CLI_ARGPARSER):
            parser.add_option(*args, **kargs)
        parser.parse_args_orig = parser.parse_args

        def my_parse_args():
            # Stash positional arguments as the 'ips' attribute, as
            # argparse would have done.
            res = parser.parse_args_orig()
            res[0].ensure_value('ips', res[1])
            return res[0]

        parser.parse_args = my_parse_args
        parser.add_argument = parser.add_option
    baseflt = db.passive.flt_empty
    disp_recs = disp_recs_std
    # display modes
    parser.add_argument('--tail', metavar='COUNT', type=int,
                        help='Output latest COUNT results.')
    parser.add_argument('--tailnew', metavar='COUNT', type=int,
                        help='Output latest COUNT new results.')
    parser.add_argument('--tailf', action='store_true',
                        help='Output continuously latest results.')
    parser.add_argument('--tailfnew', action='store_true',
                        help='Output continuously latest results.')
    parser.add_argument('--top', metavar='FIELD / ~FIELD',
                        help='Output most common (least common: ~) values for '
                        'FIELD, by default 10, use --limit to change that, '
                        '--limit 0 means unlimited.')
    parser.add_argument('--dnsbl-update', action='store_true',
                        help='Update the current database with DNS Blacklist')
    if USING_ARGPARSE:
        parser.add_argument('ips', nargs='*',
                            help='Display results for specified IP addresses'
                            ' or ranges.')
    args = parser.parse_args()
    baseflt = db.passive.parse_args(args, baseflt)
    # Administrative actions: each one exits once done.
    if args.init:
        if os.isatty(sys.stdin.fileno()):
            # Interactive session: ask for confirmation first.
            sys.stdout.write(
                'This will remove any passive information in your '
                'database. Process ? [y/N] '
            )
            ans = input()
            if ans.lower() != 'y':
                exit(0)
        db.passive.init()
        exit(0)
    if args.ensure_indexes:
        if os.isatty(sys.stdin.fileno()):
            sys.stdout.write(
                'This will lock your database. Process ? [y/N] '
            )
            ans = input()
            if ans.lower() != 'y':
                exit(0)
        db.passive.ensure_indexes()
        exit(0)
    if args.update_schema:
        db.passive.migrate_schema(None)
        exit(0)
    if args.dnsbl_update:
        db.passive.update_dns_blacklist()
        exit(0)
    # Display-mode selection: the first matching option wins.
    if args.short:
        disp_recs = disp_recs_short
    elif args.distinct is not None:
        disp_recs = functools.partial(disp_recs_distinct, args.distinct)
    elif args.json:
        disp_recs = disp_recs_json
    elif args.top is not None:
        disp_recs = disp_recs_top(args.top)
    elif args.tail is not None:
        disp_recs = disp_recs_tail(args.tail)
    elif args.tailnew is not None:
        disp_recs = disp_recs_tailnew(args.tailnew)
    elif args.tailf:
        disp_recs = disp_recs_tailf()
    elif args.tailfnew:
        disp_recs = disp_recs_tailfnew()
    elif args.count:
        disp_recs = disp_recs_count
    elif args.delete:
        disp_recs = db.passive.remove
    elif args.explain:
        disp_recs = disp_recs_explain
    if args.sort is None:
        sort = []
    else:
        # A leading '~' requests a descending sort on that field.
        sort = [(field[1:], -1) if field.startswith('~') else (field, 1)
                for field in args.sort]
    if not args.ips:
        if not baseflt and not args.limit and disp_recs == disp_recs_std:
            # default to tail -f mode
            disp_recs = disp_recs_tailfnew()
        disp_recs(baseflt, sort, args.limit or db.passive.no_limit,
                  args.skip or 0)
        exit(0)
    first = True
    for a in args.ips:
        # Blank line between the results of successive addresses.
        if first:
            first = False
        else:
            print()
        flt = baseflt.copy()
        if '/' in a:
            # CIDR network
            flt = db.passive.flt_and(flt, db.passive.searchnet(a))
        elif '-' in a:
            # Address range "start-stop"
            a = a.split('-', 1)
            if a[0].isdigit():
                a[0] = int(a[0])
            if a[1].isdigit():
                a[1] = int(a[1])
            flt = db.passive.flt_and(flt, db.passive.searchrange(a[0], a[1]))
        else:
            # Single address; a bare integer is converted to dotted form.
            if a.isdigit():
                a = utils.force_int2ip(int(a))
            flt = db.passive.flt_and(flt, db.passive.searchhost(a))
        disp_recs(flt, sort, args.limit or db.passive.no_limit,
                  args.skip or 0)
def main():
    """Command-line entry point for the passive database tool (older
    variant: display helpers take only a filter, ranges may also be
    written ``start:stop``).

    Builds the option parser (argparse, or an optparse fallback),
    handles --init / --ensure-indexes, picks one of the module-level
    ``disp_recs_*`` display helpers, then runs it on the base filter or
    once per requested address.

    """
    global baseflt
    if USING_ARGPARSE:
        parser = argparse.ArgumentParser(
            description='Access and query the passive database.',
            parents=[db.passive.argparser],
        )
    else:
        # optparse fallback: emulate the argparse API used below.
        parser = optparse.OptionParser(
            description='Access and query the passive database.',
        )
        for args, kargs in db.passive.argparser.args:
            parser.add_option(*args, **kargs)
        parser.parse_args_orig = parser.parse_args

        def my_parse_args():
            # Expose positional arguments as the 'ips' attribute, as
            # argparse would have done.
            res = parser.parse_args_orig()
            res[0].ensure_value('ips', res[1])
            return res[0]

        parser.parse_args = my_parse_args
        parser.add_argument = parser.add_option
    baseflt = db.passive.flt_empty
    disp_recs = disp_recs_std
    # DB
    parser.add_argument('--init', '--purgedb', action='store_true',
                        help='Purge or create and initialize the database.')
    parser.add_argument('--ensure-indexes', action='store_true',
                        help='Create missing indexes (will lock the '
                        'database).')
    # display modes
    parser.add_argument('--short', action='store_true',
                        help='Output only IP addresses, one per line.')
    parser.add_argument('--tail', metavar='COUNT', type=int,
                        help='Output latest COUNT results.')
    parser.add_argument('--tailnew', metavar='COUNT', type=int,
                        help='Output latest COUNT new results.')
    parser.add_argument('--tailf', action='store_true',
                        help='Output continuously latest results.')
    parser.add_argument('--tailfnew', action='store_true',
                        help='Output continuously latest results.')
    parser.add_argument('--count', action='store_true',
                        help='Count matched results.')
    parser.add_argument('--explain', action='store_true',
                        help='MongoDB specific: .explain() the query.')
    parser.add_argument('--distinct', metavar='FIELD',
                        help='Output only unique FIELD part of the '
                        'results, one per line.')
    parser.add_argument('--delete', action='store_true',
                        help='DELETE the matched results instead of '
                        'displaying them.')
    if USING_ARGPARSE:
        parser.add_argument('ips', nargs='*',
                            help='Display results for specified IP addresses'
                            ' or ranges.')
    args = parser.parse_args()
    baseflt = db.passive.parse_args(args, baseflt)
    # Administrative actions exit once done.
    if args.init:
        if os.isatty(sys.stdin.fileno()):
            # Interactive session: ask for confirmation first.
            sys.stdout.write(
                'This will remove any passive information in your '
                'database. Process ? [y/N] ')
            ans = input()
            if ans.lower() != 'y':
                exit(0)
        db.passive.init()
        exit(0)
    if args.ensure_indexes:
        if os.isatty(sys.stdin.fileno()):
            sys.stdout.write('This will lock your database. Process ? [y/N] ')
            ans = input()
            if ans.lower() != 'y':
                exit(0)
        db.passive.ensure_indexes()
        exit(0)
    # Display-mode selection: first matching option wins.
    if args.short:
        disp_recs = disp_recs_short
    elif args.distinct is not None:
        disp_recs = functools.partial(disp_recs_distinct, args.distinct)
    elif args.tail is not None:
        disp_recs = disp_recs_tail(args.tail)
    elif args.tailnew is not None:
        disp_recs = disp_recs_tailnew(args.tailnew)
    elif args.tailf:
        disp_recs = disp_recs_tailf()
    elif args.tailfnew:
        disp_recs = disp_recs_tailfnew()
    elif args.count:
        disp_recs = disp_recs_count
    elif args.delete:
        disp_recs = db.passive.remove
    elif args.explain:
        disp_recs = disp_recs_explain
    if not args.ips:
        if not baseflt and disp_recs == disp_recs_std:
            # default to tail -f mode
            disp_recs = disp_recs_tailfnew()
        disp_recs(baseflt)
        exit(0)
    first = True
    for a in args.ips:
        # Blank line between successive addresses' results.
        if first:
            first = False
        else:
            print()
        flt = baseflt.copy()
        if ':' in a:
            # Address range "start:stop"
            a = a.split(':', 1)
            if a[0].isdigit():
                a[0] = int(a[0])
            if a[1].isdigit():
                a[1] = int(a[1])
            flt = db.passive.flt_and(flt, db.passive.searchrange(a[0], a[1]))
        elif '-' in a:
            # Address range "start-stop"
            a = a.split('-', 1)
            if a[0].isdigit():
                a[0] = int(a[0])
            if a[1].isdigit():
                a[1] = int(a[1])
            flt = db.passive.flt_and(flt, db.passive.searchrange(a[0], a[1]))
        elif '/' in a:
            # CIDR network
            flt = db.passive.flt_and(flt, db.passive.searchnet(a))
        else:
            # Single address; a bare integer is converted to dotted form.
            if a.isdigit():
                a = utils.force_int2ip(int(a))
            flt = db.passive.flt_and(flt, db.passive.searchhost(a))
        disp_recs(flt)
def get_nmap_action(action):
    """Stream the result of *action* over the Nmap database as JSON
    (or JSONP when a ``callback`` parameter is given).

    :param str action: one of "timeline", "coordinates",
        "countopenports", "ipsports", "onlyips", "diffcats"; each
        branch sets ``result`` (iterable of records), ``count`` and a
        ``r2res`` converter turning a record into its JSON-ready form.

    Generator yielding response-body chunks.

    """
    flt_params = get_nmap_base()
    preamble = "[\n"
    postamble = "]\n"
    r2res = lambda x: x
    if action == "timeline":
        # get_open_port_count is a faster backend-specific path when
        # available; fall back to a plain get() otherwise.
        if hasattr(db.nmap, "get_open_port_count"):
            result = list(
                db.nmap.get_open_port_count(
                    flt_params.flt,
                    archive=flt_params.archive,
                ))
            count = len(result)
        else:
            result = db.nmap.get(
                flt_params.flt, archive=flt_params.archive,
                fields=['addr', 'starttime', 'openports.count'])
            count = result.count()
        # Optional "modulo" parameter folds timestamps (e.g. for a
        # periodic timeline view).
        if request.params.get("modulo") is None:
            r2time = lambda r: int(utils.datetime2timestamp(r['starttime']))
        else:
            r2time = lambda r: (int(utils.datetime2timestamp(r['starttime']))
                                % int(request.params.get("modulo")))
        if flt_params.ipsasnumbers:
            r2res = lambda r: [r2time(r), utils.force_ip2int(r['addr']),
                               r['openports']['count']]
        else:
            r2res = lambda r: [r2time(r), utils.force_int2ip(r['addr']),
                               r['openports']['count']]
    elif action == "coordinates":
        # GeoJSON-style output for the map view.
        preamble = '{"type": "GeometryCollection", "geometries": [\n'
        postamble = ']}\n'
        result = list(
            db.nmap.getlocations(flt_params.flt,
                                 archive=flt_params.archive))
        count = len(result)
        r2res = lambda r: {
            "type": "Point",
            "coordinates": r['_id'],
            "properties": {"count": r['count']},
        }
    elif action == "countopenports":
        if hasattr(db.nmap, "get_open_port_count"):
            result = db.nmap.get_open_port_count(flt_params.flt,
                                                 archive=flt_params.archive)
        else:
            result = db.nmap.get(flt_params.flt,
                                 archive=flt_params.archive,
                                 fields=['addr', 'openports.count'])
        if hasattr(result, "count"):
            count = result.count()
        else:
            count = db.nmap.count(flt_params.flt,
                                  archive=flt_params.archive,
                                  fields=['addr', 'openports.count'])
        if flt_params.ipsasnumbers:
            r2res = lambda r: [utils.force_ip2int(r['addr']),
                               r['openports']['count']]
        else:
            r2res = lambda r: [utils.force_int2ip(r['addr']),
                               r['openports']['count']]
    elif action == "ipsports":
        if hasattr(db.nmap, "get_ips_ports"):
            result = list(
                db.nmap.get_ips_ports(flt_params.flt,
                                      archive=flt_params.archive))
            count = sum(len(host.get('ports', [])) for host in result)
        else:
            result = db.nmap.get(
                flt_params.flt, archive=flt_params.archive,
                fields=['addr', 'ports.port', 'ports.state_state'])
            count = sum(len(host.get('ports', [])) for host in result)
            # Counting consumed the cursor; rewind before serialization.
            result.rewind()
        if flt_params.ipsasnumbers:
            r2res = lambda r: [utils.force_ip2int(r['addr']),
                               [[p['port'], p['state_state']]
                                for p in r.get('ports', [])
                                if 'state_state' in p]]
        else:
            r2res = lambda r: [utils.force_int2ip(r['addr']),
                               [[p['port'], p['state_state']]
                                for p in r.get('ports', [])
                                if 'state_state' in p]]
    elif action == "onlyips":
        result = db.nmap.get(flt_params.flt, archive=flt_params.archive,
                             fields=['addr'])
        if hasattr(result, "count"):
            count = result.count()
        else:
            count = db.nmap.count(flt_params.flt,
                                  archive=flt_params.archive,
                                  fields=['addr'])
        if flt_params.ipsasnumbers:
            r2res = lambda r: utils.force_ip2int(r['addr'])
        else:
            r2res = lambda r: utils.force_int2ip(r['addr'])
    elif action == "diffcats":
        if request.params.get("onlydiff"):
            output = db.nmap.diff_categories(request.params.get("cat1"),
                                             request.params.get("cat2"),
                                             flt=flt_params.flt,
                                             include_both_open=False)
        else:
            output = db.nmap.diff_categories(request.params.get("cat1"),
                                             request.params.get("cat2"),
                                             flt=flt_params.flt)
        count = 0
        result = {}
        # Group (port, value) pairs per address.
        if flt_params.ipsasnumbers:
            for res in output:
                result.setdefault(res["addr"], []).append([res['port'],
                                                           res['value']])
                count += 1
        else:
            for res in output:
                result.setdefault(utils.int2ip(res["addr"]),
                                  []).append([res['port'], res['value']])
                count += 1
        result = viewitems(result)
    if flt_params.callback is not None:
        if count >= config.WEB_WARN_DOTS_COUNT:
            # Let the user abort before rendering a huge dot cloud.
            yield (
                'if(confirm("You are about to ask your browser to display %d '
                'dots, which is a lot and might slow down, freeze or crash '
                'your browser. Do you want to continue?")) {\n' % count)
        yield '%s(\n' % flt_params.callback
    yield preamble
    # hack to avoid a trailing comma
    result = iter(result)
    try:
        rec = next(result)
    except StopIteration:
        pass
    else:
        yield json.dumps(r2res(rec))
        for rec in result:
            yield ",\n" + json.dumps(r2res(rec))
        yield "\n"
    yield postamble
    if flt_params.callback is not None:
        yield ");"
        if count >= config.WEB_WARN_DOTS_COUNT:
            yield '}\n'
    else:
        yield "\n"
def main():
    """Command-line entry point for the passive database tool (variant
    without the ``--dnsbl-update`` action).

    Builds the option parser (argparse, with an optparse fallback),
    handles administrative actions (--init, --ensure-indexes,
    --update-schema), selects a display function among the module-level
    ``disp_recs_*`` helpers, then runs it on the base filter or once
    per requested address.

    """
    global baseflt
    if USING_ARGPARSE:
        parser = argparse.ArgumentParser(
            description='Access and query the passive database.',
            parents=[db.passive.argparser, utils.CLI_ARGPARSER],
        )
    else:
        # optparse fallback: emulate the argparse API used below.
        parser = optparse.OptionParser(
            description='Access and query the passive database.',
        )
        for args, kargs in chain(db.passive.argparser.args,
                                 utils.CLI_ARGPARSER):
            parser.add_option(*args, **kargs)
        parser.parse_args_orig = parser.parse_args

        def my_parse_args():
            # Expose positional arguments as the 'ips' attribute, as
            # argparse would have done.
            res = parser.parse_args_orig()
            res[0].ensure_value('ips', res[1])
            return res[0]

        parser.parse_args = my_parse_args
        parser.add_argument = parser.add_option
    baseflt = db.passive.flt_empty
    disp_recs = disp_recs_std
    # display modes
    parser.add_argument('--tail', metavar='COUNT', type=int,
                        help='Output latest COUNT results.')
    parser.add_argument('--tailnew', metavar='COUNT', type=int,
                        help='Output latest COUNT new results.')
    parser.add_argument('--tailf', action='store_true',
                        help='Output continuously latest results.')
    parser.add_argument('--tailfnew', action='store_true',
                        help='Output continuously latest results.')
    parser.add_argument('--top', metavar='FIELD / ~FIELD',
                        help='Output most common (least common: ~) values for '
                        'FIELD, by default 10, use --limit to change that, '
                        '--limit 0 means unlimited.')
    if USING_ARGPARSE:
        parser.add_argument('ips', nargs='*',
                            help='Display results for specified IP addresses'
                            ' or ranges.')
    args = parser.parse_args()
    baseflt = db.passive.parse_args(args, baseflt)
    # Administrative actions exit once done.
    if args.init:
        if os.isatty(sys.stdin.fileno()):
            # Interactive session: ask for confirmation first.
            sys.stdout.write(
                'This will remove any passive information in your '
                'database. Process ? [y/N] ')
            ans = input()
            if ans.lower() != 'y':
                exit(0)
        db.passive.init()
        exit(0)
    if args.ensure_indexes:
        if os.isatty(sys.stdin.fileno()):
            sys.stdout.write('This will lock your database. Process ? [y/N] ')
            ans = input()
            if ans.lower() != 'y':
                exit(0)
        db.passive.ensure_indexes()
        exit(0)
    if args.update_schema:
        db.passive.migrate_schema(None)
        exit(0)
    # Display-mode selection: first matching option wins.
    if args.short:
        disp_recs = disp_recs_short
    elif args.distinct is not None:
        disp_recs = functools.partial(disp_recs_distinct, args.distinct)
    elif args.json:
        disp_recs = disp_recs_json
    elif args.top is not None:
        disp_recs = disp_recs_top(args.top)
    elif args.tail is not None:
        disp_recs = disp_recs_tail(args.tail)
    elif args.tailnew is not None:
        disp_recs = disp_recs_tailnew(args.tailnew)
    elif args.tailf:
        disp_recs = disp_recs_tailf()
    elif args.tailfnew:
        disp_recs = disp_recs_tailfnew()
    elif args.count:
        disp_recs = disp_recs_count
    elif args.delete:
        disp_recs = db.passive.remove
    elif args.explain:
        disp_recs = disp_recs_explain
    if args.sort is None:
        sort = []
    else:
        # A leading '~' requests a descending sort on that field.
        sort = [(field[1:], -1) if field.startswith('~') else (field, 1)
                for field in args.sort]
    if not args.ips:
        if not baseflt and not args.limit and disp_recs == disp_recs_std:
            # default to tail -f mode
            disp_recs = disp_recs_tailfnew()
        disp_recs(baseflt, sort, args.limit or db.passive.no_limit,
                  args.skip or 0)
        exit(0)
    first = True
    for a in args.ips:
        # Blank line between successive addresses' results.
        if first:
            first = False
        else:
            print()
        flt = baseflt.copy()
        if '/' in a:
            # CIDR network
            flt = db.passive.flt_and(flt, db.passive.searchnet(a))
        elif '-' in a:
            # Address range "start-stop"
            a = a.split('-', 1)
            if a[0].isdigit():
                a[0] = int(a[0])
            if a[1].isdigit():
                a[1] = int(a[1])
            flt = db.passive.flt_and(flt, db.passive.searchrange(a[0], a[1]))
        else:
            # Single address; a bare integer is converted to dotted form.
            if a.isdigit():
                a = utils.force_int2ip(int(a))
            flt = db.passive.flt_and(flt, db.passive.searchhost(a))
        disp_recs(flt, sort, args.limit or db.passive.no_limit,
                  args.skip or 0)
def main():
    """Command-line entry point for the active-scans (Nmap) database
    tool.

    Builds the option parser (argparse if importable, optparse
    otherwise), handles administrative actions (--init,
    --ensure-indexes, --top), then binds ``displayfunction`` to the
    output mode chosen on the command line (--json, --honeyd,
    --nmap-xml, --graphroute, --explain, --delete, archive moves,
    --csv, or the default host display) and runs it on the matching
    records.

    """
    try:
        import argparse
        parser = argparse.ArgumentParser(
            description='Access and query the active scans database.',
            parents=[db.db.nmap.argparser])
        USING_ARGPARSE = True
    except ImportError:
        # Very old interpreter: fall back to optparse and emulate the
        # argparse API used below.
        import optparse
        parser = optparse.OptionParser(
            description='Access and query the active scans database.')
        for args, kargs in db.db.nmap.argparser.args:
            parser.add_option(*args, **kargs)
        parser.parse_args_orig = parser.parse_args
        parser.parse_args = lambda: parser.parse_args_orig()[0]
        parser.add_argument = parser.add_option
        USING_ARGPARSE = False
    parser.add_argument('--init', '--purgedb', action='store_true',
                        help='Purge or create and initialize the database.')
    parser.add_argument('--ensure-indexes', action='store_true',
                        help='Create missing indexes (will lock the '
                        'database).')
    parser.add_argument('--short', action='store_true',
                        help='Output only IP addresses, one per line.')
    parser.add_argument('--json', action='store_true',
                        help='Output results as JSON documents.')
    parser.add_argument('--no-screenshots', action='store_true',
                        help='When used with --json, do not output '
                        'screenshots data.')
    parser.add_argument('--honeyd', action='store_true',
                        help='Output results as a honeyd config file.')
    parser.add_argument('--nmap-xml', action='store_true',
                        help='Output results as a nmap XML output file.')
    # rtgraph3d output needs D-Bus support.
    parser.add_argument(
        '--graphroute',
        choices=["dot", "rtgraph3d"] if graphroute.HAVE_DBUS else ["dot"],
        help='Create a graph from traceroute results. '
        'dot: output result as Graphviz "dot" format to stdout.'
        '%s' % (" rtgraph3d: send results to rtgraph3d."
                if graphroute.HAVE_DBUS else ""))
    parser.add_argument('--graphroute-cluster', choices=['AS', 'Country'],
                        help='Cluster IP according to the specified criteria'
                        '(only for --graphroute dot)')
    if graphroute.HAVE_DBUS:
        parser.add_argument('--graphroute-dont-reset', action='store_true',
                            help='Do NOT reset graph (only for '
                            '--graphroute rtgraph3d)')
    parser.add_argument('--graphroute-include', choices=['last-hop', 'target'],
                        help='How far should graphroute go? Default if to '
                        'exclude the last hop and the target for each result.')
    parser.add_argument('--count', action='store_true',
                        help='Count matched results.')
    parser.add_argument('--explain', action='store_true',
                        help='MongoDB specific: .explain() the query.')
    parser.add_argument('--distinct', metavar='FIELD',
                        help='Output only unique FIELD part of the '
                        'results, one per line.')
    parser.add_argument('--top', metavar='FIELD / ~FIELD',
                        help='Output most common (least common: ~) values for '
                        'FIELD, by default 10, use --limit to change that, '
                        '--limit 0 means unlimited.')
    parser.add_argument('--delete', action='store_true',
                        help='DELETE the matched results instead of '
                        'displaying them.')
    parser.add_argument('--move-to-archives', action='store_true',
                        help='ARCHIVE the matched results instead of '
                        'displaying them (i.e., move the results to '
                        'the archive collections).')
    parser.add_argument('--move-from-archives', action='store_true',
                        help='UNARCHIVE the matched results instead of '
                        'displaying them (i.e., move the results from '
                        'the archive collections to the "fresh" results '
                        'collections).')
    parser.add_argument('--update-schema', action='store_true',
                        help='update (host) schema. Use with --version to '
                        'specify your current version and run twice, once '
                        'with --archive.')
    parser.add_argument('--csv', metavar='TYPE',
                        help='Output result as a CSV file',
                        choices=['ports', 'hops'])
    parser.add_argument('--csv-separator', metavar='SEPARATOR',
                        default=",",
                        help='Select separator for --csv output')
    parser.add_argument('--csv-add-infos', action='store_true',
                        help="Include country_code and as_number"
                        "fields to CSV file")
    parser.add_argument('--csv-na-str', default="NA",
                        help='String to use for "Not Applicable" value '
                        '(defaults to "NA")')
    # optparse cannot handle nargs='+', hence the two variants.
    if USING_ARGPARSE:
        parser.add_argument('--sort', metavar='FIELD / ~FIELD', nargs='+',
                            help='Sort results according to FIELD; use ~FIELD '
                            'to reverse sort order.')
    else:
        parser.add_argument('--sort', metavar='FIELD / ~FIELD',
                            help='Sort results according to FIELD; use ~FIELD '
                            'to reverse sort order.')
    parser.add_argument('--limit', type=int,
                        help='Ouput at most LIMIT results.')
    parser.add_argument('--skip', type=int,
                        help='Skip first SKIP results.')
    args = parser.parse_args()
    out = sys.stdout
    hostfilter = db.db.nmap.parse_args(args)
    sortkeys = []
    if args.init:
        if os.isatty(sys.stdin.fileno()):
            # Interactive session: ask for confirmation first.
            sys.stdout.write(
                'This will remove any scan result in your database. '
                'Process ? [y/N] ')
            ans = input()
            if ans.lower() != 'y':
                sys.exit(-1)
        db.db.nmap.init()
        sys.exit(0)
    if args.ensure_indexes:
        if os.isatty(sys.stdin.fileno()):
            sys.stdout.write('This will lock your database. '
                             'Process ? [y/N] ')
            ans = input()
            if ans.lower() != 'y':
                sys.exit(-1)
        db.db.nmap.ensure_indexes()
        sys.exit(0)
    if args.top is not None:
        # Leading '!', '-' or '~' requests least-common values.
        field, least = ((args.top[1:], True)
                        if args.top[:1] in '!-~' else
                        (args.top, False))
        # --limit 0 means unlimited; no --limit defaults to 10.
        topnbr = {0: None, None: 10}.get(args.limit, args.limit)
        for entry in db.db.nmap.topvalues(field, flt=hostfilter,
                                          topnbr=topnbr, least=least,
                                          archive=args.archives):
            if isinstance(entry['_id'], (list, tuple)):
                # Flatten compound keys into a printable string.
                sep = ' / ' if isinstance(entry['_id'], tuple) else ', '
                if entry['_id']:
                    if isinstance(entry['_id'][0], (list, tuple)):
                        entry['_id'] = sep.join(
                            '/'.join(str(subelt) for subelt in elt)
                            if elt else "None"
                            for elt in entry['_id'])
                    else:
                        entry['_id'] = sep.join(str(elt)
                                                for elt in entry['_id'])
                else:
                    entry['_id'] = "None"
            print("%(_id)s: %(count)d" % entry)
        sys.exit(0)
    if args.sort is not None:
        # A leading '~' requests a descending sort on that field.
        sortkeys = [(field[1:], -1) if field.startswith('~') else (field, 1)
                    for field in args.sort]
    if args.short:
        for val in db.db.nmap.distinct("addr", flt=hostfilter, sort=sortkeys,
                                       limit=args.limit, skip=args.skip,
                                       archive=args.archives):
            out.write(utils.force_int2ip(val) + '\n')
        sys.exit(0)
    elif args.distinct is not None:
        for val in db.db.nmap.distinct(args.distinct, flt=hostfilter,
                                       sort=sortkeys, limit=args.limit,
                                       skip=args.skip,
                                       archive=args.archives):
            out.write(str(val) + '\n')
        sys.exit(0)
    # From here on, the chosen output mode is bound to displayfunction,
    # called once on the result cursor at the end of this function.
    if args.json:
        import json

        def displayfunction(x):
            # Pretty-print only when writing to a terminal.
            if os.isatty(sys.stdout.fileno()):
                indent = 4
            else:
                indent = None
            for h in x:
                for fld in ['_id', 'scanid']:
                    try:
                        del h[fld]
                    except KeyError:
                        pass
                for port in h.get('ports', []):
                    if args.no_screenshots:
                        for fname in ['screenshot', 'screendata']:
                            if fname in port:
                                del port[fname]
                    elif 'screendata' in port:
                        # Binary data must be base64-encoded for JSON.
                        port['screendata'] = utils.encode_b64(
                            db.db.nmap.from_binary(port['screendata']))
                    for script in port.get('scripts', []):
                        if 'masscan' in script and 'raw' in script['masscan']:
                            script['masscan']['raw'] = utils.encode_b64(
                                db.db.nmap.from_binary(
                                    script['masscan']['raw']))
                print(json.dumps(h, indent=indent,
                                 default=db.db.nmap.serialize))
    elif args.honeyd:
        def displayfunction(x):
            display_honeyd_preamble(out)
            honeyd_routes = {}
            honeyd_entries = set()
            for h in x:
                honeyd_routes, honeyd_entries = display_honeyd_conf(
                    h, honeyd_routes, honeyd_entries, out)
            display_honeyd_epilogue(honeyd_routes, honeyd_entries, out)
    elif args.nmap_xml:
        def displayfunction(x):
            display_xml_preamble(out=out)
            # Use the original scan information only when the result
            # comes from a single scan.
            if x.count() == 1 and not isinstance(x[0]['scanid'], list):
                scan = db.db.nmap.getscan(x[0]['scanid'],
                                          archive=args.archives)
                if 'scaninfos' in scan and scan['scaninfos']:
                    for k in scan['scaninfos'][0]:
                        scan['scaninfo.%s' % k] = scan['scaninfos'][0][k]
                    del scan['scaninfos']
            else:
                scan = {}
            display_xml_scan(scan, out=out)
            for h in x:
                display_xml_host(h, out=out)
            display_xml_epilogue(out=out)
    elif args.graphroute is not None:
        def displayfunction(cursor):
            graph, entry_nodes = graphroute.buildgraph(
                cursor,
                include_last_hop=args.graphroute_include == "last-hop",
                include_target=args.graphroute_include == "target",
            )
            if args.graphroute == "dot":
                if args.graphroute_cluster == "AS":
                    def cluster(ipaddr):
                        # Cluster nodes by autonomous system.
                        res = db.db.data.as_byip(ipaddr)
                        if res is None:
                            return
                        return (res['as_num'],
                                "%(as_num)d\n[%(as_name)s]" % res)
                elif args.graphroute_cluster == "Country":
                    def cluster(ipaddr):
                        # Cluster nodes by country.
                        res = db.db.data.country_byip(ipaddr)
                        if res is None:
                            return
                        return (res['country_code'],
                                "%(country_code)s - %(country_name)s" % res)
                else:
                    cluster = None
                graphroute.writedotgraph(graph, sys.stdout, cluster=cluster)
            elif args.graphroute == "rtgraph3d":
                g = graphroute.display3dgraph(
                    graph, reset_world=not args.graphroute_dont_reset)
                for n in entry_nodes:
                    g.glow(utils.int2ip(n))
    elif args.explain:
        def displayfunction(x):
            out.write(db.db.nmap.explain(x, indent=4) + '\n')
    elif args.delete:
        def displayfunction(x):
            for h in x:
                db.db.nmap.remove(h, archive=args.archives)
    elif args.move_to_archives:
        # Select the non-archived records, then archive each of them.
        args.archives = False

        def displayfunction(x):
            for h in x:
                db.db.nmap.archive(h)
    elif args.move_from_archives:
        # Select the archived records, then un-archive each of them.
        args.archives = True

        def displayfunction(x):
            for h in x:
                db.db.nmap.archive(h, unarchive=True)
    elif args.csv is not None:
        # Field spec: maps output field name to either a converter
        # callable, True (use value as-is) or a nested spec.
        fields = {
            "ports": OrderedDict([
                ["addr", utils.int2ip],
                ["ports", OrderedDict([["port", str],
                                       ["state_state", True]])],
            ]),
            "hops": OrderedDict([
                ["addr", utils.int2ip],
                ["traces", OrderedDict([
                    ["hops", OrderedDict([
                        ["ipaddr", utils.int2ip],
                        ["ttl", str],
                        ["rtt", lambda x: (args.csv_na_str
                                           if x == '--' else str(x))],
                    ])],
                ])],
            ]),
            "rtt": OrderedDict([
                ["addr", utils.int2ip],
                ["traces", OrderedDict([
                    ["hops", OrderedDict([
                        ["rtt", lambda x: (args.csv_na_str
                                           if x == '--' else str(x))],
                    ])],
                ])],
            ]),
        }.get(args.csv)
        if fields is None:
            parser.error("Invalid choice for --csv.")
        if args.csv_add_infos:
            fields['infos'] = OrderedDict([
                ["country_code", True],
                ["city", True],
                ["as_num", str],
            ])

        def displayfunction(x):
            out.write(args.csv_separator.join(utils.fields2csv_head(fields)))
            out.write('\n')
            for h in x:
                displayhost_csv(fields, args.csv_separator, args.csv_na_str,
                                h, out=out)
    else:
        def displayfunction(cursor):
            nmapout.displayhosts(cursor, out=out)
    if args.update_schema:
        db.db.nmap.migrate_schema(args.archives, args.version)
    elif args.count:
        out.write(
            str(db.db.nmap.count(hostfilter, archive=args.archives)) + '\n')
    else:
        # Build the get() keyword arguments from the options actually
        # given, then hand the cursor to the chosen display function.
        kargs = {"archive": args.archives}
        if args.limit is not None:
            kargs["limit"] = args.limit
        if args.skip is not None:
            kargs["skip"] = args.skip
        if sortkeys:
            kargs["sort"] = sortkeys
        cursor = db.db.nmap.get(hostfilter, **kargs)
        displayfunction(cursor)
    sys.exit(0)
def display_xml_host(h, out=sys.stdout):
    """Write host record *h* to *out* as an Nmap-XML ``<host>``
    element (status, addresses, hostnames, ports & scripts, host
    scripts, traceroute results).

    :param dict h: host record from the scans database
    :param out: writable file-like object (defaults to stdout)

    """
    out.write('<host')
    for k in ["timedout", "timeoutcounter"]:
        if k in h:
            out.write(' %s=%s' % (k, saxutils.quoteattr(h[k])))
    for k in ["starttime", "endtime"]:
        if k in h:
            # Nmap XML uses Unix timestamps for start/end times.
            out.write(' %s=%s' % (k,
                                  saxutils.quoteattr(h[k].strftime('%s'))))
    out.write('>')
    if 'state' in h:
        out.write('<status state="%s"' % h['state'])
        for k in ["reason", "reason_ttl"]:
            kk = "state_%s" % k
            if kk in h:
                out.write(' %s="%s"' % (k, h[kk]))
        out.write('/>')
    out.write('\n')
    if 'addr' in h:
        out.write('<address addr="%s" addrtype="ipv4"/>\n' %
                  utils.force_int2ip(h['addr']))
    for t in h.get('addresses', []):
        for a in h['addresses'][t]:
            out.write('<address addr="%s" addrtype="%s"/>\n' % (a, t))
    if 'hostnames' in h:
        out.write('<hostnames>\n')
        for hostname in h['hostnames']:
            out.write('<hostname')
            for k in ['name', 'type']:
                if k in hostname:
                    out.write(' %s="%s"' % (k, hostname[k]))
            out.write('/>\n')
        out.write('</hostnames>\n')
    out.write('<ports>')
    for state, counts in viewitems(h.get('extraports', {})):
        out.write('<extraports state="%s" count="%d">\n' %
                  (state, counts['total']))
        for reason, count in viewitems(counts['reasons']):
            out.write('<extrareasons reason="%s" count="%d"/>\n' %
                      (reason, count))
        out.write('</extraports>\n')
    for p in h.get('ports', []):
        if p.get('port') == -1:
            # Pseudo-port -1 carries the host scripts; emit them later
            # in the <hostscript> section.
            h['scripts'] = p['scripts']
            continue
        out.write('<port')
        if 'protocol' in p:
            out.write(' protocol="%s"' % p['protocol'])
        if 'port' in p:
            out.write(' portid="%s"' % p['port'])
        out.write('><state')
        for k in ['state', 'reason', 'reason_ttl']:
            kk = 'state_%s' % k
            if kk in p:
                out.write(' %s=%s' % (k, saxutils.quoteattr(str(p[kk]))))
        out.write('/>')
        if 'service_name' in p:
            out.write('<service name="%s"' % p['service_name'])
            for k in ['servicefp', 'product', 'version', 'extrainfo',
                      'ostype', 'method', 'conf']:
                kk = "service_%s" % k
                if kk in p:
                    # Only quoteattr() string values; others (e.g.
                    # numbers) are interpolated directly.
                    if isinstance(p[kk], basestring):
                        out.write(' %s=%s' % (k, saxutils.quoteattr(p[kk])))
                    else:
                        out.write(' %s="%s"' % (k, p[kk]))
            # TODO: CPE
            out.write('></service>')
        for s in p.get('scripts', []):
            display_xml_script(s, out=out)
        out.write('</port>\n')
    out.write('</ports>\n')
    if 'scripts' in h:
        out.write('<hostscript>')
        for s in h['scripts']:
            display_xml_script(s, out=out)
        out.write('</hostscript>')
    for trace in h.get('traces', []):
        out.write('<trace')
        if 'port' in trace:
            out.write(' port=%s' % (saxutils.quoteattr(str(trace['port']))))
        if 'protocol' in trace:
            out.write(' proto=%s' % (saxutils.quoteattr(trace['protocol'])))
        out.write('>\n')
        # Hops are sorted by TTL, as in genuine Nmap XML output.
        for hop in sorted(trace.get('hops', []), key=lambda hop: hop['ttl']):
            out.write('<hop')
            if 'ttl' in hop:
                out.write(' ttl=%s' % (saxutils.quoteattr(str(hop['ttl']))))
            if 'ipaddr' in hop:
                out.write(' ipaddr=%s' %
                          (saxutils.quoteattr(utils.int2ip(hop['ipaddr']))))
            if 'rtt' in hop:
                out.write(
                    ' rtt=%s' %
                    (saxutils.quoteattr('%.2f' % hop['rtt'] if isinstance(
                        hop['rtt'], float) else hop['rtt'])))
            if 'host' in hop:
                out.write(' host=%s' % (saxutils.quoteattr(hop['host'])))
            out.write('/>\n')
        out.write('</trace>\n')
    out.write('</host>\n')
def disp_recs_short(flt):
    """Print the distinct IP addresses matching *flt*, one per line,
    in dotted-quad form."""
    distinct_addrs = db.passive.distinct('addr', flt=flt)
    for current in distinct_addrs:
        print(utils.force_int2ip(current))
def display_short(db, flt, srt, lmt, skp):
    """Write to stdout the distinct addresses matched by *flt* in
    database *db*, one per line, honoring sort/limit/skip."""
    matching = db.distinct("addr", flt=flt, sort=srt, limit=lmt, skip=skp)
    for address in matching:
        line = utils.force_int2ip(address) + '\n'
        sys.stdout.write(line)