def scan(self, fd, scanners, type, mime, cookie, scores=None, **args): if 'x86 boot sector' in type: try: parts = sk.mmls(fd) except IOError,e: print e return for part in parts: ## Make a unique and sensible name for this partition name = "%s @ 0x%X" % (part[2], part[0]) ## Add new maps for each partition map = CacheManager.AFF4_MANAGER.create_cache_map( fd.case, "%s/%s" % (fd.urn.parser.query, name)) map.write_from(fd.urn, SECTOR_SIZE * part[0], SECTOR_SIZE * part[1]) map.close() ## Now we recursively scan each object fsfd = FileSystem.DBFS(fd.case) new_fd = fsfd.open(inode_id = map.inode_id) try: fs = sk.skfs(new_fd) fs.close() ## Lets add a hint Magic.set_magic(fd.case, inode_id = map.inode_id, mime = "application/filesystem", magic = "Filesystem") except: pass Scanner.scan_inode_distributed(fd.case, map.inode_id, scanners, cookie)
def Callback(mode, packet, connection):
    """Stream reassembler callback.

    Called by the TCP/UDP reassembler in three modes:

    'est'     - connection established: create forward/reverse AFF4 map
                streams (plus .pkt maps pointing back at packet headers)
                and record the connection in the connection_details table.
    'data'    - payload data arrived: map the payload bytes (and the
                packet position) into the appropriate streams.
    'destroy' - connection finished: cross-link, close and re-open the
                streams, then queue them for scanning.

    Relies on module-level state: case, urn_dispatcher, scanners, cookie,
    PYFLAG_REVERSE_STREAM.
    """
    if mode == 'est':
        if 'map' not in connection:
            ## Lookup the urn this packet came from.
            urn = urn_dispatcher[packet.pcap_file_id]
            ip = packet.find_type("IP")

            ## We can only get tcp or udp packets here.
            try:
                tcp = packet.find_type("TCP")
            except AttributeError:
                tcp = packet.find_type("UDP")

            base_urn = "/%s-%s/%s-%s/" % (
                ip.source_addr, ip.dest_addr,
                tcp.source, tcp.dest)

            timestamp = pyaff4.XSDDatetime()
            timestamp.set(packet.ts_sec)

            map_stream = CacheManager.AFF4_MANAGER.create_cache_map(
                case, base_urn + "forward", timestamp = timestamp,
                target = urn)
            connection['map'] = map_stream

            ## These streams are used to point at the start of
            ## each packet header - this helps us get back to
            ## the packet information for each bit of data.
            map_stream_pkt = CacheManager.AFF4_MANAGER.create_cache_map(
                case, base_urn + "forward.pkt", timestamp = timestamp,
                target = urn, inherited = map_stream.urn)
            connection['map.pkt'] = map_stream_pkt

            r_map_stream = CacheManager.AFF4_MANAGER.create_cache_map(
                case, base_urn + "reverse", timestamp = timestamp,
                target = urn, inherited = map_stream.urn)
            connection['reverse']['map'] = r_map_stream

            ## These streams are used to point at the start of
            ## each packet header - this helps us get back to
            ## the packet information for each bit of data.
            r_map_stream_pkt = CacheManager.AFF4_MANAGER.create_cache_map(
                case, base_urn + "reverse.pkt", timestamp = timestamp,
                target = urn, inherited = r_map_stream.urn)
            connection['reverse']['map.pkt'] = r_map_stream_pkt

            ## Add to connection table.
            map_stream.insert_to_table(
                "connection_details",
                dict(reverse = r_map_stream.inode_id,
                     src_ip = ip.src,
                     src_port = tcp.source,
                     dest_ip = ip.dest,
                     dest_port = tcp.dest,
                     _ts_sec = "from_unixtime(%s)" % packet.ts_sec,
                     ))

    elif mode == 'data':
        try:
            tcp = packet.find_type("TCP")
        except AttributeError:
            tcp = packet.find_type("UDP")

        try:
            length = len(tcp.data)
        except (AttributeError, TypeError):
            ## Packet carries no payload - nothing to map.
            return

        urn = urn_dispatcher[packet.pcap_file_id]

        ## NOTE(review): a leftover debugging breakpoint
        ## (if packet.offset==0: pdb.set_trace()) was removed here -
        ## it would have suspended processing in production.

        connection['map'].write_from(urn,
                                     packet.offset + tcp.data_offset,
                                     length)
        connection['map.pkt'].write_from(urn, packet.offset, length)

    elif mode == 'destroy':
        ## Only bother with connections that actually carried data.
        if connection['map'].size > 0 or connection['reverse']['map'].size > 0:
            map_stream = connection['map']
            r_map_stream = connection['reverse']['map']

            map_stream_pkt = connection['map.pkt']
            Magic.set_magic(case, map_stream_pkt.inode_id, "Packet Map")

            r_map_stream_pkt = connection['reverse']['map.pkt']
            Magic.set_magic(case, r_map_stream_pkt.inode_id, "Packet Map")

            ## Cross-link the two halves of the conversation.
            r_map_stream.set_attribute(PYFLAG_REVERSE_STREAM,
                                       map_stream.urn)
            map_stream.set_attribute(PYFLAG_REVERSE_STREAM,
                                     r_map_stream.urn)

            ## Close all the streams.
            r_map_stream_pkt.close()
            map_stream_pkt.close()
            r_map_stream.close()
            map_stream.close()

            ## FIXME - this needs to be done out of process using
            ## the distributed architecture!!!

            ## Open read only versions of these streams for scanning.
            dbfs = FileSystem.DBFS(case)
            map_stream = dbfs.open(inode_id = map_stream.inode_id)
            r_map_stream = dbfs.open(inode_id = r_map_stream.inode_id)

            Scanner.scan_inode_distributed(case, map_stream.inode_id,
                                           scanners, cookie)
            Scanner.scan_inode_distributed(case, r_map_stream.inode_id,
                                           scanners, cookie)