def load(self, mount_point, iosource_name, scanners=None, directory=None):
    ## Ensure that mount point is normalised:
    mount_point = posixpath.normpath(mount_point)
    DBFS.load(self, mount_point, iosource_name)

    ## Just add a single inode:
    inode = "I%s|o0" % iosource_name
    self.VFSCreate(None, inode, "%s/raw_filesystem" % mount_point)

    ## Call the scanners on the new inode
    if scanners:
        scanner_string = ",".join(scanners)
        pdbh = DB.DBO()
        pdbh.insert('jobs',
                    command = 'Scan',
                    arg1 = self.case,
                    arg2 = inode,
                    arg3 = scanner_string,
                    cookie = int(time.time()),
                    _fast = True,
                    )
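## A small, self-contained sketch (not taken from this file) of what the raw
## loader above queues when scanners are supplied.  The case name, iosource
## name and scanner names are made-up placeholders; the dict mirrors the
## keyword arguments passed to pdbh.insert('jobs', ...).
import time

case = "demo_case"
iosource_name = "demo_iosource"
scanners = ["TypeScan", "MD5Scan"]

inode = "I%s|o0" % iosource_name        # the single raw_filesystem inode
job = dict(command = 'Scan',
           arg1 = case,                 # case the job belongs to
           arg2 = inode,                # inode to scan
           arg3 = ",".join(scanners),   # comma separated scanner list
           cookie = int(time.time()))   # groups jobs queued by this load() call
print job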
def load(self, mount_point, iosource_name, scanners=None, directory=None):
    DBFS.load(self, mount_point, iosource_name)

    iosrc = self.iosource
    path = iosrc.directory
    if not path.startswith(posixpath.normpath(config.UPLOADDIR)):
        path = FlagFramework.sane_join(config.UPLOADDIR, path)
    path = path.encode("ascii", "ignore")

    pyflaglog.log(pyflaglog.DEBUG, "Loading files from directory %s" % path)

    dbh_file = DB.DBO(self.case)
    dbh_file.mass_insert_start("file")

    dbh_inode = DB.DBO(self.case)
    dbh_inode.mass_insert_start("inode")

    if scanners:
        scanner_string = ",".join(scanners)
        pdbh = DB.DBO()
        pdbh.mass_insert_start("jobs")
        cookie = int(time.time())

    ## This deals with a mounted filesystem - we dont get the full
    ## forensic joy, but we can handle more filesystems than
    ## sleuthkit can. The downside is that the user has to mount
    ## the filesystem first, we also need to be running as root or
    ## we may not be able to stat all the files :-(
    def insert_into_table(mode, root, name):
        rel_root = FlagFramework.normpath(
            DB.expand("%s/%s/", (mount_point, root[len(path):])))
        try:
            s = os.lstat(os.path.join(root, name))
        except OSError:
            pyflaglog.log(
                pyflaglog.WARNING,
                DB.expand("Unable to stat %s - mount the directory with the uid option", root))
            return

        inode = DB.expand("I%s|M%s", (iosource_name, s.st_ino))
        dbh_inode.insert(
            "inode",
            inode=inode,
            uid=s.st_uid,
            gid=s.st_gid,
            _mtime="from_unixtime(%s)" % s.st_mtime,
            _atime="from_unixtime(%s)" % s.st_atime,
            _ctime="from_unixtime(%s)" % s.st_ctime,
            status="alloc",
            mode=str(oct(s.st_mode)),
            size=s.st_size,
            _fast=True,
        )
        inode_id = dbh_inode.autoincrement()

        dbh_file.mass_insert(inode_id=inode_id, inode=inode, mode=mode,
                             status="alloc", path=rel_root, name=name)

        ## If needed schedule inode for scanning:
        if scanners and mode == "r/r":
            pdbh.mass_insert(command="Scan", arg1=self.case, arg2=inode,
                             arg3=scanner_string, cookie=cookie)

        ## Fixme - handle symlinks
        try:
            link = os.readlink(DB.expand("%s/%s", (root, name)).encode("utf8"))
        except OSError:
            link = ""

        # dbh.execute("insert into inode_%s set inode='M%s',uid=%r,gid=%r, mtime=%r,atime=%r,ctime=%r,mode=%r,links=%r,link=%r,size=%r",(self.table,s.st_ino,s.st_uid,s.st_gid,s.st_mtime,s.st_atime,s.st_ctime,str(oct(s.st_mode))[1:],s.st_nlink,link,s.st_size))

    ## Just walk over all the files and stat them all building the tables.
    for root, dirs, files in os.walk(path):
        for name in dirs:
            insert_into_table("d/d", root, name)
        for name in files:
            insert_into_table("r/r", root, name)

    dbh_file.mass_insert_commit()
    dbh_inode.mass_insert_commit()
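## A DB-free sketch of the traversal pattern the mounted-filesystem loader
## above relies on: walk the directory tree, lstat every entry, and record
## directories as "d/d" and regular files as "r/r".  The path below is a
## placeholder; only os.walk/os.lstat behaviour is demonstrated.
import os

def walk_and_stat(path):
    for root, dirs, files in os.walk(path):
        for name in dirs:
            yield "d/d", root, name
        for name in files:
            yield "r/r", root, name

for mode, root, name in walk_and_stat("/tmp"):
    try:
        s = os.lstat(os.path.join(root, name))
    except OSError:
        continue        # same failure mode the loader logs and skips
    print mode, os.path.join(root, name), str(oct(s.st_mode)), s.st_size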
def load(self, mount_point, iosource_name, scanners = None):
    DBFS.load(self, mount_point, iosource_name)

    ## Open the file descriptor
    self.fd = IO.open(self.case, iosource_name)

    ## Use the C implementation to read the pcap files:
    pcap_file = pypcap.PyPCAP(self.fd)

    ## Build our streams:
    pyflaglog.log(pyflaglog.DEBUG, "Reassembling streams, this might take a while")

    pcap_dbh = DB.DBO(self.case)
    pcap_dbh.mass_insert_start("pcap")

    pcap_dbh.execute("select max(id) as m from pcap")
    max_id = pcap_dbh.fetch()['m'] or 0

    cookie, processor = self.make_processor(iosource_name, scanners)

    ## Process the file with it:
    while 1:
        try:
            packet = pcap_file.dissect()
            max_id += 1

            ## FIXME - this is a bottleneck. For now we use mass
            ## insert but this will break when we have multiple
            ## concurrent loaders. Record the packet in the pcap
            ## table:
            args = dict(
                iosource = iosource_name,
                offset = packet.offset,
                length = packet.caplen,
                _ts_sec = "from_unixtime('%s')" % packet.ts_sec,
                ts_usec = packet.ts_usec,
                )

            ## Try to insert the ipid field
            try:
                args['ipid'] = packet.root.eth.payload.id
            except:
                pass

            pcap_dbh.mass_insert(**args)

            #pcap_id = pcap_dbh.autoincrement()
            pcap_id = max_id
            pcap_file.set_id(pcap_id)

            ## Some progress reporting
            if pcap_id % 10000 == 0:
                pyflaglog.log(pyflaglog.DEBUG,
                              "processed %s packets (%s bytes)" % (pcap_id, packet.offset))

            processor.process(packet)
        except StopIteration:
            break

    processor.flush()

    pcap_dbh.check_index("connection_details", 'src_ip')
    pcap_dbh.check_index("connection_details", 'src_port')
    pcap_dbh.check_index("connection_details", 'dest_ip')
    pcap_dbh.check_index("connection_details", 'dest_port')
    pcap_dbh.check_index("connection_details", 'inode_id')
def load(self, mount_point, iosource_name, scanners=None, directory=None):
    ## Ensure that mount point is normalised:
    self.iosource_name = iosource_name
    mount_point = os.path.normpath(mount_point)
    self.mount_point = mount_point
    DBFS.load(self, mount_point, iosource_name)

    # open the iosource
    iosrc = IO.open(self.case, iosource_name)

    ## Get a db handle
    dbh = DB.DBO(self.case)
    dbh.mass_insert_start('tasks')

    (addr_space, symtab, types) = load_and_identify_image(iosrc)
    self.load_open_files(dbh, addr_space, types, symtab)

    ## process_list should probably be a generator here (or not,
    ## the list is unlikely to be that big)
    for task in process_list(addr_space, types, symtab):
        ## Skip invalid tasks (This should probably be done in
        ## process_list itself so it doesnt yield rubbish)
        if not addr_space.is_valid_address(task):
            continue

        pid = process_pid(addr_space, types, task) or -1
        create_time = process_create_time(addr_space, types, task)
        task_info = {
            'iosource': iosource_name,
            'image_file_name': process_imagename(addr_space, types, task) or "UNKNOWN",
            'pid': pid,
            'offset': task,
            'active_threads': process_num_active_threads(addr_space, types, task) or -1,
            'inherited_from': process_inherited_from(addr_space, types, task) or -1,
            'handle_count': process_handle_count(addr_space, types, task) or -1,
            '_create_time': "from_unixtime('%s')" % create_time
            }

        ## Put the data in the db
        dbh.mass_insert(**task_info)

        ## Create some VFS nodes:
        new_inode = "I%s|N%s" % (iosource_name, task)
        inode_id = self.VFSCreate(None, new_inode,
                                  "%s/%s/exe" % (mount_point, task_info['pid']),
                                  _mtime=create_time,
                                  link=task_info['image_file_name'],
                                  _fast=True)

        ## Try to read the PEB:
        peb = process_peb(addr_space, types, task)
        process_address_space = process_addr_space(addr_space, types, task, None)
        command_line = process_command_line(process_address_space, types, peb)
        if command_line:
            dbh.insert('xattr',
                       inode_id=inode_id,
                       property="command_line",
                       value=command_line,
                       _fast=True)

        if peb:
            modules = process_ldrs(process_address_space, types, peb)
            for module in modules:
                if not process_address_space.is_valid_address(module):
                    continue

                path = module_path(process_address_space, types, module)
                base = module_base(process_address_space, types, module) or 0
                size = module_size(process_address_space, types, module)
                dbh.insert("modules",
                           iosource=iosource_name,
                           pid=pid,
                           path=path,
                           base=base,
                           _fast=True)

                self.VFSCreate(None, None,
                               "%s/%s/Modules/Base 0x%X" % (mount_point, task_info['pid'], base),
                               _mtime=create_time,
                               link=path,
                               size=size,
                               _fast=True)

    ## Now look for the connections:
    for connection in tcb_connections(addr_space, types, symtab):
        if not addr_space.is_valid_address(connection):
            continue

        dbh.insert("mconnections",
                   pid=connection_pid(addr_space, types, connection),
                   lport=connection_lport(addr_space, types, connection),
                   laddr=connection_laddr(addr_space, types, connection),
                   rport=connection_rport(addr_space, types, connection),
                   raddr=connection_raddr(addr_space, types, connection),
                   iosource=iosource_name,
                   _fast=True)

    ## Now do the sockets:
    for socket in open_sockets(addr_space, types, symtab):
        ## Skip sockets at invalid addresses
        if not addr_space.is_valid_address(socket):
            continue

        dbh.insert("sockets",
                   pid=socket_pid(addr_space, types, socket),
                   proto=socket_protocol(addr_space, types, socket),
                   port=socket_local_port(addr_space, types, socket),
                   _create_time="from_unixtime('%s')" % socket_create_time(addr_space, types, socket),
                   iosource=iosource_name)
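## A sketch, using made-up values, of the VFS paths the memory loader above
## creates for each task: one "exe" node per process and one node per loaded
## module, named after the module's base address.
mount_point = "/memory"
pid = 4321
base = 0x7C900000

print "%s/%s/exe" % (mount_point, pid)                      # /memory/4321/exe
print "%s/%s/Modules/Base 0x%X" % (mount_point, pid, base)  # /memory/4321/Modules/Base 0x7C900000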
def load(self, mount_point, iosource_name, scanners = None, directory=None): """ Loads the filesystem on mount point from iosource_name. If scanners are specified - generate jobs for workers as soon as the inodes are added. If directory is specified we only load the specified directory. """ ## Ensure that mount point is normalised: mount_point = posixpath.normpath(mount_point) DBFS.load(self, mount_point, iosource_name) # open the skfs iosrc = self.iosource fs = sk.skfs(iosrc) dbh_file=DB.DBO(self.case) dbh_inode=DB.DBO(self.case) dbh_block=DB.DBO(self.case) if scanners: scanner_string = ",".join(scanners) pdbh = DB.DBO() pdbh.mass_insert_start('jobs') cookie = int(time.time()) dbh_file.cursor.ignore_warnings = True dbh_inode.cursor.ignore_warnings = True dbh_block.cursor.ignore_warnings = True dbh_file.mass_insert_start("file") #dbh_inode.mass_insert_start("inode") dbh_block.mass_insert_start("block") def insert_file(inode_id, inode, type, path, name): path = path.decode("utf8","ignore") name = name.decode("utf8","ignore") inodestr = "I%s|K%s" % (iosource_name, inode) pathstr = "%s%s/" % (mount_point, path) if pathstr.startswith("//"): pathstr = pathstr[1:] if pathstr.endswith("//"): pathstr = pathstr[:-1] if inode.alloc == 0: allocstr = "deleted" type = type[:-1]+'-' elif inode.alloc == 1: allocstr = "alloc" elif inode.alloc == 2: allocstr = "realloc" fields = { "inode":inodestr, "mode":type, "status":allocstr, "path":pathstr, "name":name } if(inode_id): fields['inode_id'] = inode_id try: fields["link"] = fs.readlink(inode=inode) except IOError: pass # insert file entry dbh_file.mass_insert(**fields) def runs(blocks): # converts an ordered list e.g. [1,2,3,4,7,8,9] into a list of # 'runs'; tuples of (start, length) e.g. [(1,4),(7,3)] if len(blocks) == 0: return index = 0 start = None length = 1 for i in blocks: if start==None: start = i elif i==start+length: length+=1 else: yield index,start,length index += 1 start = i length = 1 yield index,start,length def insert_inode(inode): """ Inserts inode into database and returns new inode_id and a stat object for the newly inserted inode """ inode_id = None # dont do anything for realloc inodes or those with an invalid # inode number. inode_id 1 is the default (dummy) entry #if inode.alloc == 2 or str(inode) == "0-0-0": if str(inode) == "0-0-0": return 1 inodestr = "I%s|K%s" % (iosource_name, inode) if inode.alloc: status = 'alloc' else: status = 'deleted' args = dict(inode = inodestr, status = status, _fast = True) try: print "%r" % inode if inode.__str__()=="22-0-0": print "found it" raise IOError("foo") ## If this fails we return the default deleted Inode ## because we dont know anything about this inode (we ## dont know its run list or attributes). 
f = fs.open(inode=str(inode)) s = fs.fstat(f) args.update(dict( uid = s.st_uid, gid = s.st_gid, mode = s.st_mode, links = s.st_nlink, link = "", size = s.st_size, )) if s.st_mtime: args['_mtime'] = "from_unixtime(%d)" % s.st_mtime if s.st_atime: args['_atime'] = "from_unixtime(%d)" % s.st_atime if s.st_ctime: args['_ctime'] = "from_unixtime(%d)" % s.st_ctime #insert block runs index = 0 for (index, start, count) in runs(f.blocks()): dbh_block.mass_insert( inode = inodestr, index = index, block = start, count = count ) #f.close() except IOError,e: pyflaglog.log(pyflaglog.WARNING, "Error creating inode: %s", e) dbh_inode.insert( "inode", **args) inode_id = dbh_inode.autoincrement() ## If needed schedule inode for scanning: if scanners: pdbh.mass_insert( command = 'Scan', arg1 = self.case, arg2 = inodestr, arg3= scanner_string, cookie=cookie, ) return inode_id
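## A standalone check of the run-length coalescing performed by runs() above,
## copied here so it can be exercised on its own: an ordered block list
## collapses into (index, start, length) tuples.  The block numbers are
## arbitrary example values.
def runs(blocks):
    if len(blocks) == 0:
        return
    index = 0
    start = None
    length = 1
    for i in blocks:
        if start == None:
            start = i
        elif i == start + length:
            length += 1
        else:
            yield index, start, length
            index += 1
            start = i
            length = 1
    yield index, start, length

print list(runs([1, 2, 3, 4, 7, 8, 9]))   # -> [(0, 1, 4), (1, 7, 3)]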