Example #1
    def test03MultipleSources(self):
        """ Test that multiple images can be loaded on the same VFS """
        pyflagsh.shell_execv(
            command="execute",
            argv=[
                "Load Data.Load IO Data Source",
                "case=%s" % self.test_case,
                "iosource=second_image",
                "subsys=EWF",
                "filename=ntfs_image.e01",
            ],
        )
        pyflagsh.shell_execv(
            command="execute",
            argv=[
                "Load Data.Load Filesystem image",
                "case=%s" % self.test_case,
                "iosource=second_image",
                "fstype=Sleuthkit",
                "mount_point=/ntfsimage/",
            ],
        )

        ## Try to read a file from the first source:
        fsfd = DBFS(self.test_case)
        fd = fsfd.open("/stdimage/dscf1081.jpg")
        m = hashlib.md5()
        m.update(fd.read())
        self.assertEqual(m.hexdigest(), "11bec410aebe0c22c14f3eaaae306f46")

        ## Try to read a file from the second source:
        fd = fsfd.open("/ntfsimage/Books/80day11.txt")
        m = hashlib.md5()
        m.update(fd.read())
        self.assertEqual(m.hexdigest(), "f5b394b5d0ca8c9ce206353e71d1d1f2")
Example #2
    def test01RunScanners(self):
        """ Running Logical Index Scanner """
        ## Make sure the word 'secret' is in the dictionary.
        pdbh = DB.DBO()
        pdbh.execute("select * from dictionary where word='secret' limit 1")
        row = pdbh.fetch()
        if not row:
            pdbh.insert('dictionary', **{'word':'secret', 'class':'English', 'type':'word'})
        
        env = pyflagsh.environment(case=self.test_case)
        pyflagsh.shell_execv(env=env, command="scan",
                             argv=["*",'IndexScan'])

        dbh = DB.DBO(self.test_case)
        dbh2 = DB.DBO(self.test_case)
        fsfd = DBFS(self.test_case)
        dbh.execute("select inode_id, word,offset,length from LogicalIndexOffsets join %s.dictionary on LogicalIndexOffsets.word_id=%s.dictionary.id where word='secret'", (config.FLAGDB,config.FLAGDB))
        count = 0
        for row in dbh:
            count += 1
            path, inode, inode_id = fsfd.lookup(inode_id = row['inode_id'])
            fd = fsfd.open(inode=inode)
            fd.overread = True
            fd.slack = True
            fd.seek(row['offset'])
            data = fd.read(row['length'])
            print "Looking for %s: Found in %s at offset %s length %s %r" % (
                row['word'], inode, row['offset'], row['length'],data)
            self.assertEqual(data.lower(), row['word'].lower())

        ## Did we find all the secrets?
        self.assertEqual(count,2)
Example #3
 def guess(self, fd, result, metadata):
     """ We need to see if its a PCAP file """
     DBFS.guess(self, fd, result, metadata)
     if 'tcpdump' in metadata['magic']:
         result.row("Selecting PCAP Virtual Filesystem automatically" ,**{'colspan':50,'class':'hilight'})
         return 120
     else:
         return -1
Example #4
    def run(self, case, inode, scanners, *args):
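        ## Look up scanner factories for the comma-separated scanner
        ## names and run them over a single inode in the given case.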
        factories = Scanner.get_factories(case, scanners.split(","))

        if factories:
            ddfs = DBFS(case)
            fd = ddfs.open(inode = inode)
            Scanner.scanfile(ddfs, fd, factories)
            fd.close()
Example #5
 def guess(self, fd, result, metadata):
     """ We can only really handle mounted IO sources, we cant
     actually handle anything else
     """
     DBFS.guess(self, fd, result, metadata)
     if fd.__class__.__name__ == "Mounted":
         return 120
     else:
         return -1
Example #6
def get_evidence_tz_name(case, fd):
    """ return the name of the timezone for the given piece of evidence """
    try:
        tz = fd.gettz()
        return tz
    except AttributeError:
        pass

    ## fd is not a File descendant - it could be a cached file
    ddfs = DBFS(case)
    fd2 = ddfs.open(inode = basename(fd.name))
    return fd2.gettz()
Example #7
 def test03ReadNTFSCompressed(self):
     """ Test reading a compressed NTFS file """
     self.fsfd = DBFS(self.test_case)
     fd = self.fsfd.open("/Books/80day11.txt")
     m = hashlib.md5()
     m.update(fd.read())
     self.assertEqual(m.hexdigest(),'f5b394b5d0ca8c9ce206353e71d1d1f2')
Example #8
    def display(self, query, result):
        filenames = query.getarray('filename')
        print "Openning AFF4 volumes %s" % (filenames, )
        result.heading("Loading AFF4 Volumes")
        fsfd = DBFS(query['case'])

        for filename in filenames:
            ## Filenames are always specified relative to the upload
            ## directory
            urn = pyaff4.RDFURN()
            urn.set(config.UPLOADDIR)
            urn.add(filename)

            ## We try to load the volume contained in the URI given,
            ## but if that fails we just load the URI as a raw file:
            if not oracle.load(urn):
                fsfd.VFSCreate(urn, urn.parser.query, _fast=True, mode=-1)
                return

            stream_urn = pyaff4.RDFURN()
            iter = oracle.get_iter(urn, pyaff4.AFF4_CONTAINS)
            while oracle.iter_next(iter, stream_urn):
                result.row("Adding %s" % stream_urn.value)

                ## FIXME - what kind of objects do we import?
                ## Segments might be too much
                fsfd.VFSCreate(stream_urn,
                               stream_urn.parser.query,
                               _fast=True,
                               mode=-1)

        return
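        ## NOTE: everything below this return is unreachable - it is
        ## disabled work in progress (see the FIXMEs below).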

        ## FIXME - record the fact that these volumes are loaded
        ## already into this case...

        ## Load all the objects inside the volumes
        for v in loaded_volumes:
            for urn in aff4.oracle.resolve_list(v, AFF4_CONTAINS):
                type = aff4.oracle.resolve(urn, AFF4_TYPE)
                if 1 or type in SUPPORTED_STREAMS:
                    if "/" in urn:
                        path = "%s/%s" % (base_dir, urn[urn.index("/"):])
                    else:
                        path = base_dir

                    fsfd.VFSCreate(urn, path, _fast=True, mode=-1)
Example #9
class SKFSTests2(tests.FDTest):
    """ Test Sleuthkit file like object for compressed files """
    test_case = "PyFlagNTFSTestCase"
    test_file = "/Books/80day11.txt"

    def setUp(self):
        self.fs = DBFS(self.test_case)
        self.fd = self.fs.open(self.test_file)
Example #10
 def test02ReadNTFSFile(self):
     """ Test reading a regular NTFS file """
     self.fsfd = DBFS(self.test_case)
     ## This file is Images/250px-Holmes_by_Paget.jpg
     fd = self.fsfd.open(inode='Itest|K33-128-4')
     data = fd.read()
     m = hashlib.md5()
     m.update(data)
     self.assertEqual(m.hexdigest(),'f9c4ea83dfcdcf5eb441e130359f4a0d')
Example #11
    def load(self, mount_point, iosource_name, scanners=None, directory = None):
        ## Ensure that mount point is normalised:
        mount_point = posixpath.normpath(mount_point)
        DBFS.load(self, mount_point, iosource_name)

        ## Just add a single inode:
        inode = "I%s|o0" % iosource_name
        self.VFSCreate(None, inode, "%s/raw_filesystem" % mount_point)

        ## Call the scanners on the new inode
        if scanners:
            scanner_string = ",".join(scanners)
            pdbh = DB.DBO()
            pdbh.insert('jobs',
                        command = 'Scan',
                        arg1 = self.case,
                        arg2 = inode,
                        arg3 = scanner_string,
                        cookie = int(time.time()),
                        _fast = True,
                        )
Example #12
class FDTest(unittest.TestCase):
    ## These must be overridden with a file which is at least 100
    ## bytes long
    test_case = ""
    test_inode = ""

    def setUp(self):
        self.fs = DBFS(self.test_case)
        self.fd = self.fs.open(inode=self.test_inode)

    def test01HaveValidSize(self):
        """ Test for valid size """
        self.assert_(self.fd, "No fd found")
        size = self.fd.size
        ## Go to the end:
        self.fd.seek(0, 2)
        self.assert_(size != 0, "Size is zero")
        self.assertEqual(self.fd.tell(), size,
                         "Seek to end of file does not agree with size")

    def test02ReadingTests(self):
        """ Test reading ranges """
        ## Note we assume the file is at least 100 bytes long...
        self.fd.seek(0)
        data = self.fd.read(100)
        self.assertEqual(
            len(data), 100,
            "Data length read does not agree with read - or file too short?")
        self.assertEqual(self.fd.tell(), 100, "Seek after read does not agree")

        ## Check seeking and reading:
        self.fd.seek(50, 0)
        self.assertEqual(self.fd.tell(), 50)
        self.assertEqual(data[50:], self.fd.read(50),
                         "Seek and read does not agree")

    def test03SeekingTests(self):
        """ Test seeking """
        self.fd.seek(50)
        self.assertEqual(self.fd.tell(), 50,
                         "Absolute Seek does not agree with tell")

        ## Relative seeking:
        self.fd.seek(50, 1)
        self.assertEqual(self.fd.tell(), 100, "Relative seek does not agree")

        ## Seeking before the start of file should raise
        self.assertRaises(IOError, lambda: self.fd.seek(-5000, 1))

        ## Check that a read at the end returns zero:
        self.fd.seek(0, 2)
        self.assertEqual(self.fd.read(), '', "Read data past end of file")
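
A minimal sketch of how FDTest is meant to be specialised, in the same way SKFSTests2 does above. The case name and inode string below are taken from the NTFS examples and are assumptions; any loaded file of at least 100 bytes works:

class ExampleFDTest(FDTest):
    """ Hypothetical FDTest specialisation for a loaded NTFS case """
    test_case = "PyFlagNTFSTestCase"    # assumes this case has been loaded
    test_inode = "Itest|K33-128-4"      # assumes this inode exists and is >= 100 bytes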
Example #13
    def load(self, mount_point, iosource_name, scanners=None):
        DBFS.load(self, mount_point, iosource_name)
        
        ## Open the file descriptor
        self.fd = IO.open(self.case, iosource_name)

        ## Use the C implementation to read the pcap files:
        pcap_file = pypcap.PyPCAP(self.fd)

        ## Build our streams:
        pyflaglog.log(pyflaglog.DEBUG, "Reassembling streams, this might take a while")
        pcap_dbh = DB.DBO(self.case)
        pcap_dbh.mass_insert_start("pcap")

        pcap_dbh.execute("select max(id) as m from pcap")
        max_id = pcap_dbh.fetch()['m'] or 0
        cookie, processor = self.make_processor(iosource_name, scanners)

        ## Process the file with it:
        while 1:
            try:
                packet = pcap_file.dissect()
                max_id += 1

                ## FIXME - this is a bottleneck. For now we use mass
                ## insert but this will break when we have multiple
                ## concurrent loaders.  Record the packet in the pcap
                ## table:
                args = dict(
                    iosource = iosource_name,
                    offset = packet.offset,
                    length = packet.caplen,
                    _ts_sec = "from_unixtime('%s')" % packet.ts_sec,
                    ts_usec = packet.ts_usec,
                    )

                ## Try to insert the ipid field if the packet has one
                try:
                    args['ipid'] = packet.root.eth.payload.id
                except Exception:
                    pass

                pcap_dbh.mass_insert(**args)
                #pcap_id = pcap_dbh.autoincrement()
                pcap_id = max_id
                pcap_file.set_id(pcap_id)

                
                ## Some progress reporting
                if pcap_id % 10000 == 0:
                    pyflaglog.log(pyflaglog.DEBUG, "processed %s packets (%s bytes)" % (pcap_id, packet.offset))

                processor.process(packet)
            except StopIteration:
                break

        processor.flush()

        pcap_dbh.check_index("connection_details",'src_ip')
        pcap_dbh.check_index("connection_details",'src_port')
        pcap_dbh.check_index("connection_details",'dest_ip')
        pcap_dbh.check_index("connection_details",'dest_port')
        pcap_dbh.check_index('connection_details','inode_id')
Example #14
 def setUp(self):
     self.fs = DBFS(self.test_case)
     self.fd = self.fs.open(inode=self.test_inode)
Example #15
    def load(self, mount_point, iosource_name, scanners=None, directory=None):
        DBFS.load(self, mount_point, iosource_name)
        iosrc = self.iosource
        path = iosrc.directory
        if not path.startswith(posixpath.normpath(config.UPLOADDIR)):
            path = FlagFramework.sane_join(config.UPLOADDIR, path)

        path = path.encode("ascii", "ignore")
        pyflaglog.log(pyflaglog.DEBUG, "Loading files from directory %s" % path)
        dbh_file = DB.DBO(self.case)
        dbh_file.mass_insert_start("file")

        dbh_inode = DB.DBO(self.case)
        dbh_inode.mass_insert_start("inode")

        if scanners:
            scanner_string = ",".join(scanners)
            pdbh = DB.DBO()
            pdbh.mass_insert_start("jobs")
            cookie = int(time.time())

        ## This deals with a mounted filesystem - we dont get the full
        ## forensic joy, but we can handle more filesystems than
        ## sleuthkit can.  The downside is that the user has to mount
        ## the filesystem first, we also need to be running as root or
        ## we may not be able to stat all the files :-(
        def insert_into_table(mode, root, name):
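            """ Stat one directory entry and record it in the inode and file tables """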
            rel_root = FlagFramework.normpath(DB.expand("%s/%s/", (mount_point, root[len(path) :])))
            try:
                s = os.lstat(os.path.join(root, name))
            except OSError:
                pyflaglog.log(
                    pyflaglog.WARNING, DB.expand("Unable to stat %s - mount the directory with the uid option", root)
                )
                return

            inode = DB.expand("I%s|M%s", (iosource_name, s.st_ino))
            dbh_inode.insert(
                "inode",
                inode=inode,
                uid=s.st_uid,
                gid=s.st_gid,
                _mtime="from_unixtime(%s)" % s.st_mtime,
                _atime="from_unixtime(%s)" % s.st_atime,
                _ctime="from_unixtime(%s)" % s.st_ctime,
                status="alloc",
                mode=str(oct(s.st_mode)),
                size=s.st_size,
                _fast=True,
            )
            inode_id = dbh_inode.autoincrement()

            dbh_file.mass_insert(inode_id=inode_id, inode=inode, mode=mode, status="alloc", path=rel_root, name=name)

            ## If needed schedule inode for scanning:
            if scanners and mode == "r/r":
                pdbh.mass_insert(command="Scan", arg1=self.case, arg2=inode, arg3=scanner_string, cookie=cookie)

            ## Fixme - handle symlinks
            try:
                link = os.readlink(DB.expand("%s/%s", (root, name)).encode("utf8"))
            except OSError:
                link = ""

        #            dbh.execute("insert into inode_%s set inode='M%s',uid=%r,gid=%r, mtime=%r,atime=%r,ctime=%r,mode=%r,links=%r,link=%r,size=%r",(self.table,s.st_ino,s.st_uid,s.st_gid,s.st_mtime,s.st_atime,s.st_ctime,str(oct(s.st_mode))[1:],s.st_nlink,link,s.st_size))

        ## Just walk over all the files and stat them all building the tables.
        for root, dirs, files in os.walk(path):
            for name in dirs:
                insert_into_table("d/d", root, name)
            for name in files:
                insert_into_table("r/r", root, name)

        dbh_file.mass_insert_commit()
        dbh_inode.mass_insert_commit()
Example #16
    def load(self, mount_point, iosource_name, scanners=None, directory=None):
        ## Ensure that mount point is normalised:
        self.iosource_name = iosource_name
        mount_point = os.path.normpath(mount_point)
        self.mount_point = mount_point

        DBFS.load(self, mount_point, iosource_name)

        # open the iosource
        iosrc = IO.open(self.case, iosource_name)

        ## Get a db handle
        dbh = DB.DBO(self.case)
        dbh.mass_insert_start('tasks')

        (addr_space, symtab, types) = load_and_identify_image(iosrc)
        self.load_open_files(dbh, addr_space, types, symtab)

        ## process_list should probably be a generator here (or not,
        ## the list is unlikely to be that big)
        for task in process_list(addr_space, types, symtab):
            ## Skip invalid tasks (This should probably be done in
            ## process_list itself so it doesnt yield rubbish)
            if not addr_space.is_valid_address(task): continue
            pid = process_pid(addr_space, types, task) or -1
            create_time = process_create_time(addr_space, types, task)

            task_info = {
                'iosource':        iosource_name,
                'image_file_name': process_imagename(addr_space, types, task) or "UNKNOWN",
                'pid':             pid,
                'offset':          task,
                'active_threads':  process_num_active_threads(addr_space, types, task) or -1,
                'inherited_from':  process_inherited_from(addr_space, types, task) or -1,
                'handle_count':    process_handle_count(addr_space, types, task) or -1,
                '_create_time':    "from_unixtime('%s')" % create_time,
            }

            ## Put the data in the db
            dbh.mass_insert(**task_info)

            ## Create some VFS nodes:
            new_inode = "I%s|N%s" % (iosource_name, task)
            inode_id = self.VFSCreate(None,
                                      new_inode,
                                      "%s/%s/exe" %
                                      (mount_point, task_info['pid']),
                                      _mtime=create_time,
                                      link=task_info['image_file_name'],
                                      _fast=True)

            ## Try to read the PEB:
            peb = process_peb(addr_space, types, task)
            process_address_space = process_addr_space(addr_space, types, task,
                                                       None)
            command_line = process_command_line(process_address_space, types,
                                                peb)
            if command_line:
                dbh.insert('xattr',
                           inode_id=inode_id,
                           property="command_line",
                           value=command_line,
                           _fast=True)

            if peb:
                modules = process_ldrs(process_address_space, types, peb)
                for module in modules:
                    if not process_address_space.is_valid_address(module):
                        continue
                    path = module_path(process_address_space, types, module)
                    base = module_base(process_address_space, types,
                                       module) or 0
                    size = module_size(process_address_space, types, module)

                    dbh.insert("modules",
                               iosource=iosource_name,
                               pid=pid,
                               path=path,
                               base=base,
                               _fast=True)

                    self.VFSCreate(None,
                                   None,
                                   "%s/%s/Modules/Base 0x%X" %
                                   (mount_point, task_info['pid'], base),
                                   _mtime=create_time,
                                   link=path,
                                   size=size,
                                   _fast=True)

        ## Now look for the connections:
        for connection in tcb_connections(addr_space, types, symtab):
            if not addr_space.is_valid_address(connection):
                continue

            dbh.insert("mconnections",
                       pid=connection_pid(addr_space, types, connection),
                       lport=connection_lport(addr_space, types, connection),
                       laddr=connection_laddr(addr_space, types, connection),
                       rport=connection_rport(addr_space, types, connection),
                       raddr=connection_raddr(addr_space, types, connection),
                       iosource=iosource_name,
                       _fast=True)

        ## Now do the sockets:
        for socket in open_sockets(addr_space, types, symtab):
            if not addr_space.is_valid_address(socket):
                continue

            dbh.insert("sockets",
                       pid=socket_pid(addr_space, types, socket),
                       proto=socket_protocol(addr_space, types, socket),
                       port=socket_local_port(addr_space, types, socket),
                       _create_time="from_unixtime('%s')" %
                       socket_create_time(addr_space, types, socket),
                       iosource=iosource_name)
Example #17
class NTFSTests(unittest.TestCase):
    """ Sleuthkit NTFS Support """
    order = 1
    test_case = "PyFlagNTFSTestCase"
    def test01LoadNTFSFileSystem(self):
        """ Test Loading of NTFS Filesystem """
        pyflagsh.shell_execv(command="execute",
                             argv=["Case Management.Remove case",'remove_case=%s' % self.test_case])

        pyflagsh.shell_execv(command="execute",
                             argv=["Case Management.Create new case",'create_case=%s' % self.test_case])

        pyflagsh.shell_execv(command="execute",
                             argv=["Load Data.Load IO Data Source",'case=%s' % self.test_case,
                                   "iosource=test",
                                   "subsys=EWF",
                                   "filename=ntfs_image.e01",
                                   ])
        pyflagsh.shell_execv(command="execute",
                             argv=["Load Data.Load Filesystem image",'case=%s' % self.test_case,
                                   "iosource=test",
                                   "fstype=Sleuthkit",
                                   "mount_point=/"])
        
        dbh = DB.DBO(self.test_case)
        dbh.execute("select count(*) as count from inode")
        row = dbh.fetch()
        self.assertEqual(row['count'],140)
        dbh.execute("select count(*) as count from file")
        row = dbh.fetch()
        self.assertEqual(row['count'],153)

    def test02ReadNTFSFile(self):
        """ Test reading a regular NTFS file """
        self.fsfd = DBFS(self.test_case)
        ## This file is Images/250px-Holmes_by_Paget.jpg
        fd = self.fsfd.open(inode='Itest|K33-128-4')
        data = fd.read()
        m = hashlib.md5()
        m.update(data)
        self.assertEqual(m.hexdigest(),'f9c4ea83dfcdcf5eb441e130359f4a0d')
        
    def test03ReadNTFSCompressed(self):
        """ Test reading a compressed NTFS file """
        self.fsfd = DBFS(self.test_case)
        fd = self.fsfd.open("/Books/80day11.txt")
        m = hashlib.md5()
        m.update(fd.read())
        self.assertEqual(m.hexdigest(),'f5b394b5d0ca8c9ce206353e71d1d1f2')

    def test04LocatingNTFS_ADS(self):
        """ Test for finding ADS files """
        ## Do type scanning:
        env = pyflagsh.environment(case=self.test_case)
        pyflagsh.shell_execv(env=env, command="scan",
                             argv=["*",'TypeScan'])

        dbh = DB.DBO(self.test_case)
        dbh.execute('select type.type from type,inode where type.inode_id=inode.inode_id and type like "%executable%" and inode.inode like "%33-128-7%"')
        row = dbh.fetch()

        self.assert_(row, "Executable within ADS was not found???")
Example #18
    def load(self, mount_point, iosource_name, scanners = None, directory=None):
        """ Loads the filesystem on mount point from iosource_name. If
        scanners are specified - generate jobs for workers as soon as
        the inodes are added. If directory is specified we only load
        the specified directory.
        """
        ## Ensure that mount point is normalised:
        mount_point = posixpath.normpath(mount_point)
        DBFS.load(self, mount_point, iosource_name)

        # open the skfs
        iosrc = self.iosource
        fs = sk.skfs(iosrc)

        dbh_file=DB.DBO(self.case)
        dbh_inode=DB.DBO(self.case)
        dbh_block=DB.DBO(self.case)
        if scanners:
            scanner_string = ",".join(scanners)
            pdbh = DB.DBO()
            pdbh.mass_insert_start('jobs')
            cookie = int(time.time())
        
        dbh_file.cursor.ignore_warnings = True
        dbh_inode.cursor.ignore_warnings = True
        dbh_block.cursor.ignore_warnings = True

        dbh_file.mass_insert_start("file")
        #dbh_inode.mass_insert_start("inode")
        dbh_block.mass_insert_start("block")

        def insert_file(inode_id, inode, type, path, name):
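            """ Record one directory entry in the file table """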
            path = path.decode("utf8","ignore")
            name = name.decode("utf8","ignore")
            
            inodestr = "I%s|K%s" % (iosource_name, inode)
            pathstr = "%s%s/" % (mount_point, path)

            if pathstr.startswith("//"):
                pathstr = pathstr[1:]
            if pathstr.endswith("//"):
                pathstr = pathstr[:-1]

            if inode.alloc == 0:
                allocstr = "deleted"
                type = type[:-1]+'-'
            elif inode.alloc == 1:
                allocstr = "alloc"
            elif inode.alloc == 2:
                allocstr = "realloc"

            fields = {
                "inode":inodestr,
                "mode":type,
                "status":allocstr,
                "path":pathstr,
                "name":name
            }

            if inode_id:
                fields['inode_id'] = inode_id

            try:
                fields["link"] = fs.readlink(inode=inode)
            except IOError:
                pass

            # insert file entry
            dbh_file.mass_insert(**fields)

        def runs(blocks):
            # converts an ordered list e.g. [1,2,3,4,7,8,9] into a list of
            # 'runs': tuples of (index, start, length) e.g. [(0,1,4),(1,7,3)]
            if len(blocks) == 0:
                return

            index = 0
            start = None
            length = 1
            for i in blocks:
                if start is None:
                    start = i
                elif i == start + length:
                    length += 1
                else:
                    yield index, start, length
                    index += 1
                    start = i
                    length = 1

            yield index, start, length

        def insert_inode(inode):
            """ Inserts inode into database and returns new inode_id and a
            stat object for the newly inserted inode """
            inode_id = None

            # don't do anything for realloc inodes or those with an invalid
            # inode number. inode_id 1 is the default (dummy) entry
            #if inode.alloc == 2 or str(inode) == "0-0-0":
            if str(inode) == "0-0-0":
                return 1

            inodestr = "I%s|K%s" % (iosource_name, inode)

            if inode.alloc:
                status = 'alloc'
            else:
                status = 'deleted'

            args = dict(inode = inodestr,
                        status = status,
                        _fast = True)

            try:
                ## If this fails we return the default deleted Inode
                ## because we dont know anything about this inode (we
                ## dont know its run list or attributes).
                f = fs.open(inode=str(inode))
                s = fs.fstat(f)

                args.update(dict(
                    uid = s.st_uid,
                    gid = s.st_gid,
                    mode = s.st_mode,
                    links = s.st_nlink,
                    link = "",
                    size = s.st_size,
                    ))

                if s.st_mtime:
                    args['_mtime'] = "from_unixtime(%d)" % s.st_mtime
                    
                if s.st_atime:
                    args['_atime'] = "from_unixtime(%d)" % s.st_atime
                    
                if s.st_ctime:
                    args['_ctime'] = "from_unixtime(%d)" % s.st_ctime

                #insert block runs
                index = 0
                for (index, start, count) in runs(f.blocks()):
                    dbh_block.mass_insert(
                        inode = inodestr,
                        index = index,
                        block = start,
                        count = count
                    )
                #f.close()

            except IOError, e:
                pyflaglog.log(pyflaglog.WARNING, "Error creating inode: %s", e)

            dbh_inode.insert( "inode", **args)
            inode_id = dbh_inode.autoincrement()
                       
            ## If needed schedule inode for scanning:
            if scanners:
                pdbh.mass_insert(
                    command = 'Scan',
                    arg1 = self.case,
                    arg2 = inodestr,
                    arg3= scanner_string,
                    cookie=cookie,
                    )
            return inode_id
Example #19
 def setUp(self):
     self.fs = DBFS(self.test_case)
     self.fd = self.fs.open(self.test_file)