Code example #1
0
    def test01RunScanners(self):
        """ Running Logical Index Scanner """
        ## Make sure the word secret is in there.
        pdbh = DB.DBO()
        pdbh.execute("select * from dictionary where word='secret' limit 1")
        row = pdbh.fetch()
        if not row:
            pdbh.insert('dictionary', **{'word':'secret', 'class':'English', 'type':'word'})
        
        env = pyflagsh.environment(case=self.test_case)
        pyflagsh.shell_execv(env=env, command="scan",
                             argv=["*",'IndexScan'])

        dbh = DB.DBO(self.test_case)
        dbh2 = DB.DBO(self.test_case)
        fsfd = DBFS(self.test_case)
        dbh.execute("select inode_id, word,offset,length from LogicalIndexOffsets join %s.dictionary on LogicalIndexOffsets.word_id=%s.dictionary.id where word='secret'", (config.FLAGDB,config.FLAGDB))
        count = 0
        for row in dbh:
            count += 1
            path, inode, inode_id = fsfd.lookup(inode_id = row['inode_id'])
            fd = fsfd.open(inode=inode)
            fd.overread = True
            fd.slack = True
            fd.seek(row['offset'])
            data = fd.read(row['length'])
            print "Looking for %s: Found in %s at offset %s length %s %r" % (
                row['word'], inode, row['offset'], row['length'],data)
            self.assertEqual(data.lower(), row['word'].lower())

        ## Did we find all the secrets?
        self.assertEqual(count,2)
Code example #2
0
File: LoadData.py — Project: olivierh59500/pyflag
    def test03MultipleSources(self):
        """ Test that multiple images can be loaded on the same VFS """
        ## Attach a second EWF image as a new IO source in this case.
        pyflagsh.shell_execv(command="execute",
                             argv=["Load Data.Load IO Data Source",'case=%s' % self.test_case,
                                   "iosource=second_image",
                                   "subsys=EWF",
                                   "filename=ntfs_image.e01" ,
                                   ])
        ## Mount its filesystem under a second mount point.
        pyflagsh.shell_execv(command="execute",
                             argv=["Load Data.Load Filesystem image",'case=%s' % self.test_case,
                                   "iosource=second_image",
                                   "fstype=Sleuthkit",
                                   "mount_point=/ntfsimage/"])

        vfs = DBFS(self.test_case)

        def digest_of(path):
            ## md5 of the full contents of one VFS file.
            hasher = hashlib.md5()
            hasher.update(vfs.open(path).read())
            return hasher.hexdigest()

        ## A file on the original source must still be readable:
        self.assertEqual(digest_of("/stdimage/dscf1081.jpg"),
                         '11bec410aebe0c22c14f3eaaae306f46')

        ## And a file on the newly added source must be too:
        self.assertEqual(digest_of("/ntfsimage/Books/80day11.txt"),
                         'f5b394b5d0ca8c9ce206353e71d1d1f2')
Code example #3
0
    def display(self, query, result):
        """ Load the AFF4 volumes named in *query* into this case's VFS.

        Each 'filename' value in the query is resolved relative to the
        upload directory.  If it is a loadable AFF4 volume, every stream
        it contains is registered in the VFS; otherwise the file itself
        is added to the VFS as a plain entry.
        """
        filenames = query.getarray('filename')
        print "Openning AFF4 volumes %s" % (filenames, )
        result.heading("Loading AFF4 Volumes")
        fsfd = DBFS(query['case'])

        for filename in filenames:
            ## Filenames are always specified relative to the upload
            ## directory
            urn = pyaff4.RDFURN()
            urn.set(config.UPLOADDIR)
            urn.add(filename)

            ## We try to load the volume contained in the URI given,
            ## but if that fails we just load the URI as a raw file:
            if not oracle.load(urn):
                fsfd.VFSCreate(urn, urn.parser.query, _fast=True, mode=-1)
                ## NOTE(review): this returns from the whole method, so any
                ## remaining filenames are silently skipped after the first
                ## non-volume file -- looks like it should be `continue`;
                ## confirm against callers before changing.
                return

            ## Walk every stream the volume claims to contain and add it
            ## to the VFS.
            stream_urn = pyaff4.RDFURN()
            iter = oracle.get_iter(urn, pyaff4.AFF4_CONTAINS)
            while oracle.iter_next(iter, stream_urn):
                result.row("Adding %s" % stream_urn.value)

                ## FIXME - what kind of objects do we import?
                ## Segments might be too much
                fsfd.VFSCreate(stream_urn,
                               stream_urn.parser.query,
                               _fast=True,
                               mode=-1)

        return

        ## NOTE(review): everything below this return is unreachable dead
        ## code from an earlier implementation; `loaded_volumes`,
        ## `base_dir` and the bare AFF4_* names are not defined in this
        ## scope.  Kept verbatim pending a decision to delete it.
        ## FIXME - record the fact that these volumes are loaded
        ## already into this case...

        ## Load all the objects inside the volumes
        for v in loaded_volumes:
            for urn in aff4.oracle.resolve_list(v, AFF4_CONTAINS):
                type = aff4.oracle.resolve(urn, AFF4_TYPE)
                if 1 or type in SUPPORTED_STREAMS:
                    if "/" in urn:
                        path = "%s/%s" % (base_dir, urn[urn.index("/"):])
                    else:
                        path = base_dir

                    fsfd.VFSCreate(urn, path, _fast=True, mode=-1)
Code example #4
0
File: tests.py — Project: johnmccabe/pyflag
 def setUp(self):
     """ Open the test inode on the test case's filesystem fixture. """
     filesystem = DBFS(self.test_case)
     self.fs = filesystem
     self.fd = filesystem.open(inode=self.test_inode)