Example #1
    def testReadReferencedMap(self):
        ## Now we compare the mapped stream with the stream produced by SK:
        FILENAME = './referencemap.00.zip'
        mapfile = fif.FIFFile([FILENAME], None, False)
        map_logfile_stream = mapfile.open_stream_by_name("logfile1.txt")

        imagefile = fif.FIFFile(['ntfs1-gen2.00.zip'])
        image_stream = imagefile.open_stream_by_name("./images/ntfs1-gen2.dd")
        fs = sk.skfs(image_stream)
        image_sk_file = fs.open('/RAW/logfile1.txt')

        ## Do big reads - sk is very slow with compressed ntfs files
        BLOCKSIZE = 1024 * 1024
        while 1:
            test_data = map_logfile_stream.read(BLOCKSIZE)
            if len(test_data) == 0: break

            data = image_sk_file.read(BLOCKSIZE)
            if data != test_data:
                print len(data), len(test_data)
            self.assertEqual(data, test_data)
        image_sk_file.close()
        fs.close()
        image_stream.close()
        map_logfile_stream.close()
        mapfile.close()
        imagefile.close()
Example #2
    def test_03_ZipVolume(self):
        """ Tests the ZipVolume implementation """
        print "Loading volume"
        z = ZipVolume(None, 'r')
        z.load_from(ResolverTests.filename)

        ## Test the stream implementation
        fd = oracle.open(fully_qualified_name("properties", z.urn))
        try:
            data = z.zf.read("properties")
            self.assertEqual(data, fd.read(1024))
        finally:
            oracle.cache_return(fd)

        stream = oracle.open(fully_qualified_name("default", z.urn))
        try:
            fd = open("output.dd")
            while 1:
                data = stream.read(1024)
                data2 = fd.read(1024)
                if not data or not data2: break

                self.assertEqual(data2, data)

            import sk
            stream.seek(0)
            fs = sk.skfs(stream)

            print fs.listdir('/')
        finally:
            oracle.cache_return(stream)
Example #3
    def test_03_ZipVolume(self):
        """ Tests the ZipVolume implementation """
        print "Loading volume"
        z = ZipVolume(None, 'r')
        z.load_from(ResolverTests.filename)

        ## Test the stream implementation
        fd = oracle.open(fully_qualified_name("properties", z.urn))
        try:
            data = z.zf.read("properties")
            self.assertEqual(data, fd.read(1024))
        finally:
            oracle.cache_return(fd)
            
        stream = oracle.open(fully_qualified_name("default", z.urn))
        try:
            fd = open("output.dd")
            while 1:
                data = stream.read(1024)
                data2 = fd.read(1024)
                if not data or not data2: break

                self.assertEqual(data2, data)

            import sk
            stream.seek(0)
            fs = sk.skfs(stream)

            print fs.listdir('/')
        finally:
            oracle.cache_return(stream)
Example #4
    def testReadAppendedMap(self):
        ## Now we compare the mapped stream with the stream produced by SK:
        FILENAME = './ntfs1-gen2-mapinside.00.zip'
        mapfile = fif.FIFFile([FILENAME], None, False)
        map_logfile_stream = mapfile.open_stream_by_name("logfile1.txt")

        image_stream = mapfile.open_stream_by_name("./images/ntfs1-gen2.dd")
        fs = sk.skfs(image_stream)
        image_sk_file = fs.open('/RAW/logfile1.txt')

        ## Do big reads - sk is very slow with compressed ntfs files
        BLOCKSIZE = 1024 * 1024
        while 1:
            test_data = map_logfile_stream.read(BLOCKSIZE)
            if len(test_data) == 0: break

            data = image_sk_file.read(BLOCKSIZE)
            if data != test_data:
                print len(data), len(test_data)
            self.assertEqual(data, test_data)
        image_sk_file.close()
        fs.close()
        image_stream.close()
        map_logfile_stream.close()
        mapfile.close()
Example #5
    def scan(self, fd, scanners, type, mime, cookie, scores=None, **args):
        if 'Filesystem' in type:
            print "Will load %s" % fd.urn.value
            fs = sk.skfs(fd)

            for root, dirs, files in fs.walk('/', unalloc=True, inodes=True):
                for d, dirname in dirs:
                    self.create_map(fd, fs, d, FlagFramework.sane_join(root[1], dirname))

                for f, filename in files:
                    self.create_map(fd, fs, f, FlagFramework.sane_join(root[1], filename))
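
The scanner above only needs a file-like object that sk.skfs() can parse, and fs.walk() hands back directory and file entries as (inode, name) pairs. A minimal standalone sketch of the same walk, where the image path image.dd is an assumption:

import os
import sk

fd = open("image.dd")  ## hypothetical raw image that the Sleuth Kit can parse
fs = sk.skfs(fd)
for root, dirs, files in fs.walk('/', unalloc=True, inodes=True):
    ## with inodes=True, root[1] carries the directory path and each entry
    ## is an (inode, name) pair, including unallocated ones
    for f, filename in files:
        print f, os.path.join(root[1], filename)
fs.close()
fd.close()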
Example #6
def test_time(fd):
    fs = sk.skfs(fd)
    f = fs.open('/Compressed/logfile1.txt')

    count = 0
    while 1:
        data = f.read(1024 * 1024 * 30)
        if len(data) == 0: break

        count += len(data)

    return count
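
test_time() accepts any file-like object that sk.skfs() can parse; the dbtool-style script further down passes a plain open() handle. A hedged usage sketch, where the image name ntfs.dd and the timing wrapper are assumptions:

import time

start = time.time()
count = test_time(open("ntfs.dd"))  ## hypothetical image containing /Compressed/logfile1.txt
print "read %d bytes in %.2f seconds" % (count, time.time() - start)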
Example #7
    def scan(self, fd, scanners, type, mime, cookie, scores=None, **args):
        if 'Filesystem' in type:
            print "Will load %s" % fd.urn.value
            fs = sk.skfs(fd)

            for root, dirs, files in fs.walk('/', unalloc=True, inodes=True):
                for d, dirname in dirs:
                    self.create_map(fd, fs, d,
                                    FlagFramework.sane_join(root[1], dirname))

                for f, filename in files:
                    self.create_map(fd, fs, f,
                                    FlagFramework.sane_join(root[1], filename))
Example #8
def test_time(fd):
    fs = sk.skfs(fd)
    f = fs.open("/Compressed/logfile1.txt")

    count = 0
    while 1:
        data = f.read(1024 * 1024 * 30)
        if len(data) == 0:
            break

        count += len(data)

    return count
Example #9
    def __init__(self, case, fd, inode):
        File.__init__(self, case, fd, inode)

        ## Reuse a cached sk.skfs for this backing file if one exists
        cache_key = "%s:%s" % (self.case, self.fd.inode)
        try:
            fs = SKCACHE.get(cache_key)
        except KeyError:
            fs = sk.skfs(self.fd)
            SKCACHE.put(fs, key=cache_key)

        ## The sleuthkit inode string follows the '|K' marker in our inode
        inode = self.inode[self.inode.find('|K') + 2:]
        self.skfd = fs.open(inode=inode)

        ## Seek to the end to find the file size, then rewind
        self.skfd.seek(0, 2)
        self.size = self.skfd.tell()
        self.skfd.seek(0)
        self.block_size = fs.block_size
Example #10
    def setUp(self):
        shutil.copyfile('../samples/ntfs1-gen2.00.zip', './ntfs1-gen2.00.zip')
        IMAGEFILENAME = 'ntfs1-gen2.00.zip'

        fiffile = fif.FIFFile([IMAGEFILENAME])
        image = fiffile.open_stream_by_name("./images/ntfs1-gen2.dd")
        fs = sk.skfs(image)
        f = fs.open('/RAW/logfile1.txt')

        ## We want to append to the last volume:
        #fiffile.append_volume(FILENAME)
        count = 0

        mapfile = fif.FIFFile()
        new_name = "%s.%02d.zip" % ("referencemap", count)
        mapfile.create_new_volume(new_name)
        ## Create a new Map stream
        new_stream = mapfile.create_stream_for_writing(
            stream_type='aff2-storage:Map', target=image.getId())
        new_stream.properties["aff2-storage:name"] = "logfile1.txt"
        mapfile.properties["aff2-storage:containsImage"] = new_stream.getId()
        mapfile.properties[
            "aff2-storage:next_volume"] = "file://%s" % IMAGEFILENAME
        count = 0
        block_size = fs.block_size
        ## Build up the mapping function
        for block in f.blocks():
            new_stream.add_point(count * block_size, block * block_size, 0)
            count += 1

        new_stream.pack()
        f.seek(0, 2)
        new_stream.size = f.tell()
        new_stream.save_map()
        new_stream.close()
        mapfile.close()
        f.close()
        fs.close()
        image.close()
        fiffile.close()
        mapfile.close()
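
Each add_point() call above pairs an offset in the new map stream (count * block_size) with an offset in the target image (block * block_size); the third argument is 0 in every example here. A pure-Python illustration of the points that loop would emit, with an assumed 4096-byte block size and a made-up block list:

block_size = 4096
blocks = [10, 11, 12, 50]  ## stand-in for the output of f.blocks()
points = [(count * block_size, block * block_size, 0)
          for count, block in enumerate(blocks)]
print points
## prints [(0, 40960, 0), (4096, 45056, 0), (8192, 49152, 0), (12288, 204800, 0)]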
Example #11
    def setUp(self):
        shutil.copyfile('../samples/ntfs1-gen2.00.zip', './ntfs1-gen2.00.zip')
        IMAGEFILENAME = 'ntfs1-gen2.00.zip'
        
        fiffile = fif.FIFFile([IMAGEFILENAME])
        image = fiffile.open_stream_by_name("./images/ntfs1-gen2.dd")
        fs = sk.skfs(image)
        f = fs.open('/RAW/logfile1.txt')
    
        ## We want to append to the last volume:
        #fiffile.append_volume(FILENAME)
        count = 0

        mapfile = fif.FIFFile()
        new_name = "%s.%02d.zip" % ("referencemap", count)
        mapfile.create_new_volume(new_name)
        ## Create a new Map stream
        new_stream = mapfile.create_stream_for_writing(
            stream_type='aff2-storage:Map', target=image.getId())
        new_stream.properties["aff2-storage:name"] = "logfile1.txt"
        mapfile.properties["aff2-storage:containsImage"] = new_stream.getId()
        mapfile.properties["aff2-storage:next_volume"] = "file://%s" % IMAGEFILENAME
        count = 0
        block_size = fs.block_size
        ## Build up the mapping function
        for block in f.blocks():
            new_stream.add_point(count * block_size, block * block_size, 0)
            count += 1

        new_stream.pack()
        f.seek(0, 2)
        new_stream.size = f.tell()
        new_stream.save_map()
        new_stream.close()
        mapfile.close()
        f.close()
        fs.close()
        image.close()
        fiffile.close()
        mapfile.close()
Example #12
    def scan(self, fd, scanners, type, mime, cookie, scores=None, **args):
        if 'x86 boot sector' in type:
            try:
                parts = sk.mmls(fd)
            except IOError, e:
                print e
                return

            for part in parts:
                ## Make a unique and sensible name for this partition
                name = "%s @ 0x%X" % (part[2], part[0])

                ## Add new maps for each partition
                map = CacheManager.AFF4_MANAGER.create_cache_map(
                    fd.case, "%s/%s" % (fd.urn.parser.query, name))

                map.write_from(fd.urn, SECTOR_SIZE * part[0],
                               SECTOR_SIZE * part[1])

                map.close()

                ## Now we recursively scan each object
                fsfd = FileSystem.DBFS(fd.case)
                new_fd = fsfd.open(inode_id=map.inode_id)
                try:
                    fs = sk.skfs(new_fd)
                    fs.close()

                    ## Lets add a hint
                    Magic.set_magic(fd.case,
                                    inode_id=map.inode_id,
                                    mime="application/filesystem",
                                    magic="Filesystem")

                except:
                    pass

                Scanner.scan_inode_distributed(fd.case, map.inode_id, scanners,
                                               cookie)
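
As the partition name and the write_from() arguments above suggest, each entry from sk.mmls() carries the start sector in part[0], the length in sectors in part[1], and a description string in part[2]. A hedged sketch that only prints the partition table, where the image path disk.dd and the 512-byte sector size are assumptions:

import sk

SECTOR_SIZE = 512
fd = open("disk.dd")  ## hypothetical disk image with an x86 partition table
for part in sk.mmls(fd):
    print "%s: byte offset %d, length %d bytes" % (
        part[2], part[0] * SECTOR_SIZE, part[1] * SECTOR_SIZE)
fd.close()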
Example #13
    def scan(self, fd, scanners, type, mime, cookie, scores=None, **args):
        if 'x86 boot sector' in type:
            try:
                parts = sk.mmls(fd)
            except IOError, e:
                print e
                return

            for part in parts:
                ## Make a unique and sensible name for this partition
                name = "%s @ 0x%X" % (part[2], part[0])

                ## Add new maps for each partition
                map = CacheManager.AFF4_MANAGER.create_cache_map(
                    fd.case,
                    "%s/%s" % (fd.urn.parser.query, name))

                map.write_from(fd.urn, SECTOR_SIZE * part[0],
                               SECTOR_SIZE * part[1])

                map.close()

                ## Now we recursively scan each object
                fsfd = FileSystem.DBFS(fd.case)
                new_fd = fsfd.open(inode_id = map.inode_id)
                try:
                    fs = sk.skfs(new_fd)
                    fs.close()

                    ## Lets add a hint
                    Magic.set_magic(fd.case,
                                    inode_id = map.inode_id,
                                    mime = "application/filesystem",
                                    magic = "Filesystem")

                except: pass

                Scanner.scan_inode_distributed(fd.case, map.inode_id,
                                               scanners, cookie)
Example #14
    def setUp(self):
        shutil.copyfile("../samples/ntfs1-gen2.00.zip", './ntfs1-gen2-mapinside.00.zip')
        
        MAPFILENAME = './ntfs1-gen2-mapinside.00.zip'
        fiffile = fif.FIFFile([MAPFILENAME], None, False)
        image = fiffile.open_stream_by_name("./images/ntfs1-gen2.dd")
        fs = sk.skfs(image)
        f = fs.open('/RAW/logfile1.txt')
    
        ## We want to append to the last volume:
        fiffile.append_volume(MAPFILENAME)

        ## Create a new Map stream
        new_stream = fiffile.create_stream_for_writing(
            stream_type='aff2-storage:Map', target=image.getId())
        new_stream.properties["aff2-storage:name"] = "logfile1.txt"
        fiffile.properties["aff2-storage:containsImage"] = new_stream.getId()
        global mappedStreamID
        mappedStreamID = new_stream.getId()
        count = 0
        block_size = fs.block_size
        ## Build up the mapping function
        for block in f.blocks():
            new_stream.add_point(count * block_size, block * block_size, 0)
            count += 1

        new_stream.pack()
        f.seek(0, 2)
        new_stream.size = f.tell()
        new_stream.save_map()
        new_stream.close()
        f.close()
        fs.close()
        image.close()
        fiffile.close()
Example #15
    def setUp(self):
        shutil.copyfile("../samples/ntfs1-gen2.00.zip",
                        './ntfs1-gen2-mapinside.00.zip')

        MAPFILENAME = './ntfs1-gen2-mapinside.00.zip'
        fiffile = fif.FIFFile([MAPFILENAME], None, False)
        image = fiffile.open_stream_by_name("./images/ntfs1-gen2.dd")
        fs = sk.skfs(image)
        f = fs.open('/RAW/logfile1.txt')

        ## We want to append to the last volume:
        fiffile.append_volume(MAPFILENAME)

        ## Create a new Map stream
        new_stream = fiffile.create_stream_for_writing(
            stream_type='aff2-storage:Map', target=image.getId())
        new_stream.properties["aff2-storage:name"] = "logfile1.txt"
        fiffile.properties["aff2-storage:containsImage"] = new_stream.getId()
        global mappedStreamID
        mappedStreamID = new_stream.getId()
        count = 0
        block_size = fs.block_size
        ## Build up the mapping function
        for block in f.blocks():
            new_stream.add_point(count * block_size, block * block_size, 0)
            count += 1

        new_stream.pack()
        f.seek(0, 2)
        new_stream.size = f.tell()
        new_stream.save_map()
        new_stream.close()
        f.close()
        fs.close()
        image.close()
        fiffile.close()
Example #16
    parser.add_option("-S",
                      "--subsys",
                      default=None,
                      help="Subsystem to use (e.g. EWF)")

    (options, args) = parser.parse_args()

    raid_map = mapper.load_map_file(options.map, options.period)
    if options.print_map:
        mapper.pretty_print(raid_map, options.period, options.number)
        print mapper.calculate_map(raid_map, options.period, options.number)
        sys.exit(0)

    blocksize = mapper.parse_offsets(options.blocksize)

    fds = []
    for arg in args:
        if arg != "None":
            fds.append(mapper.open_image(arg, options.subsys))
        else:
            fds.append(
                mapper.ParityDisk(
                    [mapper.open_image(arg) for arg in args if arg != 'None']))

    fd = mapper.RaidReassembler(raid_map,
                                fds,
                                blocksize,
                                skip=mapper.parse_offsets(options.skip))
    skfs = sk.skfs(fd, imgoff=128 * 1024 + 512 * 63)
    print skfs.listdir("/")
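
The imgoff keyword above appears to be a byte offset into the reassembled RAID stream; the value works out as follows (reading 512 * 63 as 63 sectors of 512 bytes, a common partition start):

## 128 * 1024 = 131072 bytes
## 512 * 63   =  32256 bytes
## imgoff     = 131072 + 32256 = 163328 bytes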
Example #17
#!/usr/bin/env python
# python implementation of dbtool using sk binding
# should produce the same results as dbtool (!)

import sys
import sk
import pyflag.DB as DB

img = open(sys.argv[1])
fs = sk.skfs(img)

mnt = "mnt"
ios = "Iios"

def runs(blocks):
    # Converts an ordered block list, e.g. [1,2,3,4,7,8,9], into a series of
    # 'runs': tuples of (index, start, length), e.g. (0, 1, 4) and (1, 7, 3).
    if len(blocks) == 0:
        return

    index = 0
    start = None
    length = 1
    for i in blocks:
        if start is None:
            start = i
        elif i == start + length:
            length += 1
        else:
            yield index, start, length
            index += 1
            start = i
            length = 1

    yield index, start, length
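
A quick check of the generator against the block list quoted in its own comment; this one-liner is a usage sketch, not part of the original script:

print list(runs([1, 2, 3, 4, 7, 8, 9]))
## prints [(0, 1, 4), (1, 7, 3)]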
Example #18
if "://" in options.output:
    output = options.output
else:
    output = "file://%s" % options.output

stream = oracle.open(fully_qualified_name(options.stream, VOLUME))

## Make an output volume FIXME - needs to be a utility
output_volume = ZipVolume(None, 'w')
oracle.set(output_volume.urn, AFF4_STORED, output)
output_volume.finish()
output_urn = output_volume.urn
oracle.cache_return(output_volume)

try:
    fs = sk.skfs(stream)
    block_size = fs.block_size
    for root, dirs, files in fs.walk('/', unalloc=True, inodes=True):
        for f, filename in files:
            print root[1], filename
            pathname = os.path.join(root[1], filename)

            ## The maps have urns based on the path names in the filesystem:
            map_stream = Map(None, 'w')
            map_stream.urn = fully_qualified_name(pathname, output_urn)

            s = fs.stat(inode=str(f))

            ## Some of these properties should be stored
            oracle.set(map_stream.urn, AFF4_SIZE, s.st_size)
            oracle.set(map_stream.urn, AFF4_STORED, output_urn)
Example #19
                      help = "print the map")

    parser.add_option('-o', '--output', default="output.dd",
                      help="Name of the output file")

    parser.add_option("-S", "--subsys",
                      default=None,
                      help="Subsystem to use (e.g. EWF)")
    
    (options, args) = parser.parse_args()

    raid_map = mapper.load_map_file(options.map, options.period)
    if options.print_map:
        mapper.pretty_print(raid_map, options.period, options.number)
        print mapper.calculate_map(raid_map, options.period, options.number)
        sys.exit(0)
        

    blocksize = mapper.parse_offsets(options.blocksize)

    fds = []
    for arg in args:
        if arg != "None":
            fds.append(mapper.open_image(arg, options.subsys))
        else:
            fds.append(mapper.ParityDisk([mapper.open_image(arg) for arg in args if arg != 'None']))

    fd = mapper.RaidReassembler(raid_map, fds, blocksize, skip=mapper.parse_offsets(options.skip))
    skfs = sk.skfs(fd, imgoff=128 * 1024 + 512 * 63)
    print skfs.listdir("/")
Example #20
if "://" in options.output:
    output = options.output
else:
    output = "file://%s" % options.output

stream = oracle.open(fully_qualified_name(options.stream, VOLUME))

## Make an output volume FIXME - needs to be a utility
output_volume = ZipVolume(None, "w")
oracle.set(output_volume.urn, AFF4_STORED, output)
output_volume.finish()
output_urn = output_volume.urn
oracle.cache_return(output_volume)

try:
    fs = sk.skfs(stream)
    block_size = fs.block_size
    for root, dirs, files in fs.walk("/", unalloc=True, inodes=True):
        for f, filename in files:
            print root[1], filename
            pathname = os.path.join(root[1], filename)

            ## The maps have urns based on the path names in the filesystem:
            map_stream = Map(None, "w")
            map_stream.urn = fully_qualified_name(pathname, output_urn)

            s = fs.stat(inode=str(f))

            ## Some of these properties should be stored
            oracle.set(map_stream.urn, AFF4_SIZE, s.st_size)
            oracle.set(map_stream.urn, AFF4_STORED, output_urn)
Example #21
    def load(self, mount_point, iosource_name, scanners=None, directory=None):
        """ Loads the filesystem on mount point from iosource_name. If
        scanners are specified - generate jobs for workers as soon as
        the inodes are added. If directory is specified we only load
        the specified directory.
        """
        ## Ensure that mount point is normalised:
        mount_point = posixpath.normpath(mount_point)
        DBFS.load(self, mount_point, iosource_name)

        # open the skfs
        iosrc = self.iosource
        fs = sk.skfs(iosrc)

        dbh_file = DB.DBO(self.case)
        dbh_inode = DB.DBO(self.case)
        dbh_block = DB.DBO(self.case)
        if scanners:
            scanner_string = ",".join(scanners)
            pdbh = DB.DBO()
            pdbh.mass_insert_start('jobs')
            cookie = int(time.time())
        
        dbh_file.cursor.ignore_warnings = True
        dbh_inode.cursor.ignore_warnings = True
        dbh_block.cursor.ignore_warnings = True

        dbh_file.mass_insert_start("file")
        #dbh_inode.mass_insert_start("inode")
        dbh_block.mass_insert_start("block")

        def insert_file(inode_id, inode, type, path, name):
            path = path.decode("utf8", "ignore")
            name = name.decode("utf8", "ignore")
            
            inodestr = "I%s|K%s" % (iosource_name, inode)
            pathstr = "%s%s/" % (mount_point, path)

            if pathstr.startswith("//"):
                pathstr = pathstr[1:]
            if pathstr.endswith("//"):
                pathstr = pathstr[:-1]

            if inode.alloc == 0:
                allocstr = "deleted"
                type = type[:-1]+'-'
            elif inode.alloc == 1:
                allocstr = "alloc"
            elif inode.alloc == 2:
                allocstr = "realloc"

            fields = {
                "inode":inodestr,
                "mode":type,
                "status":allocstr,
                "path":pathstr,
                "name":name
            }

            if inode_id:
                fields['inode_id'] = inode_id

            try:
                fields["link"] = fs.readlink(inode=inode)
            except IOError:
                pass

            # insert file entry
            dbh_file.mass_insert(**fields)

        def runs(blocks):
            # Converts an ordered block list, e.g. [1,2,3,4,7,8,9], into a
            # series of 'runs': tuples of (index, start, length),
            # e.g. (0, 1, 4) and (1, 7, 3).
            if len(blocks) == 0:
                return

            index = 0
            start = None
            length = 1
            for i in blocks:
                if start is None:
                    start = i
                elif i == start + length:
                    length += 1
                else:
                    yield index, start, length
                    index += 1
                    start = i
                    length = 1

            yield index, start, length

        def insert_inode(inode):
            """ Inserts inode into database and returns new inode_id and a
            stat object for the newly inserted inode """
            inode_id = None

            # dont do anything for realloc inodes or those with an invalid
            # inode number. inode_id 1 is the default (dummy) entry
            #if inode.alloc == 2 or str(inode) == "0-0-0":
            if str(inode) == "0-0-0":
                return 1

            inodestr = "I%s|K%s" % (iosource_name, inode)

            if inode.alloc:
                status = 'alloc'
            else:
                status = 'deleted'

            args = dict(inode = inodestr,
                        status = status,
                        _fast = True)

            try:
                ## If this fails we return the default deleted Inode
                ## because we don't know anything about this inode (we
                ## don't know its run list or attributes).
                f = fs.open(inode=str(inode))
                s = fs.fstat(f)

                args.update(dict(
                    uid = s.st_uid,
                    gid = s.st_gid,
                    mode = s.st_mode,
                    links = s.st_nlink,
                    link = "",
                    size = s.st_size,
                    ))

                if s.st_mtime:
                    args['_mtime'] = "from_unixtime(%d)" % s.st_mtime
                    
                if s.st_atime:
                    args['_atime'] = "from_unixtime(%d)" % s.st_atime
                    
                if s.st_ctime:
                    args['_ctime'] = "from_unixtime(%d)" % s.st_ctime

                #insert block runs
                index = 0
                for (index, start, count) in runs(f.blocks()):
                    dbh_block.mass_insert(
                        inode = inodestr,
                        index = index,
                        block = start,
                        count = count
                    )
                #f.close()

            except IOError, e:
                pyflaglog.log(pyflaglog.WARNING, "Error creating inode: %s", e)

            dbh_inode.insert("inode", **args)
            inode_id = dbh_inode.autoincrement()
                       
            ## If needed schedule inode for scanning:
            if scanners:
                pdbh.mass_insert(
                    command = 'Scan',
                    arg1 = self.case,
                    arg2 = inodestr,
                    arg3 = scanner_string,
                    cookie=cookie,
                    )
            return inode_id