Example 1
    def __init__(self, case, fd, inode):
        File.__init__(self, case, fd, inode)
        dbh = DB.DBO(self.case)
        
        ## Ensure we have an index on this column
        dbh.check_index("connection","inode_id")
        dbh.check_index("connection_details","inode_id")
        
        ## We use the inode column in the connection_details table to
        ## cache this so we only have to combine the streams once.
        inode = inode.split("|")[-1]
        self.inode_ids = [ int(x) for x in inode[1:].split("/")]

        ## Fill in some vital stats
        dbh.execute("select inode.inode_id, reverse, src_ip, dest_ip, src_port, dest_port, ts_sec from `connection_details` join inode on inode.inode_id = connection_details.inode_id where inode.inode=%r limit 1", self.inode)
        row=dbh.fetch()
        if not row:
            dbh.execute("select inode_id,reverse, src_ip, dest_ip, src_port, dest_port, ts_sec from `connection_details` where inode_id = %r", self.inode_ids[0])
            row = dbh.fetch()

        ## This updates our properties from the db
        if not row: return
        self.__dict__.update(row)

        ## We allow the user to ask for a number of streams which will
        ## be combined at the same time. This allows us to create a
        ## VFS node for both forward and reverse streams, or even
        ## totally unrelated streams which happen at the same time.
        self.look_for_cached()
        self.read(0)
        
        ## This is a cache of packet lists that we keep so we do not
        ## have to hit the db all the time.
        self.packet_list = None
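
The last component of the inode string encodes which streams to combine: a single letter prefix followed by the connection inode ids separated by "/". Below is a minimal standalone sketch of that parsing, using a made-up inode value purely for illustration:

inode = "Itest|S353/354"       # hypothetical combined-stream inode
last = inode.split("|")[-1]    # keep only the stream component -> "S353/354"
inode_ids = [ int(x) for x in last[1:].split("/") ]
assert inode_ids == [353, 354]
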
Example 2
    def __init__(self, case, fd, inode):
        File.__init__(self, case, fd, inode)

        ## Make sure our parent is cached:
        self.fd.cache()

        ## Parse out inode - if we got the compressed length provided,
        ## we use that, otherwise we calculate it from the zipfile
        ## header
        parts = inode.split('|')
        ourpart = parts[-1][1:]
        self.compressed_length = None
        try:
            offset, size = ourpart.split(":")
            self.compressed_length = int(size)
            offset = int(offset)
        except ValueError:
            offset = int(ourpart)

        self.offset = offset
        ## Ensure that we can read the file header:
        b = Zip.Buffer(fd=fd)[offset:]
        self.header = Zip.ZipFileHeader(b)

        ## This is sometimes invalid and set to zero - should we query
        ## the db?
        self.size = int(self.header['uncompr_size'])
        
        if not self.compressed_length:
            self.compressed_length = int(self.header['compr_size'])
            
        self.type = int(self.header['compression_method'])

        ## Where does the data start?
        self.init()
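
The trailing inode component accepted above comes in two forms: a bare offset, or "offset:size" when the compressed length is already known. The sketch below isolates that parsing in a hypothetical helper (parse_zip_part is not part of the original code, and the values are illustrative):

def parse_zip_part(ourpart):
    """ Returns (offset, compressed_length), where the length may be None. """
    try:
        offset, size = ourpart.split(":")
        return int(offset), int(size)
    except ValueError:
        return int(ourpart), None

assert parse_zip_part("1024:300") == (1024, 300)
assert parse_zip_part("1024") == (1024, None)
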
Example 3
    def __init__(self, case, fd, inode):
        File.__init__(self, case, fd, inode)
        # strategy: must determine basepath from parent, get our path
        # from db and then return the file:

        ## Note this _must_ work because we can only ever be called on
        ## a mounted iosource - it is an error otherwise:
        basepath = fd.io.directory

        self.case = case
        dbh = DB.DBO(case)
        dbh.check_index("file", "inode")
        dbh.execute("select path,name from file where inode=%r limit 1", (inode))
        row = dbh.fetch()

        path = row["path"]
        mount_point = fd.io.mount_point
        ## Prune the path down to the mount point:
        if path[: len(mount_point)] != mount_point:
            raise RuntimeError(DB.expand("Something went wrong - %s should be mounted on %s", (path, mount_point)))

        path = path[len(mount_point) :]
        path = basepath + "/" + path + "/" + row["name"]
        if not path.startswith(posixpath.normpath(config.UPLOADDIR)):
            path = FlagFramework.sane_join(config.UPLOADDIR, path)

        if os.path.isdir(path):
            self.fd = StringIO.StringIO("")
        else:
            self.fd = open(path, "r")

        s = os.stat(path)
        self.size = s.st_size
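
The mount point check and pruning above are plain string operations; here is a tiny illustration with made-up values (none of these paths come from the original code):

mount_point = "/mnt/evidence"
path = "/mnt/evidence/docs/2009"

## Prune the path down to the mount point, as the driver above does:
if path[:len(mount_point)] != mount_point:
    raise RuntimeError("Something went wrong - %s should be mounted on %s" % (path, mount_point))

relative = path[len(mount_point):]
assert relative == "/docs/2009"
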
Example 4
    def __init__(self, case, fd, inode):
        self.urn = inode
        ## Check that the AFF4 oracle can actually open this URN; the
        ## handle is returned to the oracle's cache straight away.
        fd = aff4.oracle.open(inode, 'r')
        try:
            if not fd: raise IOError("Unable to open %s" % inode)
        finally:
            aff4.oracle.cache_return(fd)

        File.__init__(self, case, fd, inode)
Example 5
    def __init__(self, case, fd, inode):
        File.__init__(self, case, fd, inode)
        
        cache_key = "%s:%s" % (self.case, self.fd.inode)
        try:
            fs = SKCACHE.get(cache_key)
        except KeyError:
            fs = sk.skfs(self.fd)
            SKCACHE.put(fs, key=cache_key)

        ## The sleuthkit inode is the part following the '|K' component:
        inode = self.inode[self.inode.find('|K')+2:]
        self.skfd = fs.open(inode=inode)

        ## Seek to the end to discover the file size, then rewind:
        self.skfd.seek(0, 2)
        self.size = self.skfd.tell()
        self.skfd.seek(0)
        self.block_size = fs.block_size
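
This driver and the tar driver in Example 7 share the same get-or-create idiom around a module level cache. The sketch below shows the idiom in isolation; SimpleCache is a stand-in written purely for this illustration, and the real SKCACHE/ZIPCACHE objects are only assumed to expose a similar get()/put() pair:

class SimpleCache:
    """ A toy cache used only to illustrate the get-or-create idiom. """
    def __init__(self):
        self.store = {}

    def get(self, key):
        return self.store[key]          # raises KeyError on a miss

    def put(self, obj, key=None):
        self.store[key] = obj

cache = SimpleCache()
cache_key = "case:Itest|K1234"
try:
    fs = cache.get(cache_key)
except KeyError:
    fs = object()                       # stands in for the expensive sk.skfs(fd)
    cache.put(fs, key=cache_key)
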
Example 6
    def __init__(self, case, fd, inode):
        """ This is a top level File driver for opening pcap files.

        Note that pcap files are stored in their own filesystem. We expect the following initialisation:
        @arg fd: is an io source for the pcap file
        @arg inode: The inode of the pcap file in the pcap filesystem, currently ignored.
        """
        File.__init__(self, case, fd, inode)
        ## Calculates the size of this file:
        dbh = DB.DBO(self.case)
        self.private_dbh = dbh.clone()
        dbh.execute("select max(id) as max from pcap")
        row = dbh.fetch()
        if row['max']:
            self.size = row['max']
        else:
            self.size = 0
        
        self.private_dbh.execute("select id,offset,link_type,ts_sec,length from pcap where id>%r" % int(self.size))
        self.iosource = fd
Example 7
    def __init__(self, case, fd, inode):
        File.__init__(self, case, fd, inode)

        ## Tar file handling requires repeated access into the tar
        ## file. Caching our input fd really helps to speed things
        ## up...
        fd.cache()
        
        # strategy:
        # inode is the index into the namelist of the tar file (i hope this is consistent!!)
        # just read that file!
        parts = inode.split('|')

        try:
            t = ZIPCACHE.get(self.fd.inode)
        except (AttributeError, KeyError):
            try:
                t = tarfile.TarFile(name='/', fileobj=fd)
                ZIPCACHE.put(t, key=self.fd.inode)
            except tarfile.CompressionError as e:
                raise IOError("Tar file: %s" % e)
Example 8
    def __init__(self, case, fd, inode):
        File.__init__(self, case, fd, inode)
        ## Cache our contents immediately
        self.cache()
Example 9
    def __init__(self, case, fd, inode):
        File.__init__(self, case, fd, inode)
        ## The gzip stream has not been opened yet
        self.gz = None