Example #1
    def guess_content_type(self, fd, query, inode_id):
        ## Use an explicit hint from the query if one was given; otherwise
        ## fall back to resolving the inode's magic.
        try:
            if query['hint']:
                return query['hint']
        except KeyError:
            pass

        m = Magic.MagicResolver()
        type, content_type = m.find_inode_magic(self.case, inode_id)
        return content_type
Example #2
    def GetContentType(self):
        """ This should produce the proper mime-type for this image class.

        The default implementation uses magic to determine the content
        type. This is sufficiently intelligent for most applications. The
        only reason you might want to override this is if the extra
        overhead of displaying the image twice is prohibitive.
        """
        import pyflag.Magic as Magic

        magic = Magic.MagicResolver()
        return magic.estimate_type(self.display(), None, None)
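The docstring in the example above describes a trade-off: the default GetContentType() sniffs the rendered output with magic, while a subclass that already knows its type can override it and skip the second render. Below is a minimal, self-contained sketch of that pattern, not PyFlag code; the class names and the trivial signature check are illustrative stand-ins for the MagicResolver call.

class GenericImage(object):
    def display(self):
        ## Rendered output; a placeholder PNG signature stands in for a real image.
        return b"\x89PNG\r\n\x1a\n"

    def GetContentType(self):
        ## Default behaviour: sniff the rendered bytes (PyFlag would call
        ## Magic.MagicResolver().estimate_type() here).
        data = self.display()
        if data.startswith(b"\x89PNG"):
            return "image/png"
        return "application/octet-stream"

class PNGImage(GenericImage):
    def GetContentType(self):
        ## Override: the type is known up front, so the image is not
        ## rendered a second time just to sniff it.
        return "image/png"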
Example #3
    def execute(self):
        #Find the inode of the file:
        import pyflag.Magic as Magic

        m = Magic.MagicResolver()
        for path in self.args:
            type, mime = m.find_inode_magic(case=self.environment._CASE,
                                            urn=path)

            yield dict(type=type, mime=mime)
Example #4
    def execute(self):
        ## Resolve the magic type for each inode id given on the command line:
        import pyflag.Magic as Magic

        m = Magic.MagicResolver()
        for inode_id in self.args:
            type, mime, scores = m.find_inode_magic(
                case=self.environment._CASE, inode_id=inode_id)

            yield scores
            yield dict(type=type, mime=mime)
Example #5
    def process(self, data, metadata=None):
        ScanIfType.process(self, data, metadata)
        if not self.boring_status and not self.filename:
            ## We need to find the name of the original uncompressed
            ## file so we can set a sensible VFS file name. This is
            ## the algorithm used:
            ## 1) Try to decompress the first data block from the file to
            ##    see if the original name is in the header.
            ## 2) Failing this, check if the inode's filename ends with .gz.
            ## 3) Failing that, call the new file "data".
            m = Magic.MagicResolver()
            magic, type_mime = m.find_inode_magic(self.case, inode_id=self.fd.inode_id,
                                                  data=data[:1024])
Example #6
    def scan(self, fd, scanners, type, mime, cookie, scores=None, **args):
        if 'x86 boot sector' in type:
            try:
                parts = sk.mmls(fd)
            except IOError,e:
                print e
                return

            for part in parts:
                ## Make a unique and sensible name for this partition
                name = "%s @ 0x%X" % (part[2], part[0])

                ## Add new maps for each partition
                map = CacheManager.AFF4_MANAGER.create_cache_map(
                    fd.case,
                    "%s/%s" % (fd.urn.parser.query, name))

                map.write_from(fd.urn, SECTOR_SIZE * part[0],
                               SECTOR_SIZE * part[1])

                map.close()

                ## Now we recursively scan each object
                fsfd = FileSystem.DBFS(fd.case)
                new_fd = fsfd.open(inode_id = map.inode_id)
                try:
                    fs = sk.skfs(new_fd)
                    fs.close()

                    ## Let's add a hint
                    Magic.set_magic(fd.case,
                                    inode_id = map.inode_id,
                                    mime = "application/filesystem",
                                    magic = "Filesystem")

                except: pass

                Scanner.scan_inode_distributed(fd.case, map.inode_id,
                                               scanners, cookie)
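In the loop above each mmls() entry is turned into a byte-range map over the parent image: as used here, part[0] is the start sector, part[1] the length in sectors and part[2] a description, so the extent is simply SECTOR_SIZE times those values. A small sketch of that arithmetic, with an assumed 512-byte sector size and made-up partition entries:

SECTOR_SIZE = 512   # assumed sector size; real images may differ

## (start sector, length in sectors, description) -- illustrative values only
parts = [(63, 204800, "NTFS (0x07)"),
         (204863, 409600, "Linux (0x83)")]

for start, length, desc in parts:
    ## Same naming scheme as the scanner above
    name = "%s @ 0x%X" % (desc, start)
    byte_offset = SECTOR_SIZE * start
    byte_length = SECTOR_SIZE * length
    print("%s -> offset %d, length %d bytes" % (name, byte_offset, byte_length))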
Example #7
    def guess(self, fd, result, metadata):
        """ Uses fd to guess how suitable this filesystem driver is for this image """
        if "magic" not in metadata:
            fd.seek(0)
            data = fd.read(10240)
            if data:
                import pyflag.Magic as Magic
                magic = Magic.MagicResolver()
                result.ruler()
                sig, ct = magic.get_type(data)
                result.row("Magic identifies this file as: %s" % sig, **{
                    'colspan': 50,
                    'class': 'hilight'
                })
                fd.close()
                metadata['magic'] = sig
            else:
                metadata['magic'] = ''

        return 10
Example #8
    def display(self, query, result):
        new_q = result.make_link(query, '')
        if not query.has_key('limit'): query['limit'] = 0
        dbh = self.DBO(query['case'])

        fsfd = FileSystem.DBFS(query["case"])
        ## If this is a directory, only show the stats
        fd = fsfd.open(inode_id=query['inode_id'])
        if not fd: return

        tmp = result.__class__(result)
        tmp.text(fd.urn)
        result.heading(tmp)

        try:
            m = Magic.MagicResolver()
            type, mime = m.find_inode_magic(query['case'], fd.inode_id)
            result.text("Classified as %s by magic" % type)
        except IOError, e:
            result.text("Unable to classify file, no blocks: %s" % e)
Example #9
    def __init__(self, fd, size_x):
        """ fd is the image, size_x is the requested width of the image. The height will be calculated to preserve aspect ratio """
        self.size_x = size_x
        self.fd = fd
        self.width = 0
        self.height = 0

        ## Calculate the magic of this file:
        import pyflag.Magic as Magic

        magic = Magic.MagicResolver()
        self.magic, self.content_type = magic.find_inode_magic(
            fd.case, urn_id=fd.urn_id)

        ## Now use the magic to dispatch the correct handler:
        ## Use the content type to access the thumbnail
        try:
            method = getattr(self, self.dispatcher[self.content_type])
        except KeyError, e:
            self.Unknown()
            return
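The docstring above notes that only the width is requested and the height follows from the aspect ratio. A one-function sketch of that calculation (the function name and the sample numbers are illustrative, not attributes of the class above):

def thumbnail_size(orig_width, orig_height, size_x):
    ## Scale the height by the same factor as the width, never below 1 pixel.
    return size_x, max(1, orig_height * size_x // orig_width)

## e.g. a 1600x1200 image requested at width 200 comes out as 200x150
assert thumbnail_size(1600, 1200, 200) == (200, 150)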
Example #10
    def test01Magic(self):
        """ Test that common headers are correctly identified """
        m = Magic.MagicResolver()
        for cls in Registry.MAGIC_HANDLERS.classes:
            print "\nTesting %s" % cls
            for sample_score, sample in cls.samples:
                print "Best match %s" % m.get_type(sample,None,None)[0]

                max_score, scores = m.estimate_type(sample, None, None)
                print "scores: "
                for k,v in scores.items():
                    if v>0:
                        print "      %s, %s (%s)" %( k.__class__, k.type_str(), v)

                self.assertEqual(max_score[1].__class__, cls,
                                 "Sample matched %s better than %s" % (
                    max_score[1].__class__, cls))
                    
                self.assertEqual(sample_score, max_score[0],
                                 "Unexpected score %s, expected %s" % (
                    max_score[0], sample_score) )
                    
Example #11
        def process(self, data, metadata=None):
            ScanIfType.process(self,data,metadata)
            if not self.boring_status and not self.filename:
                ## We need to find the name of the original uncompressed
                ## file so we can set a sensible VFS file name. This is
                ## the algorithm used:
                ## 1) Try to decompress the first data block from the file to
                ##    see if the original name is in the header.
                ## 2) Failing this, check if the inode's filename ends with .gz.
                ## 3) Failing that, fall back to a generic name.
                m = Magic.MagicResolver()
                magic, type_mime = m.find_inode_magic(self.case, inode_id=self.fd.inode_id,
                                                      data=data[:1024])
                ## The magic string for a gzip member embeds the original
                ## name as: was "origname"
                match = re.search('was "([^"]+)"', magic)
                if match:
                    self.filename = match.group(1)
                    return

                path, inode, inode_id = self.ddfs.lookup(inode=self.inode)
                original_filename = os.path.basename(path)
                if original_filename.endswith(".gz"):
                    self.filename=original_filename[:-3]
                    return

                self.filename="Uncompressed"
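Step (1) of the algorithm above relies on the resolved magic string containing was "origname" for gzip members. The same field can also be read directly from the gzip header (RFC 1952), where an optional FNAME record stores the original file name; the sketch below is a standalone illustration of that, not PyFlag's implementation, and the function name is made up.

import struct

def gzip_original_name(data):
    """Return the original filename stored in a gzip header, or None."""
    if len(data) < 10 or data[:2] != b"\x1f\x8b":
        return None                             # not a gzip stream
    flg = struct.unpack_from("<B", data, 3)[0]  # flags byte
    offset = 10                                 # end of the fixed header
    if flg & 0x04:                              # FEXTRA: skip the extra field
        xlen = struct.unpack_from("<H", data, offset)[0]
        offset += 2 + xlen
    if not flg & 0x08:                          # FNAME flag not set
        return None
    end = data.index(b"\x00", offset)           # name is NUL terminated
    return data[offset:end].decode("latin-1")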
Example #12
    def add_type_info(self, inode_id):
        m = Magic.MagicResolver()
        m.find_inode_magic(case = self.fd.case, inode_id = inode_id)
Example #13
    def Callback(mode, packet, connection):
        if mode == 'est':
            if 'map' not in connection:
                ## Lookup the urn this packet came from
                urn = urn_dispatcher[packet.pcap_file_id]
                ip = packet.find_type("IP")

                ## We can only get tcp or udp packets here
                try:
                    tcp = packet.find_type("TCP")
                except AttributeError:
                    tcp = packet.find_type("UDP")

                base_urn = "/%s-%s/%s-%s/" % (
                    ip.source_addr, ip.dest_addr,
                    tcp.source, tcp.dest)

                timestamp = pyaff4.XSDDatetime()
                timestamp.set(packet.ts_sec)
                map_stream = CacheManager.AFF4_MANAGER.create_cache_map(
                    case, base_urn + "forward", timestamp = timestamp,
                    target = urn)
                connection['map'] = map_stream

                ## These streams are used to point at the start of
                ## each packet header - this helps us get back to
                ## the packet information for each bit of data
                map_stream_pkt = CacheManager.AFF4_MANAGER.create_cache_map(
                    case, base_urn + "forward.pkt", timestamp = timestamp,
                    target = urn, inherited = map_stream.urn)
                connection['map.pkt'] = map_stream_pkt

                r_map_stream = CacheManager.AFF4_MANAGER.create_cache_map(
                    case, base_urn + "reverse", timestamp = timestamp,
                    target = urn, inherited = map_stream.urn)
                connection['reverse']['map'] = r_map_stream

                ## These streams are used to point at the start of
                ## each packet header - this helps us get back to
                ## the packet information for each bit of data
                r_map_stream_pkt = CacheManager.AFF4_MANAGER.create_cache_map(
                    case, base_urn + "reverse.pkt", timestamp = timestamp,
                    target = urn, inherited = r_map_stream.urn)
                connection['reverse']['map.pkt'] = r_map_stream_pkt


                ## Add to connection table
                map_stream.insert_to_table("connection_details",
                                           dict(reverse = r_map_stream.inode_id,
                                                src_ip = ip.src,
                                                src_port = tcp.source,
                                                dest_ip = ip.dest,
                                                dest_port = tcp.dest,
                                                _ts_sec = "from_unixtime(%s)" % packet.ts_sec,
                                                )
                                           )

        elif mode == 'data':
            try:
                tcp = packet.find_type("TCP")
            except AttributeError:
                tcp = packet.find_type("UDP")

            try:
                length = len(tcp.data)
            except: return
            
            urn = urn_dispatcher[packet.pcap_file_id]

            if packet.offset==0: pdb.set_trace()

            connection['map'].write_from(urn, packet.offset + tcp.data_offset, length)
            connection['map.pkt'].write_from(urn, packet.offset, length)

        elif mode == 'destroy':
            if connection['map'].size > 0 or connection['reverse']['map'].size > 0:

                map_stream = connection['map']

                r_map_stream = connection['reverse']['map']

                map_stream_pkt = connection['map.pkt']
                Magic.set_magic(case, map_stream_pkt.inode_id,
                                "Packet Map")

                r_map_stream_pkt = connection['reverse']['map.pkt']
                Magic.set_magic(case, r_map_stream_pkt.inode_id,
                                "Packet Map")

                r_map_stream.set_attribute(PYFLAG_REVERSE_STREAM, map_stream.urn)
                map_stream.set_attribute(PYFLAG_REVERSE_STREAM, r_map_stream.urn)

                ## Close all the streams
                r_map_stream_pkt.close()
                map_stream_pkt.close()
                r_map_stream.close()
                map_stream.close()

                ## FIXME - this needs to be done out of process using
                ## the distributed architecture!!!

                ## Open read only versions of these streams for
                ## scanning
                dbfs = FileSystem.DBFS(case)
                map_stream = dbfs.open(inode_id = map_stream.inode_id)
                r_map_stream = dbfs.open(inode_id = r_map_stream.inode_id)

                Scanner.scan_inode_distributed(case, map_stream.inode_id,
                                               scanners, cookie)
                Scanner.scan_inode_distributed(case, r_map_stream.inode_id,
                                               scanners, cookie)
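The callback above keeps one "forward" and one "reverse" map per connection and, for every data packet, appends the payload's byte range in the original pcap to the appropriate map, so each direction of a TCP/UDP conversation ends up as a contiguous stream backed by slices of the capture file. A toy sketch of that bookkeeping, with a plain Python class standing in for the AFF4 cache maps (nothing below is PyFlag or AFF4 API):

class ToyMap(object):
    """Collects the byte ranges of a backing file that make up one stream."""
    def __init__(self):
        self.ranges = []          # list of (offset_in_pcap, length)
        self.size = 0

    def write_from(self, offset, length):
        self.ranges.append((offset, length))
        self.size += length

## One map per direction, keyed like connection['map'] above.
connection = {"map": ToyMap(), "reverse": {"map": ToyMap()}}

## (payload offset in the pcap, payload length, reverse direction?) -- made up
packets = [(54, 120, False), (230, 300, True), (590, 80, False)]

for offset, length, is_reverse in packets:
    target = connection["reverse"]["map"] if is_reverse else connection["map"]
    target.write_from(offset, length)

print(connection["map"].ranges)            # forward stream: [(54, 120), (590, 80)]
print(connection["reverse"]["map"].size)   # reverse stream size: 300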
Example #14
    def process(self, data, metadata=None):
        if self.type_str is None:
            m = Magic.MagicResolver()
            self.type_str, self.type_mime = m.cache_type(self.case, self.fd.inode_id, data[:1024])
            metadata['mime'] = self.type_mime
            metadata['type'] = self.type_str
Example #15
        "select inode_id, scanner_cache from vfs where inode_id=%r limit 1",
        fd.inode_id)
    row = dbh.fetch()
    try:
        scanners_run = row['scanner_cache'].split(',')
    except:
        scanners_run = []

    ## Force the scanners to run anyway
    if force: scanners_run = []

    fd.inode_id = row['inode_id']

    ## The new scanning framework is much simpler - we just call the
    ## scan() method on each factory.
    m = Magic.MagicResolver()
    type, mime, scores = m.find_inode_magic(case, fd.inode_id)

    for c in get_factories(scanners):
        if c.__class__.__name__ not in scanners_run:
            fd.seek(0)
            try:
                c.scan(fd,
                       scanners=scanners,
                       type=type,
                       mime=mime,
                       cookie=cookie,
                       scores=scores)
            except Exception, e:
                print e
                #continue
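The "new scanning framework" comment above sums up the dispatch: resolve the file's magic once, then call scan() on every factory that is not already listed in scanner_cache, passing the type, mime and scores along. A minimal sketch of that loop with stand-in scanner classes (the class names and the string used as fd are illustrative, not PyFlag's API):

class ZipScanner(object):
    def scan(self, fd, type, mime, **args):
        if mime == "application/zip":
            print("ZipScanner: unpacking %s" % fd)

class IndexScanner(object):
    def scan(self, fd, type, mime, **args):
        print("IndexScanner: indexing %s (%s)" % (fd, type))

def scan_file(fd, factories, scanners_run, type, mime):
    ## Call every factory that has not already been run on this inode.
    for factory in factories:
        if factory.__class__.__name__ in scanners_run:
            continue
        try:
            factory.scan(fd, type=type, mime=mime)
        except Exception as e:   # one failing scanner must not stop the rest
            print(e)

## ZipScanner is skipped because it is already listed in scanners_run.
scan_file("inode 42", [ZipScanner(), IndexScanner()],
          scanners_run=["ZipScanner"], type="Zip archive", mime="application/zip")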