Example #1
    def execute(self):
        if len(self.args) < 2:
            yield self.help()
            return

        ## Try to glob the inode list:
        dbh = DB.DBO(self.environment._CASE)
        dbh.execute("select inode_id from vfs where !isnull(inode_id) and path rlike %r",
                    (fnmatch.translate(self.args[0])))
        ## Prepare batched inserts into the jobs table
        pdbh = DB.DBO()
        pdbh.mass_insert_start('jobs')
        ## This is a cookie used to identify our requests so that we
        ## can check they have been done later.
        cookie = time.time()
        scanners = []
        ## Glob-expand the scanner name patterns given on the command line
        for i in range(1, len(self.args)):
            scanners.extend(fnmatch.filter(Registry.SCANNERS.scanners, self.args[i]))

        scanners = ScannerUtils.fill_in_dependancies(scanners)
        for row in dbh:
            Scanner.scan_inode_distributed(dbh.case, row['inode_id'], scanners,
                                           cookie=cookie)

        self.wait_for_scan(cookie)
        yield "Scanning complete"
Example #2
    def scan(self, fd, scanners, type, mime, cookie, scores=None, **args):
        if 'x86 boot sector' in type:
            try:
                parts = sk.mmls(fd)
            except IOError as e:
                print(e)
                return

            for part in parts:
                ## Make a unique and sensible name for this partition
                name = "%s @ 0x%X" % (part[2], part[0])

                ## Add new maps for each partition
                map = CacheManager.AFF4_MANAGER.create_cache_map(
                    fd.case,
                    "%s/%s" % (fd.urn.parser.query, name))

                map.write_from(fd.urn, SECTOR_SIZE * part[0],
                               SECTOR_SIZE * part[1])

                map.close()

                ## Now we recursively scan each object
                fsfd = FileSystem.DBFS(fd.case)
                new_fd = fsfd.open(inode_id = map.inode_id)
                try:
                    fs = sk.skfs(new_fd)
                    fs.close()

                    ## Let's add a hint
                    Magic.set_magic(fd.case,
                                    inode_id = map.inode_id,
                                    mime = "application/filesystem",
                                    magic = "Filesystem")

                except:
                    ## Probably not a filesystem we can parse - skip the hint
                    pass

                Scanner.scan_inode_distributed(fd.case, map.inode_id,
                                               scanners, cookie)
Example #3
    def parse(self, forward_fd, reverse_fd, scanners):
        while True:
            request = { 'url':'/unknown_request_%s' % forward_fd.inode_id,
                        'method': 'GET' }
            response = {}
            parse = False
            request_body = response_body = None

            ## First parse both request and response
            ## Get the current timestamp of the request
            packet = NetworkScanner.dissect_packet(forward_fd)
            if self.read_request(request, forward_fd):
                try:
                    request['timestamp'] = packet.ts_sec
                except AttributeError:
                    request['timestamp'] = 0

                parse = True
                request_body = self.skip_body(request, forward_fd)
                request_body.dirty = 0

            packet = NetworkScanner.dissect_packet(reverse_fd)
            if self.read_response(response, reverse_fd):
                try:
                    response['timestamp'] = packet.ts_sec
                except AttributeError:
                    response['timestamp'] = 0

                parse = True
                response_body = self.skip_body(response, reverse_fd)

            ## We hang all the parameters on the response object
            ## (i.e. file attachment, post parameters, cookies)
            if response_body and request_body:
                self.process_cookies(request, response_body)
                self.process_post_body(request, request_body, response_body)
                if request_body.size > 0:
                    request_body.close()

            if response_body and response_body.size > 0:
                ## Store information about the object in the http table:
                url = request.get('url','/')

                ## We try to store the url in a normalized form so we
                ## can find it regardless of the various permutations
                ## it can go through
                response_body.insert_to_table("http",
                                              dict(method = request.get('method'),
                                                   url = url,
                                                   status = response.get('HTTP_code'),
                                                   content_type = response.get('content-type'),
                                                   useragent = request.get('user-agent'),
                                                   host = request.get('host'),
                                                   tld = make_tld(request.get('host',''))
                                                   )
                                              )
                response_body.close()
                Scanner.scan_inode_distributed(forward_fd.case, response_body.inode_id,
                                               scanners, self.cookie)

            if not parse: break
Example #4
    def Callback(mode, packet, connection):
        if mode == 'est':
            if 'map' not in connection:
                ## Lookup the urn this packet came from
                urn = urn_dispatcher[packet.pcap_file_id]
                ip = packet.find_type("IP")

                ## We can only get tcp or udp packets here
                try:
                    tcp = packet.find_type("TCP")
                except AttributeError:
                    tcp = packet.find_type("UDP")

                base_urn = "/%s-%s/%s-%s/" % (
                    ip.source_addr, ip.dest_addr,
                    tcp.source, tcp.dest)

                timestamp = pyaff4.XSDDatetime()
                timestamp.set(packet.ts_sec)
                map_stream = CacheManager.AFF4_MANAGER.create_cache_map(
                    case, base_urn + "forward", timestamp = timestamp,
                    target = urn)
                connection['map'] = map_stream

                ## These streams are used to point at the start of
                ## each packet header - this helps us get back to
                ## the packet information for each bit of data
                map_stream_pkt = CacheManager.AFF4_MANAGER.create_cache_map(
                    case, base_urn + "forward.pkt", timestamp = timestamp,
                    target = urn, inherited = map_stream.urn)
                connection['map.pkt'] = map_stream_pkt

                r_map_stream = CacheManager.AFF4_MANAGER.create_cache_map(
                    case, base_urn + "reverse", timestamp = timestamp,
                    target = urn, inherited = map_stream.urn)
                connection['reverse']['map'] = r_map_stream

                ## These streams are used to point at the start of
                ## each packet header - this helps us get back to
                ## the packet information for each bit of data
                r_map_stream_pkt = CacheManager.AFF4_MANAGER.create_cache_map(
                    case, base_urn + "reverse.pkt", timestamp = timestamp,
                    target = urn, inherited = r_map_stream.urn)
                connection['reverse']['map.pkt'] = r_map_stream_pkt

                ## Add to connection table
                map_stream.insert_to_table("connection_details",
                                           dict(reverse = r_map_stream.inode_id,
                                                src_ip = ip.src,
                                                src_port = tcp.source,
                                                dest_ip = ip.dest,
                                                dest_port = tcp.dest,
                                                _ts_sec = "from_unixtime(%s)" % packet.ts_sec,
                                                )
                                           )

        elif mode == 'data':
            try:
                tcp = packet.find_type("TCP")
            except AttributeError:
                tcp = packet.find_type("UDP")

            ## Skip packets that carry no payload
            try:
                length = len(tcp.data)
            except:
                return

            urn = urn_dispatcher[packet.pcap_file_id]

            connection['map'].write_from(urn, packet.offset + tcp.data_offset, length)
            connection['map.pkt'].write_from(urn, packet.offset, length)

        elif mode == 'destroy':
            if connection['map'].size > 0 or connection['reverse']['map'].size > 0:

                map_stream = connection['map']

                r_map_stream = connection['reverse']['map']

                map_stream_pkt = connection['map.pkt']
                Magic.set_magic(case, map_stream_pkt.inode_id,
                                "Packet Map")

                r_map_stream_pkt = connection['reverse']['map.pkt']
                Magic.set_magic(case, r_map_stream_pkt.inode_id,
                                "Packet Map")

                r_map_stream.set_attribute(PYFLAG_REVERSE_STREAM, map_stream.urn)
                map_stream.set_attribute(PYFLAG_REVERSE_STREAM, r_map_stream.urn)

                ## Close all the streams
                r_map_stream_pkt.close()
                map_stream_pkt.close()
                r_map_stream.close()
                map_stream.close()

                ## FIXME - this needs to be done out of process using
                ## the distributed architecture!!!

                ## Open read only versions of these streams for
                ## scanning
                dbfs = FileSystem.DBFS(case)
                map_stream = dbfs.open(inode_id = map_stream.inode_id)
                r_map_stream = dbfs.open(inode_id = r_map_stream.inode_id)

                Scanner.scan_inode_distributed(case, map_stream.inode_id,
                                               scanners, cookie)
                Scanner.scan_inode_distributed(case, r_map_stream.inode_id,
                                               scanners, cookie)
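
All four examples share the same dispatch idiom: each request is tagged with a caller-generated cookie (Example #1 uses time.time()), handed to Scanner.scan_inode_distributed one inode at a time, and the caller can later check that the tagged jobs have completed (Example #1 does this with self.wait_for_scan(cookie)). Below is a minimal sketch of that idiom, not library code: the helper name queue_scans and the pyflag.* import paths are assumptions.

import time

import pyflag.Scanner as Scanner
import pyflag.ScannerUtils as ScannerUtils

def queue_scans(case, inode_ids, scanners):
    """Hypothetical helper: queue distributed scan jobs for a batch of inodes.

    Returns the cookie that tags the batch so the caller can poll for
    completion, the way Example #1 does with self.wait_for_scan(cookie).
    """
    ## Pull in any scanners the requested ones depend on (as in Example #1)
    scanners = ScannerUtils.fill_in_dependancies(scanners)

    ## The cookie is simply a unique value identifying this batch of requests
    cookie = time.time()
    for inode_id in inode_ids:
        Scanner.scan_inode_distributed(case, inode_id, scanners,
                                       cookie=cookie)
    return cookie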