Example #1
    def longls(self,path='/', dirs = None):
        dbh=DB.DBO(self.case)
        if self.isdir(path):
            ## If we are listing a directory, we list the files inside the directory            
            if not path.endswith('/'):
                path=path+'/'

            where = DB.expand(" path=%r " ,path)
        else:
            ## We are listing the exact file specified:
            where = DB.expand(" path=%r and name=%r", (
                FlagFramework.normpath(posixpath.dirname(path)+'/'),
                posixpath.basename(path)))
                   
        mode =''
        if(dirs == 1):
            mode=" and file.mode like 'd%'"
        elif(dirs == 0):
            mode=" and file.mode like 'r%'"

        dbh.execute("select * from file where %s %s", (where, mode))
        result = [dent for dent in dbh]

        for dent in result:
            if dent['inode']:
                dbh.execute("select * from inode where inode = %r", dent['inode'])
                data = dbh.fetch()
                if data:
                    dent.update(data)

        return result
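Most of the examples on this page follow the same pattern: build a SQL fragment with DB.expand() and splice it into dbh.execute(). A minimal usage sketch with made-up values (judging from the examples, %r expands to a quoted and escaped literal while %s splices a pre-built fragment in verbatim):

    ## Hypothetical values for illustration only:
    where = DB.expand(" path=%r and name=%r", ('/etc/', 'passwd'))  # %r -> quoted and escaped
    dbh = DB.DBO('demo_case')                                       # made-up case name
    dbh.execute("select * from file where %s", (where,))            # %s -> spliced in verbatim
    print dbh.fetch()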
Example #2
    def display(self, query, result):
        path = query['path']
        key = query['key']
        result.heading("Registry Key Contents")
        result.text(DB.expand("Key %s/%s:", (path, key)),
                    style='red',
                    font='typewriter')
        dbh = DB.DBO(query['case'])

        def hexdump(query, out):
            """ Show the hexdump for the key """
            dbh.execute(
                "select value from reg where path=%r and reg_key=%r limit 1",
                (path, key))
            row = dbh.fetch()
            if row:
                HexDump(row['value'], out).dump()
            return out

        def strings(query, out):
            """ Draw the strings in the key """
            out.para("not implimented yet")
            return out

        def stats(query, out):
            """ display stats on a key """
            out.para("not implemented yet")
            return out

        result.notebook(names=["HexDump", "Strings", "Statistics"],
                        callbacks=[hexdump, strings, stats],
                        context="display_mode")
Example #3
            def pane_cb(path,tmp):
                query['order']='Filename'

                ## If we are asked to show a file, we will show the
                ## contents of the directory the file is in:
                fsfd = FileSystem.DBFS( query["case"])
                if not fsfd.isdir(path):
                    path=os.path.dirname(path)

                tmp.table(
                    elements = [ InodeIDType(case=query['case']),
                                 FilenameType(basename=True, case=query['case']),
                                 DeletedType(),
                                 IntegerType('File Size','size'),
                                 TimestampType('Last Modified','mtime'),
                                 StringType('Mode','mode', table='file') ],
                    table='inode',
                    where=DB.expand("file.path=%r and file.mode!='d/d'", (path+'/')),
                    case=query['case'],
                    pagesize=10,
                    filter="filter2",
                    )

                target = tmp.defaults.get('open_tree','/')
                tmp.toolbar(text=DB.expand("Scan %s",target),
                            icon="examine.png",
                            link=query_type(family="Load Data", report="ScanFS",
                                            path=target,
                                            case=query['case']), pane='popup'
                            )
Example #4
            def pane_cb(path, result):
                query['order'] = 'Filename'
                if path == '': path = '/'

                ## If we are asked to show a file, we will show the
                ## contents of the directory the file is in:
                fsfd = FileSystem.DBFS(query["case"])
                if not fsfd.isdir(path):
                    path = os.path.dirname(path)

                self.make_table_widget(
                    ['URN', 'Name', 'Size', 'Modified'],
                    query,
                    result,
                    where=DB.expand(
                        "path=%r and (isnull(type) or type!='directory')",
                        (path)),
                )

                result.toolbar(text=DB.expand("Scan %s", path),
                               icon="examine.png",
                               link=query_type(family="Load Data",
                                               report="ScanFS",
                                               path=path,
                                               case=query['case']),
                               pane='popup')
Example #5
    def test01RunScanners(self):
        """ Running Logical Index Scanner """
        ## Make sure the word secret is in there.
        pdbh = DB.DBO()
        pdbh.execute("select * from dictionary where word='secret' limit 1")
        row = pdbh.fetch()
        if not row:
            pdbh.insert('dictionary', **{'word':'secret', 'class':'English', 'type':'word'})
        
        env = pyflagsh.environment(case=self.test_case)
        pyflagsh.shell_execv(env=env, command="scan",
                             argv=["*",'IndexScan'])

        dbh = DB.DBO(self.test_case)
        dbh2 = DB.DBO(self.test_case)
        fsfd = DBFS(self.test_case)
        dbh.execute("select inode_id, word,offset,length from LogicalIndexOffsets join %s.dictionary on LogicalIndexOffsets.word_id=%s.dictionary.id where word='secret'", (config.FLAGDB,config.FLAGDB))
        count = 0
        for row in dbh:
            count += 1
            path, inode, inode_id = fsfd.lookup(inode_id = row['inode_id'])
            fd = fsfd.open(inode=inode)
            fd.overread = True
            fd.slack = True
            fd.seek(row['offset'])
            data = fd.read(row['length'])
            print "Looking for %s: Found in %s at offset %s length %s %r" % (
                row['word'], inode, row['offset'], row['length'],data)
            self.assertEqual(data.lower(), row['word'].lower())

        ## Did we find all the secrets?
        self.assertEqual(count,2)
Example #6
    def startup(self, dbh, case):
        print "Checking schema for compliance"
        ## Make sure that the schema conforms
        dbh.execute("select value from meta where property='flag_db'")
        DB.check_column_in_table(None, 'sql_cache', 'status',
                                 'enum("progress","dirty","cached")')
        for row in dbh:
            try:
                DB.check_column_in_table(row['value'], 'sql_cache', 'status',
                                         'enum("progress","dirty","cached")')
            except:
                continue

        ## Check the schema:
        dbh.check_index("jobs", "state")
        DB.check_column_in_table(None, 'jobs', 'priority', 'int default 10')
        DB.check_column_in_table(None, 'jobs', 'pid', 'int default 0')
        DB.check_column_in_table(
            None, 'jobs', 'when_valid',
            'TIMESTAMP ON UPDATE CURRENT_TIMESTAMP NOT NULL')

        ## Check for the high_priority_jobs table (it's basically
        ## another jobs table for high priority jobs - so workers
        ## first check this table before the main jobs table).
        try:
            dbh.execute("select * from high_priority_jobs limit 1")
        except:
            dbh.execute(
                "create table if not exists high_priority_jobs like jobs")

        ## Schedule the first periodic task:
        task = Periodic()
        task.run()
Example #7
    def analyse(self, query):
        context = self.get_context(query)

        word_id = Indexing.insert_dictionary_word(query['word'], query['type'])
        pdbh = DB.DBO()
        sql = DB.expand("select inode.inode_id as `inode_id` "\
                        "%s where (%s) and (%s)", (context.get('tables',''),
                                                   context.get('inode_sql','1'),
                                                   context.get('where','1')))

        Indexing.schedule_inode_index_sql(query['case'],
                                          sql,
                                          word_id,
                                          query['cookie'],
                                          unique=True)

        ## Now wait here until everyone is finished:
        while 1:
            pdbh.execute("select count(*) as c from jobs where cookie=%r",
                         query['cookie'])
            row = pdbh.fetch()
            self.rows_left = row['c']
            if row['c'] == 0: break

            time.sleep(1)

        return 1
Example #8
    def startup(self):
        print "Checking schema for compliance"
        ## Make sure that the schema conforms
        dbh = DB.DBO()
        dbh.execute("select value from meta where property='flag_db'")
        DB.check_column_in_table(None, 'sql_cache', 'status',
                                 'enum("progress","dirty","cached")')
        for row in dbh:
            try:
                DB.check_column_in_table(row['value'], 'sql_cache', 'status',
                                         'enum("progress","dirty","cached")')
            except: continue

        ## Check the schema:
        dbh.check_index("jobs", "state")
        DB.check_column_in_table(None, 'jobs', 'priority', 'int default 10')
        DB.check_column_in_table(None, 'jobs', 'pid', 'int default 0')
        DB.check_column_in_table(None, 'jobs', 'when_valid',
                                 'TIMESTAMP ON UPDATE CURRENT_TIMESTAMP NOT NULL')

        ## Check for the high_priority_jobs table (it's basically
        ## another jobs table for high priority jobs - so workers
        ## first check this table before the main jobs table).
        try:
            dbh.execute("select * from high_priority_jobs limit 1")
        except:
            dbh.execute("create table if not exists high_priority_jobs like jobs")
        
        ## Schedule the first periodic task:
        task = Periodic()
        task.run()
Example #9
    def longls(self, path='/', dirs=None):
        dbh = DB.DBO(self.case)
        if self.isdir(path):
            ## If we are listing a directory, we list the files inside the directory
            if not path.endswith('/'):
                path = path + '/'

            where = DB.expand(" path=%r ", path)
        else:
            ## We are listing the exact file specified:
            where = DB.expand(
                " path=%r and name=%r",
                (FlagFramework.normpath(posixpath.dirname(path) + '/'),
                 posixpath.basename(path)))

        mode = ''
        if (dirs == 1):
            mode = " and file.mode like 'd%'"
        elif (dirs == 0):
            mode = " and file.mode like 'r%'"

        dbh.execute("select * from file where %s %s", (where, mode))
        result = [dent for dent in dbh]

        for dent in result:
            if dent['inode']:
                dbh.execute("select * from inode where inode = %r",
                            dent['inode'])
                data = dbh.fetch()
                if data:
                    dent.update(data)

        return result
Example #10
            def pane_cb(path, tmp):
                query['order'] = 'Filename'

                ## If we are asked to show a file, we will show the
                ## contents of the directory the file is in:
                fsfd = FileSystem.DBFS(query["case"])
                if not fsfd.isdir(path):
                    path = os.path.dirname(path)

                tmp.table(
                    elements=[
                        InodeIDType(case=query['case']),
                        FilenameType(basename=True, case=query['case']),
                        DeletedType(),
                        IntegerType('File Size', 'size'),
                        TimestampType('Last Modified', 'mtime'),
                        StringType('Mode', 'mode', table='file')
                    ],
                    table='inode',
                    where=DB.expand("file.path=%r and file.mode!='d/d'",
                                    (path + '/')),
                    case=query['case'],
                    pagesize=10,
                    filter="filter2",
                )

                target = tmp.defaults.get('open_tree', '/')
                tmp.toolbar(text=DB.expand("Scan %s", target),
                            icon="examine.png",
                            link=query_type(family="Load Data",
                                            report="ScanFS",
                                            path=target,
                                            case=query['case']),
                            pane='popup')
Example #11
    def execute(self):
        if len(self.args) < 2:
            yield self.help()
            return

        ## Try to glob the inode list:
        dbh = DB.DBO(self.environment._CASE)
        dbh.execute(
            "select inode_id from vfs where !isnull(inode_id) and path rlike %r",
            (fnmatch.translate(self.args[0])))
        pdbh = DB.DBO()
        pdbh.mass_insert_start('jobs')
        ## This is a cookie used to identify our requests so that we
        ## can check they have been done later.
        cookie = time.time()
        scanners = []
        for i in range(1, len(self.args)):
            scanners.extend(
                fnmatch.filter(Registry.SCANNERS.scanners, self.args[i]))

        scanners = ScannerUtils.fill_in_dependancies(scanners)
        for row in dbh:
            Scanner.scan_inode_distributed(dbh.case,
                                           row['inode_id'],
                                           scanners,
                                           cookie=cookie)

        self.wait_for_scan(cookie)
        yield "Scanning complete"
Example #12
def drop_preset(preset):
    """ Drops the specified preset name """
    pyflaglog.log(pyflaglog.DEBUG, "Droppping preset %s" % preset)
    for case, table in find_tables(preset):
        drop_table(case, table)

    dbh = DB.DBO()
    if preset:
        dbh.delete("log_presets", where=DB.expand("name = %r", preset))
Example #13
    def operator_hit(self, column, operator, arg):
        """ Search for a hit in the dictionary """
        ## Try to work out if we need to reindex:
        reindex = False
        dbh = DB.DBO()
        dbh.execute("select id from dictionary where word = %r limit 1", arg)
        row = dbh.fetch()

        if self.ui:
            ## If the word is not in the dictionary, we definitely want to reindex
            if not row:
                count, total, tables, sql = self.outstanding_inodes()
                message = "Word %s is not in the dictionary" % arg

            ## If the word is in the dictionary we want to know how many
            ## inodes are outdated
            else:
                count, total, tables, sql = self.outstanding_inodes(
                    word_id=row['id'])
                message = "There are some inodes which are not up to date"

            ## Any inodes to process?
            if count > 0:
                reindex = True

        ## We do not need to reindex - just do it
        if not reindex:
            return DB.expand(
                "(%s = %s)",
                (self.escape_column_name(self.column), row.get('id', 0)))

        ## Allow the user to reindex the currently selected set of
        ## inodes with a new dictionary based on the new word
        self.ui.heading(message)
        self.ui.para(
            "This will affect %s inodes and require rescanning %s bytes" %
            (count, total))

        ## Make up the link for the user:
        context = FlagFramework.STORE.put(
            dict(tables=tables,
                 inode_sql=sql,
                 previous_query=self.ui.defaults,
                 target='parent_pane',
                 where=self.table_where_clause))

        link = query_type(report="Add Word",
                          family="Keyword Indexing",
                          case=self.case,
                          context=context,
                          word=arg)

        self.ui.link("Click here to scan these inodes", link, pane='self')

        ## Ok - Show the error to the user:
        raise self.ui
Example #14
    def execute(self):
        try:
            dbh = DB.DBO()
        except:
            dbh = DB.DBO('mysql')
            dbh.execute("create database `%s`" % config.FLAGDB)
            dbh = DB.DBO()

        FlagFramework.post_event("init_default_db", None)
        yield "Done"
Example #15
    def startup(self):
        ## These check that the schema is up to date
        DB.convert_to_unicode(None, 'dictionary')
        dbh = DB.DBO()
        dbh.execute("desc dictionary")
        for row in dbh:
            if row['Field'] == 'word':
                if 'varbinary' not in row['Type']:
                    dbh.execute("alter table dictionary modify word VARBINARY(250)")
                    break
Example #16
    def execute(self):
        for iosource in self.args:
            dbh = DB.DBO(self.environment._CASE)
            dbh2 = dbh.clone()
            dbh.delete('inode', where=DB.expand("inode like 'I%s|%%'", iosource))
            dbh.execute("select * from filesystems where iosource = %r", iosource)
            for row in dbh:
                dbh2.delete('file', where=DB.expand("path like '%s%%'", iosource))

            dbh.delete("iosources", where=DB.expand("name=%r", iosource))
            yield "Removed IOSource %s" % iosource
Example #17
    def __str__(self):
        postfix = ''
        ## Some tags are never allowed to be outputted
        if self.name not in self.allowable_tags:
            if self.name in self.forbidden_tag:
                return ''
            #print "Rejected tag %s" % self.name
            return self.innerHTML()

        if self.name == 'head':
            self.children = [self.header,] + self.children
        elif self.name =='body':
            self.children = [self.body_extra, ] + self.children

        ## Frames without src are filtered because IE Whinges:
        if self.name == 'iframe' and 'src' not in self.attributes:
            return ''

        attributes = "".join([" %s='%s'" % (k, v) for k, v
                              in self.attributes.items() if k in
                              self.allowable_attributes])

        if 'style' in self.attributes:
            attributes += ' style=%r' % self.css_filter(self.attributes['style'] or '')

        if 'http-equiv' in self.attributes:
            if self.attributes['http-equiv'].lower() == "content-type":
                ## PF _always_ outputs in utf8
                attributes += ' http-equiv = "Content-Type" content="text/html; charset=UTF-8"'
                
        if 'src' in self.attributes:
            attributes += ' src=%s' % self.resolve_reference(self.attributes['src'])

        try:
            if 'href' in self.attributes:
                if self.name == 'link':
                    attributes += " href=%s" % self.resolve_reference(self.attributes['href'], 'text/css')
                else:
                    attributes += DB.expand(' href="javascript: alert(%r)"',
                                            iri_to_uri(DB.expand("%s",self.attributes['href'])[:100]))
                    postfix = self.mark_link(self.attributes['href'])
        except: pass
        
        ## CSS needs to be filtered extra well
        if self.name == 'style':
            return expand("<style %s>%s</style>" , (attributes,
                                             self.css_filter(self.innerHTML())))
        
        if self.type == 'selfclose':
            return expand("<%s%s/>%s" , (self.name, attributes, postfix))
        else:
            return expand("<%s%s>%s</%s>%s", (self.name, attributes,
                                            self.innerHTML(),
                                            self.name,postfix))
Example #18
class Index(Farm.Task):
    """ A task to index an inode with the dictionary """
    def run(self, case, inode_id, *args):
        global INDEX
        if not INDEX: reindex()

        try:
            desired_version = int(args[0])
        except:
            desired_version = INDEX_VERSION

        ## Did they want a detailed index or a unique index?
        unique = desired_version < 2**30

        ## In unique mode we want to generate one hit per scan job per
        ## word
        if unique:
            INDEX.clear_set()

        pyflaglog.log(
            pyflaglog.VERBOSE_DEBUG,
            "Indexing inode_id %s (version %s)" % (inode_id, desired_version))
        fsfd = FileSystem.DBFS(case)
        fd = fsfd.open(inode_id=inode_id)
        buff_offset = 0
        dbh = DB.DBO(case)

        ## Clear old hits:
        dbh.check_index("LogicalIndexOffsets", "inode_id")
        dbh.delete("LogicalIndexOffsets",
                   where=DB.expand("inode_id = %r", inode_id))

        ## Get ready for scan
        dbh.mass_insert_start("LogicalIndexOffsets")

        while 1:
            data = fd.read(1024 * 1024)
            if len(data) == 0: break

            for offset, matches in INDEX.index_buffer(data, unique=unique):
                for id, length in matches:
                    dbh.mass_insert(inode_id=inode_id,
                                    word_id=id,
                                    offset=offset + buff_offset,
                                    length=length)

            buff_offset += len(data)

        dbh.mass_insert_commit()

        ## Update the version
        dbh.update("inode",
                   where=DB.expand('inode_id = %r', inode_id),
                   version=desired_version)
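A hypothetical direct invocation of the task above, only to show the calling convention implied by run(self, case, inode_id, *args); in normal operation workers pick the job up from the jobs table instead:

    ## Case name and inode_id are made up for illustration.
    task = Index()
    task.run('demo_case', 42, 1)            # version <  2**30 -> unique index
    task.run('demo_case', 42, 2 ** 30 + 1)  # version >= 2**30 -> detailed index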
Example #19
    def startup(self):
        ## Check to see if the nsrl db exists
        try:
            dbh = DB.DBO(config.HASHDB)
            dbh.execute("select * from meta limit 1")
            dbh.fetch()
        except Exception, e:
            try:
                dbh = DB.DBO()
                self.init_default_db(dbh, None)
            except:
                pass
Example #20
    def form(self, query, result):
        result.textfield("Inode ID", 'inode_id')
        dbh = DB.DBO(query['case'])
        try:
            result.selector(
                "Table Name",
                'table_name',
                DB.expand(
                    'select name as `key`,name as value from sqlite where inode_id=%r',
                    query['inode_id']),
                case=query['case'])
        except KeyError, e:
            pass
Example #21
    def run(self, case, inode_id, *args):
        global INDEX
        if not INDEX: reindex()

        try:
            desired_version = int(args[0])
        except:
            desired_version = INDEX_VERSION

        ## Did they want a detailed index or a unique index?
        unique = desired_version < 2**30
        
        ## In unique mode we want to generate one hit per scan job per
        ## word
        if unique:
            INDEX.clear_set()

        pyflaglog.log(pyflaglog.VERBOSE_DEBUG, "Indexing inode_id %s (version %s)" % (inode_id, desired_version))
        fsfd = FileSystem.DBFS(case)
        fd = fsfd.open(inode_id=inode_id)
        buff_offset = 0
        dbh = DB.DBO(case)

        ## Clear old hits:
        dbh.check_index("LogicalIndexOffsets", "inode_id")
        dbh.delete("LogicalIndexOffsets", where=DB.expand("inode_id = %r",
                                                          inode_id))

        ## Get ready for scan
        dbh.mass_insert_start("LogicalIndexOffsets")

        while 1:
            data = fd.read(1024*1024)
            if len(data)==0: break

            for offset, matches in INDEX.index_buffer(data, unique = unique):
                for id, length in matches:
                    dbh.mass_insert(
                        inode_id = inode_id,
                        word_id = id,
                        offset = offset + buff_offset,
                        length = length)

            buff_offset += len(data)

        dbh.mass_insert_commit()
        
        ## Update the version
        dbh.update("inode",
                   where = DB.expand('inode_id = %r', inode_id),
                   version = desired_version)
Example #22
    def execute(self):
        for iosource in self.args:
            dbh = DB.DBO(self.environment._CASE)
            dbh2 = dbh.clone()
            dbh.delete('inode',
                       where=DB.expand("inode like 'I%s|%%'", iosource))
            dbh.execute("select * from filesystems where iosource = %r",
                        iosource)
            for row in dbh:
                dbh2.delete('file',
                            where=DB.expand("path like '%s%%'", iosource))

            dbh.delete("iosources", where=DB.expand("name=%r", iosource))
            yield "Removed IOSource %s" % iosource
Example #23
    def execute(self):
        scanners = []

        if len(self.args) < 2:
            yield self.help()
            return
        elif type(self.args[1]) == types.ListType:
            scanners = self.args[1]
        else:
            for i in range(1, len(self.args)):
                scanners.extend(
                    fnmatch.filter(Registry.SCANNERS.scanners, self.args[i]))

        ## Assume that people always want recursive - I think this makes sense
        path = self.args[0]
        if not path.endswith("*"):
            path = path + "*"

        ## FIXME For massive images this should be broken up, as in the old GUI method
        dbh = DB.DBO(self.environment._CASE)
        dbh.execute(
            "select inode.inode from inode join file on file.inode = inode.inode where file.path rlike %r",
            fnmatch.translate(path))

        pdbh = DB.DBO()
        pdbh.mass_insert_start('jobs')

        ## This is a cookie used to identify our requests so that we
        ## can check they have been done later.
        cookie = int(time.time())

        for row in dbh:
            inode = row['inode']

            pdbh.mass_insert(
                command='Scan',
                arg1=self.environment._CASE,
                arg2=row['inode'],
                arg3=','.join(scanners),
                cookie=cookie,
            )  #

        pdbh.mass_insert_commit()

        ## Wait for the scanners to finish:
        self.wait_for_scan(cookie)

        yield "Scanning complete"
Example #24
        def pane_cb(path, result):
            if not path.endswith('/'): path=path+'/'
                
            result.heading("Path is %s" % path)
            case = query['case']
            dbh = DB.DBO(case)
            fsfd = Registry.FILESYSTEMS.dispatch(query['fstype'])(case)
            ## Try to see if the directory is already loaded:
            dbh.execute("select * from file where path=%r and name=''", path)
            if not dbh.fetch():
                fsfd.load(mount_point = query['mount_point'], iosource_name= query['iosource'],
                          directory = path)

            ## Now display the table
            result.table(
                elements = [ InodeIDType(case=query['case']),
                             FilenameType(case=query['case']),
                             DeletedType(),
                             IntegerType(name='File Size',column='size'),
                             TimestampType(name = 'Last Modified',column = 'mtime'),
                             ],
                table='inode',
                where=DB.expand("file.path=%r and file.mode!='d/d'",(path)),
                case = query['case'],
                pagesize=10,
                )
Example #25
def start_workers():
    if config.FLUSH:
        dbh = DB.DBO()
        pyflaglog.log(pyflaglog.WARNING,
                      "Deleting job queue and killing workers")
        #dbh.execute("select max(id) as max from jobs")
        #row = dbh.fetch()
        #broadcast_id = row['max'] or 0
        dbh.execute("delete from jobs")
        #dbh.insert("jobs", _fast=True,
        #           command='Exit', state='broadcast',
        #           )

    if config.WORKERS == 0:
        return

    for i in range(config.WORKERS):
        try:
            r, w = os.pipe()
            pid = os.fork()
        except AttributeError:
            ## When running under windows we can not fork...  We must
            ## launch this module by itself instead - this is very
            ## suboptimal because we will be performing all startup
            ## code (registry parsing etc) for each worker. If you want
            ## performance you would not choose windows anyway,
            ## though. The following is windows specific:
            ## First find the name of the interpreter:
            import ctypes, sys

            name = ctypes.create_string_buffer(255)
            length = ctypes.windll.kernel32.GetModuleFileNameA(None, name, 255)
            interpreter = name.raw[:length]

            ## This encloses at least the file path in quotes just in
            ## case we are installed to somewhere with spaces - It
            ## seems that on windows argvs are not processed correctly
            ## because the below array ends up as a single command line
            ## string WTF? This is very dodgy...
            os.spawnv(
                os.P_NOWAIT, interpreter,
                ['"%s"' % interpreter, '"%s"' % __file__] + sys.argv[1:])
            pid = 1

        ## Parents:
        if pid:
            os.close(r)
            children.append(pid)
        else:
            os.close(w)
            nanny(worker_run, keepalive=r)

    atexit.register(terminate_children)

    ## The parent now calls the startup method on each of the events:
    for event in Registry.EVENT_HANDLERS.classes:
        try:
            event().startup()
        except Exception, e:
            pyflaglog.log(pyflaglog.WARNING, "Error: %s" % e)
Example #26
    def test01Mounted(self):
        """ Test that mounted images work """
        ## We should be able to see test_file in the file table:
        dbh = DB.DBO(self.test_case)
        dbh.execute("select * from file where name = %r", self.test_file)
        row = dbh.fetch()
        self.assert_(row, "Unable to find the file in the VFS???")
Example #27
    def explain(self, query, result):
        name = self.fd.name
        ## Trim the upload directory if present
        if name.startswith(config.UPLOADDIR):
            name = name[len(config.UPLOADDIR):]

        result.row("Filename", DB.expand("%s", name), **{'class': 'explainrow'})
Example #28
    def test01CreatePreset(self):
        """ Test that EventLog Presets can be created """
        dbh = DB.DBO(self.test_case)
        log = EventLogLog(case=self.test_case)
        query = query_type(datafile=self.test_file, log_preset=self.log_preset)
        log.parse(query)
        log.store(self.log_preset)
Example #29
    def pane_cb(path, result):
        tlds = path.split("/")
        try:
            result.defaults.set('filter', DB.expand('TLD = %r and "Content Type"  contains html', tlds[1]))
            Reports.CaseTableReports.display(self, query, result)
        except IndexError:
            result.para("Click on a TLD to view all URLs from that TLD")
Example #30
    def tree_cb(path):
        if path == '/':
            dbh = DB.DBO(query['case'])
            dbh.cached_execute("select tld from http where content_type like 'text/html%%' group by tld order by tld")
            for row in dbh:
                tld = row['tld']
                yield (tld, tld, 'leaf')
Example #31
    def execute(self):
        if len(self.args) < 2:
            yield self.help()
            return

        ## Try to glob the inode list:
        dbh = DB.DBO(self.environment._CASE)
        dbh.execute("select inode from inode where inode rlike %r", DB.glob2re(self.args[0]))
        pdbh = DB.DBO()
        pdbh.mass_insert_start("jobs")
        ## This is a cookie used to identify our requests so that we
        ## can check they have been done later.
        cookie = int(time.time())
        scanners = []
        for i in range(1, len(self.args)):
            scanners.extend(fnmatch.filter(Registry.SCANNERS.scanners, self.args[i]))

        scanners = ScannerUtils.fill_in_dependancies(scanners)

        for row in dbh:
            inode = row["inode"]
            pdbh.mass_insert(
                command="Scan", arg1=self.environment._CASE, arg2=row["inode"], arg3=",".join(scanners), cookie=cookie
            )

        pdbh.mass_insert_commit()

        ## Wait for the scanners to finish:
        if self.environment.interactive:
            self.wait_for_scan(cookie)

        yield "Scanning complete"
Example #32
    def run_analysis(self, report, query):
        """ Run the analysis """
        print query

        try:
            canonical_query = self.flag.canonicalise(query)
            thread_name = threading.currentThread().getName()
            print "Current thread is %s" % thread_name
            try:
                report.analyse(query)
                print "analysed report"
            except Exception, e:
                gtk.gdk.threads_enter()
                self.error_popup(e)
                gtk.gdk.threads_leave()
                return

            dbh = DB.DBO(query['case'])
            dbh.execute("insert into meta set property=%r,value=%r",
                        ('report_executed', canonical_query))
            ## This thread must never touch GTK stuff or a deadlock
            ## will occur. We must signal the other threads that we
            ## have finished analysis.
            del self.running_threads[query.__str__()]
            return
Example #33
    def __init__(self, case, fd, inode):
        File.__init__(self, case, fd, inode)
        # strategy: must determine basepath from parent, get our path
        # from db and then return the file:

        ## Note this _must_ work because we can only ever be called on
        ## a mounted iosource - it is an error otherwise:
        basepath = fd.io.directory

        self.case = case
        dbh = DB.DBO(case)
        dbh.check_index("file", "inode")
        dbh.execute("select path,name from file where inode=%r limit 1", (inode))
        row = dbh.fetch()

        path = row["path"]
        mount_point = fd.io.mount_point
        ## Prune the path down to the mount point:
        if path[: len(mount_point)] != mount_point:
            raise RuntimeError(DB.expand("Something went wrong - %s should be mounted on %s", (path, mount_point)))

        path = path[len(mount_point) :]
        path = basepath + "/" + path + "/" + row["name"]
        if not path.startswith(posixpath.normpath(config.UPLOADDIR)):
            path = FlagFramework.sane_join(config.UPLOADDIR, path)

        if os.path.isdir(path):
            self.fd = StringIO.StringIO("")
        else:
            self.fd = open(path, "r")

        s = os.stat(path)
        self.size = s.st_size
Example #34
def drop_table(case, name):
    """ Drops the log table tablename """
    if not name: return
    
    dbh = DB.DBO(case)
    pyflaglog.log(pyflaglog.DEBUG, "Dropping log table %s in case %s" % (name, case))

    dbh.execute("select * from log_tables where table_name = %r limit 1" , name)
    row = dbh.fetch()

    ## Table not found
    if not row:
        return
    
    preset = row['preset']

    ## Get the driver for this table:
    log = load_preset(case, preset)
    log.drop(name)
    
    ## Ask the driver to remove its table:
    dbh.delete("log_tables",
               where= DB.expand("table_name = %r ", name));

    ## Make sure that the reports get all reset
    FlagFramework.reset_all(family='Load Data', report="Load Preset Log File",
                                       table = name, case=case)
Example #35
    def execute(self):
        if len(self.args) < 2:
            yield self.help()
            return

        pdbh = DB.DBO()
        pdbh.mass_insert_start('jobs')
        cookie = int(time.time())
        scanners = []
        for i in range(1, len(self.args)):
            scanners.extend(
                fnmatch.filter(Registry.SCANNERS.scanners, self.args[i]))

        for path in self.glob_files(self.args[:1]):
            path, inode, inode_id = self.environment._FS.lookup(path=path)
            ## This is a cookie used to identify our requests so that we
            ## can check they have been done later.

            pdbh.mass_insert(
                command='Scan',
                arg1=self.environment._CASE,
                arg2=inode,
                arg3=','.join(scanners),
                cookie=cookie,
            )

        pdbh.mass_insert_commit()

        ## Wait for the scanners to finish:
        if 1 or self.environment.interactive:
            self.wait_for_scan(cookie)

        yield "Scanning complete"
Example #36
    def casetable(self, field, query):
        """ Checks that field is a table within the case given as query[case]. This is not a fatal error; we just return False if not. """
        dbh = DB.DBO(query['case'])
        try:
            dbh.execute("select * from `%s_log` limit 1", query[field])
        except DB.DBError:
            return False
Example #37
def glob_sql(pattern):
    path,name = posixpath.split(pattern)

    if globbing_re.search(path):
        path_sql = "path rlike '^%s/?$'" % translate(path)
    else:
        ## Ensure that path has a / at the end:
        if not path.endswith("/"): path=path+'/'
        
        path_sql = "path='%s'" % path

    if globbing_re.search(name):
        name_sql = "name rlike '^%s$'" % translate(name)
    else:
        name_sql = DB.expand("name=%r", name)
    
    if name and path:
        sql = "select concat(path,name) as path from file where %s and %s group by file.path,file.name" % (path_sql,name_sql)
    elif name:
        sql = "select concat(path,name) as path from file where %s group by file.path,file.name" % name_sql
    elif path:
        #sql = "%s and name=''" % path_sql
        sql = "select path from file where %s group by file.path" % path_sql
    else:
        ## Don't return anything for an empty glob
        sql = "select * from file where 1=0"

    return sql
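A sketch of what glob_sql() returns for two hypothetical patterns, assuming translate() turns a shell glob into a regular expression (in the style of fnmatch.translate) and globbing_re matches glob metacharacters:

    glob_sql("/etc/passwd")
    # -> "select concat(path,name) as path from file where path='/etc/' and name='passwd' group by file.path,file.name"
    glob_sql("/etc/pass*")
    # -> same shape, but the name clause becomes a "name rlike ..." regex test built by translate()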
Example #38
def get_factories(case, scanners):
    """ Scanner factories are obtained from the Store or created as
    required. Scanners is a list in the form case:scanner
    """
    ## Ensure dependencies are satisfied
    scanners = ScannerUtils.fill_in_dependancies(scanners)

    ## First prepare the required factories:
    result = []
    for scanner in scanners:
        key = DB.expand("%s:%s", (case, scanner))
        try:
            f = factories.get(key)
        except KeyError:
            try:
                cls = Registry.SCANNERS.dispatch(scanner)
            except:
                # pyflaglog.log(pyflaglog.WARNING, "Unable to find scanner for %s", scanner)
                continue

            # Instantiate it:
            import pyflag.FileSystem as FileSystem

            f = cls(FileSystem.DBFS(case))

            ## Initialise it:
            f.prepare()

            ## Store it:
            factories.put(f, key=key)

        result.append(f)

    return result
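A hypothetical call, showing only the shape of the result: one prepared scanner factory per scanner name that could be resolved (unresolvable names are skipped silently by the except branch above):

    for factory in get_factories('demo_case', ['IndexScan']):
        print factory.__class__.__name__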
Example #39
    def execute(self):
        if len(self.args) < 2:
            yield self.help()
            return
        pdbh = DB.DBO()
        pdbh.mass_insert_start('jobs')
        cookie = int(time.time())
        scanners = []
        for i in range(1, len(self.args)):
            scanners.extend(
                fnmatch.filter(Registry.SCANNERS.scanners, self.args[i]))

        for path in self.glob_files(self.args[:1]):
            try:
                path, inode, inode_id = self.environment._FS.lookup(path=path)
            except Exception, e:
                continue
            ## This is a cookie used to identify our requests so that we
            ## can check they have been done later.

            pdbh.mass_insert(
                command='Scan',
                arg1=self.environment._CASE,
                arg2=inode_id,
                arg3=','.join(scanners),
                cookie=cookie,
            )
Example #40
    def display(self,query,result):
        path=query['path']
        key=query['key']
        result.heading("Registry Key Contents")
        result.text(DB.expand("Key %s/%s:", (path,key)),style='red',font='typewriter')
        dbh=DB.DBO(query['case'])

        def hexdump(query,out):
            """ Show the hexdump for the key """
            dbh.execute("select value from reg where path=%r and reg_key=%r limit 1",(path,key))
            row=dbh.fetch()
            if row:
                HexDump(row['value'],out).dump()
            return out

        def strings(query,out):
            """ Draw the strings in the key """
            out.para("not implimented yet")
            return out

        def stats(query,out):
            """ display stats on a key """
            out.para("not implemented yet")
            return out

        result.notebook(
            names=["HexDump","Strings","Statistics"],
            callbacks=[hexdump,strings,stats],
            context="display_mode"
            )
Example #41
    def form(self, query, result):
        result.textfield("Inode ID", 'inode_id')
        dbh = DB.DBO(query['case'])
        try:
            result.selector("Table Name", 'table_name',
                            DB.expand('select name as `key`,name as value from sqlite where inode_id=%r', query['inode_id']),
                            case=query['case'])
        except KeyError, e:
            pass
Example #42
    def wait_for_scan(self, cookie):
        """ Waits for scanners to complete """
        import pyflag.Farm as Farm

        while Farm.get_cookie_reference(cookie) > 0:
            time.sleep(0.5)

        return
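        ## NOTE: everything below this return is unreachable; it is the older
        ## jobs-table polling implementation, apparently left in for reference.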

        print "Waiting for cookie %s" % cookie
        pdbh = DB.DBO()

        ## Often this process owns a worker as well. In that case we can wake it up:
        import pyflag.Farm as Farm

        #Farm.wake_workers()

        ## Wait until there are no more jobs left.
        while 1:
            pdbh.execute("select * from jobs where cookie=%r limit 1",
                         (cookie))
            row = pdbh.fetch()
            if not row: break

            time.sleep(1)
Example #43
    def explain(self, query, result):
        name = self.fd.name
        ## Trim the upload directory if present
        if name.startswith(config.UPLOADDIR):
            name = name[len(config.UPLOADDIR) :]

        result.row("Filename", DB.expand("%s", name), **{"class": "explainrow"})
Example #44
File: UI.py Project: arkem/pyflag
    def _make_sql(self, query, ordering=True):
        """ Calculates the SQL for the table widget based on the query """
        ## Calculate the SQL
        query_str = "select "
        try:
            self.order = int(query.get('order',self.order))
        except: self.order=0

        try:
            self.direction = int(query.get('direction',self.direction))
        except: self.direction = 0

        total_elements = self.elements + self.filter_elements

        ## Fixup the elements - if no table specified use the global
        ## table - this is just a shortcut which allows us to be lazy:
        for e in total_elements:
            if not e.table: e.table = self.table
            if not e.case: e.case = self.case

        ## The columns and their aliases:
        query_str += ",".join([ e.select() + " as `" + e.name + "`" for e in self.elements ])
        
        query_str += _make_join_clause(total_elements)

        if self.where:
            w = ["(%s)" % self.where,]
        else:
            w = []
            
        for e in total_elements:
            tmp = e.where()
            if tmp: w.append(tmp)

        ## Is there a filter condition?
        if self.filter_str:
            filter_str = self.filter_str.replace('\r\n', ' ').replace('\n', ' ')
            filter_str = parser.parse_to_sql(filter_str, total_elements, ui=None)
            if not filter_str: filter_str=1
            
        else: filter_str = 1

        query_str += "where (%s and (%s)) " % (" and ".join(w), filter_str)

        if self.groupby:
            query_str += "group by %s " % DB.escape_column_name(self.groupby)
        elif self._groupby:
            query_str += "group by %s " % self.groupby
            
        ## Now calculate the order by:
        if ordering:
            try:
                query_str += "order by %s " % self.elements[self.order].order_by()
                if self.direction == 1:
                    query_str += "asc"
                else: query_str += "desc"
            except IndexError:
                pass

        return query_str
Example #45
def render_html(self, inode_id, table_renderer):
    dbh = DB.DBO()
    case = table_renderer.case
    dbh.insert("jobs",
               command="Export",
               arg1=case,
               arg2=inode_id,
               cookie=int(time.time()))

    filename, content_type, fd = table_renderer.make_archive_filename(inode_id)
    result = "<a href='%s'>%s</a><br />" % (filename, fd.inode)

    try:
        filename = "inodes/%s_summary.html" % inode_id
        fd.html_export
        result += "<a href='%s'><img src=images/browse.png /></a>" % (
            filename, )
    except AttributeError:
        pass

    #if table_renderer.explain_inodes:
    ## Add a link to the explaination page:
    filename = "inodes/%s_explain.html" % inode_id
    result += "<a href='%s'><img src=images/question.png /></a>" % (filename, )

    ## Check if there are annotations for this
    dbh = DB.DBO(case)
    dbh.execute("select * from annotate where inode_id=%r", inode_id)
    for row in dbh:
        result += "<br>%s" % row['note']

    return result
Example #46
    def finish(self):
        self.dbh.mass_insert_commit()
        ## Update the version
        self.dbh.update("inode",
                        where=DB.expand('inode_id = %r', self.inode_id),
                        version=INDEX_VERSION)

        del self.dbh
Example #47
def drop_preset(preset):
    """ Drops the specified preset name """
    pyflaglog.log(pyflaglog.DEBUG, "Droppping preset %s" % preset)
    for case, table in find_tables(preset):
        drop_table(case, table)

    dbh = DB.DBO()
    if preset:
        dbh.delete("log_presets", where=DB.expand("name = %r",preset))
Example #48
    def multiple_inode_reset(self, inode_glob):
        """ This method modifies the database to reset the scanners. It takes an argument which is a glob of the inodes to be reset. It does this for performance reasons. Each scanner is expected to clean up after itself. """

        ## Here we do the default (clear scanner_cache field) and hope that inherited classes either deal with it or call us
        sql = DB.glob2re(inode_glob)
        db = DB.DBO(self.case)
        db.execute(
            "update inode set scanner_cache = REPLACE(scanner_cache, %r, '') where inode rlike %r",
            (self.__class__.__name__, sql),
        )
Example #49
def insert_whois_cache(sql_ip, id, ipinfo):
    dbh = DB.DBO()
    dbh.insert("whois_cache",
               _ip = sql_ip,
               id = id,

               _geoip_city = DB.expand("(select id from geoip_city where city=%r "
                                       "limit 1)", (ipinfo.get('city','Unknown'),)) or '',    
               _geoip_country = DB.expand("(select id from geoip_country where country"
                                          "=%r limit 1)", (ipinfo.get("country_code3","---"),)),

               _geoip_org = DB.expand("(select id from geoip_org where org"
                                      "=%r limit 1)", (ipinfo.get("org","Unknown"),)),

               _geoip_isp = DB.expand("(select id from geoip_isp where isp"
                                      "=%r limit 1)", (ipinfo.get("isp","Unknown"),)),

               _fast = True
               )
Example #50
    def form(self, query, result):
        result.textfield("Inode ID", "inode_id")
        dbh = DB.DBO(query["case"])
        try:
            result.selector(
                "Table Name",
                "table_name",
                DB.expand("select name as `key`,name as value from sqlite where inode_id=%r", query["inode_id"]),
                case=query["case"],
            )
        except KeyError, e:
            pass
Example #51
    def reset_entire_path(self, path_glob):
        """ This method modifies the database to reset the scanners. It takes an argument which is a path under which all inodes will be reset. It does this for performance reasons. Each scanner is expected to clean up after itself. """

        ## The scanners should do their thing on their tables and then call this (the base class) method to allow us to handle the simple stuff (clear the scanner_cache field). If they don't call us, it is up to them to clean it up themselves.
        path = path_glob
        if not path.endswith("*"):
            path = path + "*"
        db = DB.DBO(self.case)
        db.execute(
            "update inode join file on file.inode = inode.inode set scanner_cache = REPLACE(scanner_cache, %r, '') where file.path rlike %r",
            (self.__class__.__name__, DB.glob2re(path)),
        )
Example #52
    def operator_hit(self, column, operator, arg):
        """ Search for a hit in the dictionary """
        ## Try to work out if we need to reindex:
        reindex = False
        dbh = DB.DBO()
        dbh.execute("select id from dictionary where word = %r limit 1", arg)
        row = dbh.fetch()

        if self.ui:
            ## If the word is not in the dictionary, we definitely want to reindex
            if not row:
                count, total, tables, sql = self.outstanding_inodes()
                message = "Word %s is not in the dictionary" % arg
                
            ## If the word is in the dictionary we want to know how many
            ## inodes are outdated
            else:
                count, total, tables, sql = self.outstanding_inodes(word_id = row['id'])
                message = "There are some inodes which are not up to date"

            ## Any inodes to process?
            if count > 0:
                reindex = True

        ## We do not need to reindex - just do it
        if not reindex:
            return DB.expand("(%s = %s)",
                             (self.escape_column_name(self.column),
                              row.get('id',0)))
            
        ## Allow the user to reindex the currently selected set of
        ## inodes with a new dictionary based on the new word
        self.ui.heading(message)
        self.ui.para("This will affect %s inodes and require rescanning %s bytes" % (count,total))

        ## Make up the link for the user:
        context = FlagFramework.STORE.put(dict(tables = tables,
                                               inode_sql = sql,
                                               previous_query = self.ui.defaults,
                                               target = 'parent_pane',
                                               where = self.table_where_clause
                                               ))

        link = query_type(report = "Add Word", family = "Keyword Indexing",
                          case = self.case,
                          context = context,
                          word = arg)
        
        self.ui.link("Click here to scan these inodes", link,
                     pane = 'self')

        ## Ok - Show the error to the user:
        raise self.ui
Example #53
            def pane_cb(path,result):
                query['order']='Filename'
                if path=='': path='/'
                
                ## If we are asked to show a file, we will show the
                ## contents of the directory the file is in:
                fsfd = FileSystem.DBFS( query["case"])
                if not fsfd.isdir(path):
                    path=os.path.dirname(path)

                self.make_table_widget(['URN','Name',
                                        'Size','Modified'],
                                       query, result,
                                       where=DB.expand("path=%r and (isnull(type) or type!='directory')", (path)),)
                
                result.toolbar(text=DB.expand("Scan %s",path),
                               icon="examine.png",
                               link=query_type(family="Load Data", report="ScanFS",
                                               path=path,
                                               case=query['case']), pane='popup'
                               )
Example #54
    def longls(self,path='/', dirs = None):
        dbh=DB.DBO(self.case)
        if self.isdir(path):
            ## If we are listing a directory, we list the files inside the directory            
            where = DB.expand(" path=%r " ,path)
        else:
            ## We are listing the exact file specified:
            where = DB.expand(" path=%r and name=%r", (
                FlagFramework.normpath(posixpath.dirname(path)),
                posixpath.basename(path)))

        ## Only list directories
        if dirs:
            where += " and isnull(inode_id) "
        else:
            where += " and not isnull(inode_id) "
                   
        dbh.execute("select * from vfs where %s group by inode_id,path,name", (where))
        result = [dent for dent in dbh]

        return result
Example #55
    def add_inode(self, fd, offset):
        """ This is called to allow the Carver to add VFS inodes.

        Returns the new inode_id.
        """
        ## Calculate the length of the new file
        length = self.get_length(fd,offset)
        new_inode = "%s|o%s:%s" % (self.fd.inode, offset, length)
        path, inode, inode_id = self.fsfd.lookup(inode_id = self.fd.inode_id)
        name = DB.expand("%s/%s", (path, self.make_filename(offset)))
        ## By default we just add a VFS Inode for it.
        new_inode_id = self.fsfd.VFSCreate(None,
                                           new_inode,
                                           name,
                                           size = length,
                                           )
        
        pyflaglog.log(pyflaglog.DEBUG, DB.expand("Added Carved inode %s (id %s) as %s",
                                                 (new_inode, new_inode_id,
                                                  name)))

        self.add_type_info(new_inode_id)
Example #56
def delete_case(case):
    """ A helper function which deletes the case """
    dbh = DB.DBO(None)    
    ## Broadcast that the case is about to be dropped (This broadcasts
    ## to the workers)

    dbh.insert('jobs',command = "DropCase", state='broadcast', arg1=case, cookie=0, _fast = True)

    ## This sends an event to our process:
    post_event('reset', case)

    ## Remove any jobs that may be outstanding (dont touch the
    ## currently processing jobs)
    dbh.delete('jobs',DB.expand("arg1=%r and state='pending' " , case),
               _fast= True)

    ## Now wait until there are no more processing jobs:
    total_time = 0
    while 1:
        dbh.execute("select * from jobs where arg1=%r and state='processing' limit 1", case)
        row = dbh.fetch()
        if row:
            time.sleep(2)
            total_time += 2
            if total_time > 20:
                pyflaglog.log(pyflaglog.WARNING,"Outstanding jobs remain in %s. Removing the case anyway." % case)
                dbh.execute("delete from jobs where arg1=%r and state='processing'",case)
                break
            pyflaglog.log(pyflaglog.INFO, "Waiting for outstanding jobs in case %r to be completed" % case)
        else:
            break

    try:
        ## Delete the case from the database
        dbh.delete('meta', DB.expand("property='flag_db' and value=%r", case),
                   _fast=True)
        dbh.execute("drop database if exists `%s`", case)
    except DB.DBError, e:
        pass
Example #57
def list_hits(case, inode_id, word, start=None, end=None):
    """ Returns a generator of hits of the word within the inode
    between offset start and end (these are inode offsets)."""
    dbh = DB.DBO(case)
    pdbh = DB.DBO()
    pdbh.execute("select id from dictionary where word = %r limit 1" , word)
    row = pdbh.fetch()
    if not row:
        raise RuntimeError("Word queried (%s) is not in the dictionary???" % word)

    ranges = ''
    if start!=None:
        ranges += DB.expand("and offset >= %r",(start,))

    if end!=None:
        ranges += DB.expand("and offset < %r", (end,))
        
    id = row['id']
    dbh.execute("select offset,length from LogicalIndexOffsets where "
                "inode_id = %r and word_id = %r %s order by offset", inode_id, id, ranges)

    return dbh
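A hypothetical use of the generator returned above; the row keys follow the select list in the final query:

    ## Case name, inode_id and offsets are made up for illustration.
    for hit in list_hits('demo_case', 42, 'secret', start=0, end=1024 * 1024):
        print hit['offset'], hit['length']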