Example #1
0
    def calculate(self):
        """Scan physical memory for task_structs by matching pointers to
        the kernel's *_sched_class structures.

        Yields candidate task_struct objects whose exit_state and pid
        pass basic sanity checks.
        """
        linux_common.set_plugin_members(self)

        phys_addr_space = utils.load_as(self._config, astype = 'physical')

        # Pointer width depends on the profile's memory model.
        if phys_addr_space.profile.metadata.get('memory_model', '32bit') == "32bit":
            fmt = "<I"
        else:
            fmt = "<Q"

        # Each *_sched_class symbol address, packed as raw bytes, becomes a
        # scan needle: live task_structs hold a pointer to one of them.
        needles = []
        for sym in phys_addr_space.profile.get_all_symbol_names("kernel"):
            if "_sched_class" in sym:
                addr = phys_addr_space.profile.get_symbol(sym)
                needles.append(struct.pack(fmt, addr))

        if not needles:
            debug.warning("Unable to scan for processes. Please file a bug report.")
            return

        # Offset of the sched_class pointer inside task_struct lets us back
        # up from a hit to the start of the structure.
        back_offset = phys_addr_space.profile.get_obj_offset("task_struct", "sched_class")

        scanner = poolscan.MultiPoolScanner(needles)

        for _, offset in scanner.scan(phys_addr_space):
            ptask = obj.Object("task_struct", offset = offset - back_offset, vm = phys_addr_space)

            # Accept only plausible exit states: running (0), EXIT_ZOMBIE (16),
            # EXIT_DEAD (32), or both flags combined.
            if ptask.exit_state.v() not in (0, 16, 32, 16 | 32):
                continue

            # Reject wildly implausible pids.
            if not (0 < ptask.pid < 66000):
                continue

            yield ptask
Example #2
0
        def draw_branch(pad, inherited_from):
            """Recursively print every task whose parent PID equals
            inherited_from, indenting children one '.' level deeper."""
            for task in data.values():
                if task.InheritedFromUniqueProcessId != inherited_from:
                    continue

                image_name = str(task.ImageFileName or '')
                first_column = "{0} {1:#x}:{2:20}".format("." * pad,
                                                          task.obj_offset,
                                                          image_name)

                self.table_row(outfd,
                    first_column,
                    task.UniqueProcessId,
                    task.InheritedFromUniqueProcessId,
                    task.ActiveThreads,
                    task.ObjectTable.HandleCount,
                    task.CreateTime)

                if self._config.VERBOSE:
                    indent = ' ' * pad
                    audit_name = str(task.SeAuditProcessCreationInfo.ImageFileName.Name or '')
                    outfd.write("{0}    audit: {1}\n".format(indent, audit_name))
                    process_params = task.Peb.ProcessParameters
                    if process_params:
                        outfd.write("{0}    cmd: {1}\n".format(
                            indent, str(process_params.CommandLine or '')))
                        outfd.write("{0}    path: {1}\n".format(
                            indent, str(process_params.ImagePathName or '')))

                # Drop this PID so a cyclic parent chain cannot recurse
                # forever; a missing key means it was already drawn.
                try:
                    del data[int(task.UniqueProcessId)]
                except KeyError:
                    debug.warning("PID {0} PPID {1} has already been seen".format(task.UniqueProcessId, task.InheritedFromUniqueProcessId))

                draw_branch(pad + 1, task.UniqueProcessId)
    def calculate(self):
        """Walk the kernel task list from init_task, yielding each
        task_struct; afterwards yield DTBs discovered by linux_auto_dtblist
        that did not belong to any listed task."""
        linux_common.set_plugin_members(self)
        # Resolve task_struct member offsets for this kernel image.
        task_struct.init_offsets(self.addr_space)
        required = ('comm', 'tasks', 'mm')
        if not all(task_struct.is_offset_defined(m) for m in required):
            debug.warning("Some of required members of 'task_struct' structure were not found.")
            return

        ksymbol_command = linux_auto_ksymbol(self._config)
        init_task_addr = ksymbol_command.get_symbol('init_task')
        if init_task_addr is None:
            debug.warning("Can't locate the first process (swapper).")
            return

        init_task = obj.Object('task_struct', offset=init_task_addr, vm=self.addr_space)
        tasks_dtb_list = []
        for task in init_task.tasks:
            if mm_struct.is_offset_defined('pgd'):
                pgd = task.mm.pgd
                if pgd:
                    # Remember the physical DTB owned by this task.
                    tasks_dtb_list.append(self.addr_space.vtop(pgd))
            yield task

        # DTBs without an owning task may be hidden or terminated
        # processes auto-discovered by the dtblist command.
        dtblist_command = linux_auto_dtblist(self._config)
        for dtb in dtblist_command.calculate():
            if dtb not in tasks_dtb_list:
                yield dtb
    def calculate(self):
        """Analyze each (optionally filtered) process, yielding results
        from self.analyze().

        Side effects: sets self.wow64 when any WoW64 process is seen,
        caches the full task list in self.tasks and section segments in
        self.segments. Warns when the profile is untested.
        """
        # Track whether any WoW64 process is present.
        self.wow64 = False

        self.tasks = list(vtasks.pslist(self.addr_space))
        tasks_list = self.filter_tasks(vtasks.pslist(self.addr_space))

        # Collect section segments once for all processes.
        self.segments = self.get_section_segments()

        # Profiles this plugin has been validated against.
        support_profiles = [
            'Win10x64_14393', 'Win10x64_15063', 'Win10x64_16299',
            'Win10x64_17134', 'Win10x64_18362', 'Win10x64_17763',
            'Win10x64_19041'
        ]
        profile = self._config.profile
        if profile not in support_profiles:
            debug.warning("Warning - {0} profile not supported".format(
                self._config.profile))

        # Analyze each process.
        for task in tasks_list:
            if task.IsWow64:
                self.wow64 = True
            for data in self.analyze(task):
                yield data
Example #5
0
 def merge_overlay(self, overlay):
     """Apply *overlay* to the profile's vtypes, warning about overlay
     structures that have no matching vtype."""
     for struct_name, struct_overlay in overlay.items():
         if struct_name in self.vtypes:
             self.vtypes[struct_name] = self._apply_overlay(self.vtypes[struct_name], struct_overlay)
         else:
             debug.warning("Overlay structure {0} not present in vtypes".format(struct_name))
Example #6
0
 def add_types(self, vtypes, overlay = None):
     """Deprecated shim mimicking the old profile.add_types API: merge
     *vtypes* (plus an optional *overlay*) and recompile the profile."""
     debug.warning("Deprecation warning: A plugin is making use of profile.add_types")
     # Fold the new definitions into the existing vtypes first.
     self.vtypes.update(vtypes)
     if overlay:
         self.merge_overlay(overlay)
     # Recompile so the new types become usable.
     self.compile()
Example #7
0
    def calculate(self):
        """Enumerate kernel tasks from init_task's linked list, then yield
        any DTBs found by linux_auto_dtblist that no listed task owns
        (possible hidden or terminated processes)."""
        linux_common.set_plugin_members(self)
        # Discover task_struct member offsets for this kernel image.
        task_struct.init_offsets(self.addr_space)
        missing = [m for m in ('comm', 'tasks', 'mm')
                   if not task_struct.is_offset_defined(m)]
        if missing:
            debug.warning(
                "Some of required members of 'task_struct' structure were not found."
            )
            return

        ksymbol_command = linux_auto_ksymbol(self._config)
        init_task_addr = ksymbol_command.get_symbol('init_task')
        if init_task_addr is None:
            debug.warning("Can't locate the first process (swapper).")
            return

        swapper = obj.Object('task_struct',
                             offset=init_task_addr,
                             vm=self.addr_space)
        known_dtbs = []
        for task in swapper.tasks:
            if mm_struct.is_offset_defined('pgd'):
                pgd = task.mm.pgd
                if pgd:
                    # Remember the physical DTB owned by this task.
                    known_dtbs.append(self.addr_space.vtop(pgd))
            yield task

        # DTBs with no owning task are suspicious: report them too.
        dtblist_command = linux_auto_dtblist(self._config)
        for dtb in dtblist_command.calculate():
            if dtb not in known_dtbs:
                yield dtb
Example #8
0
        def draw_branch(level, inherited_from):
            """Yield (depth, row) for every task descending from the
            process with PID inherited_from, depth-first."""
            for task in data.values():
                if task.InheritedFromUniqueProcessId != inherited_from:
                    continue

                row = [Address(task.obj_offset),
                       str(task.ImageFileName or ''),
                       int(task.UniqueProcessId),
                       int(task.InheritedFromUniqueProcessId),
                       int(task.ActiveThreads),
                       int(task.ObjectTable.HandleCount),
                       str(task.CreateTime)]

                if self._config.VERBOSE:
                    row.append(str(task.SeAuditProcessCreationInfo.ImageFileName.Name or ''))
                    process_params = task.Peb.ProcessParameters
                    if process_params:
                        row.append(str(process_params.CommandLine or ''))
                        row.append(str(process_params.ImagePathName or ''))
                    else:
                        # No PEB parameters available: placeholder columns.
                        row.append(str("-"))
                        row.append(str("-"))
                yield (level, row)

                # Remove this PID so cyclic parent links cannot recurse
                # forever; a missing key means it was already emitted.
                try:
                    del data[int(task.UniqueProcessId)]
                except KeyError:
                    debug.warning("PID {0} PPID {1} has already been seen".format(task.UniqueProcessId,
                                                                                  task.InheritedFromUniqueProcessId))

                for item in draw_branch(level + 1, task.UniqueProcessId):
                    yield item
Example #9
0
    def get_entries(addr_space, regapi):
        """Yield (path, last_modified, last_update) tuples from the
        AppCompatCache (ShimCache) registry value.

        On XP the records carry an update timestamp; on later versions
        the third element is always None.
        """
        regapi.reset_current()
        currentcs = regapi.reg_get_currentcontrolset()
        if currentcs == None:
            currentcs = "ControlSet001"

        version = (addr_space.profile.metadata.get('major', 0),
                   addr_space.profile.metadata.get('minor', 0))
        xp = False

        # The key moved between XP (<= 5.1) and later Windows versions.
        if version <= (5, 1):
            key = currentcs + '\\' + "Control\\Session Manager\\AppCompatibility"
            xp = True
        else:
            key = currentcs + '\\' + "Control\\Session Manager\\AppCompatCache"

        data_raw = regapi.reg_get_value('system', key, "AppCompatCache")
        if data_raw == None or len(data_raw) < 0x1c:
            debug.warning("No ShimCache data found")
            # 'return' (not 'raise StopIteration') ends a generator cleanly;
            # PEP 479 turns StopIteration raised inside a generator into
            # RuntimeError on Python 3.7+.
            return

        bufferas = addrspace.BufferAddressSpace(addr_space.get_config(), data = data_raw)
        shimdata = obj.Object("ShimRecords", offset = 0, vm = bufferas)
        # '== None' is deliberate: obj.Object may return a NoneObject whose
        # __eq__ matches None while 'is None' would not.
        if shimdata == None:
            debug.warning("No ShimCache data found")
            return

        for e in shimdata.Entries:
            if xp:
                yield e.Path, e.LastModified, e.LastUpdate
            else:
                yield ShimCache.remove_unprintable(bufferas.read(int(e.PathOffset), int(e.Length))), e.LastModified, None
Example #10
0
    def reload_file(self, path):
        """Switch the active backing file to *path*, opening and caching
        it on first use.

        Cached entries in self.used_files hold the absolute file name,
        open mode, open handle, and size. Reloading the already-current
        path is a no-op; a nonexistent path marks the current path as
        'ZERO' and returns.
        """
        def assign_used_file():
            # Promote the cached entry for *path* to the active file.
            entry = self.used_files[path]
            self.currentPath = path
            self.fname = entry['fname']
            self.name = entry['fname']
            self.mode = entry['mode']
            self.fhandle = entry['fhandle']
            self.fhandle.seek(0, 2)
            self.fsize = entry['fsize']

        if path in self.used_files:
            if self.currentPath == path:
                return
            assign_used_file()
        else:
            debug.debug('read from file: ' + path)
            path_name = urllib.url2pathname(path)
            if not os.path.exists(path_name):
                debug.warning('File not exist: ' + path +
                              ' Returning zero bytes..')
                # BUG FIX: the original assigned a dead local 'currentPath',
                # leaving self.currentPath untouched.
                self.currentPath = 'ZERO'
                return
            mode = 'rb'
            if self._config.WRITE:
                mode += '+'
            fname = os.path.abspath(path_name)
            fhandle = open(fname, mode)
            # BUG FIX: measure the size of the newly opened handle; the
            # original read self.fhandle.tell(), which still referred to
            # the previously active file.
            fhandle.seek(0, 2)
            # Build the cache entry in one assignment so a plain dict
            # works (per-key assignment assumed the entry already existed).
            self.used_files[path] = {
                'fname': fname,
                'mode': mode,
                'fhandle': fhandle,
                'fsize': fhandle.tell(),
            }
            assign_used_file()
Example #11
0
    def calculate(self):
        """Yield ShimCache entries as (path, last modified, last update)
        tuples; the last element is only populated on XP."""
        addr_space = utils.load_as(self._config)
        regapi = registryapi.RegistryApi(self._config)
        regapi.reset_current()
        currentcs = regapi.reg_get_currentcontrolset()
        if currentcs == None:
            currentcs = "ControlSet001"

        version = (addr_space.profile.metadata.get('major', 0),
                   addr_space.profile.metadata.get('minor', 0))

        # XP (and earlier) stores the cache under a different key name.
        xp = version <= (5, 1)
        if xp:
            key = currentcs + '\\' + "Control\\Session Manager\\AppCompatibility"
        else:
            key = currentcs + '\\' + "Control\\Session Manager\\AppCompatCache"

        data_raw = regapi.reg_get_value('system', key, "AppCompatCache")
        if data_raw == None or len(data_raw) < 0x1c:
            debug.warning("No ShimCache data found")
            return

        bufferas = addrspace.BufferAddressSpace(self._config, data = data_raw)
        shimdata = obj.Object("ShimRecords", offset = 0, vm = bufferas)
        if shimdata == None:
            debug.warning("No ShimCache data found")
            return

        for e in shimdata.Entries:
            if xp:
                yield e.Path, e.LastModified, e.LastUpdate
            else:
                yield self.remove_unprintable(bufferas.read(int(e.PathOffset), int(e.Length))), e.LastModified, None
Example #12
0
    def search_process_memory(self, s, heap_only=False):
        """Search this process's mapped memory for any of the strings in
        *s*, yielding the absolute address of every hit.

        :param s: list of strings to search for (a bare string is
                  accepted but deprecated)
        :param heap_only: restrict the search to the mapping spanning
                  the heap ([start_brk, brk])
        """
        # Overlap consecutive reads so matches straddling a block
        # boundary are not missed.
        overlap = 1024

        # Make sure s is a list. This allows searching for multiple
        # strings at once without changing the API. isinstance (rather
        # than type() ==) also accepts list subclasses.
        if not isinstance(s, list):
            debug.warning("Single strings to search_process_memory is deprecated, use a list instead")
            s = [s]

        # Scan in 10 MB blocks.
        scan_blk_sz = 1024 * 1024 * 10

        addr_space = self.get_process_address_space()

        for vma in self.get_proc_maps():
            if heap_only:
                # Keep only the mapping that contains the heap.
                if not (vma.vm_start <= self.mm.start_brk and vma.vm_end >= self.mm.brk):
                    continue
            offset = vma.vm_start
            out_of_range = vma.vm_start + (vma.vm_end - vma.vm_start)
            while offset < out_of_range:
                # Read some data and match it.
                to_read = min(scan_blk_sz + overlap, out_of_range - offset)
                data = addr_space.zread(offset, to_read)
                if not data:
                    break
                for needle in s:
                    for hit in utils.iterfind(data, needle):
                        yield offset + hit
                # Advance by the block size (not the overlap) so the tail
                # of this block is re-scanned at the start of the next.
                offset += min(to_read, scan_blk_sz)
Example #13
0
  def download_pdbfile(self, db, guid, module_id, filename, path):
    """Record the (guid, file) PDB pair in the symbol DB, link it to
    *module_id*, then try each symbol server in SYM_URLS until the PDB
    (or its CAB-compressed '_' variant) downloads into *path*.

    db is a database cursor; commits go through self._sym_db_conn.
    Returns after the first successful download.
    """
    # Find (or create) the pdb row for this guid/file pair.
    db.execute("SELECT id FROM pdb WHERE guid=? AND file=?", (str(guid.upper()).rstrip('\0'), str(filename).rstrip('\0')))
    row = db.fetchone()
    if row == None:
      db.execute("INSERT INTO pdb(guid, file) VALUES (?, ?)", (str(guid.upper()).rstrip('\0'), str(filename).rstrip('\0')))
      db.execute("SELECT LAST_INSERT_ROWID() FROM pdb")
      row = db.fetchone()
    pdb_id = row[0]
    # Link the module to the pdb unless the link already exists.
    db.execute("SELECT * FROM mod_pdb WHERE module_id=? AND pdb_id=?", (module_id, pdb_id))
    row = db.fetchone()
    if row == None:
      db.execute("INSERT INTO mod_pdb(module_id, pdb_id) VALUES (?, ?)", (module_id, pdb_id))
    self._sym_db_conn.commit()

    for sym_url in SYM_URLS:
      url = "{0}/{1}/{2}/".format(sym_url, filename, guid)
      # NOTE(review): this opener is built but never installed or used;
      # the actual fetch below goes through PDBOpener().
      proxy = urllib2.ProxyHandler()
      opener = urllib2.build_opener(proxy)
      # Try the CAB-compressed variant ('xxx.pd_') first, then the plain name.
      tries = [ filename[:-1] + '_', filename ]
      for t in tries:
        debug.info("Trying {0}".format(url+t))
        outfile = os.path.join(path, t)
        try:
          PDBOpener().retrieve(url+t, outfile, reporthook=self.progress)
          debug.info("Downloaded symbols and cached at {0}".format(outfile))
          if t.endswith("_"):
            # CAB archive: unpack, delete the archive, and record the
            # successful source server in the symbol DB.
            self.cabextract(outfile, path)
            debug.info("Unpacked download into {0}".format(path))
            os.remove(outfile)
            db.execute("UPDATE pdb SET downloaded_at=DATETIME('now'), src=? WHERE id=? AND guid=? AND file=?", (sym_url, pdb_id, str(guid.upper()).rstrip('\0'), str(filename).rstrip('\0')))
            self._sym_db_conn.commit()
          return
        except urllib2.HTTPError, e:
          debug.warning("HTTP error {0}".format(e.code))
Example #14
0
 def __str__(self):
     """Return a printable form of this object.

     In strict mode this aborts the process instead of returning.
     """
     ## If we are strict we blow up here
     if self.strict:
         debug.error("{0} n{1}".format(self.reason, self.bt))
         sys.exit(0)
     else:
         debug.warning("{0}".format(self.reason))

     # BUG FIX: __str__ must return a string; the original fell through
     # returning None, which makes str(obj) raise TypeError.
     return ""
Example #15
0
    def get_section(self, sect):
        """Return the requested PE header, section header, or section
        content (optionally converted to strings), mirroring the data to
        disk when configured. Raises NoPE for non-PE dumps."""
        if not self.isPE:
            raise exc.NoPE(self.pName)

        ret = None
        parts = sect.split(':')
        if parts[0] == 'pe':
            # PE Header
            header = self.get_header(sect)
            ret = self.strings_str(header) if self.strings else header
        elif len(parts) > 1 and parts[1] == 'header':
            # Section header
            for section in self.pDump.sections:
                if parts[0] == section.Name.translate(None, '\x00'):
                    packed = section.__pack__()
                    ret = self.strings_str(packed) if self.strings else packed
            if not ret:
                debug.warning('Unknown section: {!s} for {!s}. Please specify a valid section.'.format(sect, self.pName))
        else:
            # Section content
            for section in self.pDump.sections:
                if sect == section.Name.translate(None, '\x00'):
                    content = section.get_data()
                    ret = self.strings_str(content) if self.strings else content
            if not ret:
                debug.warning('Unknown section: {!s} for {!s}. Please specify a valid section.'.format(sect, self.pName))

        # Dump data to disk
        if self.mirror:
            self.dump_hashed_data(ret, sect)

        return ret
Example #16
0
def Object(theType, offset, vm, parent = None, name = None, **kwargs):
    """Instantiate the profile type *theType* at *offset* within *vm*.

    Compiled profile types are tried first, then registered object
    classes; an InvalidOffsetError yields a NoneObject. An unknown type
    falls through with a warning (implicitly returning None).
    """
    name = name or theType
    offset = int(offset)

    try:
        profile = vm.profile
        if theType in profile.types:
            return profile.types[theType](offset = offset, vm = vm, name = name,
                                          parent = parent)

        if theType in profile.object_classes:
            return profile.object_classes[theType](theType = theType,
                                                   offset = offset,
                                                   vm = vm,
                                                   parent = parent,
                                                   name = name,
                                                   **kwargs)

    except InvalidOffsetError:
        ## If we cant instantiate the object here, we just error out:
        return NoneObject("Invalid Address 0x{0:08X}, instantiating {1}".format(offset, name),
                          strict = vm.profile.strict)

    ## If we get here we have no idea what the type is supposed to be?
    ## This is a serious error.
    debug.warning("Cant find object {0} in profile {1}?".format(theType, vm.profile))
Example #17
0
    def search_process_memory(self, s, heap_only=False):
        """Yield the address of every occurrence of the strings in *s*
        found in this process's memory mappings."""
        # Re-scan a little of each block so matches straddling a block
        # boundary are not lost.
        overlap = 1024

        # Make sure s in a list. This allows you to search for
        # multiple strings at once, without changing the API.
        if type(s) != list:
            debug.warning(
                "Single strings to search_process_memory is deprecated, use a list instead"
            )
            s = [s]

        # Scan in 10 MB blocks.
        scan_blk_sz = 1024 * 1024 * 10

        addr_space = self.get_process_address_space()

        for vma in self.get_proc_maps():
            if heap_only and not (vma.vm_start <= self.mm.start_brk
                                  and vma.vm_end >= self.mm.brk):
                continue
            offset = vma.vm_start
            end = vma.vm_start + (vma.vm_end - vma.vm_start)
            while offset < end:
                # Read a block (plus overlap) and match against it.
                chunk = min(scan_blk_sz + overlap, end - offset)
                data = addr_space.zread(offset, chunk)
                if not data:
                    break
                for needle in s:
                    for hit in utils.iterfind(data, needle):
                        yield offset + hit
                offset += min(chunk, scan_blk_sz)
Example #18
0
    def calculate(self):
        """Walk the kernel's _allproc list, yielding proc structures
        (optionally filtered by the comma-separated PID option)."""
        common.set_plugin_members(self)

        pidlist = None

        # The PID option is a comma-separated list; ignore it when
        # malformed (ValueError from int()) or unreadable (AttributeError
        # from a non-string value) instead of swallowing everything.
        try:
            if self._config.PID:
                pidlist = [int(p) for p in self._config.PID.split(',')]
        except (ValueError, AttributeError):
            pass

        p = self.addr_space.profile.get_symbol("_allproc")

        procsaddr = obj.Object("proclist", offset = p, vm = self.addr_space)
        proc = obj.Object("proc", offset = procsaddr.lh_first, vm = self.addr_space)
        seen = []

        while proc.is_valid():
            # A revisited offset means the list loops back on itself
            # (non-atomic acquisition): stop rather than spin forever.
            if proc.obj_offset in seen:
                debug.warning("Recursive process list detected (a result of non-atomic acquisition). Use mac_tasks or mac_psxview)")
                break
            seen.append(proc.obj_offset)

            if not pidlist or proc.p_pid in pidlist:
                yield proc

            proc = proc.p_list.le_next.dereference()
Example #19
0
 def merge_overlay(self, overlay):
     """Apply *overlay* onto the profile's vtypes, warning about overlay
     structures that have no matching vtype."""
     for name, patch in list(overlay.items()):
         if name in self.vtypes:
             self.vtypes[name] = self._apply_overlay(self.vtypes[name], patch)
         else:
             debug.warning(f"Overlay structure {name} not present in vtypes")
    def get_autoruns(self):
        """Collect autorun entries from the machine-wide (software hive)
        and per-user (ntuser.dat) Run keys.

        Returns a list of parsed results; on any failure logs a warning
        and returns whatever was gathered so far.
        """
        debug.debug('Started get_autoruns()')
        results = []
        hive_key_list = []

        try:
            # Gather all software run keys
            self.regapi.reset_current()
            for run_key in SOFTWARE_RUN_KEYS:
                hive_key_list.extend(
                    self.regapi.reg_yield_key(hive_name='software', key=run_key))

            # Gather all ntuser run keys
            self.regapi.reset_current()
            for run_key in NTUSER_RUN_KEYS:
                hive_key_list.extend(
                    self.regapi.reg_yield_key(hive_name='ntuser.dat', key=run_key))

            # hive_key = (key pointer, hive_name)
            for hive_key in hive_key_list:
                results += self.parse_autoruns_key(hive_key)

        except Exception as e:
            # Best-effort: registry layout varies, so log and return
            # whatever was collected before the failure.
            debug.warning('get_autoruns() failed to complete. Exception: {0} {1}'.format(type(e).__name__, e.args))

        debug.debug('Finished get_autoruns()')
        return results
Example #21
0
 def _init_ksymtab(self):
     """Locate the physical offset of the kernel's ksymtab_strings
     section by scanning the first 16 MB of physical memory for the
     'init_task' symbol name. Only 32-bit memory models are supported.

     Sets self.kernel_image, self.ksymtab_initialized and, on success,
     self.ksymtab_strings_offset.
     """
     phys_as = utils.load_as(self._config, astype='physical')
     start_addr, _ = phys_as.get_available_addresses().next()
     # First 16 MB of physical memory
     self.kernel_image = phys_as.read(start_addr, 0x1000000)
     # Init page_offset
     if phys_as.profile.metadata.get('memory_model', '32bit') != '32bit':
         raise NotImplementedError
     self.ksymtab_initialized = True
     # Locate the physical offset of the ksymtab_strings section
     for match in re.finditer('init_task\0', self.kernel_image):
         offset = match.start()
         symbol_char = re.compile(r'[0-9a-z_]')
         if symbol_char.match(self.kernel_image[offset - 1:offset]):
             # 'init_task' is a substring of another symbol like 'xxx_init_task'
             continue
         # TODO: Choose the right one, not the first.
         # Find the beginning of the ksymtab_strings section
         # Walk backwards while bytes still look like NUL-separated
         # symbol-name characters; the first non-matching byte marks
         # the start of the section.
         char = self.kernel_image[offset]
         while offset > 0 and (symbol_char.match(char) or char == '\x00'):
             offset -= 1
             char = self.kernel_image[offset]
         debug.debug("Found the physical offset of the ksymtab_strings "
                     "section: {0:#010x}".format(offset))
         self.ksymtab_strings_offset = offset
         return
     debug.warning("Can't locate a ksymtab_strings section")
Example #22
0
        def draw_branch(pad, inherited_from):
            """Recursively print every task whose parent PID equals
            inherited_from, indenting children with one '.' per level."""
            for task in data.values():
                if task.InheritedFromUniqueProcessId == inherited_from:

                    first_column = "{0} {1:#x}:{2:20}".format(
                                        "." * pad, 
                                        task.obj_offset, 
                                        str(task.ImageFileName or '')
                                        )

                    self.table_row(outfd, 
                        first_column,
                        task.UniqueProcessId,
                        task.InheritedFromUniqueProcessId,
                        task.ActiveThreads,
                        task.ObjectTable.HandleCount,
                        task.CreateTime)

                    # Verbose mode adds audit image name, command line and
                    # image path under the row.
                    if self._config.VERBOSE:
                        outfd.write("{0}    audit: {1}\n".format(
                                ' ' * pad, str(task.SeAuditProcessCreationInfo.ImageFileName.Name or '')))
                        process_params = task.Peb.ProcessParameters
                        if process_params:
                            outfd.write("{0}    cmd: {1}\n".format(
                                ' ' * pad, str(process_params.CommandLine or '')))
                            outfd.write("{0}    path: {1}\n".format(
                                ' ' * pad, str(process_params.ImagePathName or '')))

                    # Remove this PID so a cyclic parent chain cannot
                    # recurse forever; a missing key means it was already
                    # drawn.
                    try:
                        del data[int(task.UniqueProcessId)]
                    except KeyError:
                        debug.warning("PID {0} PPID {1} has already been seen".format(task.UniqueProcessId, task.InheritedFromUniqueProcessId))

                    draw_branch(pad + 1, task.UniqueProcessId) 
Example #23
0
    def get_entries(addr_space, regapi):
        """Yield (path, last_modified, last_update) tuples from the
        AppCompatCache (ShimCache) registry value; the last element is
        only populated on XP.
        """
        regapi.reset_current()
        currentcs = regapi.reg_get_currentcontrolset()
        if currentcs == None:
            currentcs = "ControlSet001"

        version = (addr_space.profile.metadata.get('major', 0),
                   addr_space.profile.metadata.get('minor', 0))
        xp = False

        # The key moved between XP (<= 5.1) and later Windows versions.
        if version <= (5, 1):
            key = currentcs + "\\Control\\Session Manager\\AppCompatibility"
            xp = True
        else:
            key = currentcs + "\\Control\\Session Manager\\AppCompatCache"

        data_raw = regapi.reg_get_value('system', key, "AppCompatCache")
        if data_raw == None or len(data_raw) < 0x1c:
            debug.warning("No ShimCache data found")
            # 'return' (not 'raise StopIteration') ends a generator cleanly;
            # PEP 479 turns StopIteration raised inside a generator into
            # RuntimeError on Python 3.7+.
            return

        bufferas = addrspace.BufferAddressSpace(addr_space.get_config(), data = data_raw)
        shimdata = obj.Object("ShimRecords", offset = 0, vm = bufferas)
        # '== None' is deliberate: obj.Object may return a NoneObject whose
        # __eq__ matches None while 'is None' would not.
        if shimdata == None:
            debug.warning("No ShimCache data found")
            return

        for e in shimdata.Entries:
            if xp:
                yield e.Path, e.LastModified, e.LastUpdate
            else:
                yield ShimCache.remove_unprintable(bufferas.read(int(e.PathOffset), int(e.Length))), e.LastModified, None
Example #24
0
    def calculate(self):
        """Walk the system call table and verify that every entry is a
        known kernel symbol, yielding one record per slot."""
        linux_common.set_plugin_members(self)

        if not has_distorm:
            debug.warning(
                "distorm not installed. The best method to calculate the system call table size will not be used."
            )

        # Pass each record through to the renderer unchanged.
        for (tableaddr, table_name, i, idx_name, call_addr, sym_name,
             hooked) in self.get_syscalls(None, True, True):
            yield (tableaddr, table_name, i, idx_name, call_addr, sym_name,
                   hooked)
Example #25
0
    def calculate(self):
        """Yield proc objects from the kernel's _allproc list, honoring
        the optional PID filter and guarding against looped lists."""
        common.set_plugin_members(self)

        pidlist = None

        try:
            if self._config.PID:
                pidlist = [int(pid) for pid in self._config.PID.split(',')]
        except:
            # Malformed PID option: fall back to listing everything.
            pass

        allproc_addr = self.addr_space.profile.get_symbol("_allproc")

        procsaddr = obj.Object("proclist", offset = allproc_addr, vm = self.addr_space)
        current = obj.Object("proc", offset = procsaddr.lh_first, vm = self.addr_space)
        visited = []

        while current.is_valid():
            # A revisited offset means the list loops back on itself.
            if current.obj_offset in visited:
                debug.warning("Recursive process list detected (a result of non-atomic acquisition). Use mac_tasks or mac_psxview)")
                break
            visited.append(current.obj_offset)

            if not pidlist or current.p_pid in pidlist:
                yield current

            current = current.p_list.le_next.dereference()
Example #26
0
    def render_text(self, outfd, data):
        """Render scan hits: for each VAD judged malicious, print its
        range, protection, flags, a 64-byte hexdump of the hit and (when
        distorm3 is available and the region is not a PE) a disassembly."""
        if not has_distorm3:
            debug.warning("For best results please install distorm3")

        for process_name, pid, vad, vad_bytes in data:
            # 'where' is the offset of the suspicious bytes inside the VAD.
            scan_result, where = self.is_malicious(vad, vad_bytes)

            if scan_result:
                outfd.write('\n')
                outfd.write('Process: {0} Pid: {1} Space Address: {2:#x}-{3:#x}\n'.format(process_name, pid, vad.Start, vad.End))
                vad_protection = vadinfo.PROTECT_FLAGS.get(vad.VadFlags.Protection.v(), '')
                outfd.write('Vad Tag: {0} Protection: {1}\n'.format(vad.Tag, vad_protection))
                outfd.write('Flags: {0}\n'.format(str(vad.VadFlags)))
                outfd.write('Scan result: {0}\n'.format(scan_result))
                outfd.write('\n')

                # Hexdump 64 bytes starting at the hit.
                outfd.write("{0}\n".format("\n".join(
                    ["{0:#010x}  {1:<48}  {2}".format(vad.Start + where + o, h, ''.join(c))
                    for o, h, c in utils.Hexdump(vad_bytes[where:where+64])
                    ])))

                """Dissassemble opcodes if it is not a PE header"""
                if has_distorm3 and not self._is_pe(vad_bytes): # MZ
                    outfd.write('\n')
                    outfd.write("\n".join(
                        ["{0:#010x}  {1:<30}  {2}".format(o, h, i)
                        for o, i, h in Disassemble(vad_bytes[where:where+64], vad.Start + where)
                        ]))
                outfd.write('\n')
Example #27
0
    def _check_inetsw(self, modules):
        """Check the inetsw protocol switch tables for inline-hooked
        proto_ops members, yielding (protocol name, member, hook type,
        hook address) tuples."""
        try:
            self.addr_space.profile.get_obj_offset("inet_protosw", "list")
        except KeyError:
            debug.warning(
                "You are using an old Linux profile. Please recreate the profile using the latest Volatility version."
            )
            return

        # Copy the member names into a real list so remove() works on
        # Python 3 as well (dict views have no remove()).
        proto_members = list(
            self.profile.types['proto_ops'].keywords["members"].keys())
        proto_members.remove('owner')
        proto_members.remove('family')

        inetsw_addr = self.addr_space.profile.get_symbol("inetsw")
        inetsw = obj.Object(theType="Array",
                            targetType="list_head",
                            offset=inetsw_addr,
                            vm=self.addr_space,
                            count=11)

        for inet_list in inetsw:
            for inet in inet_list.list_of_type("inet_protosw", "list"):
                name = self.addr_space.read(inet.prot.name.obj_offset, 32)
                # BUG FIX: str.index() raises ValueError when no NUL is
                # present, so the original 'idx != -1' check was dead and
                # a missing terminator crashed the plugin; find() returns
                # -1 and matches the check below.
                idx = name.find("\x00")
                if idx != -1:
                    name = name[:idx]

                for (hooked_member, hook_type,
                     hook_address) in self._is_inline_hooked(
                         inet.ops, proto_members, modules):
                    yield (name, hooked_member, hook_type, hook_address)
    def parse_task_xml(self, xml, f_name):
        """Parse a scheduled-task XML blob (UTF-16) and return a dict of
        selected fields (Date, Description, Actions, Enabled, Hidden,
        Triggers).

        Returns None when the task has no Exec/Command action, or
        implicitly when decoding fails; *f_name* is only used for the
        warning message.
        """
        raw = xml
        # Collapse runs of double NULs (slack between fragments) and make
        # sure the buffer is NUL-terminated before decoding.
        xml = re.sub('\x00\x00+', '', xml) + '\x00'
        if xml:
            try:
                xml = xml.decode('utf-16')
                # Strip attributes from <Task ...> so the parser need not
                # cope with namespace declarations.
                xml = re.sub(r"<Task(.*?)>", "<Task>", xml)
                xml = xml.encode('utf-16')

                root = ET.fromstring(xml)
                d = {}

                for e in root.findall("./RegistrationInfo/Date"):
                    d['Date'] = e.text or ''
                for e in root.findall("./RegistrationInfo/Description"):
                    d['Description'] = e.text or ''
                for e in root.findall("./Actions"):
                    d['Actions'] = self.visit_all_children(e)
                for e in root.findall("./Settings/Enabled"):
                    d['Enabled'] = e.text or ''
                for e in root.findall("./Settings/Hidden"):
                    d['Hidden'] = e.text or ''
                for t in root.findall("./Triggers/*"):
                    d['Triggers'] = self.visit_all_children(t)

                # Tasks without an executable command are not interesting.
                if not d.get("Actions", {}).get('Exec', {}).get("Command", False):
                    return None

                return d
            except UnicodeDecodeError as e:
                debug.warning('Error while parsing the following task: {}'.format(f_name))
                debug.debug('UnicodeDecodeError for: {}'.format(repr(raw)))
Example #29
0
 def hash(self, data, alghConfig):
     """Return the sdhash digest of *data*, or '-' when the input is
     shorter than the algorithm's 512-byte minimum."""
     try:
         return fhash.sdhash(data).hexdigest()
     except ValueError:
         debug.warning("SDHash needs an input of at least 512 bytes. Too short: {!s}".format(len(data)))
         return '-'
Example #30
0
 def add_types(self, vtypes, overlay = None):
     """Deprecated compatibility wrapper for the previous add_types API:
     merge *vtypes* (and an optional *overlay*), then recompile."""
     debug.warning("Deprecation warning: A plugin is making use of profile.add_types")
     # Merge the new definitions before applying any overlay.
     self.vtypes.update(vtypes)
     if overlay:
         self.merge_overlay(overlay)
     # Rebuild the compiled types so the additions take effect.
     self.compile()
Example #31
0
def Object(theType, offset, vm, name=None, **kwargs):
    """Instantiate the profile type named *theType* at *offset* within *vm*.

    Extra keyword arguments are forwarded to the type's constructor.
    Returns a NoneObject on an invalid address, and falls through with a
    warning (implicitly returning None) when the profile has no such type.
    """
    name = name or theType
    offset = int(offset)

    try:
        if vm.profile.has_type(theType):
            return vm.profile.types[theType](offset=offset, vm=vm, name=name, **kwargs)
    except InvalidOffsetError:
        ## The address cannot be instantiated; surface a NoneObject instead.
        return NoneObject(
            "Invalid Address 0x{0:08X}, instantiating {1}".format(
                offset, name),
            strict=vm.profile.strict)

    ## Unknown type name - this is a serious error, but only warn here.
    debug.warning("Cant find object {0} in profile {1}?".format(
        theType, vm.profile))
Example #32
0
    def calculate(self):
        """Validate the requested PID, gather section segments for every
        process, then yield analysis results for the filtered tasks."""
        # Check the PID is valid before spending time on section collection.
        valid_pids = [int(task.UniqueProcessId)
                      for task in taskmods.DllList.calculate(self)]
        if int(self._config.PID) not in valid_pids:
            debug.error("Error - Invalid PID")

        # Temporarily clear the PID filter so segments are collected for all
        # processes, then restore the user's original option.
        self.pid = self._config.PID
        self._config.PID = ""
        self.segments = self.get_section_segments()
        self._config.PID = self.pid

        # Warn on profiles this plugin was not written against.
        profile = self._config.profile
        if profile not in ("Win7SP1x86", "WinXPSP3x86"):
            debug.warning("Warning - {0} profile not supported".format(self._config.profile))

        # Analyze each process that passes the (restored) PID filter.
        for task in taskmods.DllList.calculate(self):
            for data in self.analyze(task):
                yield data
Example #33
0
    def render_text(self, outfd, data):
        """Write a hexdump + disassembly report for each suspicious VAD.

        For every task in *data*, walks VADs matching the injection filter,
        prints process/VAD metadata, a 64-byte hexdump and disassembly, and
        optionally dumps the whole VAD to --dump-dir.
        """
        if not has_distorm3:
            debug.warning("For best results please install distorm3")

        if self._config.DUMP_DIR and not os.path.isdir(self._config.DUMP_DIR):
            debug.error(self._config.DUMP_DIR + " is not a directory")

        for task in data:
            for vad, address_space in task.get_vads(vad_filter = task._injection_filter):

                # Skip regions that contain no mapped data at all.
                if self._is_vad_empty(vad, address_space):
                    continue

                content = address_space.zread(vad.Start, 64)

                outfd.write("Process: {0} Pid: {1} Address: {2:#x}\n".format(
                    task.ImageFileName, task.UniqueProcessId, vad.Start))

                outfd.write("Vad Tag: {0} Protection: {1}\n".format(
                    vad.Tag, vadinfo.PROTECT_FLAGS.get(vad.VadFlags.Protection.v(), "")))

                outfd.write("Flags: {0}\n".format(str(vad.VadFlags)))
                outfd.write("\n")

                # this is for address reporting in the output
                data_start = vad.Start

                # all zeros in the first page followed by 558B at the base of
                # the second page is an indicator of wiped PE headers
                # (0x55 0x8B == "push ebp; mov ebp, esp", a common prologue).
                if content.count(chr(0)) == len(content):
                    if address_space.zread(vad.Start, 0x1000).count(chr(0)) == 0x1000:
                        next_page = address_space.zread(vad.Start + 0x1000, 64)
                        if next_page[0:2] == "\x55\x8B":
                            outfd.write("**** POSSIBLE WIPED PE HEADER AT BASE *****\n\n")
                            content = next_page
                            data_start = vad.Start + 0x1000

                # Hexdump of the (possibly shifted) content window.
                outfd.write("{0}\n".format("\n".join(
                    ["{0:#010x}  {1:<48}  {2}".format(data_start + o, h, ''.join(c))
                    for o, h, c in utils.Hexdump(content)
                    ])))

                outfd.write("\n")
                outfd.write("\n".join(
                    ["{0:#010x} {1:<16} {2}".format(o, h, i)
                    for o, i, h in Disassemble(content, data_start)
                    ]))

                # Dump the data if --dump-dir was supplied
                if self._config.DUMP_DIR:

                    filename = os.path.join(self._config.DUMP_DIR,
                        "process.{0:#x}.{1:#x}.dmp".format(
                        task.obj_offset, vad.Start))

                    self.dump_vad(filename, vad, address_space)

                outfd.write("\n\n")
Example #34
0
 def acquire_crypto_material(self):
   """Find the LSA signature and derive the IV plus DES/AES keys from it."""
   position = self.find_signature()
   if not position:
     debug.warning('[LsaDecryptor] unable to find signature!')
     return
   self.iv = self.get_IV(position)
   self.des_key = self.get_des_key(position)
   self.aes_key = self.get_aes_key(position)
Example #35
0
 def decrypt_epwd(self, decryptor):
   """Decrypt self.epwd with *decryptor*, storing the plaintext in self.pwd."""
   if not (self.epwd and decryptor):
     return
   self.pwd = decryptor.decrypt(self.epwd)
   try:
     self.pwd = self.pwd.decode('utf-16-le').rstrip('\0')
   except UnicodeDecodeError:
     # Not valid UTF-16: fall back to a hex rendering of the raw bytes.
     debug.warning('[Credential:decrypt_epwd] unicode decode error')
     self.pwd = self.pwd.encode('hex')
Example #36
0
 def acquire_crypto_material(self):
     """Derive IV and DES/AES keys from the LSA signature, if it can be found."""
     sig_offset = self.find_signature()
     if not sig_offset:
         debug.warning('[LsaDecryptor] unable to find signature!')
         return
     self.iv = self.get_IV(sig_offset)
     self.des_key = self.get_des_key(sig_offset)
     self.aes_key = self.get_aes_key(sig_offset)
Example #37
0
 def decrypt_epwd(self, decryptor):
     """Decrypt the stored encrypted password into self.pwd.

     Does nothing when there is no encrypted password or no decryptor.
     """
     if not self.epwd or not decryptor:
         return
     self.pwd = decryptor.decrypt(self.epwd)
     try:
         self.pwd = self.pwd.decode('utf-16-le').rstrip('\0')
     except UnicodeDecodeError:
         # Keep a hex rendering when the plaintext is not valid UTF-16.
         debug.warning('[Credential:decrypt_epwd] unicode decode error')
         self.pwd = self.pwd.encode('hex')
Example #38
0
 def scan(self, offset = 0, maxlen = None, max_size = None):
     """Yield yara matches over every memory mapping of the task.

     Mappings larger than *max_size* (when given) are skipped with a warning.
     """
     for vma in self.task.get_proc_maps():
         start = vma.links.start
         end = vma.links.end
         span = end - start
         if max_size and span > max_size:
             debug.warning("Skipping max size entry {0:#x} - {1:#x}".format(start, end))
             continue
         for hit in malfind.BaseYaraScanner.scan(self, start, span):
             yield hit
Example #39
0
    def __str__(self):
        """Render as the empty string; in strict mode, escalate to a fatal error."""
        if not self.strict:
            debug.warning("NoneObject as string: {0}".format(self.reason))
            return ""
        ## Strict mode: stringifying a NoneObject is a hard failure.
        debug.error("Strict NoneObject string failure: {0} n{1}".format(self.reason, self.bt))
        sys.exit(0)
        return ""
Example #40
0
    def __str__(self):
        """String form of a NoneObject: "" normally, a fatal error when strict."""
        if self.strict:
            ## Strict mode turns stringification into a hard failure.
            debug.error("Strict NoneObject string failure: {0} n{1}".format(self.reason, self.bt))
            sys.exit(0)
        debug.warning("NoneObject as string: {0}".format(self.reason))
        return ""
Example #41
0
    def _list_to_type(self, name, typeList, typeDict=None):
        """ Parses a specification list and returns a VType object.

            This function is a bit complex because we support lots of
            different list types for backwards compatibility.

            The branches below are tried in order; the first one that
            matches the shape of *typeList* wins.
        """
        ## This supports plugin memory objects:
        try:
            kwargs = typeList[1]

            if type(kwargs) == dict:
                ## We have a list of the form [ ClassName, dict(.. args ..) ]
                return Curry(Object, theType=typeList[0], name=name, **kwargs)
        except (TypeError, IndexError) as _e:
            # typeList[1] missing or not subscriptable: fall through to the
            # positional forms handled below.
            pass

        ## This is of the form [ 'void' ]
        if typeList[0] == "void":
            return Curry(Void, None, name=name)

        ## This is of the form [ 'pointer' , [ 'foobar' ]]
        if typeList[0] == "pointer":
            try:
                target = typeList[1]
            except IndexError:
                raise RuntimeError("Syntax Error in pointer type defintion for name {0}".format(name))

            return Curry(Pointer, None, name=name, target=self._list_to_type(name, target, typeDict))

        ## This is an array: [ 'array', count, ['foobar'] ]
        if typeList[0] == "array":
            return Curry(
                Array, None, name=name, count=typeList[1], target=self._list_to_type(name, typeList[2], typeDict)
            )

        ## This is a list which refers to a type which is already defined
        if typeList[0] in self.types:
            return Curry(self.types[typeList[0]], name=name)

        ## Does it refer to a type which will be defined in future? in
        ## this case we just curry the Object function to provide
        ## it on demand. This allows us to define structures
        ## recursively.
        ##if typeList[0] in typeDict:
        try:
            tlargs = typeList[1]
        except IndexError:
            tlargs = {}

        obj_name = typeList[0]
        if type(tlargs) == dict:
            return Curry(Object, obj_name, name=name, **tlargs)

        ## If we get here we have no idea what this list is
        # raise RuntimeError("Error in parsing list {0}".format(typeList))
        debug.warning("Unable to find a type for {0}, assuming int".format(typeList[0]))
        return Curry(self.types["int"], name=name)
Example #42
0
	def reconstruct_file(self, outfd, file_object, file_name_path):
		"""Rebuild a file from previously extracted memory-page dumps.

		Reads every cache/direct page dump in *file_name_path*, verifies each
		dump's MD5, checks the page ranges for conflicting overlaps, and glues
		the pages into a single file named "this" (gaps filled with the
		configured fill byte). Failures are reported as warnings on *outfd*.
		"""
		def check_for_overlaps(data):
			# Generator: yields non-conflicting page tuples; raises
			# ExportException when two overlapping dumps disagree.
			# ASSUME: data is sorted by (start_addr, end_addr, md5)
			while len(data) >= 2:
				data_type1, start_addr1, end_addr1, data1, hash1 = data[0]
				data_type2, start_addr2, end_addr2, data2, hash2 = data[1]
				if start_addr2 <= end_addr1:
					# Check if the overlap provides a data conflict
					if start_addr1 == start_addr2 and end_addr1 == end_addr2 and hash1 == hash2:
						# data overlap provides no conflict
						pass
					else:
						# data conflict exists
						if os.path.exists("{0}/this".format(file_name_path)):
							# We're about to fail with reconstruction, eliminate any old "this" files from previous reconstruction attempts
							os.remove("{0}/this".format(file_name_path))
						raise ExportException("File Reconstruction Failed: overlapping page conflict (for address range 0x{0:08X}-0x{1:08X}) detected during file reconstruction - please manually fix and use --reconstruct to attempt rebuilding the file".format(start_addr2, end_addr1))
				else:
					yield data[0]
				data = data[1:]
			if len(data) > 0:
				yield data[0]
		
		outfd.write("[_FILE_OBJECT @ 0x{0:08X}] Reconstructing extracted memory pages from:\n  {1}\n".format(file_object.v(), file_name_path))
		# Byte used to pad unrecovered gaps between extracted pages.
		fill_char = chr(self._config.fill % 256)
		if os.path.exists(file_name_path):
			# list files in file_name_path
			raw_page_dumps = [ f for f in os.listdir(file_name_path) if re.search("(cache|direct)\.0x[a-fA-F0-9]{8}\-0x[a-fA-F0-9]{8}\.dmp(\.[a-fA-F0-9]{32})?$", f) ]
			# map file list to (dump_type, start_addr, end_addr, data, md5) tuple list
			page_dumps = []
			for dump in raw_page_dumps:
				# File name layout: <type>.<start>-<end>.dmp.<md5>
				dump_type, addr_range, ignore, md5 = dump.split(".")
				start_addr, end_addr = map(lambda x: x[2:], addr_range.split("-"))
				data = ""
				with open("{0}/{1}".format(file_name_path, dump), 'r') as fobj:
					data = fobj.read()
				if hashlib.md5(data).hexdigest() != md5:
					debug.error("consistency check failed (MD5 checksum check for {0} failed!)".format(dump))
				page_dumps += [ (dump_type, int(start_addr, 16), int(end_addr, 16), data, md5) ]
			try:
				# check for page overlaps
				extracted_file_data = check_for_overlaps(sorted(page_dumps, key = lambda x: (x[1], x[2], x[3])))
				# glue remaining file pages together and save reconstructed file as "this"
				with open("{0}/this".format(file_name_path), 'wb') as fobj:
					offset = 0
					for data_type, start_addr, end_addr, data, data_hash in extracted_file_data:
						if offset > end_addr:
							break
						if offset < start_addr:
							# Pad the gap before this page with the fill byte.
							fobj.write(fill_char*(start_addr - offset))
						fobj.write(data)
						offset = end_addr + 1
				outfd.write("Successfully Reconstructed File\n")
			except ExportException as exn:
				debug.warning(exn)
		else:
			outfd.write("..Skipping file reconstruction due to a lack of extracted pages\n")
Example #43
0
    def render_text(self, outfd, data):
        """Draw each desktop's visible window rectangles to a PNG in --dump-dir.

        Requires PIL; one image is produced per (session, window station,
        desktop), sized to the foreground window's bounding box.
        """
        if not has_pil:
            debug.error("Please install PIL")

        if not self._config.DUMP_DIR or not os.path.isdir(
                self._config.DUMP_DIR):
            debug.error("Please supply an existing --dump-dir")

        # Physical addresses of desktops already rendered (avoid duplicates).
        seen = []

        for window_station in data:
            for desktop in window_station.desktops():

                offset = desktop.PhysicalAddress
                if offset in seen:
                    continue
                seen.append(offset)

                # The foreground window
                win = desktop.DeskInfo.spwnd

                # Some desktops don't have any windows
                if not win:
                    debug.warning("{0}\{1}\{2} has no windows\n".format(
                        desktop.dwSessionId, window_station.Name,
                        desktop.Name))
                    continue

                # Canvas sized to the foreground window's bounding rectangle.
                im = Image.new(
                    "RGB", (win.rcWindow.right + 1, win.rcWindow.bottom + 1),
                    "White")
                draw = ImageDraw.Draw(im)

                # Traverse windows, visible only
                for win, _level in desktop.windows(
                        win=win,
                        filter=lambda x: 'WS_VISIBLE' in str(x.style)):
                    draw.rectangle(win.rcWindow.get_tup(),
                                   outline="Black",
                                   fill="White")
                    draw.rectangle(win.rcClient.get_tup(),
                                   outline="Black",
                                   fill="White")

                file_name = "session_{0}.{1}.{2}.png".format(
                    desktop.dwSessionId, window_station.Name, desktop.Name)

                file_name = os.path.join(self._config.DUMP_DIR, file_name)

                try:
                    im.save(file_name, "PNG")
                    result = "Wrote {0}".format(file_name)
                except SystemError, why:
                    result = why

                outfd.write("{0}\n".format(result))
Example #44
0
    def calculate(self):
        """Combine mftparser/shellbags/timeliner body files and run mactime.

        Concatenates the three input body files into a temporary file, feeds
        it to the 'mactime' command, and returns the command's raw stdout
        (or an explanatory string when the command fails).
        """
        inputs = [
            self._config.TIMELINER_BODY, self._config.MFTPARSER_BODY,
            self._config.SHELLBAGS_BODY
        ]
        if None in inputs:
            debug.warning(
                "Not all input files specified ('mftparser', 'timeliner' and 'shellbags')."
            )

        debug.info("Combining mftparser, shellbags and timeliner bodies")
        debug.info("mftparser: %s" % self._config.MFTPARSER_BODY)
        debug.info("shellbags: %s" % self._config.SHELLBAGS_BODY)
        debug.info("timeliner: %s" % self._config.TIMELINER_BODY)

        tmpfile = tempfile.NamedTemporaryFile(mode='wb')
        debug.info("Combined body file: %s" % tmpfile.name)

        for infile in inputs:
            try:
                with open(infile, mode='rb') as f:
                    copyfileobj(f, tmpfile)
            except Exception:
                # Unspecified/unreadable inputs are deliberately skipped; the
                # warning above already flagged missing files.
                pass

        tmpfile.seek(0)

        if self._config.MACTIME_OPTIONS is not None:
            # BUG FIX: the original format string had only two placeholders
            # for three arguments, so MACTIME_OPTIONS was silently dropped
            # from the command line.
            command = "{} -d -b {} {}".format(self._config.MACTIME_PATH,
                                              tmpfile.name,
                                              self._config.MACTIME_OPTIONS)
        else:
            command = "{} -d -b {}".format(self._config.MACTIME_PATH,
                                           tmpfile.name)
        debug.info("Executing %s" % command)

        try:
            args = shlex.split(command)
            proc = subprocess.Popen(args,
                                    shell=False,
                                    stdin=tmpfile,
                                    stdout=subprocess.PIPE)
            cmd_output = proc.stdout.read()
        except Exception as err:
            debug.error("Failed to run 'mactime' command.  Error: %s" % err)
            return (
                "Exception: {},\nmftparser: {},\n shellbags: {},\n timeliner: {},\n mactime path:{},\n "
                "mactime options: {},\noutput file:{}".format(
                    str(err), self._config.MFTPARSER_BODY,
                    self._config.SHELLBAGS_BODY, self._config.TIMELINER_BODY,
                    self._config.MACTIME_PATH, self._config.MACTIME_OPTIONS,
                    self._config.MACTIME_OUTPUT))
        else:
            return cmd_output
        finally:
            tmpfile.close()
Example #45
0
 def scan(self, offset=0, maxlen=None, max_size=None):
     """Run the base yara scanner across each of the task's memory maps,
     skipping any mapping larger than *max_size* when that limit is set."""
     for mapping in self.task.get_proc_maps():
         map_len = mapping.links.end - mapping.links.start
         too_big = max_size and map_len > max_size
         if too_big:
             debug.warning("Skipping max size entry {0:#x} - {1:#x}".format(
                 mapping.links.start, mapping.links.end))
             continue
         for result in malfind.BaseYaraScanner.scan(self, mapping.links.start,
                                                    map_len):
             yield result
Example #46
0
    def render(self, outfd, grid):
        """Emit *grid* as a Graphviz digraph to *outfd*."""
        columns = grid.columns
        self._columns = columns
        self._text_cell_renderers = self._text_cell_renderers_func(columns)

        # A flat grid (depth <= 1) produces a degenerate graph with no edges.
        if grid.max_depth() <= 1:
            debug.warning("Dot output will be unhelpful since the TreeGrid is a flat list")

        outfd.write("digraph output {\n  node[shape = Mrecord];\n  # rankdir=LR;\n")
        grid.visit(None, self._add_node, (outfd, {None: 0}))
        outfd.write("}\n")
Example #47
0
    def __str__(self):
        """Stringify a NoneObject: empty string, or a fatal error in strict mode."""
        if self.strict:
            ## Strict mode escalates stringification to a hard failure.
            debug.error(
                f"Strict NoneObject string failure: {self.reason} n{self.bt}"
            )
            sys.exit(0)
        debug.warning(f"NoneObject as string: {self.reason}")
        return ""
Example #48
0
    def calculate(self):
        """Walk the system call table(s) and flag entries that are not
        known kernel symbols.

        Yields (table_name, index, call_addr, hooked) tuples where hooked
        is 1 when the entry does not resolve to a kernel symbol.
        """
        linux_common.set_plugin_members(self)

        if not has_distorm:
            debug.warning(
                "distorm not installed. The best method to calculate the system call table size will not be used."
            )

        memory_model = self.addr_space.profile.metadata.get(
            'memory_model', '32bit')

        # Entries are read as signed longs, so mask back to the address width.
        if memory_model == '32bit':
            mask = 0xffffffff
            table_name = "32bit"
        else:
            mask = 0xffffffffffffffff
            table_name = "64bit"

        # All known kernel symbol addresses, used to validate table entries.
        sym_addrs = self.profile.get_all_addresses()

        sys_call_info = self._get_table_info("sys_call_table")

        addrs = [(table_name, sys_call_info)]

        # 64 bit systems with 32 bit emulation
        ia32 = self.get_profile_symbol("ia32_sys_call_table")
        if ia32:
            ia32_info = self._get_table_info("ia32_sys_call_table")
            addrs.append(("32bit", ia32_info))

        for (table_name, (tableaddr, tblsz)) in addrs:

            table = obj.Object(theType='Array',
                               offset=tableaddr,
                               vm=self.addr_space,
                               targetType='long',
                               count=tblsz)

            for (i, call_addr) in enumerate(table):

                if not call_addr:
                    continue

                # have to treat them as 'long' so need to mask
                call_addr = call_addr & mask

                # Last tuple element: 1 = not a kernel symbol (likely hooked).
                if not call_addr in sym_addrs:
                    yield (table_name, i, call_addr, 1)
                else:
                    yield (table_name, i, call_addr, 0)
Example #49
0
    def render_text(self, outfd, data):
        """Render each desktop's visible windows (with labels) to a PNG.

        Like the unlabeled variant, but also draws each window's name text
        at its top-left corner. Requires PIL and an existing --dump-dir.
        """
        if not has_pil:
            debug.error("Please install PIL")

        if not self._config.DUMP_DIR or not os.path.isdir(self._config.DUMP_DIR):
            debug.error("Please supply an existing --dump-dir")

        # Physical addresses of desktops already rendered (avoid duplicates).
        seen = []

        for window_station in data:
            for desktop in window_station.desktops():

                offset = desktop.PhysicalAddress
                if offset in seen:
                    continue
                seen.append(offset)

                # The foreground window
                win = desktop.DeskInfo.spwnd

                # Some desktops don't have any windows
                if not win:
                    debug.warning("{0}\{1}\{2} has no windows\n".format(
                        desktop.dwSessionId, window_station.Name, desktop.Name))
                    continue

                # Canvas sized to the foreground window's bounding rectangle.
                im = Image.new("RGB", (win.rcWindow.right + 1, win.rcWindow.bottom + 1), "White")
                draw = ImageDraw.Draw(im)

                # Traverse windows, visible only
                for win, _level in desktop.windows(
                                        win = win,
                                        filter = lambda x : 'WS_VISIBLE' in str(x.style)):
                    draw.rectangle(win.rcWindow.get_tup(), outline = "Black", fill = "White")
                    draw.rectangle(win.rcClient.get_tup(), outline = "Black", fill = "White")

                    ## Create labels for the windows
                    self.draw_text(draw, str(win.strName or ''), win.rcWindow.left + 2, win.rcWindow.top)

                file_name = "session_{0}.{1}.{2}.png".format(
                    desktop.dwSessionId,
                    window_station.Name, desktop.Name)

                file_name = os.path.join(self._config.DUMP_DIR,
                    file_name)

                try:
                    im.save(file_name, "PNG")
                    result = "Wrote {0}".format(file_name)
                except SystemError, why:
                    result = why

                outfd.write("{0}\n".format(result))
Example #50
0
	def dump_from_pool(self):
		"""Collect dump results for every file object found by FileScan.

		With --reconstruct, only (file_obj, name) pairs are collected;
		otherwise each file object is dumped immediately. Empty results
		are filtered out.
		"""
		result = []
		for object_obj, file_obj, name in filescan.FileScan.calculate(self):
			try:
				if bool(self._config.reconstruct):
					# --reconstruct: defer dumping, just remember the object.
					result.append((file_obj, name))
				else:
					result.append(self.dump_file_object(file_obj))
			except ExportException as exn:
				debug.warning(exn)
		return filter(None, result)
Example #51
0
 def dump_from_pool(self):
     """Gather (or immediately dump) every file object from FileScan,
     dropping empty results from the returned collection."""
     collected = []
     for object_obj, file_obj, name in filescan.FileScan.calculate(self):
         try:
             if self._config.reconstruct:
                 # Reconstruction mode: keep the raw object for later.
                 item = (file_obj, name)
             else:
                 item = self.dump_file_object(file_obj)
             collected.append(item)
         except ExportException as exn:
             debug.warning(exn)
     return filter(None, collected)
Example #52
0
    def calculate(self):
        """Walk the system call table and verify each entry resolves to a
        known kernel symbol, yielding one result tuple per entry."""
        linux_common.set_plugin_members(self)

        if not has_distorm:
            debug.warning("distorm not installed. The best method to calculate the system call table size will not be used.")

        # get_syscalls already yields fully-formed
        # (tableaddr, table_name, i, idx_name, call_addr, sym_name, hooked)
        # tuples; pass them straight through.
        for entry in self.get_syscalls(None, True, True):
            yield entry
Example #53
0
    def render_text(self, outfd, data):
        """Render carved .job files as a table, optionally dumping each raw
        hit to --dump-dir.

        data -- iterable of (offset, job_file_bytes) pairs.
        """
        if self._config.verbose and self._config.QUICK:
            debug.warning('The quick mode only carves At#.job files.')

        self.table_header(outfd, [
            ("Offset(P)", "[addrpad]"),
            ("ScheduledDate", "23"),
            ("MostRecentRunTime", "23"),
            ("Application", "50"),
            ("Parameters", "100"),
            ("WorkingDir", "50"),
            ("Author", "30"),
            ("RunInstanceCount", "3"),
            ("MaxRunTime", "10"),
            ("ExitCode", "10"),
            ("Comment", ""),
        ])

        i = 1
        for offset, job_file in data:
            # Dump the data if --dump-dir was supplied
            if self._config.DUMP_DIR:
                path = os.path.join(self._config.DUMP_DIR, 'carved_%s.job' % i)
                # Context manager guarantees the handle closes even if the
                # write raises (original leaked the handle in that case).
                with open(path, 'wb') as fh:
                    fh.write(job_file)
                i += 1
                if self._config.verbose:
                    debug.info('  Written: ' + os.path.basename(path))
            try:
                job = JobParser(job_file)
            except Exception:
                # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
                # still propagate.
                if self._config.verbose:
                    debug.error('Failed parsing the hit at 0x%x' % offset)
                continue
            # MaxRunTime is in milliseconds; split into h:m:s.ms. Floor
            # division keeps the original Python 2 integer semantics and
            # avoids a float under Python 3.
            hours, ms = divmod(job.MaxRunTime, 3600000)
            minutes, ms = divmod(ms, 60000)
            seconds = ms // 1000
            self.table_row(
                outfd,
                offset,
                job.ScheduledDate,
                job.RunDate,
                job.Name,
                job.Parameter,
                job.WorkingDirectory,
                job.User,
                job.RunningInstanceCount,
                '{0:02}:{1:02}:{2:02}.{3}'.format(hours, minutes, seconds, ms),
                '{0:#010x}'.format(job.ExitCode),
                job.Comment,
            )
Example #54
0
  def __init__(self, config, *args, **kwargs):
    """Initialise the stack-frame plugin: register options, resolve the
    requested kernel/user frame-unwinding strategies, and record filters.

    NOTE(review): the 'user:' branch of the strategy parser below appears
    corrupted in this source (literal '******' runs where string literals
    should be) -- it needs to be restored from the original before this
    code can run. Left byte-for-byte as found.
    """
    threads.Threads.__init__(self, config, *args, **kwargs)

    if not yara_installed:
      debug.warning("In order to search the stack frames, it is necessary to install yara - searching is disabled")

    config.add_option('UNWIND', default = DEFAULT_UNWIND, help = 'List of frame unwinding strategies (comma-separated)', action = 'store', type = 'str')
    config.add_option('LISTUNWINDS', default = False, help = 'List all known frame unwinding strategies', action = 'store_true')
    config.add_option("SYMBOLS", default = False, action = 'store_true', cache_invalidator = False, help = "Use symbol servers to resolve process addresses to module names (we assume symbol tables have already been built)")

    # All registered StackTop subclasses, keyed by class name.
    stack_registry = registry.get_plugin_classes(StackTop)

    # --listunwinds: print the available strategies and exit immediately.
    if getattr(config, 'LISTUNWINDS', False):
      print "Stack Frame Unwind Strategies:\n"
      for cls_name, cls in sorted(stack_registry.items(), key=lambda v: v[0]):
        if cls_name not in ["UserFrame", "KernelFrame"]:
          print "{0:<20}: {1}\n".format(cls_name, pydoc.getdoc(cls))
      sys.exit(0)

    # Resolve each requested strategy name; 'kernel:'/'user:' prefixes pin a
    # strategy to one side, unprefixed names apply wherever they fit.
    self.kernel_strategies = []
    self.user_strategies = []
    for strategy in getattr(config, 'UNWIND', DEFAULT_UNWIND).split(","):
      if ":" in strategy:
        if strategy.startswith("kernel:"):
          strategy = strategy[len("kernel:"):]
          if strategy not in stack_registry or not issubclass(stack_registry[strategy], KernelFrame):
            debug.error("{0} is not a valid kernel stack unwinding strategy".format(strategy))
          self.kernel_strategies.append(stack_registry[strategy])
        # NOTE(review): the following line is garbled in this copy of the
        # source (see docstring) and is preserved as found.
        elif strategy.startswith("user:"******"user:"******"{0} is not a valid user stack unwinding strategy".format(strategy))
          self.user_strategies.append(stack_registry[strategy])
        else:
          debug.error("{0} is an unrecognised stack".format(strategy.split(":")[0]))
      elif strategy not in stack_registry:
        debug.error("{0} is neither a valid kernel nor user stack unwinding strategy".format(strategy))
      elif not issubclass(stack_registry[strategy], KernelFrame) and not issubclass(stack_registry[strategy], UserFrame):
        debug.error("{0} is neither a valid kernel nor stack unwinding strategy".format(strategy))
      else:
        if issubclass(stack_registry[strategy], KernelFrame):
          self.kernel_strategies.append(stack_registry[strategy])
        if issubclass(stack_registry[strategy], UserFrame):
          self.user_strategies.append(stack_registry[strategy])

    self.use_symbols = getattr(config, 'SYMBOLS', False)

    # Determine which filters the user wants to see
    if getattr(config, 'FILTER', None):
      self.filters = set(config.FILTER.split(','))
    else:
      self.filters = set()
    def parse_sdb_key(self, subkey):
        """Parse one shim-database registry subkey.

        Returns (description, db_path, last_write_time, key_path, pids),
        or None when the key has no description or cannot be parsed.
        """
        key_path = self.regapi.reg_get_key_path(subkey) or str(subkey.Name)

        try:
            desc = sanitize_path(self.regapi.reg_get_value('software', '', 'DatabaseDescription', subkey) or '')
            db_path = sanitize_path(self.regapi.reg_get_value('software', '', 'DatabasePath', subkey) or '')
            pids = self.find_pids_for_imagepath(desc)
        except Exception as e:
            debug.warning('Failed while parsing {}. Exception: {} {}'.format(key_path, type(e).__name__, e.args))
            # BUG FIX: the original fell through to 'if desc:' with desc
            # potentially unbound after an exception, raising NameError.
            return None

        if desc:
            return (desc, db_path, subkey.LastWriteTime, key_path, pids)
    def dump_table(self, folder, SDT, filename):
        """Write *SDT* (an object exposing dump(fobj)) to folder/filename.

        The write is skipped with a warning when the target file already
        exists and the OVERWRITE option is not set.
        """
        outfile = "{0}/{1}".format(folder, filename)

        # Honour existing files unless the user asked to overwrite.
        if os.path.isfile(outfile) and not self._config.OVERWRITE:
            debug.warning("file {0} already exists - skip".format(outfile))
            return

        # Byte-mode write; the context manager guarantees the handle is
        # closed even if SDT.dump raises (the original leaked it).
        with open(outfile, "wb") as fobj:
            SDT.dump(fobj)