Example #1
 def progress(self, blocks, blocksz, totalsz):
   if self.lastprog == None:
       debug.info("Connected. Downloading data...")
   percent = int((100*(blocks*blocksz)/float(totalsz)))
   if self.lastprog != percent and percent % 5 == 0: 
     debug.info("{0}%".format(percent))
   self.lastprog = percent
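
This callback has the (block count, block size, total size) signature of a urllib-style reporthook; Example #23 below passes such a method as reporthook=self.progress. A minimal standalone sketch of that wiring (the URL and filename are made up):

import urllib

def progress(blocks, blocksz, totalsz):
    # urlretrieve invokes the hook as (blocks transferred, block size, total size)
    print("{0}%".format(int(100 * (blocks * blocksz) / float(totalsz))))

# hypothetical download; any urllib-style retriever accepts the same hook
urllib.urlretrieve("http://example.com/symbols.pd_", "symbols.pd_", progress)
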
Example #2
    def check_microarch(self, addr, phy_space, key):
        microarch = hyper.revision_id_db[key]

        if microarch.lower() == "sandy":
            vmcs_off = hyper.vmcs_offset_sandy
        elif microarch.lower() == "core":
            vmcs_off = hyper.vmcs_offset_core
        else:
            debug.error("Microarchitecture %s not supported yet." % microarch)

        off = vmcs_off["VMCS_LINK_POINTER"] * 4
        data = phy_space.read(addr + off, 0x04)
        vmcs_link_pointer = struct.unpack('<I', data)[0]
        data2 = phy_space.read(addr + off + 0x04, 0x04)
        vmcs_link_pointer2 = struct.unpack('<I', data2)[0]

        if (vmcs_link_pointer == 0xffffffff and vmcs_link_pointer2 == 0xffffffff):
            size = layouts.vmcs.vmcs_field_size["GUEST_CR3"] / 8
            off = vmcs_off["GUEST_CR3"] * 4
            data = phy_space.read(addr + off, size)
            if size == 4:
                guest_cr3 = struct.unpack('<I', data)[0]
            elif size == 8:
                guest_cr3 = struct.unpack('<Q', data)[0]
            else:
                debug.error("CR3 size not possible.")

            if ((guest_cr3 % 4096) == 0) and (guest_cr3 != 0):
                debug.info("\t|__ VMCS 0x%08x [CONSISTENT]" % addr)
Example #3
    def find_function_symbol(self, task, address):
        """
        Match a function symbol to a function address.
        @param task: the task_struct
        @param address:  The function address
        @return: The function symbol or None
        """
        if self.symbols:
            for vma in task.get_proc_maps():
                if vma.vm_start <= address <= vma.vm_end:
                    #lib = vma.vm_file
                    lib = linux_common.get_path(task, vma.vm_file)
                    offset = address - vma.vm_start

                    #libsymbols = self.symbols[os.path.basename(lib)]
                    if type(lib) == list:
                        lib = ""
                    base = os.path.basename(lib)
                    #print(base)
                    #print("{:016x} {} {}".format(offset, base, lib))

                    if base in self.symbols:

                        if offset in self.symbols[base]:
                            debug.info("Instruction was a call to 0x{:016x} = {}@{}".format(address, self.symbols[base][offset], base ))
                            return self.symbols[base][offset]
                        elif address in self.symbols[base]:# for a function in the main binary, eg 0x40081e
                            debug.info("Instruction was a call to 0x{:016x} = {}@{}".format(address, self.symbols[base][address], base ))
                            return self.symbols[base][address]
                    break
        return None
Example #4
 def __init__(self, location):
     """Initializes the firewire implementation"""
     self.location = location.strip('/')
     debug.info("Waiting for 5s firewire to settle")
     self._bus = forensic1394.Bus()
     self._bus.enable_sbp2()
     time.sleep(5)
     self._device = None
Example #5
        def get_symbol(self, sym_name, nm_type = "", module = "kernel"):
            """Gets a symbol out of the profile
            
            sym_name -> name of the symbol
            nm_type  -> type as defined by 'nm' (man nm for examples)
            module   -> which module to get the symbol from, default is kernel, otherwise can be any name seen in 'lsmod'
    
            This fixes a few issues from the old static hash table method:
            1) Conflicting symbols can be handled: if a symbol conflicts on any profile,
               the plugin needs to provide the nm_type to differentiate, otherwise the lookup errors out
            2) Can handle symbols gathered from modules on disk as well from the static kernel
    
            symtable is stored as a hash table of:
            
            symtable[module][sym_name] = [(symbol address, symbol type), (symbol address, symbol type), ...]
    
            The function has overly verbose error checking on purpose...
            """

            symtable = self.sys_map

            ret = None

            # check if the module is there...
            if module in symtable:

                mod = symtable[module]

                # check if the requested symbol is in the module
                if sym_name in mod:

                    sym_list = mod[sym_name]

                    # if a symbol has multiple definitions, then the plugin needs to specify the type
                    if len(sym_list) > 1:
                        if nm_type == "":
                            debug.error("Requested symbol {0:s} in module {1:s} has multiple definitions and no type given\n".format(sym_name, module))
                        else:
                            for (addr, stype) in sym_list:

                                if stype == nm_type:
                                    ret = addr
                                    break

                            if ret == None:
                                debug.error("Requested symbol {0:s} in module {1:s} could not be found\n".format(sym_name, module))
                    else:
                        # get the address of the symbol
                        ret = sym_list[0][0]
                else:
                    debug.debug("Requested symbol {0:s} not found in module {1:s}\n".format(sym_name, module))
            else:
                debug.info("Requested module {0:s} not found in symbol table\n".format(module))

            if self.shift_address and ret:
                ret = ret + self.shift_address

            return ret
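
A hedged usage sketch (the symbol names below are only illustrative, and the addr_space/profile attributes are assumed to be set up as in Example #13):

# look up a well-known kernel symbol, then one with an explicit nm type
# ('D' = global data symbol) in case it has multiple definitions
init_task = self.addr_space.profile.get_symbol("init_task")
modules = self.addr_space.profile.get_symbol("modules", nm_type = "D", module = "kernel")
if init_task:
    debug.info("init_task at 0x{0:x}".format(init_task))
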
Example #6
 def render_text(self, outfd, data):
     self.outfd = outfd
     for (p, reg, frames) in data:
         #self.render_registers(reg)
         debug.info("Found {} frames!".format(len(frames)))
         debug.info("")
         print(frames)
         if self.dump_file:
             self.write_annotated_stack(self.dump_file, self.calculate_annotations(frames))
     print(stats)
Example #7
    def render_text(self, outfd, data):
        if self._config.verbose and self._config.QUICK:
            debug.warning('The quick mode only carves At#.job files.')

        self.table_header(outfd,
                        [("Offset(P)", "[addrpad]"),
                         ("ScheduledDate", "23"),
                         ("MostRecentRunTime", "23"),
                         ("Application", "50"),
                         ("Parameters", "100"),
                         ("WorkingDir", "50"),
                         ("Author", "30"),
                         ("RunInstanceCount", "3"),
                         ("MaxRunTime", "10"),
                         ("ExitCode", "10"),
                         ("Comment", ""),
                        ])

        i = 1
        for offset, job_file in data:
            # Dump the data if --dump-dir was supplied
            if self._config.DUMP_DIR:
                path = os.path.join(self._config.DUMP_DIR, 'carved_%s.job' % i)
                fh = open(path, 'wb')
                fh.write(job_file)
                fh.close()
                i += 1
                if self._config.verbose:
                    debug.info('  Written: ' + os.path.basename(path))
            try:
                job = JobParser(job_file)
            except:
                if self._config.verbose:
                    debug.error('Failed parsing the hit at 0x%x' % offset)
                continue
            hours, ms = divmod(job.MaxRunTime, 3600000)
            minutes, ms = divmod(ms, 60000)
            seconds = ms / 1000
            self.table_row(outfd,
                        offset,
                        job.ScheduledDate,
                        job.RunDate,
                        job.Name,
                        job.Parameter,
                        job.WorkingDirectory,
                        job.User,
                        job.RunningInstanceCount,
                        '{0:02}:{1:02}:{2:02}.{3}'.format(
                            hours, minutes, seconds, ms),
                        '{0:#010x}'.format(job.ExitCode),
                        job.Comment,
                        )
Example #8
    def visit_window(self, screen_id, win):
        
        if win.v() in self._seen_windows:
            debug.info('Window referenced more than once! Offset {:#x}. (Skipped)'.format(win.v()))
        else:
            self._windows.append((screen_id, win))
            self._seen_windows.add(win.v())

        if win.firstChild and self._current_vm.is_valid_address(win.firstChild):
            self.visit_window(screen_id, win.firstChild.dereference())
        
        if win.nextSib and self._current_vm.is_valid_address(win.nextSib):
            self.visit_window(screen_id, win.nextSib.dereference())
Example #9
 def visit_atomNode(self, atomNode):
 
     if atomNode.v() in self._seen_atoms:
         debug.info('Atom referenced more than once! Offset {:#x}.'.format(atomNode.v()))
     else:
         self._atoms[int(atomNode.a)] = atomNode
         self._seen_atoms.add(atomNode.v())
     
     if atomNode.left and self._current_vm.is_valid_address(atomNode.left):
         self.visit_atomNode(atomNode.left.dereference())
     
     if atomNode.right and self._current_vm.is_valid_address(atomNode.right):
         self.visit_atomNode(atomNode.right.dereference())
Example #10
    def find_prevalent_microarch(self, generic_vmcs, phy_space):
        microarch_vmcs = {}
        for vmcs in generic_vmcs:
            try:
                revid_raw = phy_space.read(vmcs, 0x04)
            except:
                continue

            rev_id = struct.unpack('<I', revid_raw)[0]
            for key in layouts.revision_id_db.keys():
                if key == rev_id:
                    if key not in microarch_vmcs:
                        microarch_vmcs[key] = []
                        microarch_vmcs[key].append(vmcs)
                        debug.info("Possible VMCS 0x%x with %s microarchitecture" % (vmcs,
                        layouts.db.revision_id_db[key]))
                        self.check_microarch(vmcs, phy_space, key)
                    else:
                        debug.info("Possible VMCS 0x%x with %s microarchitecture" % (vmcs,
                        layouts.db.revision_id_db[key]))
                        microarch_vmcs[key].append(vmcs)
                        self.check_microarch(vmcs, phy_space, key)
        maxi = 0
        key = None
        for k, v in microarch_vmcs.items():
            if len(microarch_vmcs[k]) > maxi:
                maxi = len(microarch_vmcs[k])
                key = k
        if key != None:
            debug.info("Prevalent Microarch: [0x%08x - %s] - VMCS: %d" % (key,
            layouts.db.revision_id_db[key], maxi))
        debug.info("Microarchitecture not found.")
Example #11
        def get_all_symbols(self, module = "kernel"):
            """ Gets all the symbol tuples for the given module """
            ret = []

            symtable = self.sys_map

            if module in symtable:
                mod = symtable[module]

                for (name, addrs) in mod.items():
                    ret.append([name, addrs[0][0]])
            else:
                debug.info("All symbols  requested for non-existent module %s" % module)

            return ret
Example #12
 def find_return_libc_start(self, proc_as, start_stack, return_start):
     """
     Scans the stack for a certain address, in this case the return address of __libc_start_main.
     @param proc_as: Process address space
     @param start_stack: Start address to search
     @param return_start: The return address to find
     @return The address found or None
     """
     address = start_stack
     for value in yield_address(proc_as, start_stack, reverse=True):
         if value == return_start:
             debug.info("Scanned {} stack addresses before finding the __libc_start_main return address".format((start_stack-address)/linux_process_info.address_size))
             return address
         address -= linux_process_info.address_size
     debug.info("Exhausted search for __libc_start_main return address at stack address {:016x}".format(address))
     return None
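
yield_address is not shown in these snippets; the following is only a plausible sketch of such a generator, inferred from how it is called here (reverse=True, one pointer-sized value per step):

import struct

def yield_address(proc_as, start, reverse = False, address_size = 8):
    # walk the address space one pointer at a time, yielding each value;
    # with reverse=True the walk moves towards lower addresses
    step = -address_size if reverse else address_size
    fmt = '<Q' if address_size == 8 else '<I'
    address = start
    while proc_as.is_valid_address(address):
        data = proc_as.read(address, address_size)
        if not data:
            break
        yield struct.unpack(fmt, data)[0]
        address += step
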
Example #13
    def get_all_kmem_caches(self):
        linux_common.set_plugin_members(self)
        cache_chain = self.addr_space.profile.get_symbol("cache_chain")
        slab_caches = self.addr_space.profile.get_symbol("slab_caches")

        if cache_chain: #slab
            caches = obj.Object("list_head", offset = cache_chain, vm = self.addr_space)
            listm = "next"
            ret = [cache for cache in caches.list_of_type("kmem_cache", listm)]
        elif slab_caches: #slub
            debug.info("SLUB is currently unsupported.")
            ret = []
        else:
            debug.error("Unknown or unimplemented slab type.")

        return ret
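
A hedged usage sketch of the method above; obj_offset is the kernel virtual address each kmem_cache object was instantiated at:

caches = self.get_all_kmem_caches()
debug.info("Found {0} kmem_cache structures".format(len(caches)))
for cache in caches:
    debug.info("kmem_cache at 0x{0:x}".format(cache.obj_offset))
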
Example #14
        def get_all_function_symbols(self, module = "kernel"):
            """ Gets all the function tuples for the given module """
            ret = []

            symtable = self.type_map

            if module in symtable:
                mod = symtable[module]

                for (addr, (name, _sym_types)) in mod.items():
                    if self.shift_address and addr:
                        addr = addr + self.shift_address

                    ret.append([name, addr])
            else:
                debug.info("All symbols requested for non-existent module %s" % module)

            return ret
Example #15
    def render_text(self, outfd, data):

#03.14
        print "%%%%%%%%%%%%%%%%%%%%%%%%%%%% linux_process_stack,render_text, Begin::",datetime.datetime.now()
#

        self.outfd = outfd
        for (p, reg, frames) in data:
            #self.render_registers(reg)
            debug.info("Found {} frames!".format(len(frames)))
            debug.info("")
            print(frames)
            if self.dump_file:
                self.write_annotated_stack(self.dump_file, self.calculate_annotations(frames))
        print(stats)

#03.14
        print "%%%%%%%%%%%%%%%%%%%%%%%%%%%% linux_process_stack,render_text, End::",datetime.datetime.now()
Example #16
    def get_record_info(self, profile):
        """Get search metadata for the appropriate record version for this profile"""

        record_info = None

        record_version = self.get_record_version(profile)

        if self._config.RECORDTYPE:
            debug.info('Forcing record version {}'.format(self._config.RECORDTYPE))

            if self._config.RECORDTYPE != record_version:
                debug.warning('Overriding expected profile record version {} with user-specified version {}'.format(record_version, self._config.RECORDTYPE))

            record_version = self._config.RECORDTYPE

        if record_version in USN_RECORD_SEARCHDATA:
            record_info = USN_RECORD_SEARCHDATA[record_version]

        return record_info
Example #17
 def find_scanned_frames(self, p, address, end):
     """
     Find frames by scanning for return addresses.
     @param p: process info object
     @param address: Start address
     @param end: End address
     @return: a list of frames
     """
     address_size = linux_process_info.address_size
     frames = []
     debug.info("Scan range (%rsp to end) = (0x{:016x} to 0x{:016x})".format(address, end))
     count = 0
     while address <= end:
         if p.proc_as.is_valid_address(address) and self.is_return_address(read_address(p.proc_as, address, address_size), p):
             st = stack_frame(address + address_size, p.proc_as, count)
             frames.append(st)
             count += 1
         address += address_size
     return frames
Example #18
    def calculate(self):
    
        # Apply the correct vtypes for the profile
        addr_space = utils.load_as(self._config)
        addr_space.profile.object_classes.update(linux_xatoms.xatoms_classes)
        addr_space.profile.vtypes.update(xwindows_vtypes_x64)
        addr_space.profile.compile()

        # Build a list of tasks
        tasks = linux_pslist.linux_pslist.calculate(self)
        if self._config.PID:
            pids = [int(p) for p in self._config.PID.split(',')]
            the_tasks = [t for t in tasks if t.pid in pids]
        else:
            # Find the X Windows task
            the_tasks = []
            for task in tasks:
                task_offset, dtb, ppid, uid, gid, start_time = self._get_task_vals(task)
                task_name = str(task.comm)
                task_pid = int(task.pid)
                if task_name == 'X' or task_name == 'Xorg':
                    the_tasks.append(task)

        # In case no appropriate processes are found
        if len(the_tasks) < 1:
            return

        for task in the_tasks:

            # These need to be here so that they're reset for each X/Xorg process.
            self._atoms = {}  # Holds the atoms, per X process
            self._seen_atoms = set()  # Holds a list of atom offsets for avoiding circular referencing

            self._current_vm = task.get_process_address_space()
            msg = 'Working with \'{0}\' (pid={1}).'.format(str(task.comm), task.pid)
            debug.info(msg)
            proc_maps = task.get_proc_maps()
            atom_root = self.seek_atom_root(task, proc_maps)
            if atom_root:
                self.visit_atomNode(atom_root)
            debug.info('Found {:,} atom(s).'.format(len(self._atoms)))
            yield msg, self._atoms
Example #19
 def find_entry_point(self, proc_as, start_code):
     """
     Read the entry point from the program header.
     @param proc_as: Process address space
     @param start_code: Start of the program code mapping
     @return The address of the entry point (_start)
     """
     # entry point lives at ELF header + 0x18
     # add it to the memory mapping of the binary
     if not proc_as.is_valid_address(start_code+0x18):
         # it's gone from memory
         debug.info("We could not find program entry point, skipping _start detection")
         return False
     offset = read_address(proc_as, start_code+0x18)
     if offset > start_code:
         # it's an absolute address
         return offset
     else:
         # it's a relative offset, i.e. PIE code
         return start_code + offset
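
The 0x18 offset follows from the ELF64 header layout: e_ident is 16 bytes, and e_type, e_machine, e_version together take another 8, so e_entry starts at byte 24 (0x18). A small self-contained illustration:

import struct

elf64_header = (
    b"\x7fELF\x02\x01\x01" + b"\x00" * 9 +   # e_ident (16 bytes)
    struct.pack("<HHI", 2, 0x3e, 1) +        # e_type, e_machine, e_version (8 bytes)
    struct.pack("<Q", 0x400430)              # e_entry at offset 0x18
)
e_entry = struct.unpack("<Q", elf64_header[0x18:0x18 + 8])[0]
print("entry point: 0x{0:x}".format(e_entry))  # -> 0x400430
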
Example #20
    def calculate(self):
        address_space = utils.load_as(self._config, astype = 'physical')

        if not self.is_valid_profile(address_space.profile):
            debug.error('This command does not support the selected profile.')

        if self._config.QUICK:
            scanner = AtJobsScanner()
        else:
            # Regex matching... slow!
            scanner = GenericJobsScanner()
        for offset in scanner.scan(address_space):
            if self._config.verbose:
                debug.info('[+] Found hit: 0x%x' % offset)
            data = scanner.carve(address_space, offset)
            if data:
                yield offset, data
            elif self._config.verbose:
                debug.info('[-] Failed verification')
        return
Example #21
    def calculate(self):
        lpi = linux_process_info
        if self._config.SYMBOL_DIR:
            self.symbols = self.load_symbols(self._config.SYMBOL_DIR)
            #print(self.symbols['libc-2.13.so'])
        if self._config.DUMP_FILE:
            try:
                self.dump_file = open(self._config.DUMP_FILE, 'a+')
                debug.info("Opened {} for writing".format(self._config.DUMP_FILE))
            except IOError:
                debug.error("Failed to open %s for writing".format(self._config.DUMP_FILE))




        for p in linux_process_info.linux_process_info.calculate(self):
            stats['tasks'] += 1
            if p:
                for i, task in enumerate(p.threads):
                    stats['threads'] += 1
                    #print(i, task.comm.v(), p.thread_registers[i], p.thread_stacks[i], p.thread_stack_ranges)
                    #for reg, value in p.thread_registers[i]._asdict().iteritems():
                    #    print(reg, "{:016x}".format(value))
                    debug.info("Starting analysis of task: pid {}, thread name {}".format(task.pid, task.comm))
                    debug.info("=================================================")
                    yield self.analyze_stack(p, task, i) #, self.analyze_registers(p, task, i)
            else:
                stats['tasks_ignored'] += 1
Example #22
    def dumpFromPhdr(self, proc_as, filename):
        outfile = open(filename, "wb+")
        filesize = 0
        self.elfInfo["PHDR_ENTRIES"] = {}
        for i in range(0, self.elfInfo["AUXV"]["AT_PHNUM"] * self.elfInfo["AUXV"]["AT_PHENT"], self.elfInfo["AUXV"]["AT_PHENT"]):
            if self.elfInfo["ARCH"] == 'x86':
                phdr_entry = Elf32_PhdR()
            else:
                phdr_entry = Elf64_PhdR()
            phdr_entry.readFromDump(proc_as, self.elfInfo["AUXV"]["AT_PHDR"] + i)
            debug.info(phdr_entry)

            if not phdr_entry.p_type in self.elfInfo["PHDR_ENTRIES"]:
                self.elfInfo["PHDR_ENTRIES"][phdr_entry.p_type] = []
            self.elfInfo["PHDR_ENTRIES"][phdr_entry.p_type].append(phdr_entry)

            if phdr_entry.p_type == ElfStruct.phdr_types["PT_LOAD"]:
                if filesize < (phdr_entry.p_offset + phdr_entry.p_filesz):
                    filesize = phdr_entry.p_offset + phdr_entry.p_filesz
                debug.info("Dumping %x size %x \n" % (phdr_entry.p_vaddr, phdr_entry.p_filesz))
                raw = proc_as.zread(phdr_entry.p_vaddr, phdr_entry.p_filesz)
                outfile.seek(phdr_entry.p_offset)
                outfile.write(raw)
        debug.info("File size = %d\n" % filesize)
        outfile.close()
        return
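
The Elf32_PhdR/Elf64_PhdR readers themselves are not shown here; a hedged sketch of how the 64-bit variant could decode one program header entry from the process address space (field order per the standard Elf64_Phdr definition):

import struct

def read_elf64_phdr(proc_as, vaddr):
    # Elf64_Phdr: p_type and p_flags are 4 bytes each, followed by six 8-byte fields
    data = proc_as.read(vaddr, 56)
    (p_type, p_flags, p_offset, p_vaddr, p_paddr,
     p_filesz, p_memsz, p_align) = struct.unpack("<IIQQQQQQ", data)
    return {"p_type": p_type, "p_offset": p_offset,
            "p_vaddr": p_vaddr, "p_filesz": p_filesz, "p_memsz": p_memsz}
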
Example #23
  def download_pdbfile(self, db, guid, module_id, filename, path):
    db.execute("SELECT id FROM pdb WHERE guid=? AND file=?", (str(guid.upper()).rstrip('\0'), str(filename).rstrip('\0')))
    row = db.fetchone()
    if row == None:
      db.execute("INSERT INTO pdb(guid, file) VALUES (?, ?)", (str(guid.upper()).rstrip('\0'), str(filename).rstrip('\0')))
      db.execute("SELECT LAST_INSERT_ROWID() FROM pdb")
      row = db.fetchone()
    pdb_id = row[0]
    db.execute("SELECT * FROM mod_pdb WHERE module_id=? AND pdb_id=?", (module_id, pdb_id))
    row = db.fetchone()
    if row == None:
      db.execute("INSERT INTO mod_pdb(module_id, pdb_id) VALUES (?, ?)", (module_id, pdb_id))
    self._sym_db_conn.commit()

    for sym_url in SYM_URLS:
      url = "{0}/{1}/{2}/".format(sym_url, filename, guid)
      proxy = urllib2.ProxyHandler()
      opener = urllib2.build_opener(proxy)
      tries = [ filename[:-1] + '_', filename ]
      for t in tries:
        debug.info("Trying {0}".format(url+t))
        outfile = os.path.join(path, t)
        try:
          PDBOpener().retrieve(url+t, outfile, reporthook=self.progress)
          debug.info("Downloaded symbols and cached at {0}".format(outfile))
          if t.endswith("_"):
            self.cabextract(outfile, path)
            debug.info("Unpacked download into {0}".format(path))
            os.remove(outfile)
            db.execute("UPDATE pdb SET downloaded_at=DATETIME('now'), src=? WHERE id=? AND guid=? AND file=?", (sym_url, pdb_id, str(guid.upper()).rstrip('\0'), str(filename).rstrip('\0')))
            self._sym_db_conn.commit()
          return
        except urllib2.HTTPError, e:
          debug.warning("HTTP error {0}".format(e.code))
Example #24
 def find_function_address(self, proc_as, ret_addr):
     """
     Calculates the function address given a return address. Disassembles code to get through the double indirection
     introduced by the Linux PLT.
     @param proc_as: Process address space
     @param ret_addr: Return address
     @return The function address or None
     """
     if distorm_loaded:
         decode_as = self.decode_as
         retaddr_assembly = distorm3.Decode(ret_addr - 5, proc_as.read(ret_addr - 5, 5), decode_as)
         if len(retaddr_assembly) == 0:
             return None
         #print(retaddr_assembly)
         retaddr_assembly = retaddr_assembly[0] # We're only getting 1 instruction
         # retaddr_assembly[2] = "CALL 0x400620"
         instr = retaddr_assembly[2].split(' ')
         #print(instr)
         if instr[0] == 'CALL':
             try:
                 target = int(instr[1][2:], 16)
             except ValueError:
                 return None
             bytes = proc_as.read(target, 6)
             if not bytes:
                 # We're not sure if this is the function address
                 return target
             plt_instructions = distorm3.Decode(target, bytes, decode_as)
             plt_assembly = plt_instructions[0] # 1 instruction
             #print(plt_assembly)
             instr2 = plt_assembly[2].split(' ')
             #print(instr2)
             if instr2[0] == 'JMP':
                 final_addr = None
                 if instr2[1] == 'DWORD':
                     target2 = int(instr2[2][3:-1], 16)
                 elif instr2[1] == 'QWORD': # if QWORD
                     target2 = int(instr2[2][7:-1], 16)
                 else: # if 0xADDRESS
                     final_addr = int(instr2[1][2:],16)
                 if not final_addr:
                     final_addr = target + 6 + target2
                 debug.info("Found function address from instruction {} at offset 0x{:016x}".format(instr2, target))
                 if proc_as.is_valid_address(final_addr):
                     return read_address(proc_as, final_addr)
                 return None
             elif instr2[0] == 'PUSH' and instr2[1] == 'RBP':
                 # This is an internal function
                 debug.info("Found function address from instruction {} at offset 0x{:016x}".format(instr, target))
                 return target
             else:
                 # In case push rbp is removed
                 debug.info("Found function address from instruction {} at offset 0x{:016x}".format(instr, target))
                 return target
         return None
     else:
         return None
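
For orientation: distorm3.Decode returns a list of (offset, size, assembly text, hex bytes) tuples, which is why the code indexes [0] for the first instruction and [2] for its mnemonic string. A small illustration with a hand-picked relative CALL encoding:

import distorm3

code = b"\xe8\xd6\xfd\xff\xff"  # CALL rel32
for offset, size, asm, hexbytes in distorm3.Decode(0x400855, code, distorm3.Decode64Bits):
    # prints something like: 0x400855 5 CALL 0x400630
    print("0x{0:x} {1} {2}".format(offset, size, asm))
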
Example #25
    def load_symbols(self, dir):
        """
        Loads function symbols from a directory.
        @param dir: the directory
        @return: a symbol dict or None
        """
        if os.path.isdir(dir):
            debug.info("Loading function symbols from directory: {}".format(dir))
            symbols = {}
            for filename in os.listdir(dir):
                # We're ignoring the type of symbol, for now
                if filename[-7:] == '.dynsym':
                    libname = filename[:-7]
                elif filename[-8:] == '.symbols':
                    libname = filename[:-8]
                else:
                    libname = filename

                if not libname in symbols:
                    symbols[libname] = {}
                with open(os.path.join(dir, filename), 'r') as f:
                    for line in f:
                        line = line.strip().split(' ')
                        if len(line) == 2:
                            # symbol is undefined, ignore for now
                            pass
                        else: # len = 3
                            offset = int(line[0], 16)
                            t = line[1] # We're ignoring the type, for now
                            name = line[2]
                            symbols[libname][offset] = name
                            #print(symbols[libname][offset])
            return symbols
        else:
            debug.warning("Loading function symbols from directory: Not a valid directory: {}".format(dir))
        return None
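
The symbol files this loader reads follow nm's default output format: an address, a one-letter symbol type, and a name separated by spaces (two-field lines are undefined symbols and are skipped above). A tiny standalone illustration of the same parsing:

line = "000000000007e120 T malloc".strip().split(' ')
if len(line) == 3:
    offset, sym_type, name = int(line[0], 16), line[1], line[2]
    print("{0} {1} at offset 0x{2:x}".format(sym_type, name, offset))  # T malloc at offset 0x7e120
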
Example #26
 def parse_screenInfo(self, screen_info):
 
     debug.info('Parsing the {} ScreenPtr structure(s).'.format(screen_info.numScreens))
     for screen_ptr in screen_info.screens:
         if screen_ptr and self._current_vm.is_valid_address(screen_ptr):
             screen = screen_ptr.dereference()
             debug.info(screen)
             debug.info('Parsing the windows.')
             if self._current_vm.is_valid_address(screen.root):
                 self.visit_window(screen.myNum, screen.root.dereference())
Example #27
    def calculate(self):
        
        addr_space = utils.load_as(self._config)
        
        # Check the profile: we only support 64-bit Linux
        meta = addr_space.profile.metadata
        if not (meta['os'] == 'linux' and meta['memory_model'] == '64bit'):
            debug.error('Sorry, currently only 64-bit Linux is supported.')

        # Apply the correct vtypes for the profile
        addr_space.profile.object_classes.update(linux_xwindows.xwindows_classes)
        addr_space.profile.vtypes.update(xwindows_vtypes_x64)
        addr_space.profile.compile()

        # Build a list of tasks
        tasks = linux_pslist.linux_pslist.calculate(self)
        if self._config.PID:
            pids = [int(p) for p in self._config.PID.split(',')]
            the_tasks = [t for t in tasks if t.pid in pids]
        else:
            # Find the X Windows task
            the_tasks = []
            for task in tasks:
                task_offset, dtb, ppid, uid, gid, start_time = self._get_task_vals(task)
                task_name = str(task.comm)
                task_pid = int(task.pid)
                if task_name == 'X' or task_name == 'Xorg':
                    the_tasks.append(task)

        # In case no appropriate processes are found
        if len(the_tasks) < 1:
            return

        for task in the_tasks:

            # These need to be here so that they're reset for each X/Xorg process.
            self._windows = []  # Stores a list of WindowPtr objects; one for each window found
            self._seen_windows = set()  # Holds a list of window offsets for avoiding circular referencing
            self._atoms = {}  # Stores a dictionary of AtomNode objects, indexed by atom id
            self._seen_atoms = set()  # Holds a list of atom offsets for avoiding circular referencing
            self._current_vm = task.get_process_address_space()

            msg = 'Working with \'{0}\' (pid={1}).'.format(str(task.comm), task.pid)
            debug.info(msg)
            proc_maps = task.get_proc_maps()
            screen_info = self.seek_screen_info(task, proc_maps)
            atom_root = self.seek_atom_root(task, proc_maps)
            if atom_root:
                self.visit_atomNode(atom_root)
            debug.info('Found {:,} atom(s).'.format(len(self._atoms)))
            self.parse_screenInfo(screen_info)
            debug.info('Found {:,} window(s).'.format(len(self._windows)))
            yield msg, self._windows
Example #28
  def __init__(self, addr_space, build_symbols):
    self.parser = NameParser()

    # Used to cache symbol/export/module information in an SQLite3 DB
    # NB: Volatility cache storage area used, but we manage things manually.
    cache_sym_db_path = "{0}/symbols".format(addr_space._config.CACHE_DIRECTORY)
    if not os.path.exists(cache_sym_db_path):
      os.makedirs(cache_sym_db_path)
    self._sym_db_conn = sqlite3.connect("{0}/{1}.db".format(cache_sym_db_path, addr_space.profile.__class__.__name__))
    db = self.get_cursor()
    db.execute("attach ':memory:' as volatility")
    self._sym_db_conn.commit()

    kdbg = tasks.get_kdbg(addr_space)

    # Build tables and indexes
    self.create_tables(db)

    # Insert kernel space module information
    debug.info("Building function symbol tables for kernel space...")
    for mod in kdbg.modules():
      if build_symbols:
        # Module row created by add_exports if it doesn't already exist
        self.add_exports(db, mod)
        self.add_debug_symbols(db, kdbg.obj_vm, mod)
      # Link in system modules to each process
      for eproc in kdbg.processes():
        self.add_module(db, eproc, mod)

    # Insert (per process) user space module information
    debug.info("Building function symbol tables for each processes user space...")
    for eproc in kdbg.processes():
      debug.info("Processing PID {0} ...".format(int(eproc.UniqueProcessId)))
      for mod in eproc.get_load_modules():
        if build_symbols:
          # Module row created by add_exports if it doesn't already exist
          self.add_exports(db, mod)
          self.add_debug_symbols(db, eproc.get_process_address_space(), mod)
        # Link in user module to current process
        self.add_module(db, eproc, mod)

    debug.info("Symbol tables successfully built")
Example #29
 def render_text(self, outfd, data):
     if (not os.path.isdir(self._config.DUMP_DIR)):
         debug.error("Please specify an existing output dir (--dump-dir)")
     linux_common.set_plugin_members(self)
     if self.profile.metadata.get('arch').lower() == 'x86':
         self.elfInfo['ARCH'] = 'x86'
         debug.info("x86 Architecture \n")
     elif self.profile.metadata.get('arch').lower() == 'x64':
         self.elfInfo['ARCH'] = 'x64'
         debug.info("x64 Architecture \n")
     dumped = []
     for task in data:
         #retrieve the auxiliary vector for the process
         debug.info("Looking for auxiliary_vector process PID = %s" % self._config.PID)
         self.get_auxiliary_vector(task)
         proc_as = task.get_process_address_space()
         #create the ELF file of the process
         file_name = task.comm + '.dump'
         file_path = os.path.join(self._config.DUMP_DIR, file_name)
         self.dumpFromPhdr(proc_as, file_path)
         debug.info("Reset elf\n")
         self.resetElfHdr(file_path)
         self.restorePlt(file_path, proc_as)
         dumped.append(task)
     outfd.write("Dumped Process:\n")
     self.table_header(outfd, [("Offset", "[addrpad]"),
                               ("Name", "20"),
                               ("Pid", "15"),
                               ("Uid", "15"),
                               ("Start Time", "")])
     for task in dumped:
         self.table_row(outfd, task.obj_offset,
             task.comm,
             str(task.pid),
             str(task.uid) if task.uid else "-",
             task.get_task_start_time())
Example #30
 def restorePlt(self, filename, proc_as):
     #outfile = io.FileIO(filename, 'rb+')
     #Starting from the PT_DYNAMIC program header, read the dynamic linking information; we are interested in
     # DT_SYMTAB (the address of the symbol table) to locate each symbol, and in DT_PLTGOT
     debug.info("Looking at the content of dynamic header in the memory dump...")
     addr = self.elfInfo["PHDR_ENTRIES"][ElfStruct.phdr_types["PT_DYNAMIC"]][0].p_vaddr
     if self.elfInfo['ARCH'] == 'x86':
         dynamic_entry = Elf32_Dyn()
     else:
         dynamic_entry = Elf64_Dyn()
     dynamic_entry.readFromDump(proc_as, addr)
     while not dynamic_entry.d_tag == ElfStruct.dynamic_entry_types['DT_NULL']:
         debug.info(dynamic_entry)
         addr += 0x08
         dynamic_entry.readFromDump(proc_as, addr)
         # if dynamic_entry.d_tag == ElfStruct.dynamic_entry_types['DT_PLTGOT']:
     debug.info(dynamic_entry)
     #outfile.close()
     return
Example #31
    def analyze_stack(self, process_info, task, thread_number):
        """
        Analyzes the stack, building the stack frames and performing validation
        @param process_info: The process info object
        @param task: the task_struct
        @param thread_number: the thread number for use in process info
        @return: a tuple (process info, registers, frames list) or None
        """
        # shortcut variables
        p = process_info
        i = thread_number

        is_thread = i != 0  # only the first thread has stack arguments etc

        for (low, high) in p.thread_stack_ranges:
            # print("{:016x} {:016x} {:016x}".format(low, p.thread_registers[i].rsp, high))
            if low <= p.thread_registers[i].rsp <= high:
                debug.info("Found the stack at 0x{:016x}-0x{:016x}".format(
                    low, high))
                stack_low = low
                stack_high = high

        # print(stack_high)

        address_size = linux_process_info.address_size

        # Find the needed mappings
        libc_start, libc_end, libc_name = process_info.get_map_by_name(
            'libc-', 'r-x')
        debug.info("Found libc ({}) at range: 0x{:016x}-0x{:016x}".format(
            libc_name, libc_start, libc_end))
        debug.info("Program code located at 0x{:016x}-0x{:016x}".format(
            p.mm_start_code, p.mm_end_code))

        if is_thread:
            debug.info(
                "Current task is a thread, we don't expect to find the start/main return addresses!"
            )

        # Get the entry point from the elf headers
        entry_point = self.find_entry_point(p.proc_as, p.mm_start_code)

        offset = p.mm.arg_start % address_size  # stack alignment
        stack_arguments = p.mm.arg_start - address_size - offset

        libc_start_main_stack_frame = None

        main_scan_start = None
        if not is_thread and entry_point:
            debug.info(
                "Executable entry point ('_start' function): 0x{:016x}".format(
                    entry_point))

            # Experiments showed the entry point usually contains the same code
            # the instruction before would be the call to __libc_start_main
            return_start = entry_point + 0x29

            debug.info(
                "Scanning for return address of __libc_start_main function, starting at program arguments (0x{:016x}) downwards"
                .format(stack_arguments))
            return_libc_start = self.find_return_libc_start(
                p.proc_as, stack_arguments, return_start)

            # print(return_libc_start)
            if return_libc_start:
                stats['libc_start'] += 1
                debug.info(
                    "Found the __libc_start_main return address (0x{:016x}) at address 0x{:016x}"
                    .format(return_start, return_libc_start))

                # Find the return address of the main function
                # debug.info("Scanning for return address of main function, starting at %rsp: 0x{:016x}".format(p.thread_registers[i].rsp))
                debug.info(
                    "Scanning for return address of main function, starting at __libc_start_main return address (0x{:016x}) downwards"
                    .format(return_libc_start))
                main_scan_start = return_libc_start

                # give it a stack frame
                libc_start_main_stack_frame = stack_frame(
                    return_libc_start + address_size, p.proc_as, 0)

        if not main_scan_start:
            if not is_thread:
                main_scan_start = stack_arguments
                debug.info(
                    "Scanning for return address of main function, starting at program arguments (0x{:016x}) downwards"
                    .format(main_scan_start))
            else:
                main_scan_start = stack_high

        found_main = self.find_return_main(process_info.proc_as, libc_start,
                                           libc_end, main_scan_start)

        if found_main:
            stats['main'] += 1
            stack_main, main_offset = found_main
            debug.info("Found main stackframe at 0x{:016x}".format(stack_main))
            main_frame = stack_frame(stack_main + address_size, p.proc_as, -1)

            # print(main_frame)
            main_pointer = main_frame.ret_address + main_offset + address_size
            main_address = read_address(p.proc_as, main_pointer, address_size)
            debug.info("The address of the main function is 0x{:016x}".format(
                main_address))
            main_frame.function = main_address
            # print("{:016x} {:016x}".format(main_pointer, main_address))
        else:
            debug.warning("Unable to find address of main stackframe")
            debug.info("Assuming no frame pointers")
            main_address = 0
            main_frame = None
            # return p, p.thread_registers[i], []

        frames = []

        st = None

        if self.has_frame_pointer(main_address, p.proc_as):
            debug.info(
                "Register %rbp was not 0, trying old-school stack frames")
            frames += self.find_oldschool_frames(p, p.proc_as,
                                                 p.thread_registers[i])
        elif found_main:  # apparently, -O1 and higher dont use rbp
            debug.info(
                "No old-school stack frames detected, scanning for return addresses"
            )

            address = p.thread_registers[i].rsp
            end = main_frame.ret_address - address_size
            frames += self.find_scanned_frames(p, address, end)

            main_frame.frame_number = frames[-1].frame_number + 1
            frames.append(main_frame)
        else:
            address = p.thread_registers[i].rsp
            end = stack_high
            frames += self.find_scanned_frames(p, address, end)

        if len(frames) > 0:
            lastframe = frames[-1]
            while (lastframe.ebp and p.is_thread_stack_pointer(lastframe.ebp)
                   and not lastframe.ebp == lastframe.ebp_address):
                newframe = stack_frame(
                    lastframe.ebp + (address_size * 2),
                    p.proc_as,
                    lastframe.frame_number + 1,
                )
                frames.append(newframe)
                lastframe = newframe

            # print("{:016x}, {:016x}".format(main_frame.address, lastframe.address))
            if main_frame:
                if main_frame.address == lastframe.address:
                    lastframe.function = main_frame.function
                else:
                    frames.append(main_frame)
            if libc_start_main_stack_frame:
                if lastframe.address != libc_start_main_stack_frame.address:
                    frames.append(libc_start_main_stack_frame)
        else:
            if main_frame:
                frames.append(main_frame)
            if libc_start_main_stack_frame:
                frames.append(libc_start_main_stack_frame)

        for frame in frames:
            if not frame.function:
                frame.function = self.find_function_address(
                    p.proc_as, frame.ret)
            frame.symbol = self.find_function_symbol(task, frame.function)

            stats['frames']['possible_frames'] += 1
            if frame.function:
                stats['frames']['function_address'] += 1
            if frame.symbol:
                stats['frames']['symbols'] += 1

        # self.find_locals_size(p.proc_as, frames)

        if len(frames) == 0:
            if is_thread:
                stats['threads_zero_frames'] += 1
            else:
                stats['tasks_zero_frames'] += 1
        # self.validate_stack_frames(frames)
        return p, p.thread_registers[i], frames
Example #32
    def _parse_migration(self):
        # use AdvancingReader to treat the base AS like a file
        r = AdvancingReader(self.base)

        # skip header
        r.skip(HEADER_LENGTH)

        # read page bundles
        last_bundle = False
        last_iteration = False
        self._iterations += 1

        while not last_bundle:
            # determine bundle/marker type
            bundle_start = r.offset
            bundle_type, = r.unpack("<B")

            # handle next iteration marker
            if bundle_type == NEXT_ITERATION_MARKER:
                debug.debug("Skipping next iteration marker @ %#x" % r.offset)
                r.skip(NEXT_ITERATION_MARKER_LENGTH)
                self._iterations += 1
                continue

            # handle last iteration marker (only occurs once)
            elif bundle_type == LAST_ITERATION_MARKER and not last_iteration:
                debug.debug("Skipping last iteration marker @ %#x" % r.offset)
                r.skip(LAST_ITERATION_MARKER_LENGTH)
                self._iterations += 1

                # this flag is used to only parse this marker exactly once
                # the very last bundle also starts with a 0x1 byte
                # setting this flag prevents the parser from confusing the two structures
                last_iteration = True

                continue

            # verify bundle type
            self.as_assert(bundle_type in BUNDLE_MARKERS, "Invalid bundle_type: %d @ %#x" % (bundle_type, bundle_start))
            last_bundle = bundle_type == LAST_BUNDLE_MARKER

            # parse and verify bundle header
            # uint32 magic; uint8 reserved[11]; uint32 pageCount;
            magic, page_count = r.unpack("<I11xI")
            self.as_assert(magic == BUNDLE_MAGIC, "Invalid page bundle magic: %#x @ %#x" % (magic, bundle_start))
            self.as_assert(PAGE_COUNT_MIN <= page_count <= PAGE_COUNT_MAX,
                           "Invalid page_count: %d @ %#x" % (page_count, bundle_start))

            debug.debug("Page bundle with %d entries @ %#x" % (page_count, bundle_start))

            # parse array of page numbers
            # uint32 pageNumbers[128];
            page_numbers = r.unpack("<128I")
            # discard unpopulated array entries
            page_numbers = page_numbers[:page_count]

            # parse array of page metadata
            # pageMeta pageMetadata[128];
            # struct pageMeta {uint32 iteration; uint32 pageGroup; uint32 pageTypeA; uint64 pointer; uint32 pageTypeB;};
            page_metadata = r.unpack("<" + ("IIIQI" * 128))
            # discard unpopulated array entries
            page_metadata = page_metadata[:page_count * 5]

            # extract page types from page metadata
            page_types_a = page_metadata[2::5]
            page_types_b = page_metadata[4::5]

            # validate page types
            for x in zip(page_types_a, page_types_b):
                self.as_assert(x in VALID_PAGETYPES, "Unknown page type: %s @ %#x" % (x, bundle_start))

            # all information in the bundle header has been read
            # r.offset now points to page contents

            # populate _pages dictionary
            cnt = 0
            for i in xrange(page_count):
                # page contents are only transmitted for some types of pages
                if page_types_a[i] in PAGE_TYPES_WITH_CONTENTS:
                    # starting address of the page (VM physical)
                    page_addr = page_numbers[i] * PAGE_SIZE

                    # collect stats
                    cnt += 1
                    self._transferred_pages += 1
                    if page_addr in self._pages:
                        self._retransmitted_pages += 1

                    # store/overwrite translation mapping
                    self._pages[page_addr] = r.offset

                    # move r.offset to the next page
                    r.skip(PAGE_SIZE)
                else:
                    self._skipped_pages += 1

            debug.debug("Page bundle contained contents of %d pages @ %#x" % (cnt, bundle_start))

        def mb(pages):
            return (pages * PAGE_SIZE) >> 20

        report = ["%d pages (%dM) extracted from migration" % (len(self._pages), mb(len(self._pages))),
                  "%d (%dM) transferred" % (self._transferred_pages, mb(self._transferred_pages)),
                  "%d (%dM) retransmitted" % (self._retransmitted_pages, mb(self._retransmitted_pages)),
                  "%d iterations" % self._iterations]
        debug.info("; ".join(report))
Example #33
def debug_info(msg):
    if VERBOSE:
        debug.info(msg)
Example #34
 def validate_swap(self, swap_info_struct):
     #print("max: {:016x}".format(swap_info_struct.max))
     debug.info("inuse_pages <= pages: {}".format(
         swap_info_struct.inuse_pages <= swap_info_struct.pages))
     debug.info("lowest_bit <= highest_bit: {}".format(
         swap_info_struct.lowest_bit <= swap_info_struct.highest_bit))
     debug.info("lowest_alloc <= highest_alloc: {}".format(
         swap_info_struct.lowest_alloc <= swap_info_struct.highest_alloc))
     debug.info("is_valid_address(swap_map): {}".format(
         self.addr_space.is_valid_address(swap_info_struct.swap_map)))
     debug.info("is_valid_address(curr_swap_extent): {}".format(
         self.addr_space.is_valid_address(
             swap_info_struct.curr_swap_extent)))
     #debug.info("is_valid_address(first_swap_extent): {}".format(self.addr_space.is_valid_address(swap_info_struct.first_swap_extent)))
     debug.info("is_valid_address(bdev): {}".format(
         self.addr_space.is_valid_address(swap_info_struct.bdev)))
Example #35
 def logverbose(self, msg):
     if self._config.VERBOSE:
         debug.info(msg)
Example #36
    def find_return_main(self, proc_as, libc_start, libc_end, start_address):
        """
        Find the return address of the main function by scanning for pointers into libc. At this point we will look
        for specific patterns in the code, to gather addresses.
        @param proc_as: Process address space
        @param libc_start: Start address of libc code
        @param libc_end: End address of libc code
        @param start_address: The address to start the scan at.
        @return: The address on the stack and an offset (the location of the main address on the stack) or None/False
        """
        if not distorm_loaded:
            return

        # This function checks if it is a return address, does the actual work
        def is_return_address(address):
            # Load 1 instruction (Debian)
            #
            # hardcoding 4 bytes
            size = 4
            bytestr = proc_as.read(address - size, size)

            # Instruction in the form of 'CALL RSP+0x18'
            single_instr = distorm3.Decode(address - size, bytestr,
                                           self.decode_as)
            if len(single_instr) == 1 and single_instr[0][2][:4] == 'CALL':
                # we use this one
                # print(single_instr)
                part = single_instr[0][2].split('[')[1]
                if part[:4] == 'RSP+':
                    # take the part after the +, slice off the 0x, and convert to an int
                    rspoffset = int(part.split('+')[1][2:-1], 16)
                    return rspoffset

            # Arch linux/Ubuntu
            # load 3 instructions, something like this:
            # mov 0x18(%rsp), %rax (size 5)
            # mov (%rax), %rdx (size 3)
            # callq *reg (size 2)

            # hardcoding 10 bytes
            size = 10

            bytestr = proc_as.read(address - size, size)
            possible = ['RCX', 'RAX']
            instr = distorm3.Decode(address - size, bytestr, self.decode_as)
            # print(instr[-1][2])
            checkother = False
            if 0 < len(instr) < 3:
                pass
            elif len(instr) == 3:
                # check all 3
                checkother = True
            else:
                return False

            last_instr = instr[-1][2].split(' ')
            register = None

            # print(last_instr)

            if last_instr[0] == 'CALL' and last_instr[1] in possible:
                # print(last_instr)
                register = last_instr[1]
            else:
                # print(last_instr)
                return None

            # Find the offset
            if checkother:
                mov = 'MOV ' + register
                confirmed = True
                movinstr = None
                saveinstr = None
                if mov in instr[0][2]:
                    movinstr = instr[0][2]
                    saveinstr = instr[1][2]
                elif mov in instr[1][2]:
                    saveinstr = instr[0][2]
                    movinstr = instr[1][2]
                else:
                    # that's weird
                    confirmed = False

                if movinstr != None:
                    part = movinstr.split('[')[1]
                    if part[:4] == 'RSP+':
                        # take the part after the +, slice off the 0x, and convert to an int
                        rspoffset = int(part.split('+')[1][2:-1], 16)
                        return rspoffset
            return False

        # just a loop with some minor logic, the internal function does all the work
        addr = start_address
        counter = 0
        invalid = 0
        for value in yield_address(proc_as, start_address, reverse=True):
            if libc_start <= value <= libc_end:
                counter += 1
                # print("{:016x} {:016x}".format(addr, value))
                if not proc_as.is_valid_address(value):
                    invalid += 1
                else:
                    retval = is_return_address(value)
                    if retval:
                        debug.info(
                            "Scanned {} libc addresses on the stack before finding the main return address"
                            .format(counter))
                        return addr, retval
            addr -= linux_process_info.address_size
        debug.info(
            "Scanned {} libc addresses on the stack, did not find the main return address"
            .format(counter))
        debug.info(
            "Of these addresses, {} were invalid (e.g. due to swap)".format(
                invalid))
Example #37
 def find_function_address(self, proc_as, ret_addr):
     """
     Calculates the function address given a return address. Disassembles code to get through the double indirection
     introduced by the Linux PLT.
     @param proc_as: Process address space
     @param ret_addr: Return address
     @return The function address or None
     """
     if distorm_loaded:
         decode_as = self.decode_as
         retaddr_assembly = distorm3.Decode(ret_addr - 5,
                                            proc_as.read(ret_addr - 5, 5),
                                            decode_as)
         if len(retaddr_assembly) == 0:
             return None
         # print(retaddr_assembly)
         retaddr_assembly = retaddr_assembly[
             0]  # We're only getting 1 instruction
         # retaddr_assembly[2] = "CALL 0x400620"
         instr = retaddr_assembly[2].split(' ')
         # print(instr)
         if instr[0] == 'CALL':
             try:
                 target = int(instr[1][2:], 16)
             except ValueError:
                 return None
             bytes = proc_as.read(target, 6)
             if not bytes:
                 # We're not sure if this is the function address
                 return target
             plt_instructions = distorm3.Decode(target, bytes, decode_as)
             plt_assembly = plt_instructions[0]  # 1 instruction
             # print(plt_assembly)
             instr2 = plt_assembly[2].split(' ')
             # print(instr2)
             if instr2[0] == 'JMP':
                 final_addr = None
                 if instr2[1] == 'DWORD':
                     target2 = int(instr2[2][3:-1], 16)
                 elif instr2[1] == 'QWORD':  # if QWORD
                     target2 = int(instr2[2][7:-1], 16)
                 else:  # if 0xADDRESS
                     final_addr = int(instr2[1][2:], 16)
                 if not final_addr:
                     final_addr = target + 6 + target2
                 debug.info(
                     "Found function address from instruction {} at offset 0x{:016x}"
                     .format(instr2, target))
                 return read_address(proc_as, final_addr)
             elif instr2[0] == 'PUSH' and instr2[1] == 'RBP':
                 # This is an internal function
                 debug.info(
                     "Found function address from instruction {} at offset 0x{:016x}"
                     .format(instr, target))
                 return target
             else:
                 # In case push rbp is removed
                 debug.info(
                     "Found function address from instruction {} at offset 0x{:016x}"
                     .format(instr, target))
                 return target
         return None
     else:
         return None
Example #38
    def execute(self,options,config,yarapath):
        addr_space = utils.load_as(config)

        if not os.path.isfile("kdcopydatablock.txt"):
            if (addr_space.profile.metadata.get("os") == "windows" and addr_space.profile.metadata.get("memory_model") == "64bit" and addr_space.profile.metadata.get("major") >= 6 and addr_space.profile.metadata.get("minor") >= 2):
                kdbg = tasks.get_kdbg(addr_space)
                fout = open('kdcopydatablock.txt', 'w')
                kdblockaddr = '{0:#x}'.format(kdbg.KdCopyDataBlock)
                fout.write(kdblockaddr)
                fout.close()
                sys.argv.append("--kdbg")
                sys.argv.append(kdblockaddr)
        
        processList = tasks.pslist(addr_space)

        if adutils.getConfigValue(options,'sockets') == True:
            getSocketsDelegate = adsockets.getSocketsFactory(addr_space.profile)
            sockets = getSocketsDelegate(config,addr_space)

        if adutils.getConfigValue(options,'yarascan') == True:
            getYaraDelegate = adyarascan.getYaraFactory(addr_space.profile)
            config.update('YARA_RULES_DIRECTORY',yarapath)
            compiledrules = getYaraDelegate(config).compile_rules()

        list_head_offset = None
        has_service_table = False

        process_obj_list = datastructs.rootType()

        for processIndex, eprocess in enumerate(processList):
            config.process_id = eprocess.UniqueProcessId
            config.dtb = eprocess.Pcb.DirectoryTableBase

            all_mods = list(eprocess.get_load_modules())

            # get Token for Privileges
            token = eprocess.Token.dereference_as('_TOKEN')
            if hasattr(token.Privileges, 'Present'):
                privileges = token.Privileges.Present
            else:
                # Current memory analysis erroneously points
                # to token.ModifiedId for privileges for XP
                # The line below will match what the current memory analysis collects:
                # privileges = token.ModifiedId.LowPart
                # I don't think this is correct, either.
                luid = token.Privileges.dereference_as('_LUID_AND_ATTRIBUTES')
                privileges = luid.Luid.LowPart
            
            validName = "Unknown"    
            if eprocess.ImageFileName:
                validName = eprocess.ImageFileName
            name = self.get_full_name(name=validName, path=eprocess.Peb.ProcessParameters.ImagePathName or '')
            try:
                process_obj = process_obj_list.Process.add(
                    resultitemtype=18,
                    Name=name,
                    Path=adutils.SmartUnicode(eprocess.Peb.ProcessParameters.ImagePathName or ""),
                    StartTime=adutils.SmartUnicode(eprocess.CreateTime or ""),
                    WorkingDir=adutils.SmartUnicode(eprocess.Peb.ProcessParameters.CurrentDirectory.DosPath or ""),
                    CommandLine=adutils.SmartUnicode(eprocess.Peb.ProcessParameters.CommandLine or ""),
                    LinkTime=0,
                    Subsystem=long(eprocess.Peb.ImageSubsystem),
                    Imagebase=long(eprocess.Peb.ImageBaseAddress),
                    Characteristics=0,
                    Checksum=0,
                    KernelTime=long(eprocess.Pcb.KernelTime),
                    UserTime=long(eprocess.Pcb.UserTime),
                    Privileges=long(privileges),
                    PID=int(eprocess.UniqueProcessId),
                    ParentPID=int(eprocess.InheritedFromUniqueProcessId),
                    User='',
                    Group='',
                    MD5=BLANK_MD5,
                    SHA1=BLANK_SHA1,
                    FuzzySize=0,
                    Fuzzy='',
                    Fuzzy2X='',
                    KFFStatus=0,
                    FromMemory='',
                    EffectiveUser='',
                    EffectiveGroup='',
                    Size=calculate_image_size(eprocess),
                    EProcBlockLoc=long(eprocess.obj_vm.vtop(eprocess.obj_offset)) or 0,
                    WindowTitle=adutils.SmartUnicode(eprocess.Peb.ProcessParameters.WindowTitle or "")
                )
            except:
                debug.info('Caught error in adding process, continuing')
                continue

            kthread = eprocess.Pcb.ThreadListHead.Flink.dereference_as('_KTHREAD')
            list_head_offset = kthread.ThreadListEntry.obj_offset - kthread.obj_offset
            kthread = obj.Object('_KTHREAD', offset=eprocess.Pcb.ThreadListHead.Flink - list_head_offset, vm=eprocess.obj_vm)
            if hasattr(kthread, 'ServiceTable'):
                SDTs = set()
                for i in range(eprocess.ActiveThreads):
                    if _is_valid_service_table_address(address=kthread.ServiceTable, memory_model=eprocess.obj_vm.profile.metadata.get('memory_model', '32bit')):
                        SDTs.add(long(kthread.ServiceTable))
                    kthread = obj.Object('_KTHREAD', offset=kthread.ThreadListEntry.Flink - list_head_offset, vm=eprocess.obj_vm)
                for sdt in SDTs:
                    process_obj.SDT.append(sdt)

            if adutils.getConfigValue(options,'processdlls') == True:
                for moduleIndex, module in enumerate(all_mods):
                    baseName = "Unknown"
                    if module.BaseDllName:
                        baseName = module.BaseDllName
                    dll_obj = process_obj.Loaded_DLL_List.DLL.add(
                        Name=adutils.SmartUnicode(baseName or ''),
                        Description='',
                        Path=adutils.SmartUnicode(module.FullDllName or ''),
                        Version='',
                        MD5=BLANK_MD5,
                        SHA1=BLANK_SHA1,
                        FuzzySize=0,
                        Fuzzy='',
                        Fuzzy2X='',
                        CreateTime=u"0000-00-00 00:00:00", #adutils.SmartUnicode(module.TimeDateStamp),
                        KFFStatus=0,
                        PID=int(eprocess.UniqueProcessId),
                        baseAddress=long(module.DllBase),
                        ImageSize=long(module.SizeOfImage),
                        ProcessName=name,
                        FromMemory=''
                    )
            if adutils.getConfigValue(options,'sockets') == True:
                pid = int(eprocess.UniqueProcessId)
                if pid in sockets:
                    process_obj.Open_Sockets_List.CopyFrom(sockets[pid])

            if adutils.getConfigValue(options,'handles') == True:
                if eprocess.ObjectTable.HandleTableList:
                    for handle in eprocess.ObjectTable.handles():
                        if not handle.is_valid():
                            continue
                        handle_obj = process_obj.Open_Handles_List.OpenHandle.add(
                            ID=long(handle.HandleValue),
                            Type=adutils.SmartUnicode(handle.get_object_type()),
                            Path=get_handle_name(handle),
                            AccessMask=int(handle.GrantedAccess),
                            Name='',
                            PID=int(eprocess.UniqueProcessId),
                            PointerCount=long(handle.PointerCount),
                            ObjectAddress=long(handle.obj_offset),
                            FromMemory='',
                            Owner='',
                            Group='',
                            Permissions=''
                        )
            if adutils.getConfigValue(options,'vad') == True:
                for vad in eprocess.VadRoot.traverse():
                    longflags = 0
                    if hasattr(vad, 'u'):
                        longflags = long(vad.u.LongFlags)
                    elif hasattr(vad, 'Core'):
                        longflags = long(vad.Core.u.LongFlags)
                    vad_obj = process_obj.Vad_List.Vad.add(
                        Protection=int(vad.VadFlags.Protection),
                        StartVpn=long(vad.Start >> PAGE_SIZE),
                        EndVpn=long(vad.End >> PAGE_SIZE),
                        Address=long(vad.obj_offset),
                        Flags=longflags,
                        Mapped=u'False',
                        ProcessName=process_obj.Name,
                        PID=process_obj.PID,
                        FromMemory='')
                    if not vad.Tag in _MMVAD_SHORT_TAGS:
                        if vad.FileObject and vad.FileObject.FileName:
                            name = str(vad.FileObject.FileName)
                            if len(name) > 0 and name[0] == '\\':
                                vad_obj.Filename = adutils.SmartUnicode(name)
                                vad_obj.Mapped = u'True'
                            else:
                                print name

            if adutils.getConfigValue(options,'yarascan') == True:
                pid = int(eprocess.UniqueProcessId)
                config.update('pid',str(pid))
                yara = getYaraDelegate(config).calculateonvad(compiledrules, eprocess)
                try:
                    for hit in yara:
                        process_obj.YaraHits.YaraHit.add(
                            id='',
                            Name=hit[2].rule,
                            Category='')
                except:
                    debug.info('Caught error in adding yarahit, continuing')

        file = open(config.OUTPUT_PATH + "processes.xml", "w")
        #file.write(process_obj_list.SerializeToString())
        file.write(proto2xml(process_obj_list,indent=0))
        file.close()
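# A minimal illustration (using only ctypes, no Volatility objects) of the
# "containing record" arithmetic used above to walk threads: the offset of
# ThreadListEntry inside _KTHREAD is computed once, then subtracted from each
# Flink to recover the embedding _KTHREAD. The structure layout below is made
# up purely for this demonstration and does not match the real _KTHREAD.
import ctypes

class LIST_ENTRY(ctypes.Structure):
    _fields_ = [("Flink", ctypes.c_void_p), ("Blink", ctypes.c_void_p)]

class FAKE_KTHREAD(ctypes.Structure):
    _fields_ = [("Header", ctypes.c_uint64 * 4),
                ("ThreadListEntry", LIST_ENTRY),
                ("ServiceTable", ctypes.c_uint64)]

def containing_kthread(list_entry_addr):
    # Same idea as: obj.Object('_KTHREAD', offset=Flink - list_head_offset, vm=...)
    return list_entry_addr - FAKE_KTHREAD.ThreadListEntry.offset

thread = FAKE_KTHREAD()
entry_addr = ctypes.addressof(thread.ThreadListEntry)
assert containing_kthread(entry_addr) == ctypes.addressof(thread)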
Example #39
    def calculate(self):
        """Calculates various information about the image"""
        debug.info("Determining profile based on KDBG search...")
        profilelist = [
            p.__name__
            for p in registry.get_plugin_classes(obj.Profile).values()
        ]

        bestguess = None
        suglist = [s for s, _ in kdbgscan.KDBGScan.calculate(self)]
        if suglist:
            bestguess = suglist[0]
        suggestion = ", ".join(set(suglist))

        # Set our suggested profile first, then run through the list
        if bestguess in profilelist:
            profilelist = [bestguess] + profilelist
        chosen = 'no profile'

        # Save the original profile
        origprofile = self._config.PROFILE
        # Force user provided profile over others
        profilelist = [origprofile] + profilelist

        for profile in profilelist:
            debug.debug('Trying profile ' + profile)
            self._config.update('PROFILE', profile)
            addr_space = utils.load_as(self._config, astype='any')
            if hasattr(addr_space, "dtb"):
                chosen = profile
                break

        if bestguess != chosen:
            if not suggestion:
                suggestion = 'No suggestion'
            suggestion += ' (Instantiated with ' + chosen + ')'

        yield ('Suggested Profile(s)', str, suggestion)

        tmpas = addr_space
        count = 0
        while tmpas:
            count += 1
            yield ('AS Layer' + str(count), str,
                   tmpas.__class__.__name__ + " (" + tmpas.name + ")")
            tmpas = tmpas.base

        if not hasattr(addr_space, "pae"):
            yield ('PAE type', str, "No PAE")
        else:
            yield ('PAE type', str, "PAE" if addr_space.pae else "No PAE")

        if hasattr(addr_space, "dtb"):
            yield ('DTB', Address, Address(addr_space.dtb))

        volmagic = obj.VolMagic(addr_space)
        if hasattr(addr_space, "dtb"):
            kdbg = volmagic.KDBG.v()
            if type(kdbg) == int:
                kdbg = obj.Object("_KDDEBUGGER_DATA64",
                                  offset=kdbg,
                                  vm=addr_space)
            if kdbg.is_valid():
                yield ('KDBG', Address, Address(kdbg.obj_offset))
                kpcr_list = list(kdbg.kpcrs())
                yield ('Number of Processors', int, len(kpcr_list))
                yield ('Image Type (Service Pack)', int, kdbg.ServicePack)
                for kpcr in kpcr_list:
                    yield ('KPCR for CPU {0}'.format(
                        kpcr.ProcessorBlock.Number), Address,
                           Address(kpcr.obj_offset))

            KUSER_SHARED_DATA = volmagic.KUSER_SHARED_DATA.v()
            if KUSER_SHARED_DATA:
                yield ('KUSER_SHARED_DATA', Address,
                       Address(KUSER_SHARED_DATA))

            data = self.get_image_time(addr_space)

            if data:
                yield ('Image date and time', str, str(data['ImageDatetime']))
                yield ('Image local date and time', str,
                       timefmt.display_datetime(
                           data['ImageDatetime'].as_datetime(),
                           data['ImageTz']))

        # Make sure to reset the profile to its original value to keep the invalidator from blocking the cache
        self._config.update('PROFILE', origprofile)
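# A hedged sketch of the profile-ordering policy calculate() builds by
# prepending: the user-supplied profile is tried first, then the best KDBG-scan
# suggestion, then every other registered profile. This variant also removes
# duplicates while preserving that priority; the names are illustrative only.
def order_profiles(all_profiles, user_profile=None, best_guess=None):
    ordered, seen = [], set()
    for name in [user_profile, best_guess] + list(all_profiles):
        if name and name not in seen:
            seen.add(name)
            ordered.append(name)
    return ordered

# order_profiles(["WinXPSP2x86", "Win7SP1x64"], user_profile="Win7SP1x64")
# -> ["Win7SP1x64", "WinXPSP2x86"]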
Example #40
        def get_symbol(self,
                       sym_name,
                       nm_type="",
                       sym_type="",
                       module="kernel"):
            """Gets a symbol out of the profile
            
            sym_name -> name of the symbol
            nm_type  -> type as defined by 'nm' (see 'man nm' for examples)
            sym_type -> the type of the symbol (passing Pointer will provide auto deref)
            module   -> which module to get the symbol from, default is kernel, otherwise can be any name seen in 'lsmod'
    
            This fixes a few issues from the old static hash table method:
            1) Conflicting symbols can be handled: if a symbol name conflicts on any profile, 
               the plugin must provide the nm_type to differentiate it, otherwise an error is raised
            2) Can handle symbols gathered from modules on disk as well from the static kernel
    
            symtable is stored as a hash table of:
            
            symtable[module][sym_name] = [(symbol address, symbol type), (symbol address, symbol type), ...]
    
            The function has overly verbose error checking on purpose...
            """

            symtable = self.sys_map

            ret = None

            # check if the module is there...
            if module in symtable:

                mod = symtable[module]

                # check if the requested symbol is in the module
                if sym_name in mod:

                    sym_list = mod[sym_name]

                    # if a symbol has multiple definitions, then the plugin needs to specify the type
                    if len(sym_list) > 1:
                        if nm_type == "":
                            debug.error(
                                "Requested symbol {0:s} in module {1:s} has multiple definitions and no type given\n"
                                .format(sym_name, module))
                        else:
                            for (addr, stype) in sym_list:

                                if stype == nm_type:
                                    ret = addr
                                    break

                            if ret is None:
                                debug.error(
                                    "Requested symbol {0:s} in module {1:s} of type {2:s} could not be found\n"
                                    .format(sym_name, module, nm_type))

                    else:
                        # get the address of the symbol
                        ret = sym_list[0][0]

                else:
                    debug.debug(
                        "Requested symbol {0:s} not found in module {1:s}\n".
                        format(sym_name, module))
            else:
                debug.info(
                    "Requested module {0:s} not found in symbol table\n".
                    format(module))

            if ret and sym_type == "Pointer":
                # FIXME: change in 2.3 when truncation no longer occurs
                ret = ret & 0xffffffffffff

            return ret
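# A toy, self-contained illustration of the symtable layout the docstring
# describes and of how an nm type disambiguates duplicate names. The table
# contents below are invented; real tables are built from the profile's
# System.map / module symbols.
toy_symtable = {
    "kernel": {
        "init_task": [(0xffffffff81c13440, "D")],
        "jiffies":   [(0xffffffff81e04e80, "D"), (0xffffffff81e04e80, "A")],
    },
}

def lookup(symtable, sym_name, nm_type="", module="kernel"):
    candidates = symtable.get(module, {}).get(sym_name, [])
    if len(candidates) == 1:
        return candidates[0][0]
    for addr, stype in candidates:
        if stype == nm_type:
            return addr
    return None

assert lookup(toy_symtable, "init_task") == 0xffffffff81c13440
assert lookup(toy_symtable, "jiffies", nm_type="A") == 0xffffffff81e04e80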