def init_volatility(conf):
    """Initialize the volatility framework for PyREBox.

    :param conf: configuration object providing the ``vol_profile`` to use
    :return: True on success, False when the imported volatility is not the
             PyREBox-patched distribution.
    """
    import volatility.conf as volconf
    import volatility.registry as registry
    import volatility.commands as commands
    import volatility.addrspace as addrspace
    # The PyREBox-patched volatility distribution exposes this marker
    # attribute; a stock volatility install does not.
    if hasattr(volconf, "PyREBoxVolatility"):
        registry.PluginImporter()
        vol_config = volconf.ConfObject()
        registry.register_global_options(vol_config, commands.Command)
        registry.register_global_options(vol_config, addrspace.BaseAddressSpace)
        vol_config.PROFILE = conf.vol_profile
        # Set global volatility configuration
        # NOTE(review): conf_m is expected to be a module-level import of the
        # ConfigurationManager — confirm it is in scope in this file.
        conf_m.vol_conf = vol_config
        return True
    else:
        pp_error(
            """The imported volatility version is not appropriate for PyREBox: * Your local volatility instalation may be in conflict with PyREBox's volatility installation... ... set up a virtual env to avoid the conflict (see installation instructions). * PyREBox's volatility version was not properly installed or configured... ... you rebuild it running: $./build.sh --rebuild_volatility * You have a virtual env for PyREBox's python dependencies, and you forgot to activate it! ... you know what to do!\n""")
        return False
def add_module_monitoring_hooks(pgd):
    '''
    Install the initial set of write-breakpoints for a given process, so
    that insertion or removal of a module in any of its linked lists can
    be detected.

    :param pgd: page directory (address space id) of the monitored process
    '''
    from api import BP
    import api
    from vmi import update_modules
    import functools
    from utils import pp_error

    # Lazily create the per-process breakpoint registry.
    if pgd not in module_load_remove_breakpoints:
        module_load_remove_breakpoints[pgd] = {}

    # Refresh the module list for this address space and obtain the
    # memory locations that must be watched.
    hooking_points = update_modules(pgd)
    if hooking_points is None:
        pp_error("Could not set initial list of breakpoints for module monitoring: %x" % pgd)
        return

    # One physical-memory write breakpoint per hooking point.
    for module_base, addr, size in hooking_points:
        haddr = api.va_to_pa(pgd, addr)
        on_change = functools.partial(module_change_callback, pgd, addr, haddr)
        watchpoint = BP(haddr, None, size=size, typ=BP.MEM_WRITE_PHYS,
                        func=on_change, new_style=True)
        module_load_remove_breakpoints[pgd][(module_base, addr, size)] = watchpoint
        watchpoint.enable()
def import_module(module_name):
    """Import a PyREBox python script by name (old-style loader).

    Registers the script's callbacks, exposes its do_* functions as shell
    commands, and records it under a fresh module id. Exceptions are
    reported and swallowed.

    :param module_name: name of the python module to import
    """
    global MODULE_COUNTER
    import api_internal
    from ipython_shell import add_command
    try:
        # Bail out early when the name is already registered.
        if any(module_name == entry[0] for entry in modules.values()):
            pp_warning("[*] Module %s already imported\n" % module_name)
            return
        pp_print("[*] Importing %s\n" % module_name)
        mod = __import__(module_name, fromlist=[''])
        mod.initialize_callbacks(
            MODULE_COUNTER,
            functools.partial(api_internal.print_internal, module_name))
        # Every do_* attribute becomes a shell command (without the prefix).
        for attr in dir(mod):
            if attr.startswith("do_"):
                add_command(attr[3:], getattr(mod, attr))
        modules[MODULE_COUNTER] = (module_name, mod)
        MODULE_COUNTER += 1
    except Exception as e:
        pp_error("[!] Could not initialize python module due to exception\n")
        pp_error(" %s\n" % str(e))
    return
def add_module_monitoring_hooks(pgd):
    '''
    Adds initial set of breakpoints for a given process, so that we can
    detect when a new module is inserted, or a module is removed from any
    of its linked lists.

    :param pgd: page directory (address space id) of the monitored process
    '''
    from api import BP
    import api
    from vmi import update_modules
    import functools
    from utils import pp_error
    # Lazily create the per-process breakpoint registry.
    if pgd not in module_load_remove_breakpoints:
        module_load_remove_breakpoints[pgd] = {}
    # Update the module list for this pgd
    hooking_points = update_modules(pgd)
    if hooking_points is not None:
        # Add the BPW breakpoints
        for module_base, addr, size in hooking_points:
            # Breakpoints are set on physical addresses (MEM_WRITE_PHYS).
            haddr = api.va_to_pa(pgd, addr)
            bp = BP(haddr,
                    None,
                    size=size,
                    typ=BP.MEM_WRITE_PHYS,
                    func=functools.partial(module_change_callback, pgd, addr, haddr),
                    new_style=True)
            module_load_remove_breakpoints[pgd][(module_base, addr, size)] = bp
            bp.enable()
    else:
        pp_error(
            "Could not set initial list of breakpoints for module monitoring: %x" % pgd)
def function_wrapper_new(f, *args, **kwargs):
    """Invoke a new-style callback.

    New-style callbacks receive all their parameters packed in a single
    dict (the keyword arguments). Any exception raised by the callback is
    reported via pp_error and never propagated. Positional args are
    accepted for signature compatibility but not forwarded.

    :param f: the callback to invoke
    """
    try:
        f(kwargs)
    except Exception as exc:
        from utils import pp_error
        message = "\nException occurred when calling callback function %s - %s" % (repr(f), str(exc))
        pp_error(message)
    finally:
        return
def linux_insert_kernel_module(module, base, size, basename, fullname, update_symbols=False):
    """Insert (or replace) a Linux kernel module in the VMI module cache.

    :param module: volatility module object (must provide get_symbols())
    :param base: load base address of the module
    :param size: size of the module in memory
    :param basename: short module name
    :param fullname: full module path/name (used as symbol cache key)
    :param update_symbols: when True, also extract and cache the symbols
    :return: None
    """
    from vmi import modules
    from vmi import symbols
    from vmi import Module
    # Create module, use 0 as checksum as it is irrelevant here
    mod = Module(base, size, 0, 0, 0, basename, fullname)
    # Add an entry in the module list, if necessary
    # Kernel modules are filed under the pseudo-process key (pid=0, pgd=0).
    if (0, 0) not in modules:
        modules[(0, 0)] = {}
    # Add the module to the module list
    if base in modules[(0, 0)]:
        del modules[(0, 0)][base]
    modules[(0, 0)][base] = mod
    if update_symbols:
        # Use 0 as a checksum, here we should not have name collision
        checksum = 0
        if (checksum, fullname) not in symbols:
            symbols[(checksum, fullname)] = {}
        syms = symbols[(checksum, fullname)]
        try:
            '''
            pp_debug("Processing symbols for module %s\n" % basename)
            '''
            for sym_name, sym_offset in module.get_symbols():
                if sym_name in syms:
                    if syms[sym_name] != sym_offset:
                        # There are cases in which the same import is present twice, such as in this case:
                        # nm /lib/x86_64-linux-gnu/libpthread-2.24.so | grep "pthread_getaffinity_np"
                        # 00000000000113f0 T pthread_getaffinity_np@GLIBC_2.3.3
                        # 00000000000113a0 T
                        # pthread_getaffinity_np@@GLIBC_2.3.4
                        sym_name = sym_name + "_"
                        while sym_name in syms and syms[
                                sym_name] != sym_offset:
                            sym_name = sym_name + "_"
                        if sym_name not in syms:
                            syms[sym_name] = sym_offset
                else:
                    syms[sym_name] = sym_offset
        except Exception as e:
            # Probably could not fetch the symbols for this module
            # NOTE(review): pp_error is assumed to be a module-level import
            # from utils — confirm it is in scope in this file.
            pp_error("%s" % str(e))
            pass
        mod.set_symbols(symbols[(checksum, fullname)])
    return None
def windows_update_modules(pgd, update_symbols=False):
    '''
    Use volatility to get the modules and symbols for a given process,
    and update the cache accordingly

    :param pgd: page directory of the process to update (0 == kernel)
    :param update_symbols: when True, refresh symbol information too
    '''
    import api
    from utils import get_addr_space
    from vmi import modules
    # Kernel space (pgd == 0) uses the default address space.
    if pgd != 0:
        addr_space = get_addr_space(pgd)
    else:
        addr_space = get_addr_space()
    if addr_space is None:
        pp_error("Volatility address space not loaded\n")
        return
    # Get EPROC directly from its offset
    procs = api.get_process_list()
    inserted_bases = []
    # Parse/update kernel modules:
    # NOTE(review): last_kdbg and obj are module-level names — confirm they
    # are defined/imported elsewhere in this file.
    if last_kdbg is not None:
        kdbg = obj.Object(
            "_KDDEBUGGER_DATA64", offset=last_kdbg, vm=addr_space)
        for module in kdbg.modules():
            # De-duplicate by base address across all module lists.
            if module.DllBase not in inserted_bases:
                inserted_bases.append(module.DllBase)
                windows_insert_module(0, 0, module, update_symbols)
    for proc in procs:
        p_pid = proc["pid"]
        p_pgd = proc["pgd"]
        # p_name = proc["name"]
        p_kernel_addr = proc["kaddr"]
        if p_pgd == pgd:
            task = obj.Object("_EPROCESS", offset=p_kernel_addr, vm=addr_space)
            # Note: we do not erase the modules we have information for from the list,
            # unless we have a different module loaded at the same base address.
            # In this way, if at some point the module gets unmapped from the PEB list
            # but it is still in memory, we do not loose the information.
            if (p_pid, p_pgd) not in modules:
                modules[(p_pid, p_pgd)] = {}
            # Walk the three PEB loader lists (init / mem / load order).
            for module in task.get_init_modules():
                if module.DllBase not in inserted_bases:
                    inserted_bases.append(module.DllBase)
                    windows_insert_module(p_pid, p_pgd, module, update_symbols)
            for module in task.get_mem_modules():
                if module.DllBase not in inserted_bases:
                    inserted_bases.append(module.DllBase)
                    windows_insert_module(p_pid, p_pgd, module, update_symbols)
            for module in task.get_load_modules():
                if module.DllBase not in inserted_bases:
                    inserted_bases.append(module.DllBase)
                    windows_insert_module(p_pid, p_pgd, module, update_symbols)
    return
def reload_module(_id):
    """Reload the python script registered under module id _id.

    Unknown ids produce a warning; any exception during the reload is
    reported and swallowed.

    :param _id: integer module id assigned at import time
    """
    try:
        if _id not in modules:
            pp_warning("[*] The module number specified (%d) has not been imported\n" % _id)
        else:
            modules[_id].reload()
    except Exception as e:
        pp_error("[!] Could not reload python module due to exception\n")
        pp_error(" %s\n" % str(e))
    return
def reload_module(_id):
    """Reload the python script registered under module id _id.

    Unknown ids produce a warning; any exception during the reload is
    reported and swallowed.

    :param _id: integer module id assigned at import time
    """
    try:
        if _id in modules:
            modules[_id].reload()
        else:
            pp_warning("[*] The module number specified (%d) has not been imported\n" % _id)
    except Exception as e:
        pp_error("[!] Could not reload python module due to exception\n")
        pp_error(" %s\n" % str(e))
    return
def pyrebox_ipython_shell():
    """Run the PyREBox IPython shell.

    If the shell raises, the error is reported (message plus traceback on
    stdout) and the shell is started again; the loop only ends when the
    shell returns cleanly.
    """
    while True:
        try:
            from ipython_shell import start_shell
            start_shell()
            break
        except Exception as e:
            pp_error(str(e) + "\n")
            traceback.print_exc(file=sys.stdout)
def pyrebox_ipython_shell():
    """Run the PyREBox IPython shell, restarting it after any exception.

    The loop only terminates when start_shell() returns without raising.
    """
    finished = False
    while not finished:
        try:
            from ipython_shell import start_shell
            start_shell()
            finished = True
        except Exception as e:
            # Report the error and traceback, then restart the shell.
            pp_error(str(e) + "\n")
            traceback.print_exc(file=sys.stdout)
def load_symbols_from_cache_file():
    """Populate the global symbol cache from the JSON file at
    symbol_cache_path, if configured and present on disk.

    Errors are reported through pp_error and otherwise ignored, so a
    corrupt or unreadable cache file never aborts initialization.
    """
    global symbols
    global symbol_cache_path
    if symbol_cache_path is not None and os.path.isfile(symbol_cache_path):
        try:
            # 'with' guarantees the handle is closed even when read() or
            # json.loads() raises (the original leaked the handle in that
            # case because f.close() was inside the try after the read).
            with open(symbol_cache_path, "r") as f:
                symbols = json.loads(f.read())
        except Exception as e:
            pp_error("Error while reading symbols from %s: %s\n" % (symbol_cache_path, str(e)))
def init(platform, root_path, volatility_path, conf_name):
    """Initialize the PyREBox python side: logging, configuration,
    volatility, the IPython shell and the symbol cache.

    :param platform: QEMU platform string ("i386-softmmu"/"x86_64-softmmu")
    :param root_path: PyREBox root directory
    :param volatility_path: path to the volatility installation
    :param conf_name: path to the PyREBox configuration file
    :return: the volatility profile name, or None on failure
    :raises Exception: re-raises any unexpected initialization error after
        printing its traceback
    """
    try:
        # Just configure basic logging
        import logging
        logging.basicConfig()
        # Initialize stuff
        pp_debug("[*] Platform: %s\n" % platform)
        pp_debug("[*] Starting python module initialization\n")
        pp_debug("[*] Reading configuration from '%s'\n" % (conf_name))
        # NOTE(review): sys.settrace is referenced but never called — this
        # line is a no-op; confirm whether it is a leftover.
        sys.settrace
        config = ConfigParser.RawConfigParser()
        # Store configuration information in raw,
        # for plugins to be able to fetch it
        conf_m.config = config
        if not os.path.isfile(conf_name):
            pp_error("[!] Could not initialize pyrebox, conf file '%s' missing!\n" % (conf_name))
            return None
        config.read(conf_name)
        vol_profile = config.get('VOL', 'profile')
        # Set global configuration
        conf_m.volatility_path = volatility_path
        conf_m.vol_profile = vol_profile
        conf_m.platform = platform
        conf_m.pyre_root = root_path
        # Derive pointer size and endianness from the emulated platform.
        if platform == "x86_64-softmmu":
            conf_m.bitness = 64
            conf_m.endianess = "l"
        elif platform == "i386-softmmu":
            conf_m.bitness = 32
            conf_m.endianess = "l"
        sys.path.append(volatility_path)
        sys.path.append(root_path)
        sys.path.append(os.getcwd())
        if not init_volatility():
            return None
        # Initialize the shell now
        from ipython_shell import initialize_shell
        initialize_shell()
        # Initialize the symbol cache from the file
        if config.has_option('SYMBOL_CACHE', 'path'):
            from vmi import load_symbols_from_cache_file
            from vmi import set_symbol_cache_path
            set_symbol_cache_path(config.get('SYMBOL_CACHE', 'path'))
            load_symbols_from_cache_file()
        return vol_profile
    except Exception as e:
        # Do this to make sure we print the stack trace to help trouble-shooting
        traceback.print_exc()
        raise e
def serialize_interproc():
    """Pickle the global interproc_data object to the configured binary
    log file (interproc_config.interproc_bin_log_name).

    Failures are logged (traceback to stderr plus pp_error) and never
    propagated.
    """
    global interproc_config
    global interproc_data
    import traceback
    import pickle
    try:
        # Pickle output is binary data: open in binary mode ("w" in the
        # original breaks on Python 3 and risks newline corruption), and
        # use 'with' so the handle is closed even if pickling fails.
        with open(interproc_config.interproc_bin_log_name, "wb") as f_out:
            pickle.dump(interproc_data, f_out)
    except Exception:
        traceback.print_exc()
        # Bug fix: traceback.print_stack() prints directly and returns
        # None, so pp_error() received None; format_exc() returns the
        # actual traceback text of the active exception.
        pp_error(traceback.format_exc())
def interproc_basic_stats():
    """Write per-process basic statistics to the configured stats file
    (interproc_config.interproc_basic_stats_name).

    Failures are logged (traceback to stderr plus pp_error) and never
    propagated.
    """
    global interproc_config
    global interproc_data
    import traceback
    try:
        # 'with' closes the file even if print_stats raises part-way.
        with open(interproc_config.interproc_basic_stats_name, "w") as f:
            for proc in interproc_data.get_processes():
                proc.print_stats(f)
    except Exception:
        traceback.print_exc()
        # Bug fix: traceback.print_stack() prints directly and returns
        # None, so pp_error() received None; format_exc() returns the
        # actual traceback text of the active exception.
        pp_error(traceback.format_exc())
def unload_module(_id):
    """Unload the python script registered under module id _id.

    When the id is unknown, a warning plus the list of valid ids is
    printed. Exceptions are reported and swallowed.

    :param _id: integer module id assigned at import time
    """
    try:
        if _id not in modules:
            pp_warning("[*] The module number specified (%d) has not been imported\n" % _id)
            pp_warning("[*] Possible ids:")
            # Help the user pick a valid id.
            for known_id in modules:
                pp_warning(" %s - %s" % (str(known_id),str(type(known_id))))
        else:
            modules[_id].unload()
    except Exception as e:
        pp_error("[!] Could not unload python module due to exception\n")
        pp_error(" %s\n" % str(e))
    return
def unload_module(_id):
    """Unload the python script registered under module id _id.

    When the id is unknown, a warning plus the list of valid ids is
    printed. Exceptions are reported and swallowed.

    :param _id: integer module id assigned at import time
    """
    try:
        if _id in modules:
            modules[_id].unload()
        else:
            pp_warning("[*] The module number specified (%d) has not been imported\n" % _id)
            pp_warning("[*] Possible ids:")
            # List the known ids to help the user pick a valid one.
            for i in modules:
                pp_warning(" %s - %s" % (str(i),str(type(i))))
    except Exception as e:
        pp_error("[!] Could not unload python module due to exception\n")
        pp_error(" %s\n" % str(e))
    return
def function_wrapper_old(f, callback_type, *args, **kwargs):
    """Adapter for deprecated old-style callbacks.

    Old-style callbacks take positional arguments, so this wrapper
    unpacks the keyword parameter dict into the positional signature that
    matches the given callback type. A deprecation warning is printed
    once per session. Exceptions are reported and swallowed.

    :param f: the callback to invoke
    :param callback_type: one of the CallbackManager.*_CB constants
    """
    global DISABLE_DEPRECATION_WARNINGS
    try:
        if not DISABLE_DEPRECATION_WARNINGS:
            from utils import pp_warning
            pp_warning("You are using a deprecated callback format.\n" + \
                       "Switch to new style callback format, that will become the default in the future.\n" + \
                       "See the documentation of CallbackManager for further reference.\n")
            # Set to True, so that we don't repeat the same message again and again
            DISABLE_DEPRECATION_WARNINGS = True
        # We need to treat each callback separately
        if callback_type == CallbackManager.BLOCK_BEGIN_CB:
            f(kwargs["cpu_index"], kwargs["cpu"], kwargs["tb"])
        elif callback_type == CallbackManager.BLOCK_END_CB:
            f(kwargs["cpu_index"], kwargs["cpu"], kwargs["tb"], kwargs["cur_pc"], kwargs["next_pc"])
        elif callback_type == CallbackManager.INSN_BEGIN_CB:
            f(kwargs["cpu_index"], kwargs["cpu"])
        elif callback_type == CallbackManager.INSN_END_CB:
            f(kwargs["cpu_index"], kwargs["cpu"])
        elif callback_type == CallbackManager.MEM_READ_CB:
            f(kwargs["cpu_index"], kwargs["vaddr"], kwargs["size"], kwargs["haddr"])
        elif callback_type == CallbackManager.MEM_WRITE_CB:
            f(kwargs["cpu_index"], kwargs["vaddr"], kwargs["size"], kwargs["haddr"], kwargs["data"])
        elif callback_type == CallbackManager.KEYSTROKE_CB:
            f(kwargs["keycode"])
        elif callback_type == CallbackManager.NIC_REC_CB:
            f(kwargs["buf"], kwargs["size"], kwargs["cur_pos"], kwargs["start"], kwargs["stop"])
        elif callback_type == CallbackManager.NIC_SEND_CB:
            f(kwargs["addr"], kwargs["size"], kwargs["buf"])
        elif callback_type == CallbackManager.OPCODE_RANGE_CB:
            f(kwargs["cpu_index"], kwargs["cpu"], kwargs["cur_pc"], kwargs["next_pc"])
        elif callback_type == CallbackManager.TLB_EXEC_CB:
            f(kwargs["cpu"], kwargs["vaddr"])
        elif callback_type == CallbackManager.CREATEPROC_CB:
            f(kwargs["pid"], kwargs["pgd"], kwargs["name"])
        elif callback_type == CallbackManager.REMOVEPROC_CB:
            f(kwargs["pid"], kwargs["pgd"], kwargs["name"])
        elif callback_type == CallbackManager.CONTEXTCHANGE_CB:
            f(kwargs["old_pgd"], kwargs["new_pgd"])
        elif callback_type == CallbackManager.LOADMODULE_CB:
            f(kwargs["pid"], kwargs["pgd"], kwargs["base"], kwargs["size"], kwargs["name"], kwargs["fullname"])
        elif callback_type == CallbackManager.REMOVEMODULE_CB:
            f(kwargs["pid"], kwargs["pgd"], kwargs["base"], kwargs["size"], kwargs["name"], kwargs["fullname"])
        else:
            raise Exception("Unsupported callback type!")
    except Exception as e:
        from utils import pp_error
        pp_error("\nException occurred when calling callback function %s - %s\n\n" % (str(f), str(e)))
    finally:
        return
def linux_init_address_space():
    """Load the volatility address space for Linux and cache it on the
    global ConfigurationManager.

    :return: True when the address space is loaded and cached,
             False when it cannot be loaded (yet) or an error occurs.
    """
    from utils import ConfigurationManager as conf_m
    import volatility.utils as utils
    try:
        config = conf_m.vol_conf
        try:
            addr_space = utils.load_as(config)
        except BaseException:
            # Return silently: the address space may legitimately not be
            # loadable yet (e.g. too early during guest boot).
            # Bug fix: the original printed the exception to stdout here,
            # contradicting the "return silently" intent and polluting
            # the console on every early attempt.
            conf_m.addr_space = None
            return False
        conf_m.addr_space = addr_space
        return True
    except Exception as e:
        pp_error("Could not load volatility address space: %s" % str(e))
        # Explicit failure result (the original fell through returning None).
        return False
def linux_init_address_space():
    """Load the volatility address space for Linux and cache it on the
    global ConfigurationManager.

    :return: True on success, False when the address space cannot be
             loaded yet; implicitly None on an unexpected error.
    """
    from utils import ConfigurationManager as conf_m
    import volatility.utils as utils
    try:
        config = conf_m.vol_conf
        try:
            addr_space = utils.load_as(config)
        except BaseException as e:
            # Return silently
            # NOTE(review): this print contradicts "Return silently" — it
            # writes the exception to stdout; confirm whether intended.
            print (str(e))
            conf_m.addr_space = None
            return False
        conf_m.addr_space = addr_space
        return True
    except Exception as e:
        pp_error("Could not load volatility address space: %s" % str(e))
def init(platform, root_path, volatility_path):
    """Initialize the PyREBox python side from 'pyrebox.conf': logging,
    configuration, volatility, the IPython shell and default modules.

    :param platform: QEMU platform string
    :param root_path: PyREBox root directory
    :param volatility_path: path to the volatility installation
    :return: the volatility profile name, or None on failure
    :raises Exception: re-raises any unexpected initialization error after
        printing its traceback
    """
    global conf
    try:
        # Just configure basic logging
        import logging
        logging.basicConfig()
        # Initialize stuff
        pp_debug("[*] Platform: %s\n" % platform)
        pp_debug("[*] Starting python module initialization\n")
        pp_debug("[*] Reading configuration\n")
        # NOTE(review): sys.settrace is referenced but never called — this
        # line is a no-op; confirm whether it is a leftover.
        sys.settrace
        config = ConfigParser.RawConfigParser()
        if not os.path.isfile("pyrebox.conf"):
            pp_error(
                "[!] Could not initialize pyrebox, pyrebox.conf file missing!\n"
            )
            return None
        config.read('pyrebox.conf')
        vol_profile = config.get('VOL', 'profile')
        conf = ConfigManager(volatility_path=volatility_path,
                             vol_profile=vol_profile,
                             platform=platform)
        sys.path.append(conf.volatility_path)
        sys.path.append(root_path)
        sys.path.append(os.getcwd())
        # Set global configuration
        conf_m.conf = conf
        if not init_volatility(conf_m.conf):
            return None
        # Initialize the shell now
        from ipython_shell import initialize_shell
        initialize_shell()
        # Locate python modules that should be loaded by default
        for (module, enable) in config.items("MODULES"):
            if enable.strip().lower() == "true" or enable.strip().lower(
            ) == "yes":
                import_module(module)
        pp_debug("[*] Finished python module initialization\n")
        return vol_profile
    except Exception as e:
        # Do this to make sure we print the stack trace to help trouble-shooting
        traceback.print_exc()
        raise e
def import_module(module_name):
    """Import a PyREBox script as a managed Module (new-style loader).

    The script gets the next module id; nothing happens (beyond a
    warning) when a module with the same name is already registered.
    Exceptions are reported and swallowed.

    :param module_name: name of the python module to import
    """
    global MODULE_COUNTER
    try:
        duplicate = any(modules[mid].get_module_name() == module_name
                        for mid in modules)
        if duplicate:
            pp_warning("[*] Module %s already imported, did you want to reload it instead?\n" % module_name)
        else:
            MODULE_COUNTER += 1
            new_module = Module(MODULE_COUNTER, module_name)
            modules[MODULE_COUNTER] = new_module
            new_module.load()
    except Exception as e:
        pp_error("[!] Could not initialize python module due to exception\n")
        pp_error(" %s\n" % str(e))
    return
def import_module(module_name):
    """Import a PyREBox script as a managed Module (new-style loader).

    The script gets the next module id; a warning is printed when a
    module with the same name is already registered. Exceptions are
    reported and swallowed.

    :param module_name: name of the python module to import
    """
    global MODULE_COUNTER
    try:
        # Check whether some registered module already has this name.
        already_imported = False
        for mod in modules:
            if modules[mod].get_module_name() == module_name:
                already_imported = True
                break
        if not already_imported:
            MODULE_COUNTER += 1
            modules[MODULE_COUNTER] = Module(MODULE_COUNTER, module_name)
            modules[MODULE_COUNTER].load()
        else:
            pp_warning("[*] Module %s already imported, did you want to reload it instead?\n" % module_name)
    except Exception as e:
        pp_error("[!] Could not initialize python module due to exception\n")
        pp_error(" %s\n" % str(e))
    return
def linux_get_offsets():
    """Collect the Linux profile symbols and structure offsets needed by
    PyREBox's native side.

    :return: tuple of 12 longs (init_task, task_struct.comm,
             task_struct.pid, task_struct.tasks, task_struct.mm,
             mm_struct.pgd, task_struct.parent, task_struct.exit_state,
             thread stack size, proc_exec_connector, trim_init_extable,
             proc_exit_connector), or None when the profile lookup fails.
    """
    from utils import ConfigurationManager as conf_m
    import volatility.obj as obj
    import volatility.registry as registry
    try:
        # Instantiate the configured volatility profile.
        profs = registry.get_plugin_classes(obj.Profile)
        profile = profs[conf_m.vol_profile]()
        init_task_offset = profile.get_symbol("init_task")
        comm_offset = profile.get_obj_offset("task_struct", "comm")
        pid_offset = profile.get_obj_offset("task_struct", "pid")
        tasks_offset = profile.get_obj_offset("task_struct", "tasks")
        mm_offset = profile.get_obj_offset("task_struct", "mm")
        pgd_offset = profile.get_obj_offset("mm_struct", "pgd")
        parent_offset = profile.get_obj_offset("task_struct", "parent")
        exit_state_offset = profile.get_obj_offset("task_struct", "exit_state")
        thread_stack_size = profile.get_obj_offset(
            "pyrebox_thread_stack_size_info", "offset")
        # new process
        proc_exec_connector_offset = profile.get_symbol("proc_exec_connector")
        # new kernel module
        trim_init_extable_offset = profile.get_symbol("trim_init_extable")
        # process exit
        proc_exit_connector_offset = profile.get_symbol("proc_exit_connector")
        # NOTE: long() implies this file targets Python 2.
        return (long(init_task_offset), long(comm_offset), long(pid_offset),
                long(tasks_offset), long(mm_offset), long(pgd_offset),
                long(parent_offset), long(exit_state_offset),
                long(thread_stack_size), long(proc_exec_connector_offset),
                long(trim_init_extable_offset),
                long(proc_exit_connector_offset))
    except Exception as e:
        pp_error("Could not retrieve symbols for profile initialization %s" % str(e))
        return None
def linux_get_offsets():
    """Collect the Linux profile symbols and structure offsets needed by
    PyREBox's native side.

    :return: tuple of 12 longs (init_task, task_struct.comm,
             task_struct.pid, task_struct.tasks, task_struct.mm,
             mm_struct.pgd, task_struct.parent, task_struct.exit_state,
             thread stack size, proc_exec_connector, trim_init_extable,
             proc_exit_connector), or None when the profile lookup fails.
    """
    from utils import ConfigurationManager as conf_m
    import volatility.obj as obj
    import volatility.registry as registry
    try:
        # Instantiate the configured volatility profile.
        profs = registry.get_plugin_classes(obj.Profile)
        profile = profs[conf_m.vol_profile]()
        init_task_offset = profile.get_symbol("init_task")
        comm_offset = profile.get_obj_offset("task_struct", "comm")
        pid_offset = profile.get_obj_offset("task_struct", "pid")
        tasks_offset = profile.get_obj_offset("task_struct", "tasks")
        mm_offset = profile.get_obj_offset("task_struct", "mm")
        pgd_offset = profile.get_obj_offset("mm_struct", "pgd")
        parent_offset = profile.get_obj_offset("task_struct", "parent")
        exit_state_offset = profile.get_obj_offset("task_struct", "exit_state")
        thread_stack_size = profile.get_obj_offset(
            "pyrebox_thread_stack_size_info", "offset")
        # new process
        proc_exec_connector_offset = profile.get_symbol("proc_exec_connector")
        # new kernel module
        trim_init_extable_offset = profile.get_symbol("trim_init_extable")
        # process exit
        proc_exit_connector_offset = profile.get_symbol("proc_exit_connector")
        # NOTE: long() implies this file targets Python 2.
        return (long(init_task_offset), long(comm_offset), long(pid_offset),
                long(tasks_offset), long(mm_offset), long(pgd_offset),
                long(parent_offset), long(exit_state_offset),
                long(thread_stack_size), long(proc_exec_connector_offset),
                long(trim_init_extable_offset),
                long(proc_exit_connector_offset))
    except Exception as e:
        pp_error("Could not retrieve symbols for profile initialization %s" % str(e))
        return None
def init(platform, root_path, volatility_path, conf_name):
    """Initialize the PyREBox python side: logging, configuration,
    volatility and the IPython shell.

    :param platform: QEMU platform string
    :param root_path: PyREBox root directory (stored as conf_m.pyre_root)
    :param volatility_path: path to the volatility installation
    :param conf_name: path to the PyREBox configuration file
    :return: the volatility profile name, or None on failure
    :raises Exception: re-raises any unexpected initialization error after
        printing its traceback
    """
    try:
        # Just configure basic logging
        import logging
        logging.basicConfig()
        # Initialize stuff
        pp_debug("[*] Platform: %s\n" % platform)
        pp_debug("[*] Starting python module initialization\n")
        pp_debug("[*] Reading configuration from '%s'\n" % (conf_name))
        # NOTE(review): sys.settrace is referenced but never called — this
        # line is a no-op; confirm whether it is a leftover.
        sys.settrace
        config = ConfigParser.RawConfigParser()
        # Store configuration information in raw,
        # for plugins to be able to fetch it
        conf_m.config = config
        if not os.path.isfile(conf_name):
            pp_error(
                "[!] Could not initialize pyrebox, conf file '%s' missing!\n" %
                (conf_name))
            return None
        config.read(conf_name)
        vol_profile = config.get('VOL', 'profile')
        # Set global configuration
        conf_m.volatility_path = volatility_path
        conf_m.vol_profile = vol_profile
        conf_m.platform = platform
        conf_m.pyre_root = root_path
        sys.path.append(volatility_path)
        sys.path.append(root_path)
        sys.path.append(os.getcwd())
        if not init_volatility():
            return None
        # Initialize the shell now
        from ipython_shell import initialize_shell
        initialize_shell()
        return vol_profile
    except Exception as e:
        # Do this to make sure we print the stack trace to help trouble-shooting
        traceback.print_exc()
        raise e
def init(platform, root_path, volatility_path, conf_name):
    """Initialize the PyREBox python side: logging, configuration,
    volatility and the IPython shell.

    :param platform: QEMU platform string
    :param root_path: PyREBox root directory (added to sys.path)
    :param volatility_path: path to the volatility installation
    :param conf_name: path to the PyREBox configuration file
    :return: the volatility profile name, or None on failure
    :raises Exception: re-raises any unexpected initialization error after
        printing its traceback
    """
    try:
        # Just configure basic logging
        import logging
        logging.basicConfig()
        # Initialize stuff
        pp_debug("[*] Platform: %s\n" % platform)
        pp_debug("[*] Starting python module initialization\n")
        pp_debug("[*] Reading configuration from '%s'\n" % (conf_name))
        # NOTE(review): sys.settrace is referenced but never called — this
        # line is a no-op; confirm whether it is a leftover.
        sys.settrace
        config = ConfigParser.RawConfigParser()
        # Store configuration information in raw,
        # for plugins to be able to fetch it
        conf_m.config = config
        if not os.path.isfile(conf_name):
            pp_error("[!] Could not initialize pyrebox, conf file '%s' missing!\n" % (conf_name))
            return None
        config.read(conf_name)
        vol_profile = config.get('VOL', 'profile')
        # Set global configuration
        conf_m.volatility_path = volatility_path
        conf_m.vol_profile = vol_profile
        conf_m.platform = platform
        sys.path.append(volatility_path)
        sys.path.append(root_path)
        sys.path.append(os.getcwd())
        if not init_volatility():
            return None
        # Initialize the shell now
        from ipython_shell import initialize_shell
        initialize_shell()
        return vol_profile
    except Exception as e:
        # Do this to make sure we print the stack trace to help trouble-shooting
        traceback.print_exc()
        raise e
def init_volatility():
    """Initialize the volatility framework using the globally configured
    profile (conf_m.vol_profile).

    :return: True on success, False when the imported volatility is not the
             PyREBox-patched distribution.
    """
    import volatility.conf as volconf
    import volatility.registry as registry
    import volatility.commands as commands
    import volatility.addrspace as addrspace
    # The PyREBox-patched volatility distribution exposes this marker
    # attribute; a stock volatility install does not.
    if hasattr(volconf, "PyREBoxVolatility"):
        registry.PluginImporter()
        vol_config = volconf.ConfObject()
        registry.register_global_options(vol_config, commands.Command)
        registry.register_global_options(vol_config, addrspace.BaseAddressSpace)
        vol_config.PROFILE = conf_m.vol_profile
        # Set global volatility configuration
        conf_m.vol_conf = vol_config
        return True
    else:
        pp_error("""The imported volatility version is not appropriate for PyREBox: * Your local volatility installation may be in conflict with PyREBox's volatility installation... ... set up a virtual env to avoid the conflict (see installation instructions). * You have a virtual env for PyREBox's python dependencies, and you forgot to activate it! ... you know what to do!\n""")
        return False
def windows_insert_module_internal(p_pid, p_pgd, base, size, fullname, basename, checksum, update_symbols):
    """Insert a Windows module into the VMI cache for process (p_pid, p_pgd),
    resolving its exported symbols when requested, and dispatch module
    load/remove callbacks as appropriate.

    :param p_pid: process id (0 for kernel modules)
    :param p_pgd: process page directory (0 for kernel modules)
    :param base: module load base address
    :param size: module size in memory
    :param fullname: full path of the module inside the guest
    :param basename: short module name
    :param checksum: PE checksum used to detect module replacement
    :param update_symbols: when True, extract export symbols via pefile
    """
    from utils import get_addr_space
    from vmi import modules
    from vmi import symbols
    from vmi import Module
    from api_internal import dispatch_module_load_callback
    from api_internal import dispatch_module_remove_callback
    import pefile
    import api
    global filesystem
    global symbol_cache_must_be_saved
    # Normalize the guest path to a lowercase, forward-slash form rooted
    # at the filesystem root so it can be used as a stable cache key and
    # opened through the sleuthkit.
    if fullname.startswith("\\??\\"):
        fullname = fullname[4:]
    if fullname.upper().startswith("C:\\"):
        fullname = fullname[3:]
    if fullname.upper().startswith("\\SYSTEMROOT"):
        fullname = "\WINDOWS" + fullname[11:]
    fullname = fullname.replace("\\", "/")
    # Bare .SYS names live under the default drivers directory.
    if fullname[-4:].upper() == ".SYS" and not "/" in fullname:
        fullname = "/WINDOWS/system32/DRIVERS/" + fullname
    fullname = fullname.lower()
    mod = Module(base, size, p_pid, p_pgd, checksum, basename, fullname)
    # First, we try to get the symbols from the cache
    if fullname != "" and fullname in symbols.keys():
        mod.set_symbols(symbols[fullname])
    # If we are updating symbols (a simple module retrieval would
    # not require symbol extraction), and we don't have any
    # symbols on the cache:
    elif fullname != "" and update_symbols:
        unnamed_function_counter = 0
        syms = {}
        # Here, fetch the file using the sleuthkit, and use
        # PE file to process it
        # First select the file system if not selected already
        if filesystem is None:
            for fs in api.get_filesystems():
                file_list = api.open_guest_path(fs["index"], "")
                if isinstance(file_list, list) and len(file_list) > 0:
                    # Pick the filesystem that contains a "windows" dir.
                    if "windows" in [f.lower() for f in file_list]:
                        filesystem = fs
        if filesystem is not None:
            # Try to read the file
            f = None
            try:
                f = api.open_guest_path(filesystem["index"], fullname)
            except Exception as e:
                pp_error("%s - %s\n" % (str(e), fullname))
            if f is not None:
                data = f.read()
                pe = pefile.PE(data=data)
                if hasattr(pe, "DIRECTORY_ENTRY_EXPORT"):
                    for exp in pe.DIRECTORY_ENTRY_EXPORT.symbols:
                        if exp.name is not None:
                            syms[exp.name] = exp.address
                        else:
                            # Anonymous (ordinal-only) exports get a
                            # synthetic name.
                            syms["unnamed_funcion_%d" % unnamed_function_counter] = exp.address
                            unnamed_function_counter += 1
                # If we managed to parse the export table, update the symbols
                # except if it is empty
                if fullname not in symbols.keys():
                    symbols[fullname] = syms
                    # Even if it is empty, the module symbols are set
                    # to an empty list, and thus are 'resolved'.
                    # Anyway, in future updates, they could be resolved,
                    # as we allow this in the first condition.
                    mod.set_symbols(symbols[fullname])
                    symbol_cache_must_be_saved = True
            else:
                # File unreadable: mark the module's symbols as resolved
                # (empty) so we don't retry on every update.
                symbols[fullname] = {}
                mod.set_symbols(symbols[fullname])
    # Module load/del notification
    if base in modules[(p_pid, p_pgd)]:
        if modules[(p_pid, p_pgd)][base].get_size() != size or \
           modules[(p_pid, p_pgd)][base].get_checksum() != checksum or \
           modules[(p_pid, p_pgd)][base].get_name() != basename or \
           modules[(p_pid, p_pgd)][base].get_fullname() != fullname:
            # Notify of module deletion and module load
            dispatch_module_remove_callback(
                p_pid, p_pgd, base,
                modules[(p_pid, p_pgd)][base].get_size(),
                modules[(p_pid, p_pgd)][base].get_name(),
                modules[(p_pid, p_pgd)][base].get_fullname())
            del modules[(p_pid, p_pgd)][base]
            modules[(p_pid, p_pgd)][base] = mod
            dispatch_module_load_callback(p_pid, p_pgd, base, size, basename, fullname)
        # If we updated the symbols and have a bigger list now, dont substitute the module
        # but update its symbols instead
        elif len(mod.get_symbols()) > len(
                modules[(p_pid, p_pgd)][base].get_symbols()):
            modules[(p_pid, p_pgd)][base].set_symbols(mod.get_symbols())
    else:
        # Just notify of module load
        modules[(p_pid, p_pgd)][base] = mod
        dispatch_module_load_callback(p_pid, p_pgd, base, size, basename, fullname)
    # Mark the module as present
    modules[(p_pid, p_pgd)][base].set_present()
def linux_insert_kernel_module(module, base, size, basename, fullname, update_symbols=False):
    """Insert a Linux kernel module into the VMI cache and dispatch the
    module load/remove callbacks.

    :param module: volatility module object (must provide get_symbols())
    :param base: module load base address
    :param size: module size in memory
    :param basename: short module name
    :param fullname: full module name/path (symbol cache key)
    :param update_symbols: when True, (re)extract the module's symbols
    :return: None
    """
    from vmi import add_module
    from vmi import has_module
    from vmi import get_module
    from vmi import has_symbols
    from vmi import get_symbols
    from vmi import add_symbols
    from vmi import Module
    from api_internal import dispatch_module_load_callback
    from api_internal import dispatch_module_remove_callback
    # Kernel modules carry no checksum; 0 is used both for the new Module
    # and for the comparison below. Bug fix: 'checksum' was previously
    # referenced without ever being defined, raising NameError whenever a
    # module was already present at 'base'.
    checksum = 0
    # Create module, use 0 as checksum as it is irrelevant here
    mod = Module(base, size, 0, 0, checksum, basename, fullname)
    # Module load/del notification. Kernel modules are filed under the
    # pseudo-process key (pid=0, pgd=0).
    if has_module(0, 0, base):
        ex_mod = get_module(0, 0, base)
        if ex_mod.get_size() != size or \
           ex_mod.get_checksum() != checksum or \
           ex_mod.get_name() != basename or \
           ex_mod.get_fullname() != fullname:
            # A different module now occupies this base: notify removal of
            # the old one, then load of the new one, and replace it.
            dispatch_module_remove_callback(0, 0, base,
                                            ex_mod.get_size(),
                                            ex_mod.get_name(),
                                            ex_mod.get_fullname())
            dispatch_module_load_callback(0, 0, base, size, basename, fullname)
            add_module(0, 0, base, mod)
    else:
        # Just notify of module load
        dispatch_module_load_callback(0, 0, base, size, basename, fullname)
        add_module(0, 0, base, mod)
    # Mark the module as present
    get_module(0, 0, base).set_present()
    if update_symbols:
        if not has_symbols(fullname):
            syms = {}
            try:
                for sym_name, sym_offset in module.get_symbols():
                    if sym_name in syms:
                        if syms[sym_name] != sym_offset:
                            # The same symbol name can legitimately appear
                            # twice with different offsets, e.g. versioned
                            # glibc exports such as:
                            #   pthread_getaffinity_np@GLIBC_2.3.3
                            #   pthread_getaffinity_np@@GLIBC_2.3.4
                            # Disambiguate by appending '_' suffixes.
                            sym_name = sym_name + "_"
                            while sym_name in syms and syms[sym_name] != sym_offset:
                                sym_name = sym_name + "_"
                            if sym_name not in syms:
                                syms[sym_name] = sym_offset
                    else:
                        syms[sym_name] = sym_offset
                add_symbols(fullname, syms)
            except Exception as e:
                # Probably could not fetch the symbols for this module
                pp_error("%s" % str(e))
        mod.set_symbols(get_symbols(fullname))
    return None
def ntmapviewofsection(params, cm, proc, update_vads, long_size):
    """Handle a NtMapViewOfSection call: resolve the target process and
    section from the caller's handle table and register a return
    breakpoint that records the actual mapping.

    :param params: dict with at least "cpu_index" and "cpu"
    :param cm: CallbackManager used to register the return callback
    :param proc: monitored process object performing the call
    :param update_vads: forwarded to the return handler
    :param long_size: size of a native long in the guest, forwarded to
        read_parameters and the return handler
    """
    import volatility.obj as obj
    import volatility.win32.tasks as tasks
    import volatility.plugins.overlays.windows.windows as windows
    from core import Section
    from utils import get_addr_space
    import api
    from api import CallbackManager
    global interproc_data
    global interproc_config
    TARGET_LONG_SIZE = api.get_os_bits() / 8
    cpu_index = params["cpu_index"]
    cpu = params["cpu"]
    # NtMapViewOfSection prototype, for reference:
    # IN HANDLE SectionHandle,
    # IN HANDLE ProcessHandle,
    # IN OUT PVOID *BaseAddress OPTIONAL,
    # IN ULONG ZeroBits OPTIONAL,
    # IN ULONG CommitSize,
    # IN OUT PLARGE_INTEGER SectionOffset OPTIONAL,
    # IN OUT PULONG ViewSize,
    # IN InheritDisposition,
    # IN ULONG AllocationType OPTIONAL,
    # IN ULONG Protect
    pgd = api.get_running_process(cpu_index)
    # Read the parameters
    ret_addr, section_handle, proc_handle, base_p, arg_3, arg_4, offset_p, size_p = read_parameters(
        cpu, 7, long_size)
    # Load volatility address space
    addr_space = get_addr_space(pgd)

    class _SECTION_OBJECT(obj.CType, windows.ExecutiveObjectMixin):
        # Minimal overlay type used to dereference section handles.
        def is_valid(self):
            return obj.CType.is_valid(self)

    addr_space.profile.object_classes.update(
        {'_SECTION_OBJECT': _SECTION_OBJECT})
    # Get list of processes, and filter out by the process that triggered the
    # call (current process id)
    eprocs = [
        t for t in tasks.pslist(addr_space)
        if t.UniqueProcessId == proc.get_pid()
    ]
    # Initialize proc_obj, that will point to the object of the referenced
    # process, and section_obj, idem
    proc_obj = None
    section_obj = None
    mapping_proc = None
    # A pseudo-handle of -1 means "current process".
    if (TARGET_LONG_SIZE == 4 and proc_handle == 0xffffffff) or (
            TARGET_LONG_SIZE == 8 and proc_handle == 0xffffffffffffffff):
        mapping_proc = proc
    # Search handle table for the caller process
    for task in eprocs:
        if task.UniqueProcessId == proc.get_pid(
        ) and task.ObjectTable.HandleTableList:
            for handle in task.ObjectTable.handles():
                if handle.is_valid():
                    if not mapping_proc and not proc_obj and \
                       handle.HandleValue == proc_handle and \
                       handle.get_object_type() == "Process":
                        proc_obj = handle.dereference_as("_EPROCESS")
                    elif handle.HandleValue == section_handle and handle.get_object_type(
                    ) == "Section":
                        # We dereference the object as _SECTION_OBJECT, although it is not a _SECTION_OBJECT but a
                        # _SECTION, that is not present in the volatility overlay:
                        # http://forum.sysinternals.com/section-object_topic24975.html
                        # For a better reference see the comments on the Section class
                        # in mw_monitor_classes.py
                        section_obj = handle.dereference_as("_SECTION_OBJECT")
                    if (proc_obj or mapping_proc) and section_obj:
                        break
            break
    # proc_obj represents the process over which the section is mapped
    # section_object represents the section being mapped.
    if (proc_obj is not None or mapping_proc is not None) and section_obj is not None:
        mapped_sec = None
        if mapping_proc is None:
            mapping_proc = interproc_data.get_process_by_pid(
                int(proc_obj.UniqueProcessId))
        if mapping_proc is None:
            pp_error("[!] The mapping process is not being monitored," +
                     " a handle was obtained with an API different from " +
                     "OpenProcess or CreateProcess\n")
            return
        mapped_sec = interproc_data.get_section_by_offset(
            section_obj.obj_offset)
        # If the section was not in our list, we create an entry
        if mapped_sec is None:
            mapped_sec = Section(pgd, section_obj)
            interproc_data.add_section(mapped_sec)
        # Record the actual map once we return back from the call and we can
        # dereference output parameters
        callback_name = cm.generate_callback_name("mapviewofsection_ret")
        # Arguments to callback: the callback name, so that it can unset it,
        # the process handle variable, and the section handle
        callback_function = functools.partial(ntmapviewofsection_ret,
                                              cm=cm,
                                              callback_name=callback_name,
                                              mapping_proc=mapping_proc,
                                              mapped_sec=mapped_sec,
                                              base_p=base_p,
                                              size_p=size_p,
                                              offset_p=offset_p,
                                              proc=proc,
                                              update_vads=update_vads,
                                              long_size=long_size)
        cm.add_callback(CallbackManager.INSN_BEGIN_CB,
                        callback_function,
                        name=callback_name,
                        addr=ret_addr,
                        pgd=pgd)
def linux_insert_kernel_module(module, base, size, basename, fullname, update_symbols=False):
    '''
    Insert (or refresh) a Linux kernel module in the module cache.

    Kernel modules are tracked under the pseudo (pid, pgd) == (0, 0) entry of
    the vmi module map. If a different module is already registered at the same
    base address, a module-remove notification is dispatched before the
    module-load notification for the new one.

    :param module: volatility kernel module object (only used to read symbols)
    :param base: load base address of the module
    :param size: size of the module in memory
    :param basename: short module name
    :param fullname: full module path/name
    :param update_symbols: if True, extract and cache the module's symbols
    :return: None
    '''
    from vmi import modules
    from vmi import symbols
    from vmi import Module
    from api_internal import dispatch_module_load_callback
    from api_internal import dispatch_module_remove_callback

    # Use 0 as checksum as it is irrelevant here (kernel modules carry no
    # header checksum); it is also the key used for the symbol cache.
    # NOTE: this was previously only assigned inside the update_symbols
    # branch, which made the get_checksum() comparison below raise NameError.
    checksum = 0

    # Create the module record
    mod = Module(base, size, 0, 0, 0, basename, fullname)

    # Add an entry in the module list, if necessary
    if (0, 0) not in modules:
        modules[(0, 0)] = {}

    # Module load/del notification
    if base in modules[(0, 0)]:
        existing = modules[(0, 0)][base]
        if existing.get_size() != size or \
           existing.get_checksum() != checksum or \
           existing.get_name() != basename or \
           existing.get_fullname() != fullname:
            # A different module now lives at this base address:
            # notify of module deletion and module load
            dispatch_module_remove_callback(0, 0, base,
                                            existing.get_size(),
                                            existing.get_name(),
                                            existing.get_fullname())
            del modules[(0, 0)][base]
            dispatch_module_load_callback(0, 0, base, size, basename, fullname)
            modules[(0, 0)][base] = mod
    else:
        # Just notify of module load
        dispatch_module_load_callback(0, 0, base, size, basename, fullname)
        modules[(0, 0)][base] = mod

    # Mark the module as present
    modules[(0, 0)][base].set_present()

    if update_symbols:
        # With checksum fixed to 0 we should not have name collisions here
        if (checksum, fullname) not in symbols:
            symbols[(checksum, fullname)] = {}
        syms = symbols[(checksum, fullname)]
        try:
            for sym_name, sym_offset in module.get_symbols():
                if sym_name in syms:
                    if syms[sym_name] != sym_offset:
                        # There are cases in which the same import is present twice, such as in this case:
                        # nm /lib/x86_64-linux-gnu/libpthread-2.24.so | grep "pthread_getaffinity_np"
                        # 00000000000113f0 T pthread_getaffinity_np@GLIBC_2.3.3
                        # 00000000000113a0 T
                        # pthread_getaffinity_np@@GLIBC_2.3.4
                        # Disambiguate by appending underscores until free.
                        sym_name = sym_name + "_"
                        while sym_name in syms and syms[sym_name] != sym_offset:
                            sym_name = sym_name + "_"
                        if sym_name not in syms:
                            syms[sym_name] = sym_offset
                else:
                    syms[sym_name] = sym_offset
        except Exception as e:
            # Probably could not fetch the symbols for this module
            pp_error("%s" % str(e))
        mod.set_symbols(symbols[(checksum, fullname)])

    return None
def windows_insert_module_internal(
        p_pid,
        p_pgd,
        base,
        size,
        fullname,
        basename,
        checksum,
        update_symbols,
        do_stop = False):
    '''
    Insert (or refresh) a Windows module in the module cache for the process
    identified by (p_pid, p_pgd).

    Normalizes the module path, optionally extracts export symbols from the
    on-disk PE file (fetched from the guest filesystem via the sleuthkit),
    and dispatches module load/remove notifications when the module at this
    base address changes.

    :param p_pid: pid of the owning process
    :param p_pgd: pgd (CR3) of the owning process
    :param base: module load base address
    :param size: module size in memory
    :param fullname: full module path as seen in the guest
    :param basename: short module name
    :param checksum: module checksum, used for the Module record
    :param update_symbols: if True, extract export symbols when not cached
    :param do_stop: accepted for interface compatibility; not used in this
                    body — presumably consumed elsewhere, TODO confirm
    '''
    from utils import get_addr_space
    from vmi import add_symbols
    from vmi import get_symbols
    from vmi import has_symbols
    from vmi import Module
    from vmi import add_module
    from vmi import get_module
    from vmi import has_module
    from api_internal import dispatch_module_load_callback
    from api_internal import dispatch_module_remove_callback
    import pefile
    import api
    global filesystem
    global symbol_cache_must_be_saved

    # --- Path normalization: turn the various NT path spellings into a
    # lowercase, forward-slash, drive-relative path usable with the
    # sleuthkit filesystem API.
    if fullname.startswith("\\??\\"):
        fullname = fullname[4:]
    if fullname.upper().startswith("C:\\"):
        fullname = fullname[3:]
    if fullname.upper().startswith("\\SYSTEMROOT"):
        fullname = "\WINDOWS" + fullname[11:]
    fullname = fullname.replace("\\", "/")
    # Bare driver names (no path) live under system32/DRIVERS
    if fullname[-4:].upper() == ".SYS" and not "/" in fullname:
        fullname = "/WINDOWS/system32/DRIVERS/" + fullname
    fullname = fullname.lower()

    mod = Module(base, size, p_pid, p_pgd, checksum, basename, fullname)

    # First, we try to get the symbols from the cache
    if fullname != "" and has_symbols(fullname):
        mod.set_symbols(get_symbols(fullname))
    # If we are updating symbols (a simple module retrieval would
    # not require symbol extraction), and we don't have any
    # symbols on the cache:
    elif fullname != "" and update_symbols:
        pp_debug("Symbols not found in cache, extracting from %s...\n" % fullname)
        unnamed_function_counter = 0
        syms = {}
        # Here, fetch the file using the sleuthkit, and use
        # PE file to process it
        # First select the file system if not selected already:
        # pick the one containing a "windows" directory at its root.
        if filesystem is None:
            for fs in api.get_filesystems():
                file_list = api.open_guest_path(fs["index"], "")
                if isinstance(file_list, list) and len(file_list) > 0:
                    if "windows" in [f.lower() for f in file_list]:
                        filesystem = fs
        if filesystem is not None:
            # Try to read the file
            f = None
            try:
                f = api.open_guest_path(filesystem["index"], fullname)
            except Exception as e:
                pp_error("%s - %s\n" % (str(e), fullname))
            if f is not None:
                data = f.read()
                pe = pefile.PE(data=data)
                # Collect the export table; exports without a name get a
                # synthetic "unnamed_funcion_N" key.
                if hasattr(pe, "DIRECTORY_ENTRY_EXPORT"):
                    for exp in pe.DIRECTORY_ENTRY_EXPORT.symbols:
                        if exp.name is not None:
                            syms[exp.name] = exp.address
                        else:
                            syms["unnamed_funcion_%d" % unnamed_function_counter] = exp.address
                            unnamed_function_counter += 1
                add_symbols(fullname, syms)
        mod.set_symbols(syms)
        # Even if it is empty, the module symbols are set
        # to an empty list, and thus are 'resolved'.
        # Anyway, in future updates, they could be resolved,
        # as we allow this in the first condition.
        symbol_cache_must_be_saved = True

    #Module load/del notification
    if has_module(p_pid, p_pgd, base):
        ex_mod = get_module(p_pid, p_pgd, base)
        # Module replacement, only if it is a different module, and also
        # take into consideration wow64 redirection. Never substitute the
        # wow64 version by the system32 version of the same dll
        if (ex_mod.get_fullname().lower() != fullname.lower()) and not ((ex_mod.get_name().lower() == basename.lower()) and ("windows/syswow64".lower() in ex_mod.get_fullname().lower()) and ("windows/system32" in fullname.lower())):
            # Notify of module deletion and module load
            dispatch_module_remove_callback(p_pid, p_pgd, base,
                                            ex_mod.get_size(),
                                            ex_mod.get_name(),
                                            ex_mod.get_fullname())
            add_module(p_pid, p_pgd, base, mod)
            mod.set_present()
            dispatch_module_load_callback(p_pid, p_pgd, base, size, basename, fullname)
        # If we updated the symbols and have a bigger list now, dont substitute the module
        # but update its symbols instead
        elif len(mod.get_symbols()) > len(ex_mod.get_symbols()):
            ex_mod.set_symbols(mod.get_symbols())
        # In any case, mark as present
        ex_mod.set_present()
    else:
        # Just notify of module load
        add_module(p_pid, p_pgd, base, mod)
        # Mark the module as present
        mod.set_present()
        dispatch_module_load_callback(p_pid, p_pgd, base, size, basename, fullname)
def windows_update_modules(pgd, update_symbols=False):
    '''
    Use volatility to get the modules and symbols for a given process, and
    update the cache accordingly

    For pgd == 0, the kernel module list (via KDBG) is refreshed; otherwise
    the PEB loader lists (and, for Wow64 processes on 64-bit guests, the VAD
    tree) of the matching process are walked.

    :param pgd: the pgd (CR3) of the process to update, or 0 for the kernel
    :param update_symbols: if True, also extract/refresh module symbols
    :return: a list of (object_offset, list_entry_offset, size) tuples
             describing the LIST_ENTRY regions to watch for writes, [] if no
             address space is available, or None if no process matched
    '''
    global last_kdbg
    global symbol_cache_must_be_saved

    import api
    from utils import get_addr_space
    from vmi import set_modules_non_present
    from vmi import clean_non_present_modules
    from vmi import add_module
    from vmi import get_module
    from vmi import has_module

    if pgd != 0:
        addr_space = get_addr_space(pgd)
    else:
        addr_space = get_addr_space()

    if addr_space is None:
        pp_error("Volatility address space not loaded\n")
        return []

    # Get EPROC directly from its offset
    procs = api.get_process_list()
    inserted_bases = []
    # Parse/update kernel modules if pgd 0 is requested:
    if pgd == 0 and last_kdbg is not None:
        kdbg = obj.Object(
            "_KDDEBUGGER_DATA64", offset=last_kdbg, vm=addr_space)

        # List entries are returned, so that
        # we can monitor memory writes to these
        # entries and detect when a module is added
        # or removed
        list_entry_size = None
        list_entry_regions = []

        # Add the initial list pointer as a list entry
        list_entry_regions.append((kdbg.obj_offset, kdbg.PsLoadedModuleList.obj_offset, kdbg.PsLoadedModuleList.size()))

        # Mark all modules as non-present
        set_modules_non_present(0, 0)
        for module in kdbg.modules():
            if module.DllBase not in inserted_bases:
                inserted_bases.append(module.DllBase)
                windows_insert_module(0, 0, module, update_symbols)
                if list_entry_size is None:
                    list_entry_size = module.InLoadOrderLinks.size()
                # * 3 to cover the three loader LIST_ENTRY pairs of the module
                list_entry_regions.append((module.obj_offset, module.InLoadOrderLinks.obj_offset, list_entry_size * 3))
        # Remove all the modules that are not marked as present
        clean_non_present_modules(0, 0)

        # Persist the symbol cache if module insertion updated it
        if symbol_cache_must_be_saved:
            from vmi import save_symbols_to_cache_file
            save_symbols_to_cache_file()
            symbol_cache_must_be_saved = False

        return list_entry_regions

    for proc in procs:
        p_pid = proc["pid"]
        p_pgd = proc["pgd"]
        # p_name = proc["name"]
        p_kernel_addr = proc["kaddr"]
        if p_pgd == pgd:
            task = obj.Object("_EPROCESS", offset=p_kernel_addr, vm=addr_space)

            # List entries are returned, so that
            # we can monitor memory writes to these
            # entries and detect when a module is added
            # or removed
            list_entry_size = None
            list_entry_regions = []

            # If the PEB or its loader data are not yet valid (early process
            # start-up), only register the pointer locations to watch and
            # skip the module scan for now.
            scan_peb = True
            if task.Peb is None or not task.Peb.is_valid():
                if isinstance(task.Peb.obj_offset, int):
                    list_entry_regions.append((task.obj_offset, task.Peb.obj_offset, task.Peb.size()))
                scan_peb = False

            if task.Peb.Ldr is None or not task.Peb.Ldr.is_valid():
                list_entry_regions.append((task.Peb.v(), task.Peb.Ldr.obj_offset, task.Peb.Ldr.size()))
                scan_peb = False

            if scan_peb:
                # Add the initial list pointer as a list entry if we already have a PEB and LDR
                list_entry_regions.append((task.Peb.Ldr.dereference().obj_offset, task.Peb.Ldr.InLoadOrderModuleList.obj_offset, task.Peb.Ldr.InLoadOrderModuleList.size() * 3))

                # Note: we do not erase the modules we have information for from the list,
                # unless we have a different module loaded at the same base address.
                # In this way, if at some point the module gets unmapped from the PEB list
                # but it is still in memory, we do not loose the information.

                # Mark all modules as non-present
                set_modules_non_present(p_pid, p_pgd)
                for module in task.get_init_modules():
                    if module.DllBase not in inserted_bases:
                        inserted_bases.append(module.DllBase)
                        windows_insert_module(p_pid, p_pgd, module, update_symbols)
                        if list_entry_size is None:
                            list_entry_size = module.InLoadOrderLinks.size()
                        list_entry_regions.append((module.obj_offset, module.InLoadOrderLinks.obj_offset, list_entry_size * 3))
                for module in task.get_mem_modules():
                    if module.DllBase not in inserted_bases:
                        inserted_bases.append(module.DllBase)
                        windows_insert_module(p_pid, p_pgd, module, update_symbols)
                        if list_entry_size is None:
                            list_entry_size = module.InLoadOrderLinks.size()
                        list_entry_regions.append((module.obj_offset, module.InLoadOrderLinks.obj_offset, list_entry_size * 3))
                for module in task.get_load_modules():
                    if module.DllBase not in inserted_bases:
                        inserted_bases.append(module.DllBase)
                        windows_insert_module(p_pid, p_pgd, module, update_symbols)
                        if list_entry_size is None:
                            list_entry_size = module.InLoadOrderLinks.size()
                        list_entry_regions.append((module.obj_offset, module.InLoadOrderLinks.obj_offset, list_entry_size * 3))
                # Now, if we are a 64bit system and the process is a Wow64 process, traverse VAD
                # to find the 32 bit modules
                if api.get_os_bits() == 64 and task.IsWow64:
                    for vad in task.VadRoot.traverse():
                        if vad is not None:
                            if hasattr(vad, "FileObject"):
                                f = vad.FileObject
                                if f is not None:
                                    fname = f.file_name_with_device()
                                    # Only SysWOW64 DLL mappings are of interest here
                                    if fname and "Windows\\SysWOW64".lower() in fname.lower() and ".dll" == fname[-4:].lower():
                                        fname_starts = fname.find("Windows\\SysWOW64")
                                        fname = fname[fname_starts:]
                                        windows_insert_module_internal(p_pid, p_pgd, vad.Start, vad.End - vad.Start, fname, fname.split("\\")[-1], "", update_symbols, do_stop = True)
                # Remove all the modules that are not marked as present
                clean_non_present_modules(p_pid, p_pgd)

            # Persist the symbol cache if module insertion updated it
            if symbol_cache_must_be_saved:
                from vmi import save_symbols_to_cache_file
                save_symbols_to_cache_file()
                symbol_cache_must_be_saved = False

            return list_entry_regions

    # No process with the requested pgd was found
    return None
def windows_update_modules(pgd, update_symbols=False):
    '''
    Use volatility to get the modules and symbols for a given process, and
    update the cache accordingly

    For pgd == 0, the kernel module list (via KDBG) is refreshed; otherwise
    the PEB loader lists of the matching process are walked.

    :param pgd: the pgd (CR3) of the process to update, or 0 for the kernel
    :param update_symbols: if True, also extract/refresh module symbols
    :return: a list of (object_offset, list_entry_offset, size) tuples
             describing the LIST_ENTRY regions to watch for writes, [] if no
             address space is available, or None if no process matched
    '''
    global last_kdbg

    import api
    from utils import get_addr_space
    from vmi import modules
    from vmi import set_modules_non_present
    from vmi import clean_non_present_modules

    if pgd != 0:
        addr_space = get_addr_space(pgd)
    else:
        addr_space = get_addr_space()

    if addr_space is None:
        pp_error("Volatility address space not loaded\n")
        return []

    # Get EPROC directly from its offset
    procs = api.get_process_list()
    inserted_bases = []
    # Parse/update kernel modules if pgd 0 is requested:
    if pgd == 0 and last_kdbg is not None:
        if (0, 0) not in modules:
            modules[(0, 0)] = {}
        kdbg = obj.Object("_KDDEBUGGER_DATA64", offset=last_kdbg, vm=addr_space)

        # List entries are returned, so that
        # we can monitor memory writes to these
        # entries and detect when a module is added
        # or removed
        list_entry_size = None
        list_entry_regions = []

        # Add the initial list pointer as a list entry
        list_entry_regions.append(
            (kdbg.obj_offset, kdbg.PsLoadedModuleList.obj_offset, kdbg.PsLoadedModuleList.size()))

        # Mark all modules as non-present
        set_modules_non_present(0, 0)
        for module in kdbg.modules():
            if module.DllBase not in inserted_bases:
                inserted_bases.append(module.DllBase)
                windows_insert_module(0, 0, module, update_symbols)
                if list_entry_size is None:
                    list_entry_size = module.InLoadOrderLinks.size()
                # * 3 to cover the three loader LIST_ENTRY pairs of the module
                list_entry_regions.append(
                    (module.obj_offset, module.InLoadOrderLinks.obj_offset, list_entry_size * 3))
        # Remove all the modules that are not marked as present
        clean_non_present_modules(0, 0)

        return list_entry_regions

    for proc in procs:
        p_pid = proc["pid"]
        p_pgd = proc["pgd"]
        # p_name = proc["name"]
        p_kernel_addr = proc["kaddr"]
        if p_pgd == pgd:
            task = obj.Object("_EPROCESS", offset=p_kernel_addr, vm=addr_space)

            # List entries are returned, so that
            # we can monitor memory writes to these
            # entries and detect when a module is added
            # or removed
            list_entry_size = None
            list_entry_regions = []

            # If the PEB or its loader data are not yet valid (early process
            # start-up), return only the pointer locations to watch, so the
            # caller can detect when they become valid.
            if task.Peb is None or not task.Peb.is_valid():
                if isinstance(task.Peb.obj_offset, int):
                    list_entry_regions.append(
                        (task.obj_offset, task.Peb.obj_offset, task.Peb.size()))
                return list_entry_regions

            if task.Peb.Ldr is None or not task.Peb.Ldr.is_valid():
                list_entry_regions.append(
                    (task.Peb.v(), task.Peb.Ldr.obj_offset, task.Peb.Ldr.size()))
                return list_entry_regions

            # Add the initial list pointer as a list entry if we already have a PEB and LDR
            list_entry_regions.append(
                (task.Peb.Ldr.dereference().obj_offset, task.Peb.Ldr.InLoadOrderModuleList.obj_offset, task.Peb.Ldr.InLoadOrderModuleList.size() * 3))

            # Note: we do not erase the modules we have information for from the list,
            # unless we have a different module loaded at the same base address.
            # In this way, if at some point the module gets unmapped from the PEB list
            # but it is still in memory, we do not loose the information.
            if (p_pid, p_pgd) not in modules:
                modules[(p_pid, p_pgd)] = {}

            # Mark all modules as non-present
            set_modules_non_present(p_pid, p_pgd)
            for module in task.get_init_modules():
                if module.DllBase not in inserted_bases:
                    inserted_bases.append(module.DllBase)
                    windows_insert_module(p_pid, p_pgd, module, update_symbols)
                    if list_entry_size is None:
                        list_entry_size = module.InLoadOrderLinks.size()
                    list_entry_regions.append(
                        (module.obj_offset, module.InLoadOrderLinks.obj_offset, list_entry_size * 3))
            for module in task.get_mem_modules():
                if module.DllBase not in inserted_bases:
                    inserted_bases.append(module.DllBase)
                    windows_insert_module(p_pid, p_pgd, module, update_symbols)
                    if list_entry_size is None:
                        list_entry_size = module.InLoadOrderLinks.size()
                    list_entry_regions.append(
                        (module.obj_offset, module.InLoadOrderLinks.obj_offset, list_entry_size * 3))
            for module in task.get_load_modules():
                if module.DllBase not in inserted_bases:
                    inserted_bases.append(module.DllBase)
                    windows_insert_module(p_pid, p_pgd, module, update_symbols)
                    if list_entry_size is None:
                        list_entry_size = module.InLoadOrderLinks.size()
                    list_entry_regions.append(
                        (module.obj_offset, module.InLoadOrderLinks.obj_offset, list_entry_size * 3))
            # Remove all the modules that are not marked as present
            clean_non_present_modules(p_pid, p_pgd)

            return list_entry_regions

    # No process with the requested pgd was found
    return None
def __init__(self, cb, printer):
    """
    Build a new GuestAgentPlugin instance.

    :arg cb: The callback manager.
    :arg printer: The printer where logs should go to.
    """
    from utils import ConfigurationManager as conf_m
    from utils import pp_error

    # The callback manager and printer are only bound once configuration
    # has been read successfully; until then they stay None.
    self.__cb = None
    self.__printer = None

    # Plugin status
    self.__status = None

    # Bookkeeping for file descriptors handed out to the guest
    self.__file_descriptor_counter = 0
    self.__file_descriptors = {}

    # Agent binary name, its config file, and the in-guest buffer
    # parameters (offset/address/size) used to exchange data
    self.__agent_config_file = None
    self.__agent_filename = None
    self.__agent_buffer_offset = None
    self.__agent_buffer_address = None
    self.__agent_buffer_size = None

    # Address space (pgd) of the agent process, unknown until it runs
    self.__agent_pgd = None

    # Pending command queue and the metadata for exec/copy commands
    self.__file_to_execute = {"path": "", "args": "", "env": ""}
    self.__file_to_copy = {"source": "", "destiny": ""}
    self.__commands = [
        {"command": GuestAgentPlugin.__CMD_WAIT, "meta": {}}]

    # Read the guest agent configuration. Values obtained from the main
    # config are cached on the ConfigurationManager so later instances
    # can reuse them without re-reading the files.
    try:
        if conf_m.agent_filename is None:
            conf_m.agent_filename = conf_m.config.get('AGENT', 'name')
        self.__agent_filename = conf_m.agent_filename

        # Parse the agent's own config file if we have not done so yet
        if self.__agent_config_file is None:
            if not os.path.isfile(conf_m.config.get('AGENT', 'conf')):
                pp_error(
                    "[!] Could not initialize agent, offset config file missing!\n")
                return
            self.__agent_config_file = ConfigParser.RawConfigParser()
            self.__agent_config_file.read(
                conf_m.config.get('AGENT', 'conf'))

        if conf_m.agent_buffer_offset is None:
            conf_m.agent_buffer_offset = int(
                self.__agent_config_file.get('BUFFER', 'BufferOffset'))
        self.__agent_buffer_offset = conf_m.agent_buffer_offset

        if conf_m.agent_buffer_size is None:
            conf_m.agent_buffer_size = int(
                self.__agent_config_file.get('BUFFER', 'BufferSize'))
        self.__agent_buffer_size = conf_m.agent_buffer_size
    except ConfigParser.NoSectionError:
        pp_error(
            "[*] No agent configuration provided, guest agent will not work if not configured\n")
    except ConfigParser.NoOptionError:
        pp_error(
            "[*] No agent name provided, guest agent will not work if not configured properly\n")

    # Configuration done: wire the plugin into the callback manager so we
    # get notified about process creation.
    self.__cb = cb
    self.__printer = printer

    self.__cb.add_callback(
        api.CallbackManager.CREATEPROC_CB,
        self.__new_process_callback,
        name="host_file_plugin_process_create")

    # update the status
    self.__status = GuestAgentPlugin.__AGENT_INITIALIZED
def linux_insert_kernel_module(module, base, size, basename, fullname, update_symbols=False):
    '''
    Insert (or refresh) a Linux kernel module in the module cache.

    Kernel modules are tracked under the pseudo (pid, pgd) == (0, 0) entry of
    the vmi module map. If a different module is already registered at the same
    base address, a module-remove notification is dispatched before the
    module-load notification for the new one.

    :param module: volatility kernel module object (only used to read symbols)
    :param base: load base address of the module
    :param size: size of the module in memory
    :param basename: short module name
    :param fullname: full module path/name
    :param update_symbols: if True, extract and cache the module's symbols
    :return: None
    '''
    from vmi import modules
    from vmi import symbols
    from vmi import Module
    from api_internal import dispatch_module_load_callback
    from api_internal import dispatch_module_remove_callback

    # Use 0 as checksum as it is irrelevant here (kernel modules carry no
    # header checksum); it is also the key used for the symbol cache.
    # NOTE: this was previously only assigned inside the update_symbols
    # branch, which made the get_checksum() comparison below raise NameError.
    checksum = 0

    # Create the module record
    mod = Module(base, size, 0, 0, 0, basename, fullname)

    # Add an entry in the module list, if necessary
    if (0, 0) not in modules:
        modules[(0, 0)] = {}

    # Module load/del notification
    if base in modules[(0, 0)]:
        existing = modules[(0, 0)][base]
        if existing.get_size() != size or \
           existing.get_checksum() != checksum or \
           existing.get_name() != basename or \
           existing.get_fullname() != fullname:
            # A different module now lives at this base address:
            # notify of module deletion and module load
            dispatch_module_remove_callback(
                0, 0, base,
                existing.get_size(),
                existing.get_name(),
                existing.get_fullname())
            del modules[(0, 0)][base]
            dispatch_module_load_callback(0, 0, base, size, basename, fullname)
            modules[(0, 0)][base] = mod
    else:
        # Just notify of module load
        dispatch_module_load_callback(0, 0, base, size, basename, fullname)
        modules[(0, 0)][base] = mod

    # Mark the module as present
    modules[(0, 0)][base].set_present()

    if update_symbols:
        # With checksum fixed to 0 we should not have name collisions here
        if (checksum, fullname) not in symbols:
            symbols[(checksum, fullname)] = {}
        syms = symbols[(checksum, fullname)]
        try:
            for sym_name, sym_offset in module.get_symbols():
                if sym_name in syms:
                    if syms[sym_name] != sym_offset:
                        # There are cases in which the same import is present twice, such as in this case:
                        # nm /lib/x86_64-linux-gnu/libpthread-2.24.so | grep "pthread_getaffinity_np"
                        # 00000000000113f0 T pthread_getaffinity_np@GLIBC_2.3.3
                        # 00000000000113a0 T
                        # pthread_getaffinity_np@@GLIBC_2.3.4
                        # Disambiguate by appending underscores until free.
                        sym_name = sym_name + "_"
                        while sym_name in syms and syms[sym_name] != sym_offset:
                            sym_name = sym_name + "_"
                        if sym_name not in syms:
                            syms[sym_name] = sym_offset
                else:
                    syms[sym_name] = sym_offset
        except Exception as e:
            # Probably could not fetch the symbols for this module
            pp_error("%s" % str(e))
        mod.set_symbols(symbols[(checksum, fullname)])

    return None
def ntopenprocessret(params, cm, callback_name, proc_hdl_p, proc, update_vads, long_size):
    '''
    Return-address callback for NtOpenProcess.

    Dereferences the output process handle, resolves it to an _EPROCESS via
    the caller's handle table, logs the event, and starts monitoring the
    opened process if it is not monitored yet.

    :param params: callback parameter dict; "cpu_index" and "cpu" are read
                   here, and "pid"/"pgd"/"name" are added before forwarding
    :param cm: the CallbackManager this callback was registered on
    :param callback_name: name of this callback, removed on entry
    :param proc_hdl_p: guest pointer to the output process handle
    :param proc: the monitored Process object that performed the call
    :param update_vads: if True, refresh the caller's VADs before returning
    :param long_size: size in bytes of a guest pointer/long
    '''
    import volatility.win32.tasks as tasks
    from interproc import interproc_start_monitoring_process
    from core import Process
    from api import get_running_process
    from utils import get_addr_space
    import api
    global interproc_data
    global interproc_config

    # Guest pointer size in bytes (4 on 32-bit guests, 8 on 64-bit guests)
    TARGET_LONG_SIZE = api.get_os_bits() / 8

    cpu_index = params["cpu_index"]
    cpu = params["cpu"]

    pgd = get_running_process(cpu_index)

    # First, remove callback
    cm.rm_callback(callback_name)

    # Do not continue if EAX/RAX returns and invalid return code.
    if read_return_parameter(cpu) != 0:
        return

    # Load volatility address space
    addr_space = get_addr_space(pgd)

    # Get list of processes, and filter out by the process that triggered the
    # call (current process id)
    eprocs = [
        t for t in tasks.pslist(addr_space)
        if t.UniqueProcessId == proc.get_pid()
    ]

    # Initialize proc_obj, that will point to the eprocess of the new created
    # process
    proc_obj = None

    # Dereference the output argument containing the hdl of the newly created
    # process
    proc_hdl = dereference_target_long(proc_hdl_p, pgd, long_size)

    # Search handle table for the new created process
    for task in eprocs:
        if task.UniqueProcessId == proc.get_pid(
        ) and task.ObjectTable.HandleTableList:
            for handle in task.ObjectTable.handles():
                if handle.is_valid(
                ) and handle.HandleValue == proc_hdl and handle.get_object_type(
                ) == "Process":
                    proc_obj = handle.dereference_as("_EPROCESS")
                    break
            # Only one task can match the pid, so stop after processing it
            break

    if proc_obj is not None:
        # Log the open, if text logging is enabled
        if interproc_config.interproc_text_log and interproc_config.interproc_text_log_handle is not None:
            f = interproc_config.interproc_text_log_handle
            f.write("[PID: %08x] NtOpenProcess: %s - PID: %x - CR3: %x\n" %
                    (proc.get_pid(), str(proc_obj.ImageFileName),
                     int(proc_obj.UniqueProcessId),
                     int(proc_obj.Pcb.DirectoryTableBase.v())))

        # Check if we are already monitoring the process
        if interproc_data.get_process_by_pid(int(
                proc_obj.UniqueProcessId)) is not None:
            return
        # Forward the opened process' identity and start monitoring it
        params["pid"] = int(proc_obj.UniqueProcessId)
        params["pgd"] = int(proc_obj.Pcb.DirectoryTableBase.v())
        params["name"] = str(proc_obj.ImageFileName)
        interproc_start_monitoring_process(params)
    else:
        # Could not resolve the handle: report the syscall return register
        # (EAX on 32-bit guests, RAX on 64-bit guests) for diagnosis
        if TARGET_LONG_SIZE == 4:
            pp_error(
                "Error while trying to retrieve EPROCESS for handle %x, PID %x, EAX: %x\n" %
                (proc_hdl, proc.get_pid(), cpu.EAX))
        elif TARGET_LONG_SIZE == 8:
            pp_error(
                "Error while trying to retrieve EPROCESS for handle %x, PID %x, EAX: %x\n" %
                (proc_hdl, proc.get_pid(), cpu.RAX))

    if update_vads:
        proc.update_vads()

    return
def __init__(self, cb, printer):
    """
    Build a new GuestAgentPlugin instance.

    :arg cb: The callback manager.
    :arg printer: The printer where logs should go to.
    """
    from utils import ConfigurationManager as conf_m
    from utils import pp_error

    # The callback manager and printer are only bound once configuration
    # has been read successfully; until then they stay None.
    self.__cb = None
    self.__printer = None

    # Plugin status
    self.__status = None

    # Bookkeeping for file descriptors handed out to the guest
    self.__file_descriptor_counter = 0
    self.__file_descriptors = {}

    # Agent binary name, its config file, and the in-guest buffer
    # parameters (offset/address/size) used to exchange data
    self.__agent_config_file = None
    self.__agent_filename = None
    self.__agent_buffer_offset = None
    self.__agent_buffer_address = None
    self.__agent_buffer_size = None

    # Address space (pgd) of the agent process, unknown until it runs
    self.__agent_pgd = None

    # Pending command queue and the metadata for exec/copy commands
    self.__file_to_execute = {"path": "", "args": "", "env": ""}
    self.__file_to_copy = {"source": "", "destiny": ""}
    self.__commands = [
        {"command": GuestAgentPlugin.__CMD_WAIT, "meta": {}}]

    # Read the guest agent configuration. Values obtained from the main
    # config are cached on the ConfigurationManager so later instances
    # can reuse them without re-reading the files.
    try:
        if conf_m.agent_filename is None:
            conf_m.agent_filename = conf_m.config.get('AGENT', 'name')
        self.__agent_filename = conf_m.agent_filename

        # Parse the agent's own config file if we have not done so yet
        if self.__agent_config_file is None:
            if not os.path.isfile(conf_m.config.get('AGENT', 'conf')):
                pp_error(
                    "[!] Could not initialize agent, offset config file missing!\n")
                return
            self.__agent_config_file = ConfigParser.RawConfigParser()
            self.__agent_config_file.read(
                conf_m.config.get('AGENT', 'conf'))

        if conf_m.agent_buffer_offset is None:
            conf_m.agent_buffer_offset = int(
                self.__agent_config_file.get('BUFFER', 'BufferOffset'))
        self.__agent_buffer_offset = conf_m.agent_buffer_offset

        if conf_m.agent_buffer_size is None:
            conf_m.agent_buffer_size = int(
                self.__agent_config_file.get('BUFFER', 'BufferSize'))
        self.__agent_buffer_size = conf_m.agent_buffer_size
    except ConfigParser.NoSectionError:
        pp_error(
            "[*] No agent configuration provided, guest agent will not work if not configured\n")
    except ConfigParser.NoOptionError:
        pp_error(
            "[*] No agent name provided, guest agent will not work if not configured properly\n")

    # Configuration done: wire the plugin into the callback manager so we
    # get notified about process creation.
    self.__cb = cb
    self.__printer = printer

    self.__cb.add_callback(
        api.CallbackManager.CREATEPROC_CB,
        self.__new_process_callback,
        name="host_file_plugin_process_create")

    # update the status
    self.__status = GuestAgentPlugin.__AGENT_INITIALIZED