def prepare(self):
    """Set up the guest environment before the analysis starts."""
    # Result folders first, then logging, so later steps can log.
    create_folders()
    init_logging()

    # The agent drops the analysis configuration next to us.
    self.config = Config(cfg="analysis.conf")

    # Align the VM clock with the host-provided timestamp, if any.
    if self.config.get("clock", None):
        vm_clock = datetime.datetime.strptime(self.config.clock,
                                              "%Y%m%dT%H:%M:%S")
        os.system("date -s \"{0}\"".format(
            vm_clock.strftime("%y-%m-%d %H:%M:%S")))

    # For file analyses the sample lives in the temp directory; for URL
    # analyses the target is the URL itself.
    if self.config.category == "file":
        self.target = os.path.join(tempfile.gettempdir(),
                                   self.config.file_name)
    else:
        self.target = self.config.target
class STAP(Auxiliary):
    """System-wide syscall trace with stap."""
    # Low priority to wrap tightly around the analysis.
    priority = -10

    def __init__(self):
        self.config = Config(cfg="analysis.conf")
        # staprun subprocess handle, populated by start().
        self.proc = None

    def start(self):
        """Locate the compiled stap module and launch staprun.
        @return: True when tracing started, False otherwise.
        """
        # Helper locating a compiled stap module (stap_*.ko) in a directory.
        def has_stap(p):
            only_stap = [fn for fn in os.listdir(p)
                         if fn.startswith("stap_") and fn.endswith(".ko")]
            if only_stap:
                return os.path.join(p, only_stap[0])
            return False

        # Prefer the path from the VM config, then the default guest path.
        path_cfg = self.config.get("analyzer_stap_path", None)
        if path_cfg and os.path.exists(path_cfg):
            path = path_cfg
        elif os.path.exists("/root/.cuckoo") and has_stap("/root/.cuckoo"):
            path = has_stap("/root/.cuckoo")
        else:
            log.warning("Could not find STAP LKM, aborting systemtap analysis.")
            return False

        stap_start = time.time()
        self.proc = subprocess.Popen([
            "staprun", "-vv",
            "-x", str(os.getpid()),
            "-o", "stap.log",
            path,
        ], stderr=subprocess.PIPE)

        # Wait until the systemtap module reports it initialized.
        # BUGFIX: readline() returns "" at EOF, so the original
        # "while marker not in readline(): pass" loop spun forever when
        # staprun died before printing the marker; break out on EOF.
        while True:
            line = self.proc.stderr.readline()
            if not line:
                log.warning("STAP process exited before the module "
                            "initialized.")
                break
            if "systemtap_module_init() returned 0" in line:
                break

        stap_stop = time.time()
        log.info("STAP aux module startup took %.2f seconds",
                 stap_stop - stap_start)
        return True

    @staticmethod
    def _upload_file(local, remote):
        # Stream a local log file to the host over the netlog connection.
        if os.path.exists(local):
            nf = NetlogFile(remote)
            with open(local, "rb") as f:
                for chunk in f:
                    nf.sock.sendall(chunk)  # dirty direct send, no reconnecting
            nf.close()

    def stop(self):
        """Kill staprun (best effort) and upload the trace to the host."""
        try:
            r = self.proc.poll()
            log.debug("stap subprocess retval %r", r)
            self.proc.kill()
        except Exception as e:
            log.warning("Exception killing stap: %s", e)
        self._upload_file("stap.log", "logs/all.stap")
def prepare(self):
    """Prepare env for analysis.

    Sets up privileges, result folders, logging, the VM clock, the
    module-level DEFAULT_DLL / SERVICES_PID globals, the pipe servers
    used to talk to injected processes, and the analysis target path.
    """
    global DEFAULT_DLL
    global SERVICES_PID
    # Get SeDebugPrivilege for the Python process. It will be needed in
    # order to perform the injections.
    grant_debug_privilege()
    # Create the folders used for storing the results.
    create_folders()
    # Keep the analyzer's own directories out of the analysis results
    # (presumably excluded from dropped-file reporting -- project helper).
    add_protected_path(os.getcwd())
    add_protected_path(PATHS["root"])
    # Initialize logging.
    init_logging()
    # Parse the analysis configuration file generated by the agent.
    self.config = Config(cfg="analysis.conf")
    # Set virtual machine clock.
    clock = datetime.strptime(self.config.clock, "%Y%m%dT%H:%M:%S")
    # Setting date and time.
    # NOTE: Windows system has only localized commands with date format
    # following localization settings, so these commands for english date
    # format cannot work in other localizations.
    # In addition DATE and TIME commands are blocking if an incorrect
    # syntax is provided, so an echo trick is used to bypass the input
    # request and not block analysis.
    thedate = clock.strftime("%m-%d-%y")
    thetime = clock.strftime("%H:%M:%S")
    os.system("echo:|date {0}".format(thedate))
    os.system("echo:|time {0}".format(thetime))
    log.info("Date set to: {0}, time set to: {1}".format(thedate, thetime))
    # Set the default DLL to be used by the PipeHandler.
    DEFAULT_DLL = self.config.get_options().get("dll")
    # Get PID for services.exe for monitoring services.
    SERVICES_PID = self.pid_from_process_name("services.exe")
    # Initialize and start the Pipe Servers. This is going to be used for
    # communicating with the injected and monitored processes.
    for x in xrange(self.PIPE_SERVER_COUNT):
        self.pipes[x] = PipeServer(self.config.get_options())
        self.pipes[x].daemon = True
        self.pipes[x].start()
    # We update the target according to its category. If it's a file, then
    # we store the path.
    if self.config.category == "file":
        self.target = os.path.join(os.environ["TEMP"] + os.sep,
                                   str(self.config.file_name))
    # If it's a URL, well.. we store the URL.
    else:
        self.target = self.config.target
class STAP(Auxiliary):
    """System-wide syscall trace with stap, falling back to strace."""
    # Low priority to wrap tightly around the analysis.
    priority = -10

    def __init__(self):
        self.config = Config(cfg="analysis.conf")
        # Set when we could not find the stap module and used strace.
        self.fallback_strace = False

    def start(self):
        """Start staprun with the best available module, or fall back to
        strace when no stap module can be located.
        @return: True when some tracer was started.
        """
        # Helper function locating the stap module in a directory.
        def has_stap(p):
            only_stap = [fn for fn in os.listdir(p)
                         if fn.startswith("stap_") and fn.endswith(".ko")]
            if only_stap:
                return os.path.join(p, only_stap[0])
            return False

        # Highest priority: if the vm config specifies the path.
        path_cfg = self.config.get("analyzer_stap_path", None)
        if path_cfg and os.path.exists(path_cfg):
            # BUGFIX: the original assigned
            # self.config.get("analyzer_lkm_path") here although the
            # existence check above was made on "analyzer_stap_path",
            # so a configured stap path was silently replaced by the
            # (possibly unset) LKM path.
            path = path_cfg
        # Next: if a module was uploaded with the analyzer for our platform.
        elif os.path.exists(platform.machine()) and has_stap(platform.machine()):
            path = has_stap(platform.machine())
        # Next: default path inside the machine.
        elif os.path.exists("/root/.cuckoo") and has_stap("/root/.cuckoo"):
            path = has_stap("/root/.cuckoo")
        # Next: generic module uploaded with the analyzer (single arch setup maybe?).
        elif has_stap("."):
            path = has_stap(".")
        else:
            # We can't find the stap module, fallback to strace.
            log.warning("Could not find STAP LKM, falling back to strace.")
            return self.start_strace()

        stap_start = time.time()
        stderrfd = open("stap.stderr", "wb")
        self.proc = subprocess.Popen(
            ["staprun", "-v", "-x", str(os.getpid()), "-o", "stap.log", path],
            stderr=stderrfd)
        # staprun gives us no synchronous "ready" signal on this code path
        # (stderr goes to a file), so give the tap script a fixed window to
        # compile and insert before declaring startup done.
        time.sleep(10)
        stap_stop = time.time()
        log.info("STAP aux module startup took %.2f seconds" % (stap_stop - stap_start))
        return True

    def start_strace(self):
        """Trace our descendants with strace as a stap substitute."""
        try:
            os.mkdir("strace")
        except OSError:
            # Narrowed from a bare except: the directory already exists.
            pass
        stderrfd = open("strace/strace.stderr", "wb")
        self.proc = subprocess.Popen(
            ["strace", "-ff", "-o", "strace/straced", "-p", str(os.getpid())],
            stderr=stderrfd)
        self.fallback_strace = True
        return True

    def get_pids(self):
        # Only the strace fallback adds a PID worth tracking.
        if self.fallback_strace:
            return [self.proc.pid, ]
        return []

    def stop(self):
        """Kill the tracer (best effort) and upload its logs to the host."""
        try:
            r = self.proc.poll()
            log.debug("stap subprocess retval %r", r)
            self.proc.kill()
        except Exception as e:
            log.warning("Exception killing stap: %s", e)

        if os.path.exists("stap.log"):
            # Now upload the logfile.
            nf = NetlogFile("logs/all.stap")
            with open("stap.log", "rb") as fd:
                for chunk in fd:
                    nf.sock.sendall(chunk)  # dirty direct send, no reconnecting
            nf.close()

        # In case we fell back to strace.
        if os.path.exists("strace"):
            for fn in os.listdir("strace"):
                # We don't need the logs from the analyzer python process itself.
                if fn == "straced.%u" % os.getpid():
                    continue
                fp = os.path.join("strace", fn)
                # Now upload the logfile.
                nf = NetlogFile("logs/%s" % fn)
                with open(fp, "rb") as fd:
                    for chunk in fd:
                        nf.sock.sendall(chunk)  # dirty direct send, no reconnecting
                nf.close()
def prepare(self):
    """Set up the Windows guest environment before the analysis starts."""
    # SeDebugPrivilege is required to open and inject other processes.
    grant_debug_privilege()

    init_logging()

    # Configuration generated by the agent; also handed to Process.
    self.config = Config(cfg="analysis.conf")
    Process.set_config(self.config)

    # Sync the VM clock. Windows only ships localized DATE/TIME commands
    # and both block on bad syntax, hence the "echo:|" trick which feeds
    # them an empty answer instead of waiting for input.
    vm_clock = datetime.strptime(self.config.clock, "%Y%m%dT%H:%M:%S")
    os.system("echo:|date {0}".format(vm_clock.strftime("%m-%d-%y")))
    os.system("echo:|time {0}".format(vm_clock.strftime("%H:%M:%S")))

    # Default monitor DLL for this analysis.
    self.default_dll = self.config.options.get("dll")

    # Command pipe: use the submitted name when given, otherwise random.
    if "pipe" in self.config.options:
        pipe_name = self.config.options["pipe"]
    else:
        pipe_name = random_string(16, 32)
    self.config.pipe = "\\\\.\\PIPE\\%s" % pipe_name

    # The logging pipe name is always randomly generated.
    self.config.logpipe = "\\\\.\\PIPE\\%s" % random_string(16, 32)

    # Command handler pipe server: communicates with monitored processes.
    self.command_pipe = PipeServer(PipeDispatcher, self.config.pipe,
                                   message=True,
                                   dispatcher=CommandPipeHandler(self))
    self.command_pipe.daemon = True
    self.command_pipe.start()

    # Log pipe server: monitored processes send their logs here before
    # they are forwarded to the host machine.
    destination = self.config.ip, self.config.port
    self.log_pipe_server = PipeServer(PipeForwarder, self.config.logpipe,
                                      destination=destination)
    self.log_pipe_server.daemon = True
    self.log_pipe_server.start()

    # The target is either the dropped sample path or the submitted URL.
    if self.config.category == "file":
        self.target = os.path.join(os.environ["TEMP"] + os.sep,
                                   self.config.file_name)
    else:
        self.target = self.config.target
def inject(self, dll=None, interest=None, nosleepskip=False):
    """Cuckoo DLL injection.
    @param dll: Cuckoo DLL name, relative to the "dll" folder.
    @param interest: path to file of interest, handed to cuckoomon config.
    @param nosleepskip: when True, write force-sleepskip=0 so the monitor
        does not skip sleeps in this process.
    @return: True on success, False on failure; intentionally falls
        through (returning None) when the loader exits with code 1
        ("injected into suspended process"), matching historic behaviour.
    """
    if not self.pid:
        log.warning("No valid pid specified, injection aborted")
        return False

    # A stored thread id selects QueueUserAPC-style injection later on.
    thread_id = 0
    if self.thread_id:
        thread_id = self.thread_id

    if not self.is_alive():
        log.warning("The process with pid %s is not alive, "
                    "injection aborted", self.pid)
        return False

    is_64bit = self.is_64bit()
    if not dll:
        # Default to the randomized cuckoomon name for the architecture.
        if is_64bit:
            dll = CUCKOOMON64_NAME
        else:
            dll = CUCKOOMON32_NAME
    else:
        # BUGFIX: the original evaluated os.path.join("dll", dll) and
        # discarded the result, so a caller-supplied DLL name was looked
        # up in the analyzer root instead of the "dll" folder.
        dll = os.path.join("dll", dll)

    dll = os.path.join(os.getcwd(), dll)

    if not dll or not os.path.exists(dll):
        log.warning("No valid DLL specified to be injected in process "
                    "with pid %d, injection aborted.", self.pid)
        return False

    # Write the per-process monitor configuration file.
    config_path = "C:\\%s.ini" % self.pid
    with open(config_path, "w") as config:
        cfg = Config("analysis.conf")
        cfgoptions = cfg.get_options()
        # Start the logserver for this monitored process.
        self.logserver = LogServer(cfg.ip, cfg.port, self.logserver_path)
        firstproc = Process.first_process
        config.write("host-ip={0}\n".format(cfg.ip))
        config.write("host-port={0}\n".format(cfg.port))
        config.write("pipe={0}\n".format(PIPE))
        config.write("logserver={0}\n".format(self.logserver_path))
        config.write("results={0}\n".format(PATHS["root"]))
        config.write("analyzer={0}\n".format(os.getcwd()))
        config.write("first-process={0}\n".format("1" if firstproc else "0"))
        config.write("startup-time={0}\n".format(Process.startup_time))
        config.write("file-of-interest={0}\n".format(interest))
        config.write("shutdown-mutex={0}\n".format(SHUTDOWN_MUTEX))
        config.write("terminate-event={0}{1}\n".format(TERMINATE_EVENT,
                                                       self.pid))
        if nosleepskip:
            config.write("force-sleepskip=0\n")
        if "norefer" not in cfgoptions:
            config.write("referrer={0}\n".format(get_referrer_url(interest)))
        # Options copied verbatim from analysis.conf when present.
        simple_optnames = [
            "force-sleepskip", "full-logs", "force-flush", "no-stealth",
            "buffer-max", "large-buffer-max", "serial", "sysvol_ctimelow",
            "sysvol_ctimehigh", "sys32_ctimelow", "sys32_ctimehigh",
            "debug", "disable_hook_content",
        ]
        for optname in simple_optnames:
            if optname in cfgoptions:
                config.write("{0}={1}\n".format(optname, cfgoptions[optname]))

    if firstproc:
        Process.first_process = False

    if thread_id or self.suspended:
        log.debug("Using QueueUserAPC injection.")
    else:
        log.debug("Using CreateRemoteThread injection.")

    # Select the (randomized) loader binary matching the target bitness.
    orig_bin_name = ""
    bit_str = ""
    if is_64bit:
        orig_bin_name = LOADER64_NAME
        bit_str = "64-bit"
    else:
        orig_bin_name = LOADER32_NAME
        bit_str = "32-bit"

    bin_name = os.path.join(os.getcwd(), orig_bin_name)
    if os.path.exists(bin_name):
        ret = subprocess.call([bin_name, "inject", str(self.pid),
                               str(thread_id), dll])
        if ret != 0:
            if ret == 1:
                # Loader reports 1 for suspended-process injection; control
                # deliberately falls through without an explicit return.
                log.info("Injected into suspended %s process with pid %d",
                         bit_str, self.pid)
            else:
                log.error("Unable to inject into %s process with pid %d, error: %d",
                          bit_str, self.pid, ret)
                return False
        else:
            return True
    else:
        log.error("Please place the %s binary from cuckoomon into analyzer/windows/bin in order to analyze %s binaries.",
                  os.path.basename(bin_name), bit_str)
        return False
def prepare(self):
    """Prepare env for analysis.

    Sets up privileges, folders, logging, the VM clock, DEFAULT_DLL and
    SERVICES_PID globals, the pipe servers and the analysis target path.
    """
    global DEFAULT_DLL
    global SERVICES_PID
    # Get SeDebugPrivilege for the Python process. It will be needed in
    # order to perform the injections.
    grant_debug_privilege()
    # Create the folders used for storing the results.
    create_folders()
    add_protected_path(os.getcwd())
    add_protected_path(PATHS["root"])
    # Initialize logging.
    init_logging()
    # Parse the analysis configuration file generated by the agent.
    self.config = Config(cfg="analysis.conf")
    # Set virtual machine clock.
    clock = datetime.strptime(self.config.clock, "%Y%m%dT%H:%M:%S")
    # Setting date and time.
    # NOTE: Windows system has only localized commands with date format
    # following localization settings, so these commands for english date
    # format cannot work in other localizations.
    # In addition DATE and TIME commands are blocking if an incorrect
    # syntax is provided, so an echo trick is used to bypass the input
    # request and not block analysis.
    os.system("echo:|date {0}".format(clock.strftime("%m-%d-%y")))
    os.system("echo:|time {0}".format(clock.strftime("%H:%M:%S")))
    # Set the default DLL to be used by the PipeHandler.
    DEFAULT_DLL = self.config.get_options().get("dll")
    # Get PID for services.exe for monitoring services.
    # tasklist sometimes fails under high load
    # (http://support.microsoft.com/kb/2732840), so retry a few times to
    # hopefully work around failures.
    retries = 4
    while retries > 0:
        stdin, stdout, stderr = os.popen3(
            "tasklist /V /FI \"IMAGENAME eq services.exe\"")
        s = stdout.read()
        err = stderr.read()
        # BUGFIX: close the popen3 handles each attempt; the original
        # leaked three file objects per retry.
        stdin.close()
        stdout.close()
        stderr.close()
        if 'services.exe' not in s:
            log.warning('tasklist failed with error "%s"' % (err))
        else:
            # It worked.
            break
        retries -= 1
    if 'services.exe' not in s:
        # All attempts failed.
        # BUGFIX: fixed "retreive" typo in the error message.
        log.error('Unable to retrieve services.exe PID')
        SERVICES_PID = None
    else:
        # Parse the PID out of the fixed-width tasklist output: it is the
        # first whitespace-delimited token after the image name column.
        servidx = s.index("services.exe")
        servstr = s[servidx + 12:].strip()
        SERVICES_PID = int(servstr[:servstr.index(' ')], 10)
        log.debug('services.exe PID is %s' % (SERVICES_PID))
    # Initialize and start the Pipe Servers. This is going to be used for
    # communicating with the injected and monitored processes.
    for x in xrange(self.PIPE_SERVER_COUNT):
        self.pipes[x] = PipeServer()
        self.pipes[x].daemon = True
        self.pipes[x].start()
    # We update the target according to its category. If it's a file, then
    # we store the path.
    if self.config.category == "file":
        self.target = os.path.join(os.environ["TEMP"] + os.sep,
                                   str(self.config.file_name))
    # If it's a URL, well.. we store the URL.
    else:
        self.target = self.config.target
def prepare(self):
    """Prepare env for analysis.

    Sets up privileges, randomized monitor binaries, result folders,
    logging, the VM clock, the DEFAULT_DLL / SERVICES_PID / HIDE_PIDS
    globals, the pipe servers, and the analysis target path.
    """
    global DEFAULT_DLL
    global SERVICES_PID
    global HIDE_PIDS
    # Get SeDebugPrivilege for the Python process. It will be needed in
    # order to perform the injections.
    grant_debug_privilege()
    # Randomize cuckoomon DLL and loader executable names by copying them
    # under the randomized module-level names.
    copy("dll\\cuckoomon.dll", CUCKOOMON32_NAME)
    copy("dll\\cuckoomon_x64.dll", CUCKOOMON64_NAME)
    copy("bin\\loader.exe", LOADER32_NAME)
    copy("bin\\loader_x64.exe", LOADER64_NAME)
    # Create the folders used for storing the results.
    create_folders()
    # Keep the analyzer's own directories protected (project helper;
    # presumably excluded from file reporting -- verify in add_protected_path).
    add_protected_path(os.getcwd())
    add_protected_path(PATHS["root"])
    # Initialize logging.
    init_logging()
    # Parse the analysis configuration file generated by the agent.
    self.config = Config(cfg="analysis.conf")
    # Set virtual machine clock.
    clock = datetime.strptime(self.config.clock, "%Y%m%dT%H:%M:%S")
    # Setting date and time.
    # NOTE: Windows system has only localized commands with date format
    # following localization settings, so these commands for english date
    # format cannot work in other localizations.
    # In addition DATE and TIME commands are blocking if an incorrect
    # syntax is provided, so an echo trick is used to bypass the input
    # request and not block analysis.
    thedate = clock.strftime("%m-%d-%y")
    thetime = clock.strftime("%H:%M:%S")
    os.system("echo:|date {0}".format(thedate))
    os.system("echo:|time {0}".format(thetime))
    log.info("Date set to: {0}, time set to: {1}".format(thedate, thetime))
    # Set the default DLL to be used by the PipeHandler.
    DEFAULT_DLL = self.config.get_options().get("dll")
    # Get PID for services.exe for monitoring services.
    svcpid = self.pids_from_process_name_list(["services.exe"])
    if svcpid:
        SERVICES_PID = svcpid[0]
    # VM tooling and analysis-tool process names whose PIDs end up in
    # HIDE_PIDS (presumably hidden from the sample by the monitor --
    # confirm against the cuckoomon side).
    protected_procname_list = [
        "vmwareuser.exe",
        "vmwareservice.exe",
        "vboxservice.exe",
        "vboxtray.exe",
        "sandboxiedcomlaunch.exe",
        "sandboxierpcss.exe",
        "procmon.exe",
        "regmon.exe",
        "filemon.exe",
        "wireshark.exe",
        "netmon.exe",
        "prl_tools_service.exe",
        "prl_tools.exe",
        "prl_cc.exe",
        "sharedintapp.exe",
        "vmtoolsd.exe",
        "vmsrvc.exe",
        "python.exe",
        "perl.exe",
    ]
    HIDE_PIDS = set(self.pids_from_process_name_list(protected_procname_list))
    # Initialize and start the Pipe Servers. This is going to be used for
    # communicating with the injected and monitored processes.
    for x in xrange(self.PIPE_SERVER_COUNT):
        self.pipes[x] = PipeServer(self.config)
        self.pipes[x].daemon = True
        self.pipes[x].start()
    # We update the target according to its category. If it's a file, then
    # we store the path.
    if self.config.category == "file":
        self.target = os.path.join(os.environ["TEMP"] + os.sep,
                                   str(self.config.file_name))
    # If it's a URL, well.. we store the URL.
    else:
        self.target = self.config.target
def __init__(self):
    # PIDs already forwarded to the analyzer, so each is reported once.
    self.pids_reported = set()
    # Analysis settings handed over by the agent.
    self.config = Config(cfg="analysis.conf")
from lib.common.results import upload_to_host
import subprocess
import os
import sys
import shutil
import StringIO
from threading import Thread
import logging
import re
from lib.core.config import Config
import time

log = logging.getLogger(__name__)

# Basename extracted from the state file by getNewFileName(); None until
# a path has been parsed.
new_file_name = None

cfg = Config("analysis.conf")
# Comma-separated list of state files the host expects back from the guest.
filesNeeded = [f.strip() for f in cfg.files_needed.split(',')]


def getNewFileName(path):
    """Read the first line of *path* (a backslash-separated Windows path)
    and remember its basename in the module-global ``new_file_name``.

    @param path: file whose first line contains the path to parse.
    """
    global new_file_name
    with open(path, 'r') as f:
        this_line = f.readline()
    # BUGFIX: file.readline() never returns None -- it returns "" at EOF.
    # The original "not this_line == None" check was therefore always
    # true, and an empty file produced new_file_name = "". Only parse a
    # non-empty line.
    if this_line:
        tokens = this_line.split("\\")
        log.info(tokens)
        new_file_name = tokens[-1]
        log.info(new_file_name)
class LKM(Auxiliary):
    """Helper LKM for sleep skipping etc."""

    def __init__(self):
        self.config = Config(cfg="analysis.conf")
        # Child PIDs already reported, so get_pids() returns each once.
        self.pids_reported = set()

    def start(self):
        """Locate probelkm.ko and insert it, tracing our descendants.
        @return: True when the module was inserted, False otherwise.
        """
        # Highest priority: if the vm config specifies the path.
        if self.config.get("analyzer_lkm_path", None) and os.path.exists(
                self.config.get("analyzer_lkm_path")):
            path = self.config.get("analyzer_lkm_path")
        # Next: if the analyzer was uploaded with a module for our platform.
        elif os.path.exists(os.path.join(platform.machine(), "probelkm.ko")):
            path = os.path.join(platform.machine(), "probelkm.ko")
        # Next: default path inside the machine.
        elif os.path.exists("/root/.cuckoo/probelkm.ko"):
            path = "/root/.cuckoo/probelkm.ko"
        # Next: generic module uploaded with the analyzer (single arch setup maybe?).
        elif os.path.exists("probelkm.ko"):
            path = "probelkm.ko"
        else:
            log.warning("Could not find probelkm :(")
            return False

        os.system("insmod %s trace_descendants=1 target_pid=%u" % (path, os.getpid()))
        return True

    def get_pids(self):
        """Parse the kernel log for fork events reported by probelkm.
        @return: list of newly observed child PIDs.
        """
        new = []
        # BUGFIX: use a context manager; the original leaked the file
        # object on every call.
        with open("/var/log/kern.log") as fd:
            for line in fd:
                if "[probelkm]" not in line:
                    continue
                # Lines look like "... forked to <pid>@..." -- extract <pid>.
                pos1 = line.find("forked to ")
                pos2 = line.find("@", pos1 + 10)
                if pos1 == -1 or pos2 == -1:
                    continue
                forked_pid = int(line[pos1 + 10:pos2])
                if forked_pid in self.pids_reported:
                    continue
                self.pids_reported.add(forked_pid)
                new.append(forked_pid)
        return new

    def stop(self):
        """Upload all probelkm kernel log lines to the host."""
        # I guess we don't need to unload at all.
        #os.system("rmmod probelkm")
        # Now upload the logfile.
        nf = NetlogFile("logs/all.lkm")
        # BUGFIX: close the kernel log when done (context manager); the
        # original fd.close() sat after the loop but the nf/file pair
        # leaked on any exception.
        with open("/var/log/kern.log") as fd:
            for line in fd:
                if "[probelkm]" not in line:
                    continue
                nf.sock.sendall(line)  # dirty direct send, no reconnecting
        nf.close()
def inject(self, dll=None, interest=None, nosleepskip=False):
    """Cuckoo DLL injection.
    @param dll: Cuckoo DLL name (basename; resolved under the "dll" folder).
    @param interest: path to file of interest, handed to cuckoomon config
    @param nosleepskip: when True, write force-sleepskip=0 so cuckoomon
        does not skip sleeps in this process.
    @return: True on success, False on failure; deliberately falls through
        (returning None) when the loader exits with code 1, i.e.
        "injected into suspended process".
    """
    if not self.pid:
        log.warning("No valid pid specified, injection aborted")
        return False

    # A stored thread id selects QueueUserAPC-style injection below.
    thread_id = 0
    if self.thread_id:
        thread_id = self.thread_id

    if not self.is_alive():
        log.warning(
            "The process with pid %s is not alive, "
            "injection aborted", self.pid)
        return False

    is_64bit = self.is_64bit()
    if not dll:
        # Default to the cuckoomon build matching the target architecture.
        if is_64bit:
            dll = "cuckoomon_x64.dll"
        else:
            dll = "cuckoomon.dll"

    # Copy the DLL under a randomized name (project helper) and use that.
    dll = randomize_dll(os.path.join("dll", dll))

    if not dll or not os.path.exists(dll):
        log.warning(
            "No valid DLL specified to be injected in process "
            "with pid %d, injection aborted.", self.pid)
        return False

    # Per-process cuckoomon configuration file, read by the injected DLL.
    config_path = "C:\\%s.ini" % self.pid
    with open(config_path, "w") as config:
        cfg = Config("analysis.conf")
        cfgoptions = cfg.get_options()
        firstproc = Process.first_process
        config.write("host-ip={0}\n".format(cfg.ip))
        config.write("host-port={0}\n".format(cfg.port))
        config.write("pipe={0}\n".format(PIPE))
        config.write("results={0}\n".format(PATHS["root"]))
        config.write("analyzer={0}\n".format(os.getcwd()))
        config.write(
            "first-process={0}\n".format("1" if firstproc else "0"))
        config.write("startup-time={0}\n".format(Process.startup_time))
        config.write("file-of-interest={0}\n".format(interest))
        config.write("shutdown-mutex={0}\n".format(SHUTDOWN_MUTEX))
        config.write("terminate-event={0}{1}\n".format(
            TERMINATE_EVENT, self.pid))
        # nosleepskip overrides any force-sleepskip value from the config.
        if nosleepskip:
            config.write("force-sleepskip=0\n")
        elif "force-sleepskip" in cfgoptions:
            config.write("force-sleepskip={0}\n".format(
                cfgoptions["force-sleepskip"]))
        if "full-logs" in cfgoptions:
            config.write("full-logs={0}\n".format(cfgoptions["full-logs"]))
        if "no-stealth" in cfgoptions:
            config.write("no-stealth={0}\n".format(
                cfgoptions["no-stealth"]))

    if firstproc:
        Process.first_process = False

    if thread_id or self.suspended:
        log.debug("Using QueueUserAPC injection.")
    else:
        log.debug("Using CreateRemoteThread injection.")

    # Run the loader binary matching the target bitness.
    if is_64bit:
        if os.path.exists("bin/loader_x64.exe"):
            ret = subprocess.call([
                "bin/loader_x64.exe", "inject",
                str(self.pid), str(thread_id), dll
            ])
            if ret != 0:
                if ret == 1:
                    # No explicit return here: control falls out of the
                    # function (None) for the suspended-process code.
                    log.info(
                        "Injected into suspended 64-bit process with pid %d",
                        self.pid)
                else:
                    log.error(
                        "Unable to inject into 64-bit process with pid %d, error: %d",
                        self.pid, ret)
                    return False
            else:
                return True
        else:
            log.error(
                "Please place the loader_x64.exe binary from cuckoomon into analyzer/windows/bin in order to analyze x64 binaries."
            )
            return False
    else:
        if os.path.exists("bin/loader.exe"):
            ret = subprocess.call([
                "bin/loader.exe", "inject",
                str(self.pid), str(thread_id), dll
            ])
            if ret != 0:
                if ret == 1:
                    # Same deliberate fallthrough as the 64-bit branch.
                    log.info(
                        "Injected into suspended 32-bit process with pid %d",
                        self.pid)
                else:
                    log.error(
                        "Unable to inject into 32-bit process with pid %d, error: %d",
                        self.pid, ret)
                    return False
            else:
                return True
        else:
            # No 32-bit loader shipped: fall back to the legacy in-process
            # injection implementation.
            return self.old_inject(dll, self.thread_id or self.suspended)
def run(self):
    """Run handler.

    Reads one command from the named pipe, dispatches on its prefix
    (GETPIDS / HOOKDLLS / PROCESS: / FILE_NEW: / FILE_DEL: / FILE_MOVE:),
    writes the response back and closes the pipe handle.
    @return: operation status.
    """
    data = ""
    response = "OK"
    wait = False
    proc = None
    # Read the data submitted to the Pipe Server.
    while True:
        bytes_read = c_int(0)
        buf = create_string_buffer(BUFSIZE)
        success = KERNEL32.ReadFile(self.h_pipe, buf, sizeof(buf),
                                    byref(bytes_read), None)
        data += buf.value
        # ERROR_MORE_DATA: the message did not fit in buf, keep reading.
        if not success and KERNEL32.GetLastError() == ERROR_MORE_DATA:
            continue
        #elif not success or bytes_read.value == 0:
        #    if KERNEL32.GetLastError() == ERROR_BROKEN_PIPE:
        #        pass
        break
    if data:
        command = data.strip()
        # Parse the prefix for the received notification.
        # In case of GETPIDS we're gonna return the current process ID
        # and the process ID of our parent process (agent.py).
        if command == "GETPIDS":
            response = struct.pack("II", PID, PPID)
        # When analyzing we don't want to hook all functions, as we're
        # having some stability issues with regards to webbrowsers.
        elif command == "HOOKDLLS":
            is_url = Config(cfg="analysis.conf").category != "file"
            url_dlls = "ntdll", "kernel32"

            def hookdll_encode(names):
                # We have to encode each dll name as unicode string
                # with length 16.
                names = [
                    name + "\x00" * (16 - len(name)) for name in names
                ]
                f = lambda s: "".join(ch + "\x00" for ch in s)
                return "".join(f(name) for name in names)

            # If this sample is not a URL, then we don't want to limit
            # any API hooks (at least for now), so we write a null-byte
            # which indicates that all DLLs should be hooked.
            if not is_url:
                response = "\x00"
            else:
                response = hookdll_encode(url_dlls)
        # In case of PID, the client is trying to notify the creation of
        # a new process to be injected and monitored.
        elif command.startswith("PROCESS:"):
            # We acquire the process lock in order to prevent the analyzer
            # to terminate the analysis while we are operating on the new
            # process.
            PROCESS_LOCK.acquire()
            # Set the current DLL to the default one provided
            # at submission.
            dll = DEFAULT_DLL
            # We parse the process ID. Payload is either "<pid>" or
            # "<pid>,<tid-or-dll>".
            data = command[8:]
            process_id = thread_id = None
            if not "," in data:
                if data.isdigit():
                    process_id = int(data)
            elif len(data.split(",")) == 2:
                process_id, param = data.split(",")
                thread_id = None
                if process_id.isdigit():
                    process_id = int(process_id)
                else:
                    process_id = None
                if param.isdigit():
                    thread_id = int(param)
                else:
                    # XXX: Expect a new DLL as a message parameter?
                    if isinstance(param, str):
                        dll = param
            if process_id:
                if process_id not in (PID, PPID):
                    # We inject the process only if it's not being
                    # monitored already, otherwise we would generated
                    # polluted logs.
                    if process_id not in PROCESS_LIST:
                        # Open the process and inject the DLL.
                        # Hope it enjoys it.
                        proc = Process(pid=process_id,
                                       thread_id=thread_id)
                        filepath = proc.get_filepath()
                        filename = os.path.basename(filepath)
                        log.info("Announced process name: %s", filename)
                        if not protected_filename(filename):
                            # Add the new process ID to the list of
                            # monitored processes.
                            add_pids(process_id)
                            # If we have both pid and tid, then we can use
                            # apc to inject.
                            if process_id and thread_id:
                                proc.inject(dll, apc=True)
                            else:
                                # We inject using CreateRemoteThread, this
                                # needs the waiting in order to make sure
                                # no race conditions occur.
                                proc.inject(dll)
                                wait = True
                            log.info(
                                "Successfully injected process with "
                                "pid %s", proc.pid)
                else:
                    log.warning("Received request to inject Cuckoo "
                                "processes, skip")
            # Once we're done operating on the processes list, we release
            # the lock.
            PROCESS_LOCK.release()
        # In case of FILE_NEW, the client is trying to notify the creation
        # of a new file.
        elif command.startswith("FILE_NEW:"):
            # We extract the file path.
            file_path = command[9:].decode("utf-8")
            # We add the file to the list.
            add_file(file_path)
        # In case of FILE_DEL, the client is trying to notify an ongoing
        # deletion of an existing file, therefore we need to dump it
        # straight away.
        elif command.startswith("FILE_DEL:"):
            # Extract the file path.
            file_path = command[9:].decode("utf-8")
            # Dump the file straight away.
            del_file(file_path)
        elif command.startswith("FILE_MOVE:"):
            # syntax = FILE_MOVE:old_file_path::new_file_path
            if "::" in command[10:]:
                old_fname, new_fname = command[10:].split("::", 1)
                move_file(old_fname.decode("utf-8"),
                          new_fname.decode("utf-8"))
    # Echo the response back to the client and close our end of the pipe.
    # NOTE(review): bytes_read is reused here as the "bytes written"
    # out-parameter of WriteFile -- intentional reuse, not a read count.
    KERNEL32.WriteFile(self.h_pipe, create_string_buffer(response),
                       len(response), byref(bytes_read), None)
    KERNEL32.CloseHandle(self.h_pipe)
    # We wait until cuckoomon reports back.
    if wait:
        proc.wait()
    if proc:
        proc.close()
    return True
def __init__(self, options=None, config=None):
    """Initialize the auxiliary thread.
    @param options: auxiliary module options dict (defaults to empty).
    @param config: analyzer-provided configuration object.
    """
    # BUGFIX: avoid the shared mutable default argument ("options={}");
    # a fresh dict is now created per call instead of one dict shared by
    # every instance that omits the argument.
    if options is None:
        options = {}
    Thread.__init__(self)
    Auxiliary.__init__(self, options, config)
    self.config = Config(cfg="analysis.conf")
    # The module only runs when "evtx" is enabled in the analysis config.
    self.enabled = self.config.evtx
    self.do_run = self.enabled
def __init__(self, options={}, analyzer=None):
    # Tracer subprocess handle; populated later by start().
    self.proc = None
    # Analysis configuration written by the agent.
    self.config = Config(cfg="analysis.conf")
class STAP(Auxiliary):
    """System-wide syscall trace with stap."""
    # Low priority to wrap tightly around the analysis.
    priority = -10

    def __init__(self, options={}, analyzer=None):
        self.config = Config(cfg="analysis.conf")
        # staprun subprocess handle, populated by start().
        self.proc = None

    def start(self):
        """Locate the compiled stap module and launch staprun.
        @return: True when startup completed, False otherwise.
        """
        # Helper function locating the stap module (stap_*.ko) in a dir.
        def has_stap(p):
            for fn in os.listdir(p):
                if fn.startswith("stap_") and fn.endswith(".ko"):
                    return os.path.join(p, fn)
            return False

        # Search order: explicit config path, then ~root's .cuckoo and
        # .cape directories.
        path_cfg = self.config.get("analyzer_stap_path")
        cuckoo_path = os.path.join("/root", ".cuckoo")
        cape_path = os.path.join("/root", ".cape")
        if path_cfg and os.path.exists(path_cfg):
            path = path_cfg
        elif os.path.exists(cuckoo_path) and has_stap(cuckoo_path):
            path = has_stap(cuckoo_path)
        elif os.path.exists(cape_path) and has_stap(cape_path):
            path = has_stap(cape_path)
        else:
            log.warning("Could not find STAP LKM, aborting systemtap analysis")
            return False

        stap_start = time.time()
        self.proc = subprocess.Popen(
            [
                "staprun", "-vv",
                "-x", str(os.getpid()),
                "-o", "stap.log",
                path,
            ],
            stderr=subprocess.PIPE,
        )

        # Busy-wait on staprun's stderr until the module reports ready.
        # NOTE(review): readline() returns b"" at EOF, so this spins
        # forever if staprun exits before printing the marker -- confirm
        # an upstream fix/timeout is not needed here.
        while b"systemtap_module_init() returned 0" not in self.proc.stderr.readline(
        ):
            pass

        # NOTE(review): terminating staprun immediately after module init
        # looks suspicious -- it would normally end the trace; verify this
        # is intended (e.g. module keeps writing stap.log regardless).
        self.proc.terminate()
        self.proc.wait()

        stap_stop = time.time()
        log.info("STAP aux module startup took %.2f seconds",
                 stap_stop - stap_start)
        return True

    def stop(self):
        """Kill staprun (best effort) and upload the trace to the host."""
        try:
            r = self.proc.poll()
            # NOTE(review): %d raises TypeError if poll() returned None
            # (process still running); the exception is swallowed below,
            # which also skips kill() -- confirm %r was intended.
            log.debug("stap subprocess retval %d", r)
            self.proc.kill()
        except Exception as e:
            log.warning("Exception killing stap: %s", e)
        # Third argument presumably a "duplicate/metadata" flag -- verify
        # against the upload_to_host() signature.
        upload_to_host("stap.log", "stap/stap.log", False)
def kernel_analyze(self):
    """zer0m0n kernel analysis.

    Installs the zer0m0n minifilter driver under randomized service /
    driver / dispatcher names, starts the logs dispatcher process, writes
    the monitor config, and hands the PIDs of interest to the driver via
    DeviceIoControl.
    @return: True on success, False when driver files are missing or the
        dispatcher could not be started.
    """
    log.info("Starting kernel analysis")
    log.info("Installing driver")
    # Pick the driver build matching the OS bitness.
    if is_os_64bit():
        sys_file = os.path.join(os.getcwd(), "dll", "zer0m0n_x64.sys")
    else:
        sys_file = os.path.join(os.getcwd(), "dll", "zer0m0n.sys")
    exe_file = os.path.join(os.getcwd(), "dll", "logs_dispatcher.exe")
    if not sys_file or not exe_file or not os.path.exists(
            sys_file) or not os.path.exists(exe_file):
        log.warning(
            "No valid zer0m0n files to be used for process with pid %d, injection aborted",
            self.pid)
        return False

    # Randomized names make the installed service/driver harder for the
    # sample to fingerprint.
    exe_name = random_string(6)
    service_name = random_string(6)
    driver_name = random_string(6)

    # INF file describing the minifilter service, with the randomized
    # names substituted into the [Strings] section.
    inf_data = '[Version]\r\nSignature = "$Windows NT$"\r\nClass = "ActivityMonitor"\r\nClassGuid = {b86dff51-a31e-4bac-b3cf-e8cfe75c9fc2}\r\nProvider= %Prov%\r\nDriverVer = 22/01/2014,1.0.0.0\r\nCatalogFile = %DriverName%.cat\r\n[DestinationDirs]\r\nDefaultDestDir = 12\r\nMiniFilter.DriverFiles = 12\r\n[DefaultInstall]\r\nOptionDesc = %ServiceDescription%\r\nCopyFiles = MiniFilter.DriverFiles\r\n[DefaultInstall.Services]\r\nAddService = %ServiceName%,,MiniFilter.Service\r\n[DefaultUninstall]\r\nDelFiles = MiniFilter.DriverFiles\r\n[DefaultUninstall.Services]\r\nDelService = %ServiceName%,0x200\r\n[MiniFilter.Service]\r\nDisplayName= %ServiceName%\r\nDescription= %ServiceDescription%\r\nServiceBinary= %12%\\%DriverName%.sys\r\nDependencies = "FltMgr"\r\nServiceType = 2\r\nStartType = 3\r\nErrorControl = 1\r\nLoadOrderGroup = "FSFilter Activity Monitor"\r\nAddReg = MiniFilter.AddRegistry\r\n[MiniFilter.AddRegistry]\r\nHKR,,"DebugFlags",0x00010001 ,0x0\r\nHKR,"Instances","DefaultInstance",0x00000000,%DefaultInstance%\r\nHKR,"Instances\\"%Instance1.Name%,"Altitude",0x00000000,%Instance1.Altitude%\r\nHKR,"Instances\\"%Instance1.Name%,"Flags",0x00010001,%Instance1.Flags%\r\n[MiniFilter.DriverFiles]\r\n%DriverName%.sys\r\n[SourceDisksFiles]\r\n' + driver_name + '.sys = 1,,\r\n[SourceDisksNames]\r\n1 = %DiskId1%,,,\r\n[Strings]\r\n' + 'Prov = "' + random_string(8) + '"\r\nServiceDescription = "' + random_string(12) + '"\r\nServiceName = "' + service_name + '"\r\nDriverName = "' + driver_name + '"\r\nDiskId1 = "' + service_name + ' Device Installation Disk"\r\nDefaultInstance = "' + service_name + ' Instance"\r\nInstance1.Name = "' + service_name + ' Instance"\r\nInstance1.Altitude = "370050"\r\nInstance1.Flags = 0x0'

    # Copy driver and dispatcher under the randomized names.
    new_inf = os.path.join(os.getcwd(), "dll", "{0}.inf".format(service_name))
    new_sys = os.path.join(os.getcwd(), "dll", "{0}.sys".format(driver_name))
    copy(sys_file, new_sys)
    new_exe = os.path.join(os.getcwd(), "dll", "{0}.exe".format(exe_name))
    copy(exe_file, new_exe)
    log.info("[-] Driver name : " + new_sys)
    log.info("[-] Inf name : " + new_inf)
    log.info("[-] Application name : " + new_exe)
    log.info("[-] Service : " + service_name)
    fh = open(new_inf, "w")
    fh.write(inf_data)
    fh.close()

    # On 64-bit Windows, disable WOW64 filesystem redirection so the
    # driver install touches the real system32.
    os_is_64bit = is_os_64bit()
    if os_is_64bit:
        wow64 = c_ulong(0)
        KERNEL32.Wow64DisableWow64FsRedirection(byref(wow64))

    # Install the INF and start the minifilter service.
    os.system(
        'cmd /c "rundll32 setupapi.dll, InstallHinfSection DefaultInstall 132 '
        + new_inf + '"')
    os.system("net start " + service_name)

    # Launch the logs dispatcher process in a new console.
    si = STARTUPINFO()
    si.cb = sizeof(si)
    pi = PROCESS_INFORMATION()
    cr = CREATE_NEW_CONSOLE
    ldp = KERNEL32.CreateProcessA(new_exe, None, None, None, None, cr,
                                  None, os.getenv("TEMP"), byref(si),
                                  byref(pi))
    if not ldp:
        # Restore redirection before bailing out.
        if os_is_64bit:
            KERNEL32.Wow64RevertWow64FsRedirection(wow64)
        log.error("Failed starting " + exe_name + ".exe.")
        return False

    # Monitor configuration consumed by the kernel-side components.
    config_path = os.path.join(os.getenv("TEMP"), "%s.ini" % self.pid)
    with open(config_path, "w") as config:
        cfg = Config("analysis.conf")
        config.write("host-ip={0}\n".format(cfg.ip))
        config.write("host-port={0}\n".format(cfg.port))
        config.write("pipe={0}\n".format(PIPE))

    log.info("Sending startup information")
    hFile = KERNEL32.CreateFileA(PATH_KERNEL_DRIVER,
                                 GENERIC_READ | GENERIC_WRITE, 0, None,
                                 OPEN_EXISTING, 0, None)
    if os_is_64bit:
        KERNEL32.Wow64RevertWow64FsRedirection(wow64)
    if hFile:
        p = Process(pid=os.getpid())
        ppid = p.get_parent_pid()
        pid_vboxservice = 0
        pid_vboxtray = 0

        # Get pid of VBoxService.exe and VBoxTray.exe via a toolhelp
        # process snapshot.
        proc_info = PROCESSENTRY32()
        proc_info.dwSize = sizeof(PROCESSENTRY32)
        snapshot = KERNEL32.CreateToolhelp32Snapshot(TH32CS_SNAPPROCESS, 0)
        flag = KERNEL32.Process32First(snapshot, byref(proc_info))
        while flag:
            if proc_info.sz_exeFile == "VBoxService.exe":
                log.info("VBoxService.exe found !")
                pid_vboxservice = proc_info.th32ProcessID
                # NOTE(review): flag is immediately overwritten by
                # Process32Next below, so these "flag = 0" early-exit
                # attempts have no effect -- the loop always walks the
                # whole snapshot. Confirm this matches upstream intent.
                flag = 0
            elif proc_info.sz_exeFile == "VBoxTray.exe":
                pid_vboxtray = proc_info.th32ProcessID
                log.info("VBoxTray.exe found !")
                flag = 0
            flag = KERNEL32.Process32Next(snapshot, byref(proc_info))

        # Hand the underscore-joined PID list, then the analyzer path,
        # to the driver.
        bytes_returned = c_ulong(0)
        msg = str(self.pid) + "_" + str(ppid) + "_" + str(
            os.getpid()) + "_" + str(pi.dwProcessId) + "_" + str(
                pid_vboxservice) + "_" + str(pid_vboxtray) + '\0'
        KERNEL32.DeviceIoControl(hFile, IOCTL_PID, msg, len(msg), None, 0,
                                 byref(bytes_returned), None)
        msg = os.getcwd() + '\0'
        KERNEL32.DeviceIoControl(hFile, IOCTL_CUCKOO_PATH, unicode(msg),
                                 len(unicode(msg)), None, 0,
                                 byref(bytes_returned), None)
    else:
        log.warning("Failed to access kernel driver")

    return True
class Analyzer:
    """Cuckoo Linux Analyzer.

    This class handles the initialization and execution of the analysis
    procedure, including the auxiliary modules and the analysis packages.
    """

    def __init__(self):
        # Parsed analysis.conf; populated in prepare().
        self.config = None
        # Path of the sample (or the URL) to analyze; populated in prepare().
        self.target = None

    def prepare(self):
        """Prepare env for analysis."""
        # Create the folders used for storing the results.
        create_folders()

        # Initialize logging.
        init_logging()

        # Parse the analysis configuration file generated by the agent.
        self.config = Config(cfg="analysis.conf")

        if self.config.get("clock", None):
            # Set virtual machine clock.
            clock = datetime.datetime.strptime(self.config.clock, "%Y%m%dT%H:%M:%S")
            # Setting date and time.
            os.system("date -s \"{0}\"".format(clock.strftime("%y-%m-%d %H:%M:%S")))

        # We update the target according to its category. If it's a file, then
        # we store the path.
        if self.config.category == "file":
            self.target = os.path.join(tempfile.gettempdir(),
                                       self.config.file_name)
        # If it's a URL, well.. we store the URL.
        else:
            self.target = self.config.target

    def complete(self):
        """End analysis."""
        # Dump all the notified files.
        dump_files()

        # Hell yeah.
        log.info("Analysis completed.")

    def run(self):
        """Run analysis.
        @return: operation status.
        """
        self.prepare()

        log.debug("Starting analyzer from: %s", os.getcwd())
        log.debug("Storing results at: %s", PATHS["root"])

        # If no analysis package was specified at submission, we try to select
        # one automatically.
        if not self.config.package:
            log.debug("No analysis package specified, trying to detect "
                      "it automagically.")
            # "generic" executes the file directly, "wget" fetches a URL.
            if self.config.category == "file":
                package = "generic"
            else:
                package = "wget"

            # If we weren't able to automatically determine the proper
            # package, we need to abort the analysis.
            # NOTE(review): unreachable as written — both branches above
            # always assign a non-empty package name.
            if not package:
                raise CuckooError("No valid package available for file "
                                  "type: {0}".format(self.config.file_type))

            log.info("Automatically selected analysis package \"%s\"", package)
        # Otherwise just select the specified package.
        else:
            package = self.config.package

        # Generate the package path.
        package_name = "modules.packages.%s" % package

        # Try to import the analysis package.
        try:
            # Level -1 (implicit relative imports): this code targets Python 2.
            __import__(package_name, globals(), locals(), ["dummy"], -1)
        # If it fails, we need to abort the analysis.
        except ImportError:
            raise CuckooError("Unable to import package \"{0}\", does "
                              "not exist.".format(package_name))

        # Initialize the package parent abstract.
        Package()

        # Enumerate the abstract subclasses.
        try:
            package_class = Package.__subclasses__()[0]
        except IndexError as e:
            raise CuckooError("Unable to select package class "
                              "(package={0}): {1}".format(package_name, e))

        # Initialize the analysis package.
        pack = package_class(self.config.get_options())

        # Initialize Auxiliary modules
        Auxiliary()
        prefix = auxiliary.__name__ + "."
        for loader, name, ispkg in pkgutil.iter_modules(auxiliary.__path__, prefix):
            if ispkg:
                continue

            # Import the auxiliary module.
            try:
                __import__(name, globals(), locals(), ["dummy"], -1)
            except ImportError as e:
                log.warning("Unable to import the auxiliary module "
                            "\"%s\": %s", name, e)

        # Walk through the available auxiliary modules.
        aux_enabled, aux_avail = [], []
        # Higher-priority modules are started first.
        for module in sorted(Auxiliary.__subclasses__(),
                             key=lambda x: x.priority, reverse=True):
            # Try to start the auxiliary module.
            try:
                aux = module()
                aux_avail.append(aux)
                aux.start()
            except (NotImplementedError, AttributeError):
                log.warning("Auxiliary module %s was not implemented",
                            aux.__class__.__name__)
                continue
            except Exception as e:
                log.warning("Cannot execute auxiliary module %s: %s",
                            aux.__class__.__name__, e)
                continue
            finally:
                # NOTE(review): this runs even when start() failed, so the
                # "Started" debug line can be misleading.
                log.debug("Started auxiliary module %s",
                          aux.__class__.__name__)
            aux_enabled.append(aux)

        # Start analysis package. If for any reason, the execution of the
        # analysis package fails, we have to abort the analysis.
        try:
            pids = pack.start(self.target)
        except NotImplementedError:
            raise CuckooError("The package \"{0}\" doesn't contain a run "
                              "function.".format(package_name))
        except CuckooPackageError as e:
            raise CuckooError("The package \"{0}\" start function raised an "
                              "error: {1}".format(package_name, e))
        except Exception as e:
            raise CuckooError("The package \"{0}\" start function encountered "
                              "an unhandled exception: "
                              "{1}".format(package_name, e))

        # If the analysis package returned a list of process IDs, we add them
        # to the list of monitored processes and enable the process monitor.
        if pids:
            add_pids(pids)
            pid_check = True

        # If the package didn't return any process ID (for example in the case
        # where the package isn't enabling any behavioral analysis), we don't
        # enable the process monitor.
        else:
            log.info("No process IDs returned by the package, running "
                     "for the full timeout.")
            pid_check = False

        # Check in the options if the user toggled the timeout enforce. If so,
        # we need to override pid_check and disable process monitor.
        if self.config.enforce_timeout:
            log.info("Enabled timeout enforce, running for the full timeout.")
            pid_check = False

        # Main watchdog loop: one iteration per second (see the sleep in the
        # finally clause below).
        time_counter = 0

        while True:
            time_counter += 1
            if time_counter == int(self.config.timeout):
                log.info("Analysis timeout hit, terminating analysis.")
                break

            try:
                # If the process monitor is enabled we start checking whether
                # the monitored processes are still alive.
                if pid_check:
                    # Iterate over a copy since we remove entries in place.
                    for pid in list(PROCESS_LIST):
                        if not Process(pid=pid).is_alive():
                            log.info("Process with pid %s has terminated", pid)
                            PROCESS_LIST.remove(pid)

                    # ask the package if it knows any new pids
                    add_pids(pack.get_pids())

                    # also ask the auxiliaries
                    for aux in aux_avail:
                        add_pids(aux.get_pids())

                    # If none of the monitored processes are still alive, we
                    # can terminate the analysis.
                    if not PROCESS_LIST:
                        log.info("Process list is empty, "
                                 "terminating analysis.")
                        break

                    # Update the list of monitored processes available to the
                    # analysis package. It could be used for internal
                    # operations within the module.
                    pack.set_pids(PROCESS_LIST)

                try:
                    # The analysis packages are provided with a function that
                    # is executed at every loop's iteration. If such function
                    # returns False, it means that it requested the analysis
                    # to be terminated.
                    if not pack.check():
                        log.info("The analysis package requested the "
                                 "termination of the analysis.")
                        break

                # If the check() function of the package raised some exception
                # we don't care, we can still proceed with the analysis but we
                # throw a warning.
                except Exception as e:
                    log.warning("The package \"%s\" check function raised "
                                "an exception: %s", package_name, e)
            except Exception as e:
                log.exception("The PID watching loop raised an exception: %s", e)
            finally:
                # Zzz.
                time.sleep(1)

        try:
            # Before shutting down the analysis, the package can perform some
            # final operations through the finish() function.
            pack.finish()
        except Exception as e:
            log.warning("The package \"%s\" finish function raised an "
                        "exception: %s", package_name, e)

        try:
            # Upload files the package created to package_files in the
            # results folder.
            package_files = pack.package_files()
            if package_files is not None:
                for package in package_files:
                    upload_to_host(
                        package[0], os.path.join("package_files", package[1])
                    )
        except Exception as e:
            log.warning("The package \"%s\" package_files function raised an "
                        "exception: %s", package_name, e)

        # Terminate the Auxiliary modules.
        # Stopped in ascending priority order (the reverse of startup order).
        for aux in sorted(aux_enabled, key=lambda x: x.priority):
            try:
                aux.stop()
            except (NotImplementedError, AttributeError):
                continue
            except Exception as e:
                log.warning("Cannot terminate auxiliary module %s: %s",
                            aux.__class__.__name__, e)

        if self.config.terminate_processes:
            # Try to terminate remaining active processes. We do this to make
            # sure that we clean up remaining open handles (sockets, files,
            # etc.).
            log.info("Terminating remaining processes before shutdown.")

            for pid in PROCESS_LIST:
                proc = Process(pid=pid)
                if proc.is_alive():
                    try:
                        proc.terminate()
                    except:
                        continue

        # Run the finish callback of every available Auxiliary module.
        for aux in aux_avail:
            try:
                aux.finish()
            except (NotImplementedError, AttributeError):
                continue
            except Exception as e:
                log.warning("Exception running finish callback of auxiliary "
                            "module %s: %s", aux.__class__.__name__, e)

        # Let's invoke the completion procedure.
        self.complete()

        return True
class STAP(Auxiliary):
    """System-wide syscall trace with stap."""
    # Low priority so the tracer wraps tightly around the analysis:
    # it is started last and stopped first among the auxiliary modules.
    priority = -10

    def __init__(self):
        self.config = Config(cfg="analysis.conf")
        # staprun subprocess handle; set in start().
        self.proc = None

    def start(self):
        """Locate the compiled SystemTap kernel module and launch staprun.
        @return: True once the module initialized, False on any failure.
        """
        # helper function locating the stap module
        def has_stap(p):
            # Return the full path of the first stap_*.ko found in p,
            # or False when none is present.
            only_stap = [fn for fn in os.listdir(p)
                         if fn.startswith("stap_") and fn.endswith(".ko")]
            if only_stap:
                return os.path.join(p, only_stap[0])
            return False

        path_cfg = self.config.get("analyzer_stap_path", None)
        # Evaluate the fallback location once instead of twice.
        default_lkm = os.path.exists("/root/.cuckoo") and has_stap("/root/.cuckoo")
        if path_cfg and os.path.exists(path_cfg):
            path = path_cfg
        elif default_lkm:
            path = default_lkm
        else:
            log.warning("Could not find STAP LKM, aborting systemtap analysis.")
            return False

        stap_start = time.time()
        self.proc = subprocess.Popen([
            "staprun", "-vv",
            "-x", str(os.getpid()),
            "-o", "stap.log",
            path,
        ], stderr=subprocess.PIPE)

        # Wait for the module to come up. BUGFIX: the original looped on
        # readline() without checking the child process; if staprun died
        # before printing the marker, readline() returned "" forever and
        # the analyzer hung. Break out when the process has exited.
        while True:
            line = self.proc.stderr.readline()
            if "systemtap_module_init() returned 0" in line:
                break
            if not line and self.proc.poll() is not None:
                log.warning("staprun terminated unexpectedly (retval %r), "
                            "aborting systemtap analysis.",
                            self.proc.returncode)
                return False

        stap_stop = time.time()
        log.info("STAP aux module startup took %.2f seconds" %
                 (stap_stop - stap_start))
        return True

    @staticmethod
    def _upload_file(local, remote):
        # Stream a local file to the result server under the given
        # results-relative remote path.
        if os.path.exists(local):
            nf = NetlogFile(remote)
            with open(local, "rb") as f:
                for chunk in f:
                    nf.sock.sendall(chunk)  # dirty direct send, no reconnecting
            nf.close()

    def stop(self):
        """Kill staprun and upload the collected trace."""
        try:
            r = self.proc.poll()
            log.debug("stap subprocess retval %r", r)
            self.proc.kill()
        except Exception as e:
            log.warning("Exception killing stap: %s", e)

        self._upload_file("stap.log", "logs/all.stap")
def prepare(self):
    """Prepare env for analysis.

    Grants debug privileges, sets up result folders/logging, parses the
    agent-generated configuration, adjusts the VM clock, resolves the
    services.exe PID and starts the pipe servers.
    """
    global DEFAULT_DLL
    global SERVICES_PID

    # Get SeDebugPrivilege for the Python process. It will be needed in
    # order to perform the injections.
    grant_debug_privilege()

    # Create the folders used for storing the results.
    create_folders()
    # Protect the analyzer's own directories from the monitor.
    add_protected_path(os.getcwd())
    add_protected_path(PATHS["root"])

    # Initialize logging.
    init_logging()

    # Parse the analysis configuration file generated by the agent.
    self.config = Config(cfg="analysis.conf")

    # Set virtual machine clock.
    clock = datetime.strptime(self.config.clock, "%Y%m%dT%H:%M:%S")
    # Setting date and time.
    # NOTE: Windows system has only localized commands with date format
    # following localization settings, so these commands for english date
    # format cannot work in other localizations.
    # In addition DATE and TIME commands are blocking if an incorrect
    # syntax is provided, so an echo trick is used to bypass the input
    # request and not block analysis.
    os.system("echo:|date {0}".format(clock.strftime("%m-%d-%y")))
    os.system("echo:|time {0}".format(clock.strftime("%H:%M:%S")))

    # Set the default DLL to be used by the PipeHandler.
    DEFAULT_DLL = self.config.get_options().get("dll")

    # get PID for services.exe for monitoring services
    # tasklist sometimes fails under high-load (http://support.microsoft.com/kb/2732840)
    # We can retry a few times to hopefully work around failures
    retries = 4
    while retries > 0:
        stdin, stdout, stderr = os.popen3("tasklist /V /FI \"IMAGENAME eq services.exe\"")
        s = stdout.read()
        err = stderr.read()
        if 'services.exe' not in s:
            log.warning('tasklist failed with error "%s"' % (err))
        else:
            # it worked
            break
        retries -= 1
    if 'services.exe' not in s:
        # All attempts failed
        log.error('Unable to retreive services.exe PID')
        SERVICES_PID = None
    else:
        # Parse the PID out of the fixed-width tasklist output: it is the
        # first whitespace-separated token after the image name column.
        servidx = s.index("services.exe")
        servstr = s[servidx + 12:].strip()
        SERVICES_PID = int(servstr[:servstr.index(' ')], 10)
        log.debug('services.exe PID is %s' % (SERVICES_PID))

    # Initialize and start the Pipe Servers. This is going to be used for
    # communicating with the injected and monitored processes.
    for x in xrange(self.PIPE_SERVER_COUNT):
        self.pipes[x] = PipeServer()
        self.pipes[x].daemon = True
        self.pipes[x].start()

    # We update the target according to its category. If it's a file, then
    # we store the path.
    if self.config.category == "file":
        self.target = os.path.join(os.environ["TEMP"] + os.sep,
                                   str(self.config.file_name))
    # If it's a URL, well.. we store the URL.
    else:
        self.target = self.config.target
def __init__(self):
    """Load the analysis configuration and reset the tracer handle."""
    # No subprocess has been spawned yet.
    self.proc = None
    # analysis.conf is generated by the agent before the analyzer runs.
    self.config = Config(cfg="analysis.conf")
def inject(self, dll=None, apc=False):
    """Cuckoo DLL injection.
    @param dll: Cuckoo DLL path.
    @param apc: APC use.
    @return: True on success, False otherwise.
    """
    if not self.pid:
        log.warning("No valid pid specified, injection aborted")
        return False

    if not self.is_alive():
        log.warning("The process with pid %s is not alive, "
                    "injection aborted", self.pid)
        return False

    if not dll:
        dll = "cuckoomon.dll"

    # Copy the monitor DLL under a randomized name to hinder detection.
    dll = randomize_dll(os.path.join("dll", dll))

    if not dll or not os.path.exists(dll):
        log.warning("No valid DLL specified to be injected in process "
                    "with pid %d, injection aborted.", self.pid)
        return False

    # Allocate memory in the target process to hold the DLL path string.
    arg = KERNEL32.VirtualAllocEx(self.h_process, None, len(dll) + 1,
                                  MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE)

    if not arg:
        log.error(
            "VirtualAllocEx failed when injecting process with "
            "pid %d, injection aborted (Error: %s)",
            self.pid, get_error_string(KERNEL32.GetLastError()),
        )
        return False

    bytes_written = c_int(0)
    # Write the NUL-terminated DLL path into the allocated buffer.
    if not KERNEL32.WriteProcessMemory(self.h_process, arg, dll + "\x00",
                                       len(dll) + 1, byref(bytes_written)):
        log.error(
            "WriteProcessMemory failed when injecting process with "
            "pid %d, injection aborted (Error: %s)",
            self.pid, get_error_string(KERNEL32.GetLastError()),
        )
        return False

    # LoadLibraryA in the target will load the DLL; kernel32 is mapped at
    # the same base in every process, so the local address is valid there.
    kernel32_handle = KERNEL32.GetModuleHandleA("kernel32.dll")
    load_library = KERNEL32.GetProcAddress(kernel32_handle, "LoadLibraryA")

    # Drop the per-process monitor configuration next to %TEMP%.
    config_path = os.path.join(os.getenv("TEMP"), "%s.ini" % self.pid)
    with open(config_path, "w") as config:
        cfg = Config("analysis.conf")
        cfgoptions = cfg.get_options()

        # The first time we come up with a random startup-time.
        if Process.first_process:
            # This adds 1 up to 30 times of 20 minutes to the startup
            # time of the process, therefore bypassing anti-vm checks
            # which check whether the VM has only been up for <10 minutes.
            Process.startup_time = random.randint(1, 30) * 20 * 60 * 1000

        config.write("host-ip={0}\n".format(cfg.ip))
        config.write("host-port={0}\n".format(cfg.port))
        config.write("pipe={0}\n".format(PIPE))
        config.write("results={0}\n".format(PATHS["root"]))
        config.write("analyzer={0}\n".format(os.getcwd()))
        config.write("first-process={0}\n".format("1" if Process.first_process else "0"))
        config.write("startup-time={0}\n".format(Process.startup_time))
        config.write("shutdown-mutex={0}\n".format(SHUTDOWN_MUTEX))
        config.write("force-sleepskip={0}\n".format(cfgoptions.get("force-sleepskip", "0")))

        Process.first_process = False

    if apc or self.suspended:
        # Suspended processes are injected via an APC queued on their main
        # thread; it fires when the thread is resumed.
        log.debug("Using QueueUserAPC injection.")
        if not self.h_thread:
            log.info(
                "No valid thread handle specified for injecting "
                "process with pid %d, injection aborted.", self.pid,
            )
            return False

        if not KERNEL32.QueueUserAPC(load_library, self.h_thread, arg):
            log.error(
                "QueueUserAPC failed when injecting process with "
                "pid %d (Error: %s)",
                self.pid, get_error_string(KERNEL32.GetLastError()),
            )
            return False
    else:
        # Running processes get a remote thread calling LoadLibraryA; the
        # event is used to get notified by the injected monitor.
        event_name = "CuckooEvent%d" % self.pid
        self.event_handle = KERNEL32.CreateEventA(None, False, False,
                                                  event_name)
        if not self.event_handle:
            log.warning("Unable to create notify event..")
            return False

        log.debug("Using CreateRemoteThread injection.")
        new_thread_id = c_ulong(0)
        thread_handle = KERNEL32.CreateRemoteThread(
            self.h_process, None, 0, load_library, arg, 0,
            byref(new_thread_id)
        )
        if not thread_handle:
            log.error(
                "CreateRemoteThread failed when injecting process "
                "with pid %d (Error: %s)",
                self.pid, get_error_string(KERNEL32.GetLastError()),
            )
            KERNEL32.CloseHandle(self.event_handle)
            self.event_handle = None
            return False
        else:
            KERNEL32.CloseHandle(thread_handle)

    log.info("Successfully injected process with pid %d." % self.pid)

    return True
class Macalyser:
    """Cuckoo OS X analyzer.

    Orchestrates one analysis run: configuration parsing, analysis-package
    selection and instantiation, auxiliary modules, target execution and
    artefact upload.
    """

    # Class-level defaults, overwritten per instance by the methods below.
    target = ""
    target_artefacts = []
    config = []

    def prepare(self):
        """Prepare env for analysis."""
        # Create the folders used for storing the results.
        create_folders()
        # Initialize logging.
        init_logging()
        # Parse the analysis configuration file generated by the agent.
        self.parse_config("analysis.conf")
        # Setup machine time
        self.setup_machine_time()

    def run(self):
        """Run analysis.
        @return: operation status.
        """
        self.prepare()

        log.debug("Starting analyzer from: %s", os.getcwd())
        log.debug("Storing results at: %s", PATHS["root"])

        # Retrieve the analysis package name.
        package = self.analysis_package_for_current_target()
        # Initialize the analysis package and keep the instance.
        # BUGFIX: the previous implementation discarded the instance created
        # by initialize_package() and then called package methods on the bare
        # name string, which could never work.
        pack = self.initialize_package(package)
        # Setup and start auxiliary modules.
        aux = self.setup_auxiliary_modules()
        # Start analysis package.
        results = self.analysis(pack)
        # Shutdown auxiliary modules.
        self.shutdown_auxiliary_modules(aux)
        # TODO: figure out a way to do this cleanly
        # shutdown_spawned_modules(results.procs_still_alive)
        # Done!
        self.complete(pack)

    def complete(self, package):
        """Upload artefacts produced by the package instance and clean up."""
        self.upload_artefacts(package)
        self.cleanup()

    def parse_config(self, config_name="analysis.conf"):
        """Parse the analysis configuration file generated by the agent."""
        self.config = Config(cfg=config_name)

    def analysis_package_for_current_target(self):
        """Return the name of the analysis package to use for the target."""
        # If no analysis package was specified at submission, we try to select
        # one automatically.
        if not self.config.package:
            log.debug("No analysis package specified, trying to detect "
                      "it automagically.")
            # BUGFIX: initialize so non-"file" categories raise a clear
            # CuckooError below instead of an unbound-name error.
            package = None
            # If the analysis target is a file, we choose the package
            # according to the file format.
            if self.config.category == "file":
                package = choose_package(self.config.file_type,
                                         self.config.file_name)
            if not package:
                raise CuckooError("No valid package available for file "
                                  "type: {0}".format(self.config.file_type))

            log.info("Automatically selected analysis package \"%s\"", package)
        # Otherwise just select the specified package.
        else:
            package = self.config.package
        return package

    def initialize_package(self, package):
        """Import and instantiate the analysis package.
        @param package: package name.
        @return: the analysis package instance.
        @raise CuckooError: if the package cannot be imported or selected.
        """
        # Generate the package path.
        package_name = "modules.packages.%s" % package

        # Try to import the analysis package.
        try:
            __import__(package_name, globals(), locals(), ["dummy"], -1)
        # If it fails, we need to abort the analysis.
        except ImportError:
            raise CuckooError("Unable to import package \"{0}\", does "
                              "not exist.".format(package_name))

        # Initialize the package parent abstract.
        Package()

        # Enumerate the abstract subclasses.
        try:
            package_class = Package.__subclasses__()[0]
        except IndexError as e:
            raise CuckooError("Unable to select package class "
                              "(package={0}): {1}".format(package_name, e))

        # BUGFIX: return the initialized package instance; it used to be
        # assigned to a local and discarded.
        return package_class(self.config.get_options())

    def setup_auxiliary_modules(self):
        """Import, instantiate and start all auxiliary modules.
        @return: the list of successfully started auxiliary instances.
        """
        # Initialize Auxiliary modules
        Auxiliary()
        prefix = auxiliary.__name__ + "."
        for loader, name, ispkg in pkgutil.iter_modules(
                auxiliary.__path__, prefix):
            if ispkg:
                continue

            # Import the auxiliary module.
            try:
                __import__(name, globals(), locals(), ["dummy"], -1)
            except ImportError as e:
                log.warning("Unable to import the auxiliary module "
                            "\"%s\": %s", name, e)

        # Walk through the available auxiliary modules.
        aux_enabled, aux_avail = [], []
        for module in Auxiliary.__subclasses__():
            # Try to start the auxiliary module.
            try:
                aux = module(self.config.get_options())
                aux_avail.append(aux)
                aux.start()
            except (NotImplementedError, AttributeError):
                log.warning("Auxiliary module %s was not implemented",
                            aux.__class__.__name__)
                continue
            except Exception as e:
                log.warning("Cannot execute auxiliary module %s: %s",
                            aux.__class__.__name__, e)
                continue
            finally:
                log.debug("Started auxiliary module %s",
                          aux.__class__.__name__)
            aux_enabled.append(aux)

        # BUGFIX: return the started modules so shutdown_auxiliary_modules()
        # receives them (the original returned None implicitly).
        return aux_enabled

    def setup_machine_time(self):
        """Set the virtual machine clock from the configuration."""
        # Set virtual machine clock.
        clock = datetime.strptime(self.config.clock, "%Y%m%dT%H:%M:%S")
        # Setting date and time.
        # TODO(phretor): check how to set seconds
        os.system("date {0}".format(clock.strftime("%m%d%H%M%y")))

        # TODO(phretor): add support for other than "file"
        if self.config.category == "file":
            self.target = os.path.join(os.environ["TEMP"] + os.sep,
                                       str(self.config.file_name))

    def analysis(self, package):
        """Run the analysis through the given package instance.
        @param package: analysis package instance.
        @raise CuckooError: if the package start function fails.
        """
        try:
            pids = package.start(self.target)
        except NotImplementedError:
            raise CuckooError("The package \"{0}\" doesn't contain a run "
                              "function.".format(package))
        except CuckooPackageError as e:
            raise CuckooError("The package \"{0}\" start function raised an "
                              "error: {1}".format(package, e))
        except Exception as e:
            raise CuckooError("The package \"{0}\" start function encountered "
                              "an unhandled exception: "
                              "{1}".format(package, e))

        # Should we enforce timeout?
        if self.config.enforce_timeout:
            log.info("Enabled timeout enforce, running for the full timeout.")
            time.sleep(self.config.timeout)

        try:
            # Before finishing the analysis, the package can perform some
            # final operations through the finish() function.
            package.finish()
        except Exception as e:
            log.warning("The package \"%s\" finish function raised an "
                        "exception: %s", package, e)

    def shutdown_auxiliary_modules(self, aux):
        # TODO: actually stop the started auxiliary modules.
        pass

    def shutdown_spawned_processes(self, procs):
        # TODO: terminate processes spawned during the analysis.
        pass

    def upload_artefacts(self, package):
        """Upload files created by the package instance to the host."""
        try:
            # Upload files the package created to package_files in the
            # results folder on the host.
            package_files = package.package_files()
            if package_files is not None:
                for p in package_files:
                    upload_to_host(
                        p[0], os.path.join("package_files", p[1]))
        except Exception as e:
            log.warning("The package \"%s\" package_files function raised an "
                        "exception: %s", package, e)

    def cleanup(self):
        # Nothing to clean up yet.
        pass
def parse_config(self, config_name="analysis.conf"):
    """Parse the analysis configuration file generated by the agent.
    @param config_name: name of the configuration file to load.
    """
    parsed = Config(cfg=config_name)
    # Keep the parsed configuration around for the other helpers.
    self.config = parsed
def inject(self, dll=None, apc=False):
    """Cuckoo DLL injection.
    @param dll: Cuckoo DLL path.
    @param apc: APC use.
    @return: True on success, False otherwise.
    """
    if not self.pid:
        log.warning("No valid pid specified, injection aborted")
        return False

    if not self.is_alive():
        log.warning(
            "The process with pid %s is not alive, "
            "injection aborted", self.pid)
        return False

    if not dll:
        dll = "cuckoomon.dll"

    # Copy the monitor DLL under a randomized name to hinder detection.
    dll = randomize_dll(os.path.join("dll", dll))

    if not dll or not os.path.exists(dll):
        log.warning(
            "No valid DLL specified to be injected in process "
            "with pid %d, injection aborted.", self.pid)
        return False

    # Allocate memory in the target process to hold the DLL path string.
    arg = KERNEL32.VirtualAllocEx(self.h_process, None, len(dll) + 1,
                                  MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE)

    if not arg:
        log.error(
            "VirtualAllocEx failed when injecting process with "
            "pid %d, injection aborted (Error: %s)",
            self.pid, get_error_string(KERNEL32.GetLastError()))
        return False

    bytes_written = c_int(0)
    # Write the NUL-terminated DLL path into the allocated buffer.
    if not KERNEL32.WriteProcessMemory(self.h_process, arg, dll + "\x00",
                                       len(dll) + 1, byref(bytes_written)):
        log.error(
            "WriteProcessMemory failed when injecting process with "
            "pid %d, injection aborted (Error: %s)",
            self.pid, get_error_string(KERNEL32.GetLastError()))
        return False

    # LoadLibraryA in the target loads the DLL; kernel32 is mapped at the
    # same base address in every process, so the local address is valid.
    kernel32_handle = KERNEL32.GetModuleHandleA("kernel32.dll")
    load_library = KERNEL32.GetProcAddress(kernel32_handle, "LoadLibraryA")

    # Drop the per-process monitor configuration into %TEMP%.
    config_path = os.path.join(os.getenv("TEMP"), "%s.ini" % self.pid)
    with open(config_path, "w") as config:
        cfg = Config("analysis.conf")

        # The first time we come up with a random startup-time.
        if Process.first_process:
            # This adds 1 up to 30 times of 20 minutes to the startup
            # time of the process, therefore bypassing anti-vm checks
            # which check whether the VM has only been up for <10 minutes.
            Process.startup_time = random.randint(1, 30) * 20 * 60 * 1000

        config.write("host-ip={0}\n".format(cfg.ip))
        config.write("host-port={0}\n".format(cfg.port))
        config.write("pipe={0}\n".format(PIPE))
        config.write("results={0}\n".format(PATHS["root"]))
        config.write("analyzer={0}\n".format(os.getcwd()))
        # NOTE(review): writes the bool's repr ("True"/"False") rather than
        # "1"/"0" — confirm the monitor accepts this form.
        config.write("first-process={0}\n".format(Process.first_process))
        config.write("startup-time={0}\n".format(Process.startup_time))

        Process.first_process = False

    if apc or self.suspended:
        # Suspended processes are injected via an APC queued on their main
        # thread; it fires when the thread is resumed.
        log.info("Using QueueUserAPC injection")
        if not self.h_thread:
            log.info(
                "No valid thread handle specified for injecting "
                "process with pid %d, injection aborted.", self.pid)
            return False

        if not KERNEL32.QueueUserAPC(load_library, self.h_thread, arg):
            log.error(
                "QueueUserAPC failed when injecting process with "
                "pid %d (Error: %s)",
                self.pid,
                get_error_string(KERNEL32.GetLastError()))
            return False
        log.info("Successfully injected process with pid %d." % self.pid)
    else:
        # Running processes get a remote thread that calls LoadLibraryA;
        # the event is used to get notified by the injected monitor.
        event_name = "CuckooEvent%d" % self.pid
        self.event_handle = KERNEL32.CreateEventA(None, False, False,
                                                  event_name)
        if not self.event_handle:
            log.warning("Unable to create notify event..")
            return False

        log.info("Using CreateRemoteThread injection.")
        new_thread_id = c_ulong(0)
        thread_handle = KERNEL32.CreateRemoteThread(
            self.h_process, None, 0, load_library, arg, 0,
            byref(new_thread_id))
        if not thread_handle:
            log.error(
                "CreateRemoteThread failed when injecting process "
                "with pid %d (Error: %s)",
                self.pid,
                get_error_string(KERNEL32.GetLastError()))
            KERNEL32.CloseHandle(self.event_handle)
            self.event_handle = None
            return False
        else:
            KERNEL32.CloseHandle(thread_handle)

    return True
# Copyright (C) 2010-2015 Cuckoo Foundation. # This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org # See the file 'docs/LICENSE' for copying permission. import logging import os import socket import time from pathlib import Path from lib.core.config import Config config = Config(cfg="analysis.conf") log = logging.getLogger(__name__) BUFSIZE = 1024 * 1024 def upload_to_host(file_path, dump_path, pids="", ppids="", metadata="", category="", duplicated=False): nc = None if not os.path.exists(file_path): log.warning("File %s doesn't exist anymore", file_path) return file_size = Path(file_path).stat().st_size
def prepare(self):
    """Prepare env for analysis.

    Grants privileges, sets up logging and the command/log pipe servers,
    adjusts the VM clock, and resolves the analysis target path.
    """
    # Get SeDebugPrivilege for the Python process. It will be needed in
    # order to perform the injections.
    grant_privilege("SeDebugPrivilege")
    # Needed to load the kernel driver.
    grant_privilege("SeLoadDriverPrivilege")

    # Initialize logging.
    init_logging()

    # Parse the analysis configuration file generated by the agent.
    self.config = Config(cfg="analysis.conf")

    # Pass the configuration through to the Process class.
    Process.set_config(self.config)

    # Set virtual machine clock.
    set_clock(
        datetime.datetime.strptime(self.config.clock, "%Y%m%dT%H:%M:%S"))

    # Set the default DLL to be used for this analysis.
    self.default_dll = self.config.options.get("dll")

    # If a pipe name has not set, then generate a random one.
    self.config.pipe = self.get_pipe_path(
        self.config.options.get("pipe", random_string(16, 32)))

    # Generate a random name for the logging pipe server.
    self.config.logpipe = self.get_pipe_path(random_string(16, 32))

    # Initialize and start the Command Handler pipe server. This is going
    # to be used for communicating with the monitored processes.
    self.command_pipe = PipeServer(PipeDispatcher, self.config.pipe,
                                   message=True,
                                   dispatcher=CommandPipeHandler(self))
    self.command_pipe.daemon = True
    self.command_pipe.start()

    # Initialize and start the Log Pipe Server - the log pipe server will
    # open up a pipe that monitored processes will use to send logs to
    # before they head off to the host machine.
    destination = self.config.ip, self.config.port
    self.log_pipe_server = PipeServer(PipeForwarder, self.config.logpipe,
                                      destination=destination)
    self.log_pipe_server.daemon = True
    self.log_pipe_server.start()

    # We update the target according to its category. If it's a file, then
    # we store the target path.
    if self.config.category == "file":
        self.target = os.path.join(os.environ["TEMP"],
                                   self.config.file_name)
    # An archive is extracted into %TEMP% and the configured inner
    # filename becomes the target.
    elif self.config.category == "archive":
        zip_path = os.path.join(os.environ["TEMP"], self.config.file_name)
        zipfile.ZipFile(zip_path).extractall(os.environ["TEMP"])
        self.target = os.path.join(os.environ["TEMP"],
                                   self.config.options["filename"])
    # If it's a URL, well.. we store the URL.
    else:
        self.target = self.config.target
def inject(self, dll=None, interest=None, nosleepskip=False):
    """Cuckoo DLL injection via the external loader binary.
    @param dll: Cuckoo DLL name/path (resolved under the "dll" directory).
    @param interest: path to file of interest, handed to cuckoomon config.
    @param nosleepskip: when True, force-disable sleep skipping.
    @return: True on success, False otherwise.
    """
    global LOGSERVER_POOL

    if not self.pid:
        return False

    thread_id = 0
    if self.thread_id:
        thread_id = self.thread_id

    if not self.is_alive():
        log.warning("The process with pid %s is not alive, "
                    "injection aborted", self.pid)
        return False

    is_64bit = self.is_64bit()
    if not dll:
        if is_64bit:
            dll = CUCKOOMON64_NAME
        else:
            dll = CUCKOOMON32_NAME
    else:
        # BUGFIX: the original computed os.path.join("dll", dll) and threw
        # the result away, so explicit DLL names were resolved against the
        # analyzer root instead of the "dll" directory.
        dll = os.path.join("dll", dll)

    dll = os.path.join(os.getcwd(), dll)

    if not dll or not os.path.exists(dll):
        log.warning("No valid DLL specified to be injected in process "
                    "with pid %d, injection aborted.", self.pid)
        return False

    if thread_id or self.suspended:
        log.debug("Using QueueUserAPC injection.")
    else:
        log.debug("Using CreateRemoteThread injection.")

    # Drop the per-process monitor configuration.
    config_path = "C:\\%s.ini" % self.pid
    with open(config_path, "w") as config:
        cfg = Config("analysis.conf")
        cfgoptions = cfg.get_options()

        # start the logserver for this monitored process (one per pid)
        logserver_path = LOGSERVER_PREFIX + str(self.pid)
        if logserver_path not in LOGSERVER_POOL:
            LOGSERVER_POOL[logserver_path] = LogServer(cfg.ip, cfg.port,
                                                       logserver_path)

        Process.process_num += 1
        firstproc = Process.process_num == 1

        config.write("host-ip={0}\n".format(cfg.ip))
        config.write("host-port={0}\n".format(cfg.port))
        config.write("pipe={0}\n".format(PIPE))
        config.write("logserver={0}\n".format(logserver_path))
        config.write("results={0}\n".format(PATHS["root"]))
        config.write("analyzer={0}\n".format(os.getcwd()))
        config.write("first-process={0}\n".format("1" if firstproc else "0"))
        config.write("startup-time={0}\n".format(Process.startup_time))
        config.write("file-of-interest={0}\n".format(interest))
        config.write("shutdown-mutex={0}\n".format(SHUTDOWN_MUTEX))
        config.write("terminate-event={0}{1}\n".format(TERMINATE_EVENT,
                                                       self.pid))

        # BUGFIX: guard against interest=None (the default) — the original
        # called len(interest) unconditionally and raised TypeError whenever
        # "force-sleepskip" was not configured.
        if nosleepskip or ("force-sleepskip" not in cfgoptions and
                           interest and len(interest) > 2 and
                           interest[1] != ':' and interest[0] != '\\' and
                           Process.process_num <= 2):
            config.write("force-sleepskip=0\n")

        if "norefer" not in cfgoptions and "referrer" not in cfgoptions:
            # NOTE(review): interest may still be None here — confirm
            # get_referrer_url tolerates that.
            config.write("referrer={0}\n".format(get_referrer_url(interest)))

        # Options copied verbatim from analysis.conf when present.
        simple_optnames = [
            "force-sleepskip",
            "full-logs",
            "force-flush",
            "no-stealth",
            "buffer-max",
            "large-buffer-max",
            "serial",
            "sysvol_ctimelow",
            "sysvol_ctimehigh",
            "sys32_ctimelow",
            "sys32_ctimehigh",
            "debug",
            "disable_hook_content",
            "hook-type",
            "exclude-apis",
            "exclude-dlls",
            "referrer",
        ]
        for optname in simple_optnames:
            if optname in cfgoptions:
                config.write("{0}={1}\n".format(optname, cfgoptions[optname]))

    # Select the loader binary matching the target's bitness.
    if is_64bit:
        orig_bin_name = LOADER64_NAME
        bit_str = "64-bit"
    else:
        orig_bin_name = LOADER32_NAME
        bit_str = "32-bit"

    bin_name = os.path.join(os.getcwd(), orig_bin_name)
    if not os.path.exists(bin_name):
        log.error("Please place the %s binary from cuckoomon into "
                  "analyzer/windows/bin in order to analyze %s binaries.",
                  os.path.basename(bin_name), bit_str)
        return False

    # The loader performs the actual injection out-of-process.
    ret = subprocess.call([bin_name, "inject",
                           str(self.pid), str(thread_id), dll])
    if ret != 0:
        # Retval 1 means the loader injected into a suspended process.
        if ret == 1:
            log.info("Injected into suspended %s process with pid %d",
                     bit_str, self.pid)
        else:
            log.error("Unable to inject into %s process with pid %d, "
                      "error: %d", bit_str, self.pid, ret)
        return False
    else:
        return True
class Analyzer(object):
    """Cuckoo Linux Analyzer.

    This class handles the initialization and execution of the analysis
    procedure: environment preparation, tracer startup, the timeout loop,
    and result completion.
    """

    def __init__(self):
        # PipeServer instance for communicating with monitored processes.
        self.pserver = None
        # Parsed analysis.conf (set in prepare()).
        self.config = None
        # Path to the sample (category "file") or the URL (category "url").
        self.target = None

    def prepare(self):
        """Prepare env for analysis."""
        # Create the folders used for storing the results.
        create_folders()
        # Initialize logging.
        init_logging()
        # Parse the analysis configuration file generated by the agent.
        self.config = Config(cfg="analysis.conf")
        if self.config.get("clock", None):
            # Set virtual machine clock from the host-provided timestamp.
            clock = datetime.strptime(self.config.clock, "%Y%m%dT%H:%M:%S")
            # Setting date and time via the system "date" command.
            os.system("date -s \"{0}\"".format(
                clock.strftime("%y-%m-%d %H:%M:%S")))
        # Initialize and start the Pipe Server. This is going to be used for
        # communicating with the injected and monitored processes.
        self.pserver = PipeServer()
        self.pserver.start()
        # We update the target according to its category. If it's a file, then
        # we store the path.
        if self.config.category == "file":
            self.target = os.path.join(gettempdir(),
                                       str(self.config.file_name))
        # If it's a URL, well.. we store the URL.
        else:
            self.target = self.config.target

    def complete(self):
        """End analysis."""
        # Dump all the notified files.
        dump_files()
        # We're done!
        log.info("Analysis completed.")

    def run(self):
        """Run analysis.
        @return: operation status (True on completion).
        """
        self.prepare()

        log.debug("Starting analyzer from: %s", os.getcwd())
        log.debug("Storing results at: %s", PATHS["root"])
        log.debug("Target is: %s", self.target)

        # If the analysis target is a file, we choose the interpreter
        # according to the file extension; otherwise execute it directly.
        if self.config.category == "file":
            if ".bash" in self.config.file_name:
                arguments = ["/bin/bash", self.target]
            elif ".sh" in self.config.file_name:
                arguments = ["/bin/sh", self.target]
            elif ".pl" in self.config.file_name:
                arguments = ["/bin/perl", self.target]
            else:
                # No known interpreter: run the target itself (placeholder
                # second argv slot), after making it executable.
                arguments = [self.target, '']
                os.system("chmod +x " + str(self.target))
            if self.config.options:
                # NOTE(review): pops when fewer than 2 args — presumably
                # meant to drop the '' placeholder before appending options;
                # behavior kept as-is.
                if len(arguments) < 2:
                    arguments.pop()
                arguments.append(self.config.options)
        else:
            raise CuckooError("No browser support yet")

        # Start file system tracer thread.
        fstrace = FilesystemTracer()
        fstrace.start()

        # Start system call tracer thread.
        proctrace = SyscallTracer(arguments)
        proctrace.start()

        if self.config.enforce_timeout:
            log.info("Enabled timeout enforce, running for the full timeout.")

        time_counter = 0
        while True:
            time_counter += 1
            if time_counter == int(self.config.timeout):
                log.info("Analysis timeout hit, terminating analysis.")
                break
            if proctrace.is_running() == False:
                log.info("No remaining processes. Waiting a few seconds "
                         "before shutdown.")
                sleep(10)
                break
            # For timeout calculation
            sleep(1)

        if self.config.terminate_processes:
            # Try to terminate remaining active processes. We do this to make
            # sure that we clean up remaining open handles (sockets, files,
            # etc.).
            log.info("Terminating remaining processes before shutdown.")

        # Stop both tracers before dumping results.
        fstrace.stop()
        proctrace.stop()

        # Let's invoke the completion procedure.
        self.complete()

        return True
class STAP(Auxiliary):
    """System-wide syscall trace with stap, with strace fallback.

    Locates a pre-built SystemTap kernel module, runs it via staprun for the
    whole analysis, and uploads the resulting log on stop(). When no module
    is found it falls back to a system-wide strace.
    """
    priority = -10  # low prio to wrap tightly around the analysis

    def __init__(self):
        self.config = Config(cfg="analysis.conf")
        # Initialize so stop() can't hit AttributeError when start() failed
        # before spawning a process.
        self.proc = None
        self.fallback_strace = False

    def start(self):
        """Start staprun with the located module; fall back to strace."""
        # Helper locating a compiled stap module ("stap_*.ko") in a dir.
        def has_stap(p):
            only_stap = [fn for fn in os.listdir(p)
                         if fn.startswith("stap_") and fn.endswith(".ko")]
            if only_stap:
                return os.path.join(p, only_stap[0])
            return False

        # Highest priority: if the vm config specifies the path.
        # BUGFIX: the original checked "analyzer_stap_path" but then
        # assigned "analyzer_lkm_path", yielding a None/wrong module path.
        path_cfg = self.config.get("analyzer_stap_path", None)
        if path_cfg and os.path.exists(path_cfg):
            path = path_cfg
        # Next: a module uploaded with the analyzer for our platform.
        elif os.path.exists(platform.machine()) and has_stap(platform.machine()):
            path = has_stap(platform.machine())
        # Next: default path inside the machine.
        elif os.path.exists("/root/.cuckoo") and has_stap("/root/.cuckoo"):
            path = has_stap("/root/.cuckoo")
        # Next: generic module uploaded with the analyzer (single arch setup maybe?).
        elif has_stap("."):
            path = has_stap(".")
        else:
            # We can't find the stap module, fallback to strace.
            log.warning("Could not find STAP LKM, falling back to strace.")
            return self.start_strace()

        stap_start = time.time()
        # -x excludes our own pid from the trace.
        self.proc = subprocess.Popen(
            ["staprun", "-v", "-x", str(os.getpid()), "-o", "stap.log", path],
            stderr=subprocess.PIPE)

        # Give the module time to load and initialize before the analysis
        # starts; staprun prints its progress to stderr but parsing it
        # proved unreliable here.
        time.sleep(10)

        stap_stop = time.time()
        log.info("STAP aux module startup took %.2f seconds" %
                 (stap_stop - stap_start))
        return True

    def start_strace(self):
        """System-wide strace fallback; logs land under ./strace/."""
        try:
            os.mkdir("strace")
        except OSError:
            pass  # don't worry, it exists
        stderrfd = open("strace/strace.stderr", "wb")
        self.proc = subprocess.Popen(
            ["strace", "-ff", "-o", "strace/straced", "-p", str(os.getpid())],
            stderr=stderrfd)
        self.fallback_strace = True
        return True

    def get_pids(self):
        """Return the strace pid when in fallback mode (so it's excluded)."""
        if self.fallback_strace:
            return [
                self.proc.pid,
            ]
        return []

    @staticmethod
    def _upload_file(local, remote):
        """Stream a local log file to the resultserver as `remote`."""
        if not os.path.exists(local):
            return
        nf = NetlogFile(remote)
        with open(local, "rb") as fd:
            for chunk in fd:
                nf.sock.sendall(chunk)  # dirty direct send, no reconnecting
        nf.close()

    def stop(self):
        """Kill the tracer and upload stap/strace logs."""
        try:
            if self.proc is not None:
                self.proc.kill()
        except Exception as e:
            log.warning("Exception killing stap: %s", e)

        self._upload_file("stap.log", "logs/all.stap")

        # In case we fell back to strace, upload every per-process log.
        if os.path.exists("strace"):
            for fn in os.listdir("strace"):
                # We don't need the logs from the analyzer python process itself.
                if fn == "straced.%u" % os.getpid():
                    continue
                self._upload_file(os.path.join("strace", fn), "logs/%s" % fn)
def __init__(self): self.config = Config(cfg="analysis.conf") self.fallback_strace = False
def __init__(self, proto=""): config = Config(cfg="analysis.conf") self.hostip, self.hostport = config.ip, config.port self.sock, self.file = None, None self.proto = proto
class Analyzer:
    """Cuckoo Windows Analyzer.

    This class handles the initialization and execution of the analysis
    procedure, including handling of the pipe server, the auxiliary modules
    and the analysis packages.
    """
    # Number of concurrent PipeServer threads serving monitor requests.
    PIPE_SERVER_COUNT = 4

    def __init__(self):
        self.pipes = [None]*self.PIPE_SERVER_COUNT
        self.config = None
        self.target = None

    def pids_from_process_name_list(self, namelist):
        """Return PIDs of processes whose (lowercased) image name is in
        namelist, enumerated via NtQuerySystemInformation.

        @param namelist: list of lowercase image names, e.g. ["services.exe"].
        @return: list of matching PIDs (empty on query failure).
        """
        proclist = []
        pidlist = []
        buf = create_string_buffer(1024 * 1024)
        p = cast(buf, c_void_p)
        retlen = c_ulong(0)

        # 5 = SystemProcessInformation.
        retval = NTDLL.NtQuerySystemInformation(5, buf, 1024 * 1024,
                                                byref(retlen))
        if retval:
            return []
        proc = cast(p, POINTER(SYSTEM_PROCESS_INFORMATION)).contents
        # Walk the linked entries; NextEntryOffset == 0 terminates the list.
        while proc.NextEntryOffset:
            p.value += proc.NextEntryOffset
            proc = cast(p, POINTER(SYSTEM_PROCESS_INFORMATION)).contents
            # Length is in bytes; /2 converts to UTF-16 character count
            # (Python 2 integer division).
            proclist.append((proc.ImageName.Buffer[:proc.ImageName.Length/2],
                             proc.UniqueProcessId))

        for proc in proclist:
            lowerproc = proc[0].lower()
            for name in namelist:
                if lowerproc == name:
                    pidlist.append(proc[1])
                    break
        return pidlist

    def prepare(self):
        """Prepare env for analysis."""
        global DEFAULT_DLL
        global SERVICES_PID
        global HIDE_PIDS

        # Get SeDebugPrivilege for the Python process. It will be needed in
        # order to perform the injections.
        grant_debug_privilege()

        # Randomize cuckoomon DLL and loader executable names (anti-detect).
        copy("dll\\cuckoomon.dll", CUCKOOMON32_NAME)
        copy("dll\\cuckoomon_x64.dll", CUCKOOMON64_NAME)
        copy("bin\\loader.exe", LOADER32_NAME)
        copy("bin\\loader_x64.exe", LOADER64_NAME)

        # Create the folders used for storing the results.
        create_folders()

        # Keep the analyzer's own paths out of the results.
        add_protected_path(os.getcwd())
        add_protected_path(PATHS["root"])

        # Initialize logging.
        init_logging()

        # Parse the analysis configuration file generated by the agent.
        self.config = Config(cfg="analysis.conf")

        # Set virtual machine clock from the host-provided timestamp.
        clock = datetime.strptime(self.config.clock, "%Y%m%dT%H:%M:%S")
        systime = SYSTEMTIME()
        systime.wYear = clock.year
        systime.wMonth = clock.month
        systime.wDay = clock.day
        systime.wHour = clock.hour
        systime.wMinute = clock.minute
        systime.wSecond = clock.second
        systime.wMilliseconds = 0
        KERNEL32.SetSystemTime(byref(systime))

        thedate = clock.strftime("%m-%d-%y")
        thetime = clock.strftime("%H:%M:%S")
        log.info("Date set to: {0}, time set to: {1}".format(thedate, thetime))

        # Set the default DLL to be used by the PipeHandler.
        DEFAULT_DLL = self.config.get_options().get("dll")

        # Get PID for services.exe for monitoring services.
        svcpid = self.pids_from_process_name_list(["services.exe"])
        if svcpid:
            SERVICES_PID = svcpid[0]

        # Processes whose PIDs are hidden from the monitored sample
        # (VM tooling, analysis tools, the analyzer itself).
        protected_procname_list = [
            "vmwareuser.exe",
            "vmwareservice.exe",
            "vboxservice.exe",
            "vboxtray.exe",
            "sandboxiedcomlaunch.exe",
            "sandboxierpcss.exe",
            "procmon.exe",
            "regmon.exe",
            "filemon.exe",
            "wireshark.exe",
            "netmon.exe",
            "prl_tools_service.exe",
            "prl_tools.exe",
            "prl_cc.exe",
            "sharedintapp.exe",
            "vmtoolsd.exe",
            "vmsrvc.exe",
            "python.exe",
            "perl.exe",
        ]
        HIDE_PIDS = set(self.pids_from_process_name_list(protected_procname_list))

        # Initialize and start the Pipe Servers. This is going to be used for
        # communicating with the injected and monitored processes.
        for x in xrange(self.PIPE_SERVER_COUNT):
            self.pipes[x] = PipeServer(self.config)
            self.pipes[x].daemon = True
            self.pipes[x].start()

        # We update the target according to its category. If it's a file, then
        # we store the path.
        if self.config.category == "file":
            self.target = os.path.join(os.environ["TEMP"] + os.sep,
                                       str(self.config.file_name))
        # If it's a URL, well.. we store the URL.
        else:
            self.target = self.config.target

    def complete(self):
        """End analysis."""
        # Stop the Pipe Servers.
        for x in xrange(self.PIPE_SERVER_COUNT):
            self.pipes[x].stop()

        # Dump all the notified files.
        dump_files()

        # Hell yeah.
        log.info("Analysis completed.")

    def get_completion_key(self):
        """Return the completion key from the config, or "" if unset."""
        if hasattr(self.config, "completion_key"):
            return self.config.completion_key
        else:
            return ""

    def run(self):
        """Run analysis.
        @return: operation status (True on completion).
        """
        log.debug("Starting analyzer from: %s", os.getcwd())
        log.debug("Storing results at: %s", PATHS["root"])
        log.debug("Pipe server name: %s", PIPE)

        # If no analysis package was specified at submission, we try to select
        # one automatically.
        if not self.config.package:
            log.debug("No analysis package specified, trying to detect "
                      "it automagically.")
            # If the analysis target is a file, we choose the package according
            # to the file format.
            if self.config.category == "file":
                package = choose_package(self.config.file_type,
                                         self.config.file_name,
                                         self.config.exports)
            # If it's an URL, we'll just use the default Internet Explorer
            # package.
            else:
                package = "ie"

            # If we weren't able to automatically determine the proper package,
            # we need to abort the analysis.
            if not package:
                raise CuckooError("No valid package available for file "
                                  "type: {0}".format(self.config.file_type))

            log.info("Automatically selected analysis package \"%s\"", package)
        # Otherwise just select the specified package.
        else:
            package = self.config.package

        # Generate the package path.
        package_name = "modules.packages.%s" % package

        # Try to import the analysis package.
        try:
            __import__(package_name, globals(), locals(), ["dummy"], -1)
        # If it fails, we need to abort the analysis.
        except ImportError:
            raise CuckooError("Unable to import package \"{0}\", does "
                              "not exist.".format(package_name))

        # Initialize the package parent abstract.
        Package()

        # Enumerate the abstract subclasses.
        try:
            package_class = Package.__subclasses__()[0]
        except IndexError as e:
            raise CuckooError("Unable to select package class "
                              "(package={0}): {1}".format(package_name, e))

        # Initialize the analysis package.
        pack = package_class(self.config.get_options(), self.config)

        # Initialize Auxiliary modules.
        Auxiliary()
        prefix = auxiliary.__name__ + "."
        for loader, name, ispkg in pkgutil.iter_modules(auxiliary.__path__,
                                                        prefix):
            if ispkg:
                continue

            # Import the auxiliary module.
            try:
                __import__(name, globals(), locals(), ["dummy"], -1)
            except ImportError as e:
                log.warning("Unable to import the auxiliary module "
                            "\"%s\": %s", name, e)

        # Walk through the available auxiliary modules.
        aux_avail = []
        for module in Auxiliary.__subclasses__():
            # Try to start the auxiliary module.
            try:
                aux = module(self.config.get_options(), self.config)
                aux_avail.append(aux)
                aux.start()
            except (NotImplementedError, AttributeError):
                log.warning("Auxiliary module %s was not implemented",
                            module.__name__)
            except Exception as e:
                log.warning("Cannot execute auxiliary module %s: %s",
                            module.__name__, e)
            else:
                log.debug("Started auxiliary module %s", module.__name__)
                AUX_ENABLED.append(aux)

        # Start analysis package. If for any reason, the execution of the
        # analysis package fails, we have to abort the analysis.
        try:
            pids = pack.start(self.target)
        except NotImplementedError:
            raise CuckooError("The package \"{0}\" doesn't contain a run "
                              "function.".format(package_name))
        except CuckooPackageError as e:
            raise CuckooError("The package \"{0}\" start function raised an "
                              "error: {1}".format(package_name, e))
        except Exception as e:
            raise CuckooError("The package \"{0}\" start function encountered "
                              "an unhandled exception: "
                              "{1}".format(package_name, e))

        # If the analysis package returned a list of process IDs, we add them
        # to the list of monitored processes and enable the process monitor.
        if pids:
            add_pids(pids)
            pid_check = True
        # If the package didn't return any process ID (for example in the case
        # where the package isn't enabling any behavioral analysis), we don't
        # enable the process monitor.
        else:
            log.info("No process IDs returned by the package, running "
                     "for the full timeout.")
            pid_check = False

        # Check in the options if the user toggled the timeout enforce. If so,
        # we need to override pid_check and disable process monitor.
        if self.config.enforce_timeout:
            log.info("Enabled timeout enforce, running for the full timeout.")
            pid_check = False

        time_counter = 0
        # Any truthy "kernel_analysis" option means userland process checks
        # are skipped (monitoring happens in the kernel driver instead).
        kernel_analysis = self.config.get_options().get("kernel_analysis",
                                                        False)
        if kernel_analysis != False:
            kernel_analysis = True

        emptytime = None

        while True:
            time_counter += 1
            if time_counter == int(self.config.timeout):
                log.info("Analysis timeout hit, terminating analysis.")
                break

            # If the process lock is locked, it means that something is
            # operating on the list of monitored processes. Therefore we
            # cannot proceed with the checks until the lock is released.
            if PROCESS_LOCK.locked():
                KERNEL32.Sleep(1000)
                continue

            try:
                # If the process monitor is enabled we start checking whether
                # the monitored processes are still alive.
                if pid_check:
                    if not kernel_analysis:
                        for pid in PROCESS_LIST:
                            if not Process(pid=pid).is_alive():
                                log.info("Process with pid %s has terminated",
                                         pid)
                                PROCESS_LIST.remove(pid)

                    # If none of the monitored processes are still alive, we
                    # can terminate the analysis — but only after a 15s grace
                    # period since the last injection and a 5s empty-list
                    # debounce, to catch late-spawning children.
                    if not PROCESS_LIST and (not LASTINJECT_TIME or (datetime.now() >= (LASTINJECT_TIME + timedelta(seconds=15)))):
                        if emptytime and (datetime.now() >= (emptytime + timedelta(seconds=5))):
                            log.info("Process list is empty, "
                                     "terminating analysis.")
                            break
                        elif not emptytime:
                            emptytime = datetime.now()
                    else:
                        emptytime = None

                    # Update the list of monitored processes available to the
                    # analysis package. It could be used for internal
                    # operations within the module.
                    pack.set_pids(PROCESS_LIST)

                try:
                    # The analysis packages are provided with a function that
                    # is executed at every loop's iteration. If such function
                    # returns False, it means that it requested the analysis
                    # to be terminate.
                    if not pack.check():
                        log.info("The analysis package requested the "
                                 "termination of the analysis.")
                        break
                # If the check() function of the package raised some exception
                # we don't care, we can still proceed with the analysis but we
                # throw a warning.
                except Exception as e:
                    log.warning("The package \"%s\" check function raised "
                                "an exception: %s", package_name, e)
            finally:
                # Zzz.
                KERNEL32.Sleep(1000)

        # Create the shutdown mutex.
        KERNEL32.CreateMutexA(None, False, SHUTDOWN_MUTEX)
        log.info("Created shutdown mutex.")
        # Since the various processes poll for the existence of the mutex,
        # sleep for a second to ensure they see it before they're terminated.
        KERNEL32.Sleep(1000)

        log.info("Shutting down package.")
        try:
            # Before shutting down the analysis, the package can perform some
            # final operations through the finish() function.
            pack.finish()
        except Exception as e:
            log.warning("The package \"%s\" finish function raised an "
                        "exception: %s", package_name, e)

        log.info("Stopping auxiliary modules.")
        # Terminate the Auxiliary modules.
        for aux in AUX_ENABLED:
            try:
                aux.stop()
            except (NotImplementedError, AttributeError):
                continue
            except Exception as e:
                log.warning("Cannot terminate auxiliary module %s: %s",
                            aux.__class__.__name__, e)

        # Tell all processes to flush their logs regardless of
        # terminate_processes setting.
        if not kernel_analysis:
            for pid in PROCESS_LIST:
                proc = Process(pid=pid)
                if proc.is_alive():
                    try:
                        proc.set_terminate_event()
                    except:
                        continue

        if self.config.terminate_processes:
            # Try to terminate remaining active processes. We do this to make
            # sure that we clean up remaining open handles (sockets, files,
            # etc.).
            log.info("Terminating remaining processes before shutdown.")
            if not kernel_analysis:
                for pid in PROCESS_LIST:
                    proc = Process(pid=pid)
                    if proc.is_alive():
                        try:
                            if not proc.is_critical():
                                proc.terminate()
                            else:
                                log.info("Not terminating critical process "
                                         "with pid %d.", proc.pid)
                        except:
                            continue

        log.info("Finishing auxiliary modules.")
        # Run the finish callback of every available Auxiliary module.
        for aux in aux_avail:
            try:
                aux.finish()
            except (NotImplementedError, AttributeError):
                continue
            except Exception as e:
                log.warning("Exception running finish callback of auxiliary "
                            "module %s: %s", aux.__class__.__name__, e)

        # Let's invoke the completion procedure.
        log.info("Shutting down pipe server and dumping dropped files.")
        self.complete()

        return True
class Analyzer(object):
    """Cuckoo Linux Analyzer.

    This class handles the initialization and execution of the analysis
    procedure: environment preparation, tracer startup, the timeout loop,
    and result completion.
    """

    def __init__(self):
        # PipeServer instance for communicating with monitored processes.
        self.pserver = None
        # Parsed analysis.conf (set in prepare()).
        self.config = None
        # Path to the sample (category "file") or the URL (category "url").
        self.target = None

    def prepare(self):
        """Prepare env for analysis."""
        # Create the folders used for storing the results.
        create_folders()
        # Initialize logging.
        init_logging()
        # Parse the analysis configuration file generated by the agent.
        self.config = Config(cfg="analysis.conf")
        if self.config.get("clock", None):
            # Set virtual machine clock from the host-provided timestamp.
            clock = datetime.strptime(self.config.clock, "%Y%m%dT%H:%M:%S")
            # Setting date and time via the system "date" command.
            os.system("date -s \"{0}\"".format(
                clock.strftime("%y-%m-%d %H:%M:%S")))
        # Initialize and start the Pipe Server. This is going to be used for
        # communicating with the injected and monitored processes.
        self.pserver = PipeServer()
        self.pserver.start()
        # We update the target according to its category. If it's a file, then
        # we store the path.
        if self.config.category == "file":
            self.target = os.path.join(gettempdir(),
                                       str(self.config.file_name))
        # If it's a URL, well.. we store the URL.
        else:
            self.target = self.config.target

    def complete(self):
        """End analysis."""
        # Dump all the notified files.
        dump_files()
        # We're done!
        log.info("Analysis completed.")

    def run(self):
        """Run analysis.
        @return: operation status (True on completion).
        """
        self.prepare()

        log.debug("Starting analyzer from: %s", os.getcwd())
        log.debug("Storing results at: %s", PATHS["root"])
        log.debug("Target is: %s", self.target)

        # If the analysis target is a file, we choose the interpreter
        # according to the file extension; otherwise execute it directly.
        if self.config.category == "file":
            if ".bash" in self.config.file_name:
                arguments = ["/bin/bash", self.target]
            elif ".sh" in self.config.file_name:
                arguments = ["/bin/sh", self.target]
            elif ".pl" in self.config.file_name:
                arguments = ["/bin/perl", self.target]
            else:
                # No known interpreter: run the target itself (placeholder
                # second argv slot), after making it executable.
                arguments = [self.target, '']
                os.system("chmod +x " + str(self.target))
            if self.config.options:
                # NOTE(review): pops when fewer than 2 args — presumably
                # meant to drop the '' placeholder before appending options;
                # behavior kept as-is.
                if len(arguments) < 2:
                    arguments.pop()
                arguments.append(self.config.options)
        else:
            raise CuckooError("No browser support yet")

        # Start file system tracer thread.
        fstrace = FilesystemTracer()
        fstrace.start()

        # Start system call tracer thread.
        proctrace = SyscallTracer(arguments)
        proctrace.start()

        if self.config.enforce_timeout:
            log.info("Enabled timeout enforce, running for the full timeout.")

        time_counter = 0
        while True:
            time_counter += 1
            if time_counter == int(self.config.timeout):
                log.info("Analysis timeout hit, terminating analysis.")
                break
            if proctrace.is_running() == False:
                log.info(
                    "No remaining processes. Waiting a few seconds before shutdown."
                )
                sleep(10)
                break
            # For timeout calculation
            sleep(1)

        if self.config.terminate_processes:
            # Try to terminate remaining active processes. We do this to make
            # sure that we clean up remaining open handles (sockets, files,
            # etc.).
            log.info("Terminating remaining processes before shutdown.")

        # Stop both tracers before dumping results.
        fstrace.stop()
        proctrace.stop()

        # Let's invoke the completion procedure.
        self.complete()

        return True
def prepare(self):
    """Prepare env for analysis."""
    global DEFAULT_DLL
    global SERVICES_PID
    global HIDE_PIDS

    # Get SeDebugPrivilege for the Python process. It will be needed in
    # order to perform the injections.
    grant_debug_privilege()

    # Randomize cuckoomon DLL and loader executable names (anti-detect).
    copy("dll\\cuckoomon.dll", CUCKOOMON32_NAME)
    copy("dll\\cuckoomon_x64.dll", CUCKOOMON64_NAME)
    copy("bin\\loader.exe", LOADER32_NAME)
    copy("bin\\loader_x64.exe", LOADER64_NAME)

    # Create the folders used for storing the results.
    create_folders()

    # Keep the analyzer's own paths out of the results.
    add_protected_path(os.getcwd())
    add_protected_path(PATHS["root"])

    # Initialize logging.
    init_logging()

    # Parse the analysis configuration file generated by the agent.
    self.config = Config(cfg="analysis.conf")

    # Set virtual machine clock from the host-provided timestamp.
    clock = datetime.strptime(self.config.clock, "%Y%m%dT%H:%M:%S")
    systime = SYSTEMTIME()
    systime.wYear = clock.year
    systime.wMonth = clock.month
    systime.wDay = clock.day
    systime.wHour = clock.hour
    systime.wMinute = clock.minute
    systime.wSecond = clock.second
    systime.wMilliseconds = 0
    KERNEL32.SetSystemTime(byref(systime))

    thedate = clock.strftime("%m-%d-%y")
    thetime = clock.strftime("%H:%M:%S")
    log.info("Date set to: {0}, time set to: {1}".format(thedate, thetime))

    # Set the default DLL to be used by the PipeHandler.
    DEFAULT_DLL = self.config.get_options().get("dll")

    # Get PID for services.exe for monitoring services.
    svcpid = self.pids_from_process_name_list(["services.exe"])
    if svcpid:
        SERVICES_PID = svcpid[0]

    # Processes whose PIDs are hidden from the monitored sample
    # (VM tooling, analysis tools, the analyzer itself).
    protected_procname_list = [
        "vmwareuser.exe",
        "vmwareservice.exe",
        "vboxservice.exe",
        "vboxtray.exe",
        "sandboxiedcomlaunch.exe",
        "sandboxierpcss.exe",
        "procmon.exe",
        "regmon.exe",
        "filemon.exe",
        "wireshark.exe",
        "netmon.exe",
        "prl_tools_service.exe",
        "prl_tools.exe",
        "prl_cc.exe",
        "sharedintapp.exe",
        "vmtoolsd.exe",
        "vmsrvc.exe",
        "python.exe",
        "perl.exe",
    ]
    HIDE_PIDS = set(self.pids_from_process_name_list(protected_procname_list))

    # Initialize and start the Pipe Servers. This is going to be used for
    # communicating with the injected and monitored processes.
    for x in xrange(self.PIPE_SERVER_COUNT):
        self.pipes[x] = PipeServer(self.config)
        self.pipes[x].daemon = True
        self.pipes[x].start()

    # We update the target according to its category. If it's a file, then
    # we store the path.
    if self.config.category == "file":
        self.target = os.path.join(os.environ["TEMP"] + os.sep,
                                   str(self.config.file_name))
    # If it's a URL, well.. we store the URL.
    else:
        self.target = self.config.target
def debug_inject(self, dll=None, interest=None, childprocess=False,
                 nosleepskip=False):
    """CAPE DLL debugger injection.
    @param dll: CAPE DLL debugger path (relative name; resolved under the
        "dll" directory and the current working directory).
    @param interest: path to file of interest, handed to cuckoomon config.
    @param childprocess: True when injecting into a spawned child (uses the
        loader's "debug" mode instead of "debug_load").
    @param nosleepskip: when True, force-disable sleep skipping.
    @return: True on successful injection, False otherwise.
    """
    global LOGSERVER_POOL

    if not self.pid:
        log.warning("No valid pid specified, injection aborted")
        return False

    thread_id = 0
    if self.thread_id:
        thread_id = self.thread_id

    if not self.is_alive():
        log.warning("The process with pid %s is not alive, "
                    "injection aborted", self.pid)
        return False

    is_64bit = self.is_64bit()
    if not dll:
        log.debug("No debugger DLL has been specified for injection")
        if is_64bit:
            dll = CUCKOOMON64_NAME
        else:
            dll = CUCKOOMON32_NAME
    else:
        dll = os.path.join("dll", dll)

    log.info("DLL to inject is %s", dll)

    dll = os.path.join(os.getcwd(), dll)

    if not dll or not os.path.exists(dll):
        log.warning("No valid DLL specified to be injected in process "
                    "with pid %d, injection aborted.", self.pid)
        return False

    config_path = "C:\\%s.ini" % self.pid
    with open(config_path, "w") as config:
        cfg = Config("analysis.conf")
        cfgoptions = cfg.get_options()

        # Start the logserver for this monitored process (one per pid).
        logserver_path = LOGSERVER_PREFIX + str(self.pid)
        if logserver_path not in LOGSERVER_POOL:
            LOGSERVER_POOL[logserver_path] = LogServer(cfg.ip, cfg.port,
                                                       logserver_path)

        Process.process_num += 1
        firstproc = Process.process_num == 1

        config.write("host-ip={0}\n".format(cfg.ip))
        config.write("host-port={0}\n".format(cfg.port))
        config.write("pipe={0}\n".format(PIPE))
        config.write("logserver={0}\n".format(logserver_path))
        config.write("results={0}\n".format(PATHS["root"]))
        config.write("analyzer={0}\n".format(os.getcwd()))
        config.write("first-process={0}\n".format("1" if firstproc else "0"))
        config.write("startup-time={0}\n".format(Process.startup_time))
        config.write("file-of-interest={0}\n".format(interest))
        config.write("shutdown-mutex={0}\n".format(SHUTDOWN_MUTEX))
        config.write("terminate-event={0}{1}\n".format(TERMINATE_EVENT,
                                                       self.pid))

        # force-sleepskip is special-cased: an explicit nosleepskip request
        # overrides any configured value.
        if nosleepskip:
            config.write("force-sleepskip=0\n")
        elif "force-sleepskip" in cfgoptions:
            config.write("force-sleepskip={0}\n".format(
                cfgoptions["force-sleepskip"]))

        # Options copied verbatim from analysis.conf when present
        # (same pattern as inject(); order preserved from the original
        # per-option if-chain).
        simple_optnames = [
            "full-logs",
            "no-stealth",
            "buffer-max",
            "large-buffer-max",
            "serial",
            "sysvol_ctimelow",
            "sysvol_ctimehigh",
            "sys32_ctimelow",
            "sys32_ctimehigh",
        ]
        for optname in simple_optnames:
            if optname in cfgoptions:
                config.write("{0}={1}\n".format(optname, cfgoptions[optname]))

        if "norefer" not in cfgoptions:
            config.write("referrer={0}\n".format(get_referrer_url(interest)))

        if firstproc:
            Process.first_process = False

        if "breakpoint" in cfgoptions:
            config.write("breakpoint={0}\n".format(cfgoptions["breakpoint"]))

    # Pick the loader binary matching the target's bitness.
    if is_64bit:
        orig_bin_name = LOADER64_NAME
        bit_str = "64-bit"
    else:
        orig_bin_name = LOADER32_NAME
        bit_str = "32-bit"

    bin_name = os.path.join(os.getcwd(), orig_bin_name)

    if not os.path.exists(bin_name):
        log.error("Please place the %s binary from cuckoomon into "
                  "analyzer/windows/bin in order to debug %s binaries.",
                  os.path.basename(bin_name), bit_str)
        return False

    # "debug_load" for the initial process, "debug" for children.
    loader_mode = "debug" if childprocess else "debug_load"
    ret = subprocess.call([bin_name, loader_mode, str(self.pid),
                           str(thread_id), str(self.h_process),
                           str(self.h_thread), dll])
    if ret != 0:
        # Loader exit code 1 signals a successful suspended-process inject.
        if ret == 1:
            log.info("Injected debugger DLL into suspended %s process "
                     "with pid %d", bit_str, self.pid)
        else:
            log.error("Unable to inject debugger DLL into %s process "
                      "with pid %d, error: %d", bit_str, self.pid, ret)
            return False
    return True
class Analyzer:
    """Cuckoo Windows Analyzer.

    This class handles the initialization and execution of the analysis
    procedure, including handling of the pipe server, the auxiliary modules
    and the analysis packages.
    """
    # Number of PipeServer instances spawned to serve cuckoomon requests.
    PIPE_SERVER_COUNT = 4

    def __init__(self):
        self.pipes = [None] * self.PIPE_SERVER_COUNT
        self.config = None
        self.target = None

    def prepare(self):
        """Prepare env for analysis.

        Grants the debug privilege, creates the result folders, initializes
        logging, parses analysis.conf, aligns the VM clock, resolves the
        services.exe PID and spawns the pipe servers.
        """
        global DEFAULT_DLL
        global SERVICES_PID

        # Get SeDebugPrivilege for the Python process. It will be needed in
        # order to perform the injections.
        grant_debug_privilege()

        # Create the folders used for storing the results.
        create_folders()

        # Protect our own working directories from being touched/reported.
        add_protected_path(os.getcwd())
        add_protected_path(PATHS["root"])

        # Initialize logging.
        init_logging()

        # Parse the analysis configuration file generated by the agent.
        self.config = Config(cfg="analysis.conf")

        # Set virtual machine clock.
        clock = datetime.strptime(self.config.clock, "%Y%m%dT%H:%M:%S")
        # Setting date and time.
        # NOTE: Windows system has only localized commands with date format
        # following localization settings, so these commands for english date
        # format cannot work in other localizations.
        # In addition DATE and TIME commands are blocking if an incorrect
        # syntax is provided, so an echo trick is used to bypass the input
        # request and not block analysis.
        os.system("echo:|date {0}".format(clock.strftime("%m-%d-%y")))
        os.system("echo:|time {0}".format(clock.strftime("%H:%M:%S")))

        # Set the default DLL to be used by the PipeHandler.
        DEFAULT_DLL = self.config.get_options().get("dll")

        # get PID for services.exe for monitoring services
        # tasklist sometimes fails under high-load (http://support.microsoft.com/kb/2732840)
        # We can retry a few times to hopefully work around failures
        retries = 4
        while retries > 0:
            stdin, stdout, stderr = os.popen3("tasklist /V /FI \"IMAGENAME eq services.exe\"")
            s = stdout.read()
            err = stderr.read()

            if 'services.exe' not in s:
                log.warning('tasklist failed with error "%s"' % (err))
            else:
                # it worked
                break
            retries -= 1

        if 'services.exe' not in s:
            # All attempts failed
            log.error('Unable to retrieve services.exe PID')
            SERVICES_PID = None
        else:
            # The PID is the first whitespace-delimited token following the
            # "services.exe" image name column of the tasklist output.
            servidx = s.index("services.exe")
            servstr = s[servidx + 12:].strip()
            SERVICES_PID = int(servstr[:servstr.index(' ')], 10)
            log.debug('services.exe PID is %s' % (SERVICES_PID))

        # Initialize and start the Pipe Servers. This is going to be used for
        # communicating with the injected and monitored processes.
        for x in xrange(self.PIPE_SERVER_COUNT):
            self.pipes[x] = PipeServer()
            self.pipes[x].daemon = True
            self.pipes[x].start()

        # We update the target according to its category. If it's a file, then
        # we store the path.
        if self.config.category == "file":
            self.target = os.path.join(os.environ["TEMP"] + os.sep,
                                       str(self.config.file_name))
        # If it's a URL, well.. we store the URL.
        else:
            self.target = self.config.target

    def complete(self):
        """End analysis."""
        # Stop the Pipe Servers.
        for x in xrange(self.PIPE_SERVER_COUNT):
            self.pipes[x].stop()

        # Dump all the notified files.
        dump_files()

        # Hell yeah.
        log.info("Analysis completed.")

    def run(self):
        """Run analysis.

        Selects and starts the analysis package, starts the auxiliary
        modules, watches the monitored processes until completion or
        timeout, then shuts everything down.
        @return: operation status.
        """
        self.prepare()

        log.debug("Starting analyzer from: %s", os.getcwd())
        log.debug("Storing results at: %s", PATHS["root"])
        log.debug("Pipe server name: %s", PIPE)

        # If no analysis package was specified at submission, we try to select
        # one automatically.
        if not self.config.package:
            log.debug("No analysis package specified, trying to detect "
                      "it automagically.")
            # If the analysis target is a file, we choose the package according
            # to the file format.
            if self.config.category == "file":
                package = choose_package(self.config.file_type,
                                         self.config.file_name,
                                         self.config.exports)
            # If it's an URL, we'll just use the default Internet Explorer
            # package.
            else:
                package = "ie"

            # If we weren't able to automatically determine the proper package,
            # we need to abort the analysis.
            if not package:
                raise CuckooError("No valid package available for file "
                                  "type: {0}".format(self.config.file_type))

            log.info("Automatically selected analysis package \"%s\"", package)
        # Otherwise just select the specified package.
        else:
            package = self.config.package

        # Generate the package path.
        package_name = "modules.packages.%s" % package

        # Try to import the analysis package.
        try:
            __import__(package_name, globals(), locals(), ["dummy"], -1)
        # If it fails, we need to abort the analysis.
        except ImportError:
            raise CuckooError("Unable to import package \"{0}\", does "
                              "not exist.".format(package_name))

        # Initialize the package parent abstract.
        Package()

        # Enumerate the abstract subclasses.
        try:
            package_class = Package.__subclasses__()[0]
        except IndexError as e:
            raise CuckooError("Unable to select package class "
                              "(package={0}): {1}".format(package_name, e))

        # Initialize the analysis package.
        pack = package_class(self.config.get_options(), self.config)

        # Initialize Auxiliary modules
        Auxiliary()
        prefix = auxiliary.__name__ + "."
        for loader, name, ispkg in pkgutil.iter_modules(auxiliary.__path__, prefix):
            if ispkg:
                continue

            # Import the auxiliary module.
            try:
                __import__(name, globals(), locals(), ["dummy"], -1)
            except ImportError as e:
                log.warning("Unable to import the auxiliary module "
                            "\"%s\": %s", name, e)

        # Walk through the available auxiliary modules.
        aux_enabled, aux_avail = [], []
        for module in Auxiliary.__subclasses__():
            # Try to start the auxiliary module.
            try:
                aux = module(self.config.get_options())
                aux_avail.append(aux)
                aux.start()
            except (NotImplementedError, AttributeError):
                log.warning("Auxiliary module %s was not implemented",
                            module.__name__)
                continue
            except Exception as e:
                log.warning("Cannot execute auxiliary module %s: %s",
                            module.__name__, e)
                continue
            # BUGFIX: the original appended to aux_enabled inside a finally
            # clause, so modules whose start() raised were still considered
            # enabled (and later stop()ped). Only record modules that
            # actually started.
            log.debug("Started auxiliary module %s", module.__name__)
            aux_enabled.append(aux)

        # Start analysis package. If for any reason, the execution of the
        # analysis package fails, we have to abort the analysis.
        try:
            pids = pack.start(self.target)
        except NotImplementedError:
            raise CuckooError("The package \"{0}\" doesn't contain a run "
                              "function.".format(package_name))
        except CuckooPackageError as e:
            raise CuckooError("The package \"{0}\" start function raised an "
                              "error: {1}".format(package_name, e))
        except Exception as e:
            raise CuckooError("The package \"{0}\" start function encountered "
                              "an unhandled exception: "
                              "{1}".format(package_name, e))

        # If the analysis package returned a list of process IDs, we add them
        # to the list of monitored processes and enable the process monitor.
        if pids:
            add_pids(pids)
            pid_check = True

        # If the package didn't return any process ID (for example in the case
        # where the package isn't enabling any behavioral analysis), we don't
        # enable the process monitor.
        else:
            log.info("No process IDs returned by the package, running "
                     "for the full timeout.")
            pid_check = False

        # Check in the options if the user toggled the timeout enforce. If so,
        # we need to override pid_check and disable process monitor.
        if self.config.enforce_timeout:
            log.info("Enabled timeout enforce, running for the full timeout.")
            pid_check = False

        time_counter = 0

        kernel_analysis = self.config.get_options().get("kernel_analysis", False)

        # Any non-False option value (e.g. the string "yes") enables it.
        if kernel_analysis != False:
            kernel_analysis = True

        while True:
            time_counter += 1
            # BUGFIX: ">=" instead of "==" so a zero/negative timeout value
            # cannot make this loop spin forever. For positive timeouts the
            # behavior is identical (the counter advances by one per pass).
            if time_counter >= int(self.config.timeout):
                log.info("Analysis timeout hit, terminating analysis.")
                break

            # If the process lock is locked, it means that something is
            # operating on the list of monitored processes. Therefore we
            # cannot proceed with the checks until the lock is released.
            if PROCESS_LOCK.locked():
                KERNEL32.Sleep(1000)
                continue

            try:
                # If the process monitor is enabled we start checking whether
                # the monitored processes are still alive.
                if pid_check:
                    if not kernel_analysis:
                        # BUGFIX: iterate over a snapshot; removing from the
                        # list being iterated skips the following entry.
                        for pid in list(PROCESS_LIST):
                            if not Process(pid=pid).is_alive():
                                log.info("Process with pid %s has terminated", pid)
                                PROCESS_LIST.remove(pid)

                    # If none of the monitored processes are still alive, we
                    # can terminate the analysis.
                    if not PROCESS_LIST and (not LASTINJECT_TIME or (datetime.now() >= (LASTINJECT_TIME + timedelta(seconds=15)))):
                        log.info("Process list is empty, "
                                 "terminating analysis.")
                        break

                    # Update the list of monitored processes available to the
                    # analysis package. It could be used for internal
                    # operations within the module.
                    pack.set_pids(PROCESS_LIST)

                try:
                    # The analysis packages are provided with a function that
                    # is executed at every loop's iteration. If such function
                    # returns False, it means that it requested the analysis
                    # to be terminate.
                    if not pack.check():
                        log.info("The analysis package requested the "
                                 "termination of the analysis.")
                        break

                # If the check() function of the package raised some exception
                # we don't care, we can still proceed with the analysis but we
                # throw a warning.
                except Exception as e:
                    log.warning("The package \"%s\" check function raised "
                                "an exception: %s", package_name, e)
            finally:
                # Zzz.
                KERNEL32.Sleep(1000)

        # Create the shutdown mutex.
        KERNEL32.CreateMutexA(None, False, SHUTDOWN_MUTEX)
        # since the various processes poll for the existence of the mutex, sleep
        # for a second to ensure they see it before they're terminated
        KERNEL32.Sleep(1000)

        try:
            # Before shutting down the analysis, the package can perform some
            # final operations through the finish() function.
            pack.finish()
        except Exception as e:
            log.warning("The package \"%s\" finish function raised an "
                        "exception: %s", package_name, e)

        # Terminate the Auxiliary modules.
        for aux in aux_enabled:
            try:
                aux.stop()
            except (NotImplementedError, AttributeError):
                continue
            except Exception as e:
                log.warning("Cannot terminate auxiliary module %s: %s",
                            aux.__class__.__name__, e)

        if self.config.terminate_processes:
            # Try to terminate remaining active processes. We do this to make sure
            # that we clean up remaining open handles (sockets, files, etc.).
            log.info("Terminating remaining processes before shutdown.")

            if not kernel_analysis:
                for pid in PROCESS_LIST:
                    proc = Process(pid=pid)
                    if proc.is_alive():
                        try:
                            if not proc.is_critical():
                                proc.terminate()
                            else:
                                proc.set_terminate_event()
                                log.info("Not terminating critical process with pid %d.", proc.pid)
                        except Exception:
                            continue

        # Run the finish callback of every available Auxiliary module.
        for aux in aux_avail:
            try:
                aux.finish()
            except (NotImplementedError, AttributeError):
                continue
            except Exception as e:
                log.warning("Exception running finish callback of auxiliary "
                            "module %s: %s", aux.__class__.__name__, e)

        # Let's invoke the completion procedure.
        self.complete()

        return True
class Analyzer:
    """Cuckoo Linux Analyzer.

    This class handles the initialization and execution of the analysis
    procedure, including the auxiliary modules and the analysis packages.
    """

    def __init__(self):
        # Parse the analysis configuration file generated by the agent.
        self.config = Config(cfg="analysis.conf")
        self.target = None

    def prepare(self):
        """Prepare env for analysis.

        Creates the result folders, initializes logging, optionally aligns
        the VM clock and resolves the analysis target path/URL.
        """
        # Create the folders used for storing the results.
        create_folders()

        # Initialize logging.
        init_logging()

        if self.config.get("clock", None):
            # Set virtual machine clock.
            clock = datetime.datetime.strptime(self.config.clock, "%Y%m%dT%H:%M:%S")
            # Setting date and time.
            os.system("date -s \"{0}\"".format(clock.strftime("%y-%m-%d %H:%M:%S")))

        # We update the target according to its category. If it's a file, then
        # we store the path.
        if self.config.category == "file":
            self.target = os.path.join(tempfile.gettempdir(),
                                       self.config.file_name)
        # For an archive, extract it and point the target at the configured
        # inner filename.
        elif self.config.category == "archive":
            zip_path = os.path.join(os.environ.get("TEMP", "/tmp"), self.config.file_name)
            zipfile.ZipFile(zip_path).extractall(os.environ.get("TEMP", "/tmp"))
            self.target = os.path.join(
                os.environ.get("TEMP", "/tmp"),
                self.config.options["filename"]
            )
        # If it's a URL, well.. we store the URL.
        else:
            self.target = self.config.target

    def complete(self):
        """End analysis."""
        # Dump all the notified files.
        dump_files()

        # Hell yeah.
        log.info("Analysis completed.")
        return True

    def run(self):
        """Run analysis.
        @return: operation status.
        """
        self.prepare()

        log.debug("Starting analyzer from: %s", os.getcwd())
        log.debug("Storing results at: %s", PATHS["root"])

        # Pick the analysis package: an explicit submission package wins,
        # otherwise suggest "url" for non-file targets and let
        # choose_package_class() decide from the file type.
        if self.config.package:
            suggestion = self.config.package
        elif self.config.category != "file":
            suggestion = "url"
        else:
            suggestion = None

        # "ie" is a Windows-only package; fall back to Firefox on Linux.
        if self.config.package == "ie":
            suggestion = "ff"

        # Try to figure out what analysis package to use with this target
        kwargs = {"suggestion": suggestion}
        if self.config.category == "file":
            package_class = choose_package_class(self.config.file_type,
                                                 self.config.file_name, **kwargs)
        else:
            package_class = choose_package_class(None, None, **kwargs)

        if not package_class:
            raise Exception("Could not find an appropriate analysis package")

        # Package initialization
        kwargs = {
            "options": self.config.options,
            "timeout": self.config.timeout
        }

        # Initialize the analysis package.
        pack = package_class(self.target, **kwargs)

        # Initialize Auxiliary modules
        Auxiliary()
        prefix = auxiliary.__name__ + "."
        for loader, name, ispkg in pkgutil.iter_modules(auxiliary.__path__, prefix):
            if ispkg:
                continue

            # Import the auxiliary module.
            try:
                __import__(name, globals(), locals(), ["dummy"], 0)
            except ImportError as e:
                log.warning("Unable to import the auxiliary module "
                            "\"%s\": %s", name, e)

        # Walk through the available auxiliary modules.
        aux_enabled, aux_avail = [], []
        for module in sorted(Auxiliary.__subclasses__(), key=lambda x: x.priority, reverse=True):
            # Try to start the auxiliary module.
            try:
                aux = module()
                aux_avail.append(aux)
                aux.start()
            except (NotImplementedError, AttributeError):
                # BUGFIX: log module.__name__, not aux.__class__.__name__:
                # if the constructor raised, "aux" is unbound (NameError) or
                # stale from the previous iteration.
                log.warning("Auxiliary module %s was not implemented",
                            module.__name__)
                continue
            except Exception as e:
                log.warning("Cannot execute auxiliary module %s: %s",
                            module.__name__, e)
                continue
            # BUGFIX: the original appended to aux_enabled inside a finally
            # clause, so modules whose start() raised were still considered
            # enabled (and later stop()ped). Only record modules that
            # actually started.
            log.debug("Started auxiliary module %s", module.__name__)
            aux_enabled.append(aux)

        # Start analysis package. If for any reason, the execution of the
        # analysis package fails, we have to abort the analysis.
        try:
            pids = pack.start()
        except NotImplementedError:
            raise CuckooError("The package \"{0}\" doesn't contain a run "
                              "function.".format(package_class))
        except CuckooPackageError as e:
            raise CuckooError("The package \"{0}\" start function raised an "
                              "error: {1}".format(package_class, e))
        except Exception as e:
            raise CuckooError("The package \"{0}\" start function encountered "
                              "an unhandled exception: "
                              "{1}".format(package_class, e))

        # If the analysis package returned a list of process IDs, we add them
        # to the list of monitored processes and enable the process monitor.
        if pids:
            add_pids(pids)
            pid_check = True

        # If the package didn't return any process ID (for example in the case
        # where the package isn't enabling any behavioral analysis), we don't
        # enable the process monitor.
        else:
            log.info("No process IDs returned by the package, running "
                     "for the full timeout.")
            pid_check = False

        # Check in the options if the user toggled the timeout enforce. If so,
        # we need to override pid_check and disable process monitor.
        if self.config.enforce_timeout:
            log.info("Enabled timeout enforce, running for the full timeout.")
            pid_check = False

        time_counter = 0

        while True:
            time_counter += 1
            if time_counter > int(self.config.timeout):
                log.info("Analysis timeout hit, terminating analysis.")
                break

            try:
                # If the process monitor is enabled we start checking whether
                # the monitored processes are still alive.
                if pid_check:
                    # Iterate over a snapshot so removal is safe.
                    for pid in list(PROCESS_LIST):
                        if not Process(pid=pid).is_alive():
                            log.info("Process with pid %s has terminated", pid)
                            PROCESS_LIST.remove(pid)

                    # TODO: ask the package if it knows any new pids
                    # (pack.get_pids() is not implemented yet).

                    # also ask the auxiliaries
                    for aux in aux_avail:
                        add_pids(aux.get_pids())

                    # If none of the monitored processes are still alive, we
                    # can terminate the analysis.
                    if not PROCESS_LIST:
                        log.info("Process list is empty, "
                                 "terminating analysis.")
                        break

                    # Update the list of monitored processes available to the
                    # analysis package. It could be used for internal
                    # operations within the module.
                    pack.set_pids(PROCESS_LIST)

                try:
                    # The analysis packages are provided with a function that
                    # is executed at every loop's iteration. If such function
                    # returns False, it means that it requested the analysis
                    # to be terminate.
                    if not pack.check():
                        log.info("The analysis package requested the "
                                 "termination of the analysis.")
                        break

                # If the check() function of the package raised some exception
                # we don't care, we can still proceed with the analysis but we
                # throw a warning.
                except Exception as e:
                    log.warning("The package \"%s\" check function raised "
                                "an exception: %s", package_class, e)
            except Exception as e:
                log.exception("The PID watching loop raised an exception: %s", e)
            finally:
                # Zzz.
                time.sleep(1)

        try:
            # Before shutting down the analysis, the package can perform some
            # final operations through the finish() function.
            pack.finish()
        except Exception as e:
            log.warning("The package \"%s\" finish function raised an "
                        "exception: %s", package_class, e)

        try:
            # Upload files the package created to files in the results folder
            package_files = pack.package_files()
            if package_files is not None:
                for package in package_files:
                    upload_to_host(
                        package[0], os.path.join("files", package[1])
                    )
        except Exception as e:
            log.warning("The package \"%s\" package_files function raised an "
                        "exception: %s", package_class, e)

        # Terminate the Auxiliary modules. Lowest priority first, so they
        # unwind in the reverse order they were started.
        for aux in sorted(aux_enabled, key=lambda x: x.priority):
            try:
                aux.stop()
            except (NotImplementedError, AttributeError):
                continue
            except Exception as e:
                log.warning("Cannot terminate auxiliary module %s: %s",
                            aux.__class__.__name__, e)

        if self.config.terminate_processes:
            # Try to terminate remaining active processes. We do this to make sure
            # that we clean up remaining open handles (sockets, files, etc.).
            log.info("Terminating remaining processes before shutdown.")

            for pid in PROCESS_LIST:
                proc = Process(pid=pid)
                if proc.is_alive():
                    try:
                        proc.terminate()
                    except Exception:
                        continue

        # Run the finish callback of every available Auxiliary module.
        for aux in aux_avail:
            try:
                aux.finish()
            except (NotImplementedError, AttributeError):
                continue
            except Exception as e:
                log.warning("Exception running finish callback of auxiliary "
                            "module %s: %s", aux.__class__.__name__, e)

        # Let's invoke the completion procedure.
        self.complete()

        return True
class LKM(Auxiliary):
    """helper LKM for sleep skipping etc

    Inserts the probelkm kernel module, collects the PIDs it reports for
    forked descendants, and uploads its kern.log records at shutdown.
    """

    def __init__(self):
        self.config = Config(cfg="analysis.conf")
        # PIDs already returned from get_pids(), so each fork is reported once.
        self.pids_reported = set()

    def start(self):
        """Locate and insert the probelkm module.
        @return: True if a module was found and insmod was invoked,
            False if no module could be located.
        """
        lkm_path = self.config.get("analyzer_lkm_path", None)
        # highest priority: if the vm config specifies the path
        if lkm_path and os.path.exists(lkm_path):
            path = lkm_path
        # next: if the analyzer was uploaded with a module for our platform
        elif os.path.exists(os.path.join(platform.machine(), "probelkm.ko")):
            path = os.path.join(platform.machine(), "probelkm.ko")
        # next: default path inside the machine
        elif os.path.exists("/root/.cuckoo/probelkm.ko"):
            path = "/root/.cuckoo/probelkm.ko"
        # next: generic module uploaded with the analyzer (single arch setup maybe?)
        elif os.path.exists("probelkm.ko"):
            path = "probelkm.ko"
        else:
            log.warning("Could not find probelkm :(")
            return False

        os.system("insmod %s trace_descendants=1 target_pid=%u" % (path, os.getpid()))
        return True

    def get_pids(self):
        """Parse kern.log for PIDs forked inside the traced tree.
        @return: list of PIDs not previously reported.
        """
        new = []

        # BUGFIX: use a context manager -- the original opened the log on
        # every poll iteration without ever closing it, leaking a file
        # descriptor each second.
        with open("/var/log/kern.log") as fd:
            for line in fd:
                if "[probelkm]" not in line:
                    continue
                # Lines look like "... forked to <pid>@..." -- extract <pid>.
                pos1 = line.find("forked to ")
                pos2 = line.find("@", pos1 + 10)
                if pos1 == -1 or pos2 == -1:
                    continue
                forked_pid = int(line[pos1 + 10:pos2])
                if forked_pid in self.pids_reported:
                    continue
                self.pids_reported.add(forked_pid)
                new.append(forked_pid)

        return new

    def stop(self):
        """Upload the probelkm records to the result server."""
        # i guess we don't need to unload at all
        #os.system("rmmod probelkm")

        # now upload the logfile
        nf = NetlogFile("logs/all.lkm")
        with open("/var/log/kern.log") as fd:
            for line in fd:
                if "[probelkm]" not in line:
                    continue
                nf.sock.sendall(line)  # dirty direct send, no reconnecting
        nf.close()
def __init__(self):
    # No target is known yet; it gets filled in during preparation.
    self.target = None
    # Load the analysis settings that the agent dropped alongside us.
    self.config = Config(cfg="analysis.conf")
def run(self):
    """Run handler.

    Reads one command message from the connected pipe client (cuckoomon),
    dispatches it by its textual prefix, and writes back a response
    (default "OK") before closing the pipe handle.
    @return: operation status.
    """
    global MONITORED_SERVICES
    global LASTINJECT_TIME
    data = ""
    response = "OK"

    # Read the data submitted to the Pipe Server.
    # ERROR_MORE_DATA means the message did not fit in the buffer, so keep
    # reading and accumulating until the whole message has arrived.
    while True:
        bytes_read = c_int(0)

        buf = create_string_buffer(BUFSIZE)
        success = KERNEL32.ReadFile(self.h_pipe,
                                    buf,
                                    sizeof(buf),
                                    byref(bytes_read),
                                    None)

        # buf.value stops at the first NUL byte.
        data += buf.value

        if not success and KERNEL32.GetLastError() == ERROR_MORE_DATA:
            continue
        # elif not success or bytes_read.value == 0:
        #    if KERNEL32.GetLastError() == ERROR_BROKEN_PIPE:
        #        pass

        break

    if data:
        command = data.strip()

        # Debug, Regular, Warning, or Critical information from CuckooMon.
        if command.startswith("DEBUG:"):
            log.debug(command[6:])
        elif command.startswith("INFO:"):
            log.info(command[5:])
        elif command.startswith("WARNING:"):
            log.warning(command[8:])
        elif command.startswith("CRITICAL:"):
            log.critical(command[9:])

        # Parse the prefix for the received notification.
        # In case of GETPIDS we're gonna return the current process ID
        # and the process ID of our parent process (agent.py).
        elif command == "GETPIDS":
            response = struct.pack("II", PID, PPID)

        # When analyzing we don't want to hook all functions, as we're
        # having some stability issues with regards to webbrowsers.
        elif command == "HOOKDLLS":
            is_url = Config(cfg="analysis.conf").category != "file"

            url_dlls = "ntdll", "kernel32"

            def hookdll_encode(names):
                # We have to encode each dll name as unicode string
                # with length 16.
                names = [name + "\x00" * (16 - len(name)) for name in names]
                f = lambda s: "".join(ch + "\x00" for ch in s)
                return "".join(f(name) for name in names)

            # If this sample is not a URL, then we don't want to limit
            # any API hooks (at least for now), so we write a null-byte
            # which indicates that all DLLs should be hooked.
            if not is_url:
                response = "\x00"
            else:
                response = hookdll_encode(url_dlls)

        # remove pid from process list because we received a notification
        # from kernel land
        elif command.startswith("KTERMINATE:"):
            data = command[11:]
            process_id = int(data)
            if process_id:
                if process_id in PROCESS_LIST:
                    remove_pid(process_id)

        # same than below but we don't want to inject any DLLs because
        # it's a kernel analysis
        elif command.startswith("KPROCESS:"):
            # NOTE(review): the lock is released manually; an exception
            # between acquire() and release() (e.g. from get_filepath())
            # would leave it held forever -- a try/finally would be safer.
            PROCESS_LOCK.acquire()
            data = command[9:]
            process_id = int(data)
            thread_id = None
            if process_id:
                if process_id not in (PID, PPID):
                    if process_id not in PROCESS_LIST:
                        proc = Process(pid=process_id, thread_id=thread_id)
                        filepath = proc.get_filepath()
                        filename = os.path.basename(filepath)

                        if not protected_filename(filename):
                            add_pid(process_id)
                            log.info("Announce process name : %s", filename)
            PROCESS_LOCK.release()

        elif command.startswith("KERROR:"):
            error_msg = command[7:]
            log.error("Error : %s", str(error_msg))

        # if a new driver has been loaded, we stop the analysis
        elif command == "KSUBVERT":
            # NOTE(review): this removes from PROCESS_LIST while iterating
            # it, which skips every other entry -- verify whether the intent
            # was to clear the whole list.
            for pid in PROCESS_LIST:
                log.info("Process with pid %s has terminated", pid)
                PROCESS_LIST.remove(pid)

        # Handle case of a service being started by a monitored process
        # Switch the service type to own process behind its back so we
        # can monitor the service more easily with less noise
        elif command.startswith("SERVICE:"):
            servname = command[8:]
            si = subprocess.STARTUPINFO()
            si.dwFlags = subprocess.STARTF_USESHOWWINDOW
            si.wShowWindow = subprocess.SW_HIDE
            subprocess.call("sc config " + servname + " type= own", startupinfo=si)
            log.info("Announced starting service \"%s\"", servname)

            if not MONITORED_SERVICES:
                # Inject into services.exe so we can monitor service creation
                # if tasklist previously failed to get the services.exe PID we'll be
                # unable to inject
                if SERVICES_PID:
                    servproc = Process(pid=SERVICES_PID, suspended=False)
                    filepath = servproc.get_filepath()
                    servproc.inject(dll=DEFAULT_DLL, interest=filepath, nosleepskip=True)
                    LASTINJECT_TIME = datetime.now()
                    servproc.close()
                    # Give the injected DLL a moment to initialize before
                    # any service is actually created.
                    KERNEL32.Sleep(1000)
                    MONITORED_SERVICES = True
                else:
                    log.error('Unable to monitor service %s' % (servname))

        # For now all we care about is bumping up our LASTINJECT_TIME to account for long delays between
        # injection and actual resume time where the DLL would have a chance to load in the new process
        # and report back to have its pid added to the list of monitored processes
        elif command.startswith("RESUME:"):
            LASTINJECT_TIME = datetime.now()

        # Handle case of malware terminating a process -- notify the target
        # ahead of time so that it can flush its log buffer
        elif command.startswith("KILL:"):
            PROCESS_LOCK.acquire()

            process_id = int(command[5:])
            if process_id not in (PID, PPID) and process_id in PROCESS_LIST:
                # only notify processes we've hooked
                event_name = TERMINATE_EVENT + str(process_id)
                event_handle = KERNEL32.OpenEventA(EVENT_MODIFY_STATE, False, event_name)
                if not event_handle:
                    log.warning("Unable to open termination event for pid %u.", process_id)
                else:
                    # make sure process is aware of the termination
                    KERNEL32.SetEvent(event_handle)
                    KERNEL32.CloseHandle(event_handle)

            PROCESS_LOCK.release()

        # Handle notification of cuckoomon loading in a process
        elif command.startswith("LOADED:"):
            PROCESS_LOCK.acquire()
            process_id = int(command[7:])
            if process_id not in PROCESS_LIST:
                add_pids(process_id)
            PROCESS_LOCK.release()
            log.info("Cuckoomon successfully loaded in process with pid %u.", process_id)

        # In case of PID, the client is trying to notify the creation of
        # a new process to be injected and monitored.
        elif command.startswith("PROCESS:"):
            # We acquire the process lock in order to prevent the analyzer
            # to terminate the analysis while we are operating on the new
            # process.
            PROCESS_LOCK.acquire()

            # Set the current DLL to the default one provided
            # at submission.
            dll = DEFAULT_DLL
            suspended = False
            # We parse the process ID.
            # Payload is either "<pid>[,<tid>]" or "<flag>:<pid>[,<tid>]"
            # where flag '1' marks the process as suspended.
            data = command[8:]
            if len(data) > 2 and data[1] == ':':
                if data[0] == '1':
                    suspended = True
                data = command[10:]

            process_id = thread_id = None
            if "," not in data:
                if data.isdigit():
                    process_id = int(data)
            elif data.count(",") == 1:
                process_id, param = data.split(",")
                thread_id = None
                if process_id.isdigit():
                    process_id = int(process_id)
                else:
                    process_id = None

                if param.isdigit():
                    thread_id = int(param)

            if process_id:
                if process_id not in (PID, PPID):
                    # We inject the process only if it's not being
                    # monitored already, otherwise we would generate
                    # polluted logs.
                    if process_id not in PROCESS_LIST:
                        # Open the process and inject the DLL.
                        # Hope it enjoys it.
                        proc = Process(pid=process_id,
                                       thread_id=thread_id,
                                       suspended=suspended)

                        filepath = proc.get_filepath()
                        is_64bit = proc.is_64bit()
                        filename = os.path.basename(filepath)

                        log.info("Announced %s process name: %s pid: %d", "64-bit" if is_64bit else "32-bit", filename, process_id)

                        if not in_protected_path(filename):
                            res = proc.inject(dll, filepath)
                            LASTINJECT_TIME = datetime.now()
                        proc.close()
                else:
                    log.warning("Received request to inject Cuckoo "
                                "process with pid %d, skip", process_id)

            # Once we're done operating on the processes list, we release
            # the lock.
            PROCESS_LOCK.release()

        # In case of FILE_NEW, the client is trying to notify the creation
        # of a new file.
        elif command.startswith("FILE_NEW:"):
            # We extract the file path.
            file_path = unicode(command[9:].decode("utf-8"))
            # We add the file to the list.
            add_file(file_path)

        # In case of FILE_DEL, the client is trying to notify an ongoing
        # deletion of an existing file, therefore we need to dump it
        # straight away.
        elif command.startswith("FILE_DEL:"):
            # Extract the file path.
            file_path = unicode(command[9:].decode("utf-8"))
            # Dump the file straight away.
            del_file(file_path)

        elif command.startswith("FILE_MOVE:"):
            # Syntax = "FILE_MOVE:old_file_path::new_file_path".
            if "::" in command[10:]:
                old_fname, new_fname = command[10:].split("::", 1)
                move_file(unicode(old_fname.decode("utf-8")),
                          unicode(new_fname.decode("utf-8")))
        else:
            log.warning("Received unknown command from cuckoomon: %s", command)

    KERNEL32.WriteFile(self.h_pipe,
                       create_string_buffer(response),
                       len(response),
                       byref(bytes_read),
                       None)

    KERNEL32.CloseHandle(self.h_pipe)

    return True
class Analyzer:
    """Cuckoo Windows Analyzer.

    This class handles the initialization and execution of the analysis
    procedure, including handling of the pipe server, the auxiliary modules
    and the analysis packages.
    """

    # Number of PipeServer threads spawned to service monitored processes.
    PIPE_SERVER_COUNT = 4

    def __init__(self):
        # PipeServer slots; populated and started in prepare().
        self.pipes = [None]*self.PIPE_SERVER_COUNT
        # Parsed analysis.conf; set in prepare().
        self.config = None
        # Path (category "file") or URL of the analysis target.
        self.target = None

    def pids_from_process_name_list(self, namelist):
        """Return PIDs of running processes whose lower-cased image name is
        in namelist.

        Enumerates processes via NtQuerySystemInformation with information
        class 5 (SystemProcessInformation) into a 1 MB buffer; returns an
        empty list if the native call fails.
        """
        proclist = []
        pidlist = []
        buf = create_string_buffer(1024 * 1024)
        p = cast(buf, c_void_p)
        retlen = c_ulong(0)
        retval = NTDLL.NtQuerySystemInformation(5, buf, 1024 * 1024, byref(retlen))
        if retval:
            # Non-zero NTSTATUS means the query failed; report no matches.
            return []
        proc = cast(p, POINTER(SYSTEM_PROCESS_INFORMATION)).contents
        # Walk the NextEntryOffset-linked records. The pointer is advanced
        # before appending, so the very first record is not collected.
        while proc.NextEntryOffset:
            p.value += proc.NextEntryOffset
            proc = cast(p, POINTER(SYSTEM_PROCESS_INFORMATION)).contents
            # ImageName.Length is a byte count; /2 converts to UTF-16 code
            # units (Python 2 integer division).
            proclist.append((proc.ImageName.Buffer[:proc.ImageName.Length/2],
                             proc.UniqueProcessId))

        for proc in proclist:
            lowerproc = proc[0].lower()
            for name in namelist:
                if lowerproc == name:
                    pidlist.append(proc[1])
                    break
        return pidlist

    def prepare(self):
        """Prepare env for analysis."""
        global DEFAULT_DLL
        global SERVICES_PID
        global HIDE_PIDS

        # Get SeDebugPrivilege for the Python process. It will be needed in
        # order to perform the injections.
        grant_debug_privilege()

        # Randomize cuckoomon DLL and loader executable names so they are
        # harder for the sample to spot.
        copy("dll\\cuckoomon.dll", CUCKOOMON32_NAME)
        copy("dll\\cuckoomon_x64.dll", CUCKOOMON64_NAME)
        copy("bin\\loader.exe", LOADER32_NAME)
        copy("bin\\loader_x64.exe", LOADER64_NAME)

        # Create the folders used for storing the results.
        create_folders()

        # Shield the analyzer's own directories from FILE_* notifications.
        add_protected_path(os.getcwd())
        add_protected_path(PATHS["root"])

        # Initialize logging.
        init_logging()

        # Parse the analysis configuration file generated by the agent.
        self.config = Config(cfg="analysis.conf")

        # Set virtual machine clock.
        clock = datetime.strptime(self.config.clock, "%Y%m%dT%H:%M:%S")
        # Setting date and time.
        # NOTE: Windows system has only localized commands with date format
        # following localization settings, so these commands for english date
        # format cannot work in other localizations.
        # In addition DATE and TIME commands are blocking if an incorrect
        # syntax is provided, so an echo trick is used to bypass the input
        # request and not block analysis.
        thedate = clock.strftime("%m-%d-%y")
        thetime = clock.strftime("%H:%M:%S")
        os.system("echo:|date {0}".format(thedate))
        os.system("echo:|time {0}".format(thetime))
        log.info("Date set to: {0}, time set to: {1}".format(thedate, thetime))

        # Set the default DLL to be used by the PipeHandler.
        DEFAULT_DLL = self.config.get_options().get("dll")

        # Get PID for services.exe for monitoring services.
        svcpid = self.pids_from_process_name_list(["services.exe"])
        if svcpid:
            SERVICES_PID = svcpid[0]

        # Known VM-tooling / monitoring processes whose PIDs are hidden from
        # the monitored sample.
        protected_procname_list = [
            "vmwareuser.exe",
            "vmwareservice.exe",
            "vboxservice.exe",
            "vboxtray.exe",
            "sandboxiedcomlaunch.exe",
            "sandboxierpcss.exe",
            "procmon.exe",
            "regmon.exe",
            "filemon.exe",
            "wireshark.exe",
            "netmon.exe",
            "prl_tools_service.exe",
            "prl_tools.exe",
            "prl_cc.exe",
            "sharedintapp.exe",
            "vmtoolsd.exe",
            "vmsrvc.exe",
            "python.exe",
            "perl.exe",
        ]
        HIDE_PIDS = set(self.pids_from_process_name_list(protected_procname_list))

        # Initialize and start the Pipe Servers. This is going to be used for
        # communicating with the injected and monitored processes.
        for x in xrange(self.PIPE_SERVER_COUNT):
            self.pipes[x] = PipeServer(self.config)
            self.pipes[x].daemon = True
            self.pipes[x].start()

        # We update the target according to its category. If it's a file, then
        # we store the path.
        if self.config.category == "file":
            self.target = os.path.join(os.environ["TEMP"] + os.sep,
                                       str(self.config.file_name))
        # If it's a URL, well.. we store the URL.
        else:
            self.target = self.config.target

    def complete(self):
        """End analysis."""
        # Stop the Pipe Servers.
        for x in xrange(self.PIPE_SERVER_COUNT):
            self.pipes[x].stop()

        # Dump all the notified files.
        dump_files()

        # Hell yeah.
        log.info("Analysis completed.")

    def run(self):
        """Run analysis.
        @return: operation status.
        """
        self.prepare()

        log.debug("Starting analyzer from: %s", os.getcwd())
        log.debug("Storing results at: %s", PATHS["root"])
        log.debug("Pipe server name: %s", PIPE)

        # If no analysis package was specified at submission, we try to select
        # one automatically.
        if not self.config.package:
            log.debug("No analysis package specified, trying to detect "
                      "it automagically.")

            # If the analysis target is a file, we choose the package according
            # to the file format.
            if self.config.category == "file":
                package = choose_package(self.config.file_type,
                                         self.config.file_name,
                                         self.config.exports)
            # If it's an URL, we'll just use the default Internet Explorer
            # package.
            else:
                package = "ie"

            # If we weren't able to automatically determine the proper package,
            # we need to abort the analysis.
            if not package:
                raise CuckooError("No valid package available for file "
                                  "type: {0}".format(self.config.file_type))

            log.info("Automatically selected analysis package \"%s\"", package)
        # Otherwise just select the specified package.
        else:
            package = self.config.package

        # Generate the package path.
        package_name = "modules.packages.%s" % package

        # Try to import the analysis package.
        try:
            # Level -1 (implicit relative imports) is Python 2 only.
            __import__(package_name, globals(), locals(), ["dummy"], -1)
        # If it fails, we need to abort the analysis.
        except ImportError:
            raise CuckooError("Unable to import package \"{0}\", does "
                              "not exist.".format(package_name))

        # Initialize the package parent abstract.
        Package()

        # Enumerate the abstract subclasses.
        try:
            package_class = Package.__subclasses__()[0]
        except IndexError as e:
            raise CuckooError("Unable to select package class "
                              "(package={0}): {1}".format(package_name, e))

        # Initialize the analysis package.
        pack = package_class(self.config.get_options(), self.config)

        # Initialize Auxiliary modules
        Auxiliary()
        prefix = auxiliary.__name__ + "."
        for loader, name, ispkg in pkgutil.iter_modules(auxiliary.__path__, prefix):
            if ispkg:
                continue

            # Import the auxiliary module.
            try:
                __import__(name, globals(), locals(), ["dummy"], -1)
            except ImportError as e:
                log.warning("Unable to import the auxiliary module "
                            "\"%s\": %s", name, e)

        # Walk through the available auxiliary modules.
        aux_enabled, aux_avail = [], []
        for module in Auxiliary.__subclasses__():
            # Try to start the auxiliary module.
            try:
                aux = module(self.config.get_options(), self.config)
                aux_avail.append(aux)
                aux.start()
            except (NotImplementedError, AttributeError):
                log.warning("Auxiliary module %s was not implemented",
                            module.__name__)
            except Exception as e:
                log.warning("Cannot execute auxiliary module %s: %s",
                            module.__name__, e)
            else:
                # Only modules that started cleanly are tracked for stop().
                log.debug("Started auxiliary module %s", module.__name__)
                aux_enabled.append(aux)

        # Start analysis package. If for any reason, the execution of the
        # analysis package fails, we have to abort the analysis.
        try:
            pids = pack.start(self.target)
        except NotImplementedError:
            raise CuckooError("The package \"{0}\" doesn't contain a run "
                              "function.".format(package_name))
        except CuckooPackageError as e:
            raise CuckooError("The package \"{0}\" start function raised an "
                              "error: {1}".format(package_name, e))
        except Exception as e:
            raise CuckooError("The package \"{0}\" start function encountered "
                              "an unhandled exception: "
                              "{1}".format(package_name, e))

        # If the analysis package returned a list of process IDs, we add them
        # to the list of monitored processes and enable the process monitor.
        if pids:
            add_pids(pids)
            pid_check = True

        # If the package didn't return any process ID (for example in the case
        # where the package isn't enabling any behavioral analysis), we don't
        # enable the process monitor.
        else:
            log.info("No process IDs returned by the package, running "
                     "for the full timeout.")
            pid_check = False

        # Check in the options if the user toggled the timeout enforce. If so,
        # we need to override pid_check and disable process monitor.
        if self.config.enforce_timeout:
            log.info("Enabled timeout enforce, running for the full timeout.")
            pid_check = False

        time_counter = 0

        kernel_analysis = self.config.get_options().get("kernel_analysis", False)
        # Any present option value (including a string) enables kernel analysis.
        if kernel_analysis != False:
            kernel_analysis = True

        # Timestamp of the moment the monitored-process list first became
        # empty; used as a short grace period before terminating.
        emptytime = None

        while True:
            time_counter += 1
            if time_counter == int(self.config.timeout):
                log.info("Analysis timeout hit, terminating analysis.")
                break

            # If the process lock is locked, it means that something is
            # operating on the list of monitored processes. Therefore we
            # cannot proceed with the checks until the lock is released.
            if PROCESS_LOCK.locked():
                KERNEL32.Sleep(1000)
                continue

            try:
                # If the process monitor is enabled we start checking whether
                # the monitored processes are still alive.
                if pid_check:
                    if not kernel_analysis:
                        # NOTE(review): PROCESS_LIST is mutated while being
                        # iterated; a pid following a removed one may only be
                        # noticed on the next loop pass.
                        for pid in PROCESS_LIST:
                            if not Process(pid=pid).is_alive():
                                log.info("Process with pid %s has terminated", pid)
                                PROCESS_LIST.remove(pid)

                    # If none of the monitored processes are still alive, we
                    # can terminate the analysis: require 15s since the last
                    # injection plus a 5s grace period with an empty list.
                    if not PROCESS_LIST and (not LASTINJECT_TIME or (datetime.now() >= (LASTINJECT_TIME + timedelta(seconds=15)))):
                        if emptytime and (datetime.now() >= (emptytime + timedelta(seconds=5))):
                            log.info("Process list is empty, "
                                     "terminating analysis.")
                            break
                        elif not emptytime:
                            emptytime = datetime.now()
                    # NOTE(review): indentation reconstructed -- this "else"
                    # is read as belonging to the outer "if", resetting the
                    # grace timer while processes are alive; confirm against
                    # upstream history.
                    else:
                        emptytime = None

                # Update the list of monitored processes available to the
                # analysis package. It could be used for internal
                # operations within the module.
                pack.set_pids(PROCESS_LIST)

                try:
                    # The analysis packages are provided with a function that
                    # is executed at every loop's iteration. If such function
                    # returns False, it means that it requested the analysis
                    # to be terminate.
                    if not pack.check():
                        log.info("The analysis package requested the "
                                 "termination of the analysis.")
                        break

                # If the check() function of the package raised some exception
                # we don't care, we can still proceed with the analysis but we
                # throw a warning.
                except Exception as e:
                    log.warning("The package \"%s\" check function raised "
                                "an exception: %s", package_name, e)
            finally:
                # Zzz.
                KERNEL32.Sleep(1000)

        # Create the shutdown mutex.
        KERNEL32.CreateMutexA(None, False, SHUTDOWN_MUTEX)
        log.info("Created shutdown mutex.")
        # since the various processes poll for the existence of the mutex, sleep
        # for a second to ensure they see it before they're terminated
        KERNEL32.Sleep(1000)

        log.info("Shutting down package.")
        try:
            # Before shutting down the analysis, the package can perform some
            # final operations through the finish() function.
            pack.finish()
        except Exception as e:
            log.warning("The package \"%s\" finish function raised an "
                        "exception: %s", package_name, e)

        log.info("Stopping auxiliary modules.")
        # Terminate the Auxiliary modules.
        for aux in aux_enabled:
            try:
                aux.stop()
            except (NotImplementedError, AttributeError):
                continue
            except Exception as e:
                log.warning("Cannot terminate auxiliary module %s: %s",
                            aux.__class__.__name__, e)

        # Tell all processes to flush their logs regardless of terminate_processes setting
        if not kernel_analysis:
            for pid in PROCESS_LIST:
                proc = Process(pid=pid)
                if proc.is_alive():
                    try:
                        proc.set_terminate_event()
                    except:
                        continue

        if self.config.terminate_processes:
            # Try to terminate remaining active processes. We do this to make sure
            # that we clean up remaining open handles (sockets, files, etc.).
            log.info("Terminating remaining processes before shutdown.")
            if not kernel_analysis:
                for pid in PROCESS_LIST:
                    proc = Process(pid=pid)
                    if proc.is_alive():
                        try:
                            if not proc.is_critical():
                                proc.terminate()
                            else:
                                log.info("Not terminating critical process with pid %d.", proc.pid)
                        except:
                            continue

        log.info("Finishing auxiliary modules.")
        # Run the finish callback of every available Auxiliary module.
        for aux in aux_avail:
            try:
                aux.finish()
            except (NotImplementedError, AttributeError):
                continue
            except Exception as e:
                log.warning("Exception running finish callback of auxiliary "
                            "module %s: %s", aux.__class__.__name__, e)

        # Let's invoke the completion procedure.
        log.info("Shutting down pipe server and dumping dropped files.")
        self.complete()

        return True
class Analyzer:
    """Cuckoo Windows Analyzer.

    This class handles the initialization and execution of the analysis
    procedure, including handling of the pipe server, the auxiliary modules
    and the analysis packages.
    """

    # Number of PipeServer threads spawned to service monitored processes.
    PIPE_SERVER_COUNT = 4

    def __init__(self):
        # PipeServer slots; populated and started in prepare().
        self.pipes = [None] * self.PIPE_SERVER_COUNT
        # Parsed analysis.conf; set in prepare().
        self.config = None
        # Path (category "file") or URL of the analysis target.
        self.target = None

    def prepare(self):
        """Prepare env for analysis."""
        global DEFAULT_DLL
        global SERVICES_PID

        # Get SeDebugPrivilege for the Python process. It will be needed in
        # order to perform the injections.
        grant_debug_privilege()

        # Create the folders used for storing the results.
        create_folders()

        # Shield the analyzer's own directories from FILE_* notifications.
        add_protected_path(os.getcwd())
        add_protected_path(PATHS["root"])

        # Initialize logging.
        init_logging()

        # Parse the analysis configuration file generated by the agent.
        self.config = Config(cfg="analysis.conf")

        # Set virtual machine clock.
        clock = datetime.strptime(self.config.clock, "%Y%m%dT%H:%M:%S")
        # Setting date and time.
        # NOTE: Windows system has only localized commands with date format
        # following localization settings, so these commands for english date
        # format cannot work in other localizations.
        # In addition DATE and TIME commands are blocking if an incorrect
        # syntax is provided, so an echo trick is used to bypass the input
        # request and not block analysis.
        os.system("echo:|date {0}".format(clock.strftime("%m-%d-%y")))
        os.system("echo:|time {0}".format(clock.strftime("%H:%M:%S")))

        # Set the default DLL to be used by the PipeHandler.
        DEFAULT_DLL = self.config.get_options().get("dll")

        # get PID for services.exe for monitoring services
        # tasklist sometimes fails under high-load (http://support.microsoft.com/kb/2732840)
        # We can retry a few times to hopefully work around failures
        retries = 4
        while retries > 0:
            stdin, stdout, stderr = os.popen3(
                "tasklist /V /FI \"IMAGENAME eq services.exe\"")
            s = stdout.read()
            err = stderr.read()
            if 'services.exe' not in s:
                log.warning('tasklist failed with error "%s"' % (err))
            else:
                # it worked
                break
            retries -= 1
        if 'services.exe' not in s:
            # All attempts failed
            log.error('Unable to retreive services.exe PID')
            SERVICES_PID = None
        else:
            # The PID is the first whitespace-delimited field after the
            # image name column.
            servidx = s.index("services.exe")
            servstr = s[servidx + 12:].strip()
            SERVICES_PID = int(servstr[:servstr.index(' ')], 10)
            log.debug('services.exe PID is %s' % (SERVICES_PID))

        # Initialize and start the Pipe Servers. This is going to be used for
        # communicating with the injected and monitored processes.
        for x in xrange(self.PIPE_SERVER_COUNT):
            self.pipes[x] = PipeServer()
            self.pipes[x].daemon = True
            self.pipes[x].start()

        # We update the target according to its category. If it's a file, then
        # we store the path.
        if self.config.category == "file":
            self.target = os.path.join(os.environ["TEMP"] + os.sep,
                                       str(self.config.file_name))
        # If it's a URL, well.. we store the URL.
        else:
            self.target = self.config.target

    def complete(self):
        """End analysis."""
        # Stop the Pipe Servers.
        for x in xrange(self.PIPE_SERVER_COUNT):
            self.pipes[x].stop()

        # Dump all the notified files.
        dump_files()

        # Hell yeah.
        log.info("Analysis completed.")

    def run(self):
        """Run analysis.
        @return: operation status.
        """
        self.prepare()

        log.debug("Starting analyzer from: %s", os.getcwd())
        log.debug("Storing results at: %s", PATHS["root"])
        log.debug("Pipe server name: %s", PIPE)

        # If no analysis package was specified at submission, we try to select
        # one automatically.
        if not self.config.package:
            log.debug("No analysis package specified, trying to detect "
                      "it automagically.")

            # If the analysis target is a file, we choose the package according
            # to the file format.
            if self.config.category == "file":
                package = choose_package(self.config.file_type,
                                         self.config.file_name,
                                         self.config.exports)
            # If it's an URL, we'll just use the default Internet Explorer
            # package.
            else:
                package = "ie"

            # If we weren't able to automatically determine the proper package,
            # we need to abort the analysis.
            if not package:
                raise CuckooError("No valid package available for file "
                                  "type: {0}".format(self.config.file_type))

            log.info("Automatically selected analysis package \"%s\"", package)
        # Otherwise just select the specified package.
        else:
            package = self.config.package

        # Generate the package path.
        package_name = "modules.packages.%s" % package

        # Try to import the analysis package.
        try:
            # Level -1 (implicit relative imports) is Python 2 only.
            __import__(package_name, globals(), locals(), ["dummy"], -1)
        # If it fails, we need to abort the analysis.
        except ImportError:
            raise CuckooError("Unable to import package \"{0}\", does "
                              "not exist.".format(package_name))

        # Initialize the package parent abstract.
        Package()

        # Enumerate the abstract subclasses.
        try:
            package_class = Package.__subclasses__()[0]
        except IndexError as e:
            raise CuckooError("Unable to select package class "
                              "(package={0}): {1}".format(package_name, e))

        # Initialize the analysis package.
        pack = package_class(self.config.get_options(), self.config)

        # Initialize Auxiliary modules
        Auxiliary()
        prefix = auxiliary.__name__ + "."
        for loader, name, ispkg in pkgutil.iter_modules(
                auxiliary.__path__, prefix):
            if ispkg:
                continue

            # Import the auxiliary module.
            try:
                __import__(name, globals(), locals(), ["dummy"], -1)
            except ImportError as e:
                log.warning(
                    "Unable to import the auxiliary module "
                    "\"%s\": %s", name, e)

        # Walk through the available auxiliary modules.
        aux_enabled, aux_avail = [], []
        for module in Auxiliary.__subclasses__():
            # Try to start the auxiliary module.
            try:
                aux = module(self.config.get_options(), self.config)
                aux_avail.append(aux)
                aux.start()
            except (NotImplementedError, AttributeError):
                log.warning("Auxiliary module %s was not implemented",
                            module.__name__)
                continue
            except Exception as e:
                log.warning("Cannot execute auxiliary module %s: %s",
                            module.__name__, e)
                continue
            else:
                # FIX: this clause used to be "finally:", which ran even when
                # the module failed to start -- logging a bogus "Started"
                # message and appending a stale (or, on the first iteration,
                # unbound) "aux" to aux_enabled. "else:" runs only on success,
                # matching the sibling Analyzer implementation in this file.
                log.debug("Started auxiliary module %s", module.__name__)
                aux_enabled.append(aux)

        # Start analysis package. If for any reason, the execution of the
        # analysis package fails, we have to abort the analysis.
        try:
            pids = pack.start(self.target)
        except NotImplementedError:
            raise CuckooError("The package \"{0}\" doesn't contain a run "
                              "function.".format(package_name))
        except CuckooPackageError as e:
            raise CuckooError("The package \"{0}\" start function raised an "
                              "error: {1}".format(package_name, e))
        except Exception as e:
            raise CuckooError("The package \"{0}\" start function encountered "
                              "an unhandled exception: "
                              "{1}".format(package_name, e))

        # If the analysis package returned a list of process IDs, we add them
        # to the list of monitored processes and enable the process monitor.
        if pids:
            add_pids(pids)
            pid_check = True

        # If the package didn't return any process ID (for example in the case
        # where the package isn't enabling any behavioral analysis), we don't
        # enable the process monitor.
        else:
            log.info("No process IDs returned by the package, running "
                     "for the full timeout.")
            pid_check = False

        # Check in the options if the user toggled the timeout enforce. If so,
        # we need to override pid_check and disable process monitor.
        if self.config.enforce_timeout:
            log.info("Enabled timeout enforce, running for the full timeout.")
            pid_check = False

        time_counter = 0

        kernel_analysis = self.config.get_options().get(
            "kernel_analysis", False)
        # Any present option value (including a string) enables kernel analysis.
        if kernel_analysis != False:
            kernel_analysis = True

        while True:
            time_counter += 1
            if time_counter == int(self.config.timeout):
                log.info("Analysis timeout hit, terminating analysis.")
                break

            # If the process lock is locked, it means that something is
            # operating on the list of monitored processes. Therefore we
            # cannot proceed with the checks until the lock is released.
            if PROCESS_LOCK.locked():
                KERNEL32.Sleep(1000)
                continue

            try:
                # If the process monitor is enabled we start checking whether
                # the monitored processes are still alive.
                if pid_check:
                    if not kernel_analysis:
                        for pid in PROCESS_LIST:
                            if not Process(pid=pid).is_alive():
                                log.info("Process with pid %s has terminated", pid)
                                PROCESS_LIST.remove(pid)

                    # If none of the monitored processes are still alive, we
                    # can terminate the analysis (after a 15 second window
                    # since the last injection).
                    if not PROCESS_LIST and (
                            not LASTINJECT_TIME or
                            (datetime.now() >= (LASTINJECT_TIME + timedelta(seconds=15)))):
                        log.info("Process list is empty, "
                                 "terminating analysis.")
                        break

                # Update the list of monitored processes available to the
                # analysis package. It could be used for internal
                # operations within the module.
                pack.set_pids(PROCESS_LIST)

                try:
                    # The analysis packages are provided with a function that
                    # is executed at every loop's iteration. If such function
                    # returns False, it means that it requested the analysis
                    # to be terminate.
                    if not pack.check():
                        log.info("The analysis package requested the "
                                 "termination of the analysis.")
                        break

                # If the check() function of the package raised some exception
                # we don't care, we can still proceed with the analysis but we
                # throw a warning.
                except Exception as e:
                    log.warning(
                        "The package \"%s\" check function raised "
                        "an exception: %s", package_name, e)
            finally:
                # Zzz.
                KERNEL32.Sleep(1000)

        # Create the shutdown mutex.
        KERNEL32.CreateMutexA(None, False, SHUTDOWN_MUTEX)
        # since the various processes poll for the existence of the mutex, sleep
        # for a second to ensure they see it before they're terminated
        KERNEL32.Sleep(1000)

        try:
            # Before shutting down the analysis, the package can perform some
            # final operations through the finish() function.
            pack.finish()
        except Exception as e:
            log.warning(
                "The package \"%s\" finish function raised an "
                "exception: %s", package_name, e)

        # Terminate the Auxiliary modules.
        for aux in aux_enabled:
            try:
                aux.stop()
            except (NotImplementedError, AttributeError):
                continue
            except Exception as e:
                log.warning("Cannot terminate auxiliary module %s: %s",
                            aux.__class__.__name__, e)

        if self.config.terminate_processes:
            # Try to terminate remaining active processes. We do this to make sure
            # that we clean up remaining open handles (sockets, files, etc.).
            log.info("Terminating remaining processes before shutdown.")
            if not kernel_analysis:
                for pid in PROCESS_LIST:
                    proc = Process(pid=pid)
                    if proc.is_alive():
                        try:
                            if not proc.is_critical():
                                proc.terminate()
                            else:
                                # Critical processes are only asked to flush.
                                proc.set_terminate_event()
                                log.info(
                                    "Not terminating critical process with pid %d.",
                                    proc.pid)
                        except:
                            continue

        # Run the finish callback of every available Auxiliary module.
        for aux in aux_avail:
            try:
                aux.finish()
            except (NotImplementedError, AttributeError):
                continue
            except Exception as e:
                log.warning(
                    "Exception running finish callback of auxiliary "
                    "module %s: %s", aux.__class__.__name__, e)

        # Let's invoke the completion procedure.
        self.complete()

        return True
def inject(self, dll=None, interest=None, nosleepskip=False):
    """Cuckoo DLL injection.
    @param dll: Cuckoo DLL path.
    @param interest: path to file of interest, handed to cuckoomon config
    @param nosleepskip: when True, write force-sleepskip=0 to the config.
    @return: True on (queued) injection success, False otherwise.
    """
    if not self.pid:
        log.warning("No valid pid specified, injection aborted")
        return False

    thread_id = 0
    if self.thread_id:
        thread_id = self.thread_id

    if not self.is_alive():
        log.warning("The process with pid %s is not alive, "
                    "injection aborted", self.pid)
        return False

    is_64bit = self.is_64bit()

    # Pick the default monitor DLL for the target bitness, then randomize
    # its on-disk name.
    if not dll:
        if is_64bit:
            dll = "cuckoomon_x64.dll"
        else:
            dll = "cuckoomon.dll"
    dll = randomize_bin(os.path.join("dll", dll), "dll")

    if not dll or not os.path.exists(dll):
        log.warning("No valid DLL specified to be injected in process "
                    "with pid %d, injection aborted.", self.pid)
        return False

    # Write the per-process cuckoomon configuration the injected DLL reads.
    config_path = "C:\\%s.ini" % self.pid
    with open(config_path, "w") as config:
        cfg = Config("analysis.conf")
        cfgoptions = cfg.get_options()

        # start the logserver for this monitored process
        self.logserver = LogServer(cfg.ip, cfg.port, self.logserver_path)

        firstproc = Process.first_process

        config.write("host-ip={0}\n".format(cfg.ip))
        config.write("host-port={0}\n".format(cfg.port))
        config.write("pipe={0}\n".format(PIPE))
        config.write("logserver={0}\n".format(self.logserver_path))
        config.write("results={0}\n".format(PATHS["root"]))
        config.write("analyzer={0}\n".format(os.getcwd()))
        config.write("first-process={0}\n".format("1" if firstproc else "0"))
        config.write("startup-time={0}\n".format(Process.startup_time))
        config.write("file-of-interest={0}\n".format(interest))
        config.write("shutdown-mutex={0}\n".format(SHUTDOWN_MUTEX))
        config.write("terminate-event={0}{1}\n".format(TERMINATE_EVENT, self.pid))
        if nosleepskip:
            config.write("force-sleepskip=0\n")
        elif "force-sleepskip" in cfgoptions:
            config.write("force-sleepskip={0}\n".format(cfgoptions["force-sleepskip"]))
        if "full-logs" in cfgoptions:
            config.write("full-logs={0}\n".format(cfgoptions["full-logs"]))
        if "no-stealth" in cfgoptions:
            config.write("no-stealth={0}\n".format(cfgoptions["no-stealth"]))
        if "norefer" not in cfgoptions:
            config.write("referrer={0}\n".format(get_referrer_url(interest)))

    if firstproc:
        Process.first_process = False

    if thread_id or self.suspended:
        log.debug("Using QueueUserAPC injection.")
    else:
        log.debug("Using CreateRemoteThread injection.")

    orig_bin_name = ""
    bit_str = ""
    if is_64bit:
        orig_bin_name = "loader_x64.exe"
        bit_str = "64-bit"
    else:
        orig_bin_name = "loader.exe"
        bit_str = "32-bit"

    bin_name = randomize_bin(os.path.join("bin", orig_bin_name), "exe")

    if os.path.exists(bin_name):
        ret = subprocess.call([bin_name, "inject", str(self.pid), str(thread_id), dll])
        if ret != 0:
            if ret == 1:
                log.info("Injected into suspended %s process with pid %d",
                         bit_str, self.pid)
                # FIX: previously this branch fell through without a return,
                # so a successful injection into a suspended process yielded
                # None (falsy) and callers checking the result saw a failure.
                return True
            else:
                log.error("Unable to inject into %s process with pid %d, error: %d",
                          bit_str, self.pid, ret)
                return False
        else:
            return True
    else:
        log.error("Please place the %s binary from cuckoomon into analyzer/windows/bin in order to analyze %s binaries.",
                  os.path.basename(bin_name), bit_str)
        return False
def kernel_analyze(self):
    """zer0m0n kernel analysis.

    Installs the zer0m0n minifilter driver under randomized names, starts
    the log dispatcher process, and hands the relevant PIDs and the analyzer
    path to the driver via DeviceIoControl.

    @return: False if driver files are missing or the dispatcher fails to
             start, True otherwise (even when the driver handle could not
             be opened -- only a warning is logged in that case).
    """
    log.info("Starting kernel analysis")
    log.info("Installing driver")
    # Pick the driver matching the OS bitness.
    if is_os_64bit():
        sys_file = os.path.join(os.getcwd(), "dll", "zer0m0n_x64.sys")
    else:
        sys_file = os.path.join(os.getcwd(), "dll", "zer0m0n.sys")
    exe_file = os.path.join(os.getcwd(), "dll", "logs_dispatcher.exe")
    if not os.path.isfile(sys_file) or not os.path.isfile(exe_file):
        log.warning(
            "No valid zer0m0n files to be used for process with pid %d, injection aborted",
            self.pid)
        return False

    # One random 6-char token names the service, driver and dispatcher copy.
    exe_name = service_name = driver_name = random_string(6)

    # INF describing the minifilter service, filled with the randomized names.
    # NOTE(review): the ClassGuid line is a plain (non-f) string, so "{{...}}"
    # is written literally as double braces into the INF -- verify this is the
    # intended output and not a leftover from a .format()-based template.
    inf_data = (
        "[Version]\r\n"
        'Signature = "$Windows NT$"\r\n'
        'Class = "ActivityMonitor"\r\n'
        "ClassGuid = {{b86dff51-a31e-4bac-b3cf-e8cfe75c9fc2}}\r\n"
        "Provider = %Prov%\r\n"
        "DriverVer = 22/01/2014,1.0.0.0\r\n"
        "CatalogFile = %DriverName%.cat\r\n"
        "[DestinationDirs]\r\n"
        "DefaultDestDir = 12\r\n"
        "MiniFilter.DriverFiles = 12\r\n"
        "[DefaultInstall]\r\n"
        "OptionDesc = %ServiceDescription%\r\n"
        "CopyFiles = MiniFilter.DriverFiles\r\n"
        "[DefaultInstall.Services]\r\n"
        "AddService = %ServiceName%,,MiniFilter.Service\r\n"
        "[DefaultUninstall]\r\n"
        "DelFiles = MiniFilter.DriverFiles\r\n"
        "[DefaultUninstall.Services]\r\n"
        "DelService = %ServiceName%,0x200\r\n"
        "[MiniFilter.Service]\r\n"
        "DisplayName = %ServiceName%\r\n"
        "Description = %ServiceDescription%\r\n"
        "ServiceBinary = %12%\\%DriverName%.sys\r\n"
        'Dependencies = "FltMgr"\r\n'
        "ServiceType = 2\r\n"
        "StartType = 3\r\n"
        "ErrorControl = 1\r\n"
        'LoadOrderGroup = "FSFilter Activity Monitor"\r\n'
        "AddReg = MiniFilter.AddRegistry\r\n"
        "[MiniFilter.AddRegistry]\r\n"
        'HKR,,"DebugFlags",0x00010001 ,0x0\r\n'
        'HKR,"Instances","DefaultInstance",0x00000000,%DefaultInstance%\r\n'
        'HKR,"Instances\\"%Instance1.Name%,"Altitude",0x00000000,%Instance1.Altitude%\r\n'
        'HKR,"Instances\\"%Instance1.Name%,"Flags",0x00010001,%Instance1.Flags%\r\n'
        "[MiniFilter.DriverFiles]\r\n"
        "%DriverName%.sys\r\n"
        "[SourceDisksFiles]\r\n"
        f"{driver_name}.sys = 1,,\r\n"
        "[SourceDisksNames]\r\n"
        "1 = %DiskId1%,,,\r\n"
        "[Strings]\r\n"
        f'Prov = "{random_string(8)}"\r\n'
        f'ServiceDescription = "{random_string(12)}"\r\n'
        f'ServiceName = "{service_name}"\r\n'
        f'DriverName = "{driver_name}"\r\n'
        f'DiskId1 = "{service_name} Device Installation Disk"\r\n'
        f'DefaultInstance = "{service_name} Instance"\r\n'
        f'Instance1.Name = "{service_name} Instance"\r\n'
        'Instance1.Altitude = "370050"\r\n'
        "Instance1.Flags = 0x0")

    # Copy driver and dispatcher under the randomized names.
    new_inf = os.path.join(os.getcwd(), "dll", f"{service_name}.inf")
    new_sys = os.path.join(os.getcwd(), "dll", f"{driver_name}.sys")
    copy(sys_file, new_sys)
    new_exe = os.path.join(os.getcwd(), "dll", f"{exe_name}.exe")
    copy(exe_file, new_exe)
    log.info("[-] Driver name : %s", new_sys)
    log.info("[-] Inf name : %s", new_inf)
    log.info("[-] Application name : %s", new_exe)
    log.info("[-] Service : %s", service_name)

    with open(new_inf, "w") as fh:
        fh.write(inf_data)

    # Disable WOW64 filesystem redirection so the 64-bit driver/system
    # binaries are reached from this (possibly 32-bit) process.
    os_is_64bit = is_os_64bit()
    if os_is_64bit:
        wow64 = c_ulong(0)
        KERNEL32.Wow64DisableWow64FsRedirection(byref(wow64))

    # Install the INF and start the minifilter service.
    os.system(
        f'cmd /c "rundll32 setupapi.dll, InstallHinfSection DefaultInstall 132 {new_inf}"'
    )
    os.system(f"net start {service_name}")

    # Launch the log dispatcher in a new console.
    si = STARTUPINFO()
    si.cb = sizeof(si)
    pi = PROCESS_INFORMATION()
    cr = CREATE_NEW_CONSOLE
    ldp = KERNEL32.CreateProcessW(new_exe, None, None, None, None, cr, None,
                                  os.getenv("TEMP"), byref(si), byref(pi))
    if not ldp:
        # Restore redirection before bailing out.
        if os_is_64bit:
            KERNEL32.Wow64RevertWow64FsRedirection(wow64)
        log.error("Failed starting %s.exe", exe_name)
        return False

    # Minimal per-process config consumed by the kernel-side components.
    config_path = os.path.join(os.getenv("TEMP"), f"{self.pid}.ini")
    cfg = Config("analysis.conf")
    with open(config_path, "w") as config:
        config.write(f"host-ip={cfg.ip}\n")
        config.write(f"host-port={cfg.port}\n")
        config.write(f"pipe={PIPE}\n")

    log.info("Sending startup information")
    hFile = KERNEL32.CreateFileW(PATH_KERNEL_DRIVER, GENERIC_READ | GENERIC_WRITE,
                                 0, None, OPEN_EXISTING, 0, None)
    if os_is_64bit:
        KERNEL32.Wow64RevertWow64FsRedirection(wow64)
    if hFile:
        p = Process(pid=os.getpid())
        ppid = p.get_parent_pid()
        pid_vboxservice = 0
        pid_vboxtray = 0

        # get pid of VBoxService.exe and VBoxTray.exe via a Toolhelp snapshot
        # so the driver can exclude them.
        proc_info = PROCESSENTRY32()
        proc_info.dwSize = sizeof(PROCESSENTRY32)
        snapshot = KERNEL32.CreateToolhelp32Snapshot(TH32CS_SNAPPROCESS, 0)
        flag = KERNEL32.Process32First(snapshot, byref(proc_info))
        while flag:
            # NOTE(review): under Python 3, sz_exeFile is likely bytes while
            # these literals are str -- the comparisons may never match;
            # confirm the PROCESSENTRY32 field type in the project's defines.
            if proc_info.sz_exeFile == "VBoxService.exe":
                log.info("VBoxService.exe found!")
                pid_vboxservice = proc_info.th32ProcessID
            elif proc_info.sz_exeFile == "VBoxTray.exe":
                pid_vboxtray = proc_info.th32ProcessID
                log.info("VBoxTray.exe found!")
            flag = KERNEL32.Process32Next(snapshot, byref(proc_info))

        # Hand the interesting PIDs and the analyzer path to the driver.
        bytes_returned = c_ulong(0)
        msg = f"{self.pid}_{ppid}_{os.getpid()}_{pi.dwProcessId}_{pid_vboxservice}_{pid_vboxtray}\0"
        KERNEL32.DeviceIoControl(hFile, IOCTL_PID, msg, len(msg), None, 0,
                                 byref(bytes_returned), None)
        msg = f"{os.getcwd()}\0"
        KERNEL32.DeviceIoControl(hFile, IOCTL_CUCKOO_PATH, msg, len(msg), None, 0,
                                 byref(bytes_returned), None)
    else:
        log.warning("Failed to access kernel driver")

    return True