def start(self, path):
    """Launch the DLL sample through rundll32.exe.
    @param path: path to the DLL under analysis.
    @return: pid of the rundll32 process, or None for a "free" run.
    """
    free = self.options.get("free", False)
    function = self.options.get("function", "DllMain")
    arguments = self.options.get("arguments", None)

    # A "free" run executes unmonitored, so no need to create the
    # process suspended.
    suspended = not free

    # rundll32 expects "<dll path>,<export>" plus optional extra args.
    args = "{0},{1}".format(path, function)
    if arguments:
        args += " {0}".format(arguments)

    p = Process()
    started = p.execute(path="C:\\WINDOWS\\system32\\rundll32.exe",
                        args=args, suspended=suspended)
    if not started:
        raise CuckooPackageError("Unable to execute rundll32, "
                                 "analysis aborted")

    if free or not suspended:
        return None

    # Monitored run: inject our DLL, then let the process continue.
    p.inject()
    p.resume()
    return p.pid
def _handle_dumpreqs(self, data):
    """Process a DUMPREQS notification: dump every tracked pending
    memory region for the given process.
    @param data: the target pid as a decimal string.
    """
    if not data.isdigit():
        log.warning(
            "Received DUMPREQS command with an incorrect argument %r.",
            data)
        return
    pid = int(data)

    if pid not in self.tracked:
        log.warning(
            "Received DUMPREQS command but there are no reqs for pid %d.",
            pid)
        return

    for addr, length in self.tracked[pid].get("dumpreq", []):
        log.debug("tracked dump req (%r, %r, %r)", pid, addr, length)

        # Ignore empty or zero-length requests.
        if not addr or not length:
            continue
        Process(pid=pid).dump_memory_block(int(addr), int(length))
def start(self, path):
    """Open the analysis target with Internet Explorer.
    @param path: target file/URL to open.
    @return: pid of the browser process, or None for a "free" run.
    """
    free = self.options.get("free", False)
    dll = self.options.get("dll", None)

    # Unmonitored ("free") runs start the browser directly.
    suspended = not free

    # Locate iexplore.exe under the Program Files directory.
    iexplore = os.path.join(os.getenv("ProgramFiles"),
                            "Internet Explorer", "iexplore.exe")

    p = Process()
    started = p.execute(path=iexplore, args="\"%s\"" % path,
                        suspended=suspended)
    if not started:
        raise CuckooPackageError("Unable to execute initial Internet "
                                 "Explorer process, analysis aborted")

    if free or not suspended:
        return None

    # Monitored run: inject the analysis DLL and resume the browser.
    p.inject(dll)
    p.resume()
    return p.pid
def start(self, path):
    """Open the document with Microsoft Office Word.
    @param path: document path.
    @return: pid of the Word process, or None for a "free" run.
    """
    word = self.get_path()
    if not word:
        raise CuckooPackageError("Unable to find any Microsoft "
                                 "Office Word executable available")

    dll = self.options.get("dll", None)
    free = self.options.get("free", False)

    # Unmonitored ("free") runs start Word directly.
    suspended = not free

    p = Process()
    started = p.execute(path=word, args="\"%s\"" % path,
                        suspended=suspended)
    if not started:
        raise CuckooPackageError("Unable to execute initial Microsoft "
                                 "Office Word process, analysis aborted")

    if free or not suspended:
        return None

    # Monitored run: inject the analysis DLL and resume Word.
    p.inject(dll)
    p.resume()
    return p.pid
def execute(self, path, args):
    """Starts an executable for analysis.
    @param path: executable path
    @param args: executable arguments
    @return: process pid
    """
    dll = self.options.get("dll")
    free = self.options.get("free")

    # In "free" mode the sample runs unmonitored, so start it directly.
    suspended = not free

    p = Process()
    started = p.execute(path=path, args=args, suspended=suspended)
    if not started:
        raise CuckooPackageError("Unable to execute the initial process, "
                                 "analysis aborted.")

    if not free and suspended:
        # Monitored run: inject the DLL, then let the process continue.
        p.inject(dll)
        p.resume()

    # Release our handle; the pid remains valid for the caller.
    p.close()
    return p.pid
def start(self, path):
    """Run the target script through PowerShell.
    @param path: script path.
    @return: pid of the PowerShell process, or None for a "free" run.
    """
    powershell = self.get_path()
    if not powershell:
        raise CuckooPackageError("Unable to find any PowerShell executable available")

    dll = self.options.get("dll", None)
    free = self.options.get("free", False)

    # Unmonitored ("free") runs start PowerShell directly.
    suspended = not free

    # Bypass the execution policy so arbitrary scripts run.
    args = "-NoProfile -ExecutionPolicy unrestricted -File \"{0}\"".format(path)

    p = Process()
    started = p.execute(path=powershell, args=args, suspended=suspended)
    if not started:
        raise CuckooPackageError("Unable to execute initial PowerShell process, analysis aborted")

    if free or not suspended:
        return None

    # Monitored run: inject the analysis DLL and resume PowerShell.
    p.inject(dll)
    p.resume()
    return p.pid
def execute(self, path, args, mode=None, maximize=False):
    """Starts an executable for analysis.
    @param path: executable path
    @param args: executable arguments
    @param mode: monitor mode - which functions to instrument
    @param maximize: whether the GUI should start maximized
    @return: process pid
    """
    dll = self.options.get("dll")
    free = self.options.get("free")
    source = self.options.get("from")

    # Setup pre-defined registry keys.
    self.init_regkeys(self.REGKEYS)

    p = Process()
    started = p.execute(path=path, args=args, dll=dll, free=free,
                        curdir=self.curdir, source=source, mode=mode,
                        maximize=maximize)
    if not started:
        raise CuckooPackageError("Unable to execute the initial process, "
                                 "analysis aborted.")
    return p.pid
def start(self, path):
    """Open the PDF with Adobe Reader.
    @param path: document path.
    @return: pid of the Reader process, or None for a "free" run.
    """
    reader = self.get_path()
    if not reader:
        raise CuckooPackageError("Unable to find any Adobe Reader "
                                 "executable available")

    dll = self.options.get("dll", None)
    free = self.options.get("free", False)

    # Unmonitored ("free") runs start Reader directly.
    suspended = not free

    p = Process()
    started = p.execute(path=reader, args="\"%s\"" % path,
                        suspended=suspended)
    if not started:
        raise CuckooPackageError("Unable to execute initial Adobe Reader "
                                 "process, analysis aborted")

    if free or not suspended:
        return None

    # Monitored run: inject the analysis DLL and resume Reader.
    p.inject(dll)
    p.resume()
    return p.pid
def start(self, path):
    """Run the target script through the Windows Script Host.
    @param path: script path.
    @return: pid of the wscript process, or None for a "free" run.
    """
    wscript = self.get_path()
    if not wscript:
        raise CuckooPackageError("Unable to find any WScript "
                                 "executable available")

    dll = self.options.get("dll", None)
    free = self.options.get("free", False)

    # Unmonitored ("free") runs start wscript directly.
    suspended = not free

    p = Process()
    started = p.execute(path=wscript, args="\"{0}\"".format(path),
                        suspended=suspended)
    if not started:
        raise CuckooPackageError("Unable to execute initial WScript "
                                 "process, analysis aborted")

    if free or not suspended:
        return None

    # Monitored run: inject the analysis DLL and resume wscript.
    p.inject(dll)
    p.resume()
    return p.pid
def start(self, path):
    """Launch the DLL sample through rundll32.exe.

    @param path: path to the DLL under analysis.
    @return: pid of the rundll32 process.
    @raise CuckooPackageError: if rundll32 cannot be started.
    """
    rundll32 = "C:\\WINDOWS\\system32\\rundll32.exe"

    # rundll32 expects "<dll path>,<export>"; fall back to DllMain when
    # no explicit export function was supplied at submission time.
    function = self.options.get("function", "DllMain")
    args = "%s,%s" % (path, function)

    p = Process()
    # Bug fix: the return value of execute() used to be ignored, so a
    # failed launch went unnoticed and we would try to inject into a
    # process that never came up. Abort the analysis instead, matching
    # the behavior of the other analysis packages.
    if not p.execute(path=rundll32, args=args, suspended=True):
        raise CuckooPackageError("Unable to execute rundll32, "
                                 "analysis aborted")

    # Skip DLL injection only when the user explicitly asked for a
    # monitoring-free run (free=yes); otherwise inject before resuming.
    if self.options.get("free") != "yes":
        p.inject()

    p.resume()
    return p.pid
def start(self, path):
    """Run the sample, optionally under the Pin dynamic binary
    instrumentation tool with the PinVMShield anti-anti-VM DLL.

    @param path: sample path.
    @return: pid of the created process, or None for a "free" run.
    @raise CuckooPackageError: if the DBI tool is missing or the
        process cannot be started.
    """
    pin = "C:\\pin\\pin.exe"
    pindll = os.path.join(os.getcwd(), "dll", "PinVMShield.dll")
    # Bug fix: os.path.join() always returns a non-empty string, so the
    # old "if not pindll" check could never fire. Verify that the DBI
    # DLL actually exists on disk instead.
    if not os.path.exists(pindll):
        raise CuckooPackageError("Unable to find any DBA available")

    free = self.options.get("free", False)
    args = self.options.get("arguments", None)
    # The "dbi" option arrives as a string from the submission options.
    isdbi = self.options.get("dbi", None) == "true"

    # Unmonitored ("free") runs start the process directly.
    suspended = not free

    p = Process()
    if isdbi:
        # Run the sample under Pin with PinVMShield attached.
        started = p.execute(path=pin,
                            args="-t \"%s\" -- \"%s\" %s" % (pindll, path, args),
                            suspended=suspended)
    else:
        started = p.execute(path=path, args=args, suspended=suspended)
    if not started:
        raise CuckooPackageError(
            "Unable to execute initial process, analysis aborted")

    if not free and suspended:
        # Monitored run: inject, resume and hand the pid back.
        p.inject()
        p.resume()
        p.close()
        return p.pid
    return None
def execute(self, path, args, interest):
    """Starts an executable for analysis.
    @param path: executable path
    @param args: executable arguments
    @param interest: file of interest, passed to the cuckoomon config
    @return: process pid
    """
    dll = self.options.get("dll")
    free = self.options.get("free")
    gw = self.options.get("setgw", None)

    # Optionally override the default gateway before launching.
    u = Utils()
    if gw:
        u.set_default_gw(gw)

    # Unmonitored ("free") runs are started directly, not suspended.
    suspended = not free

    # Normalize kernel_analysis to a boolean; any option value other
    # than the literal False default enables kernel-land monitoring.
    kernel_analysis = self.options.get("kernel_analysis", False) != False

    p = Process()
    started = p.execute(path=path, args=args, suspended=suspended,
                        kernel_analysis=kernel_analysis)
    if not started:
        raise CuckooPackageError("Unable to execute the initial process, "
                                 "analysis aborted.")

    if free:
        return None

    if not kernel_analysis:
        # User-land monitoring: inject cuckoomon into the new process.
        p.inject(dll, interest)
    p.resume()
    p.close()
    return p.pid
def debug(self, path, args, interest):
    """Starts an executable for analysis under the debugger.
    @param path: executable path
    @param args: executable arguments
    @param interest: file of interest, passed to the cuckoomon config
    @return: process pid
    """
    p = Process(options=self.options, config=self.config)
    # Always create the process suspended so the debugger can attach
    # before any sample code runs.
    if not p.execute(path=path, args=args, suspended=True,
                     kernel_analysis=False):
        raise CuckooPackageError("Unable to execute the initial process, "
                                 "analysis aborted")
    # Bug fix: the original branched on p.is_64bit() but both branches
    # made the identical debug_inject() call, so the bitness check was
    # dead code; collapse it to a single call.
    p.debug_inject(interest, childprocess=False)
    p.resume()
    p.close()
    return p.pid
def run(self):
    """Run handler for one pipe-client connection.

    Reads a single notification from the named pipe, dispatches on its
    command prefix (GETPIDS / HOOKDLLS / PROCESS / FILE_NEW / FILE_DEL /
    FILE_MOVE), writes the response back and closes the pipe handle.
    @return: operation status.
    """
    data = ""
    response = "OK"
    wait = False
    proc = None

    # Read the data submitted to the Pipe Server.
    while True:
        bytes_read = c_int(0)

        buf = create_string_buffer(BUFSIZE)
        success = KERNEL32.ReadFile(self.h_pipe, buf, sizeof(buf),
                                    byref(bytes_read), None)

        data += buf.value

        # Keep reading while the pipe reports a partial message.
        if not success and KERNEL32.GetLastError() == ERROR_MORE_DATA:
            continue
        #elif not success or bytes_read.value == 0:
        #    if KERNEL32.GetLastError() == ERROR_BROKEN_PIPE:
        #        pass

        break

    if data:
        command = data.strip()

        # Parse the prefix for the received notification.
        # In case of GETPIDS we're gonna return the current process ID
        # and the process ID of our parent process (agent.py).
        if command == "GETPIDS":
            response = struct.pack("II", PID, PPID)

        # When analyzing we don't want to hook all functions, as we're
        # having some stability issues with regards to webbrowsers.
        elif command == "HOOKDLLS":
            is_url = Config(cfg="analysis.conf").category != "file"

            url_dlls = "ntdll", "kernel32"

            def hookdll_encode(names):
                # We have to encode each dll name as unicode string
                # with length 16.
                names = [
                    name + "\x00" * (16 - len(name)) for name in names
                ]
                f = lambda s: "".join(ch + "\x00" for ch in s)
                return "".join(f(name) for name in names)

            # If this sample is not a URL, then we don't want to limit
            # any API hooks (at least for now), so we write a null-byte
            # which indicates that all DLLs should be hooked.
            if not is_url:
                response = "\x00"
            else:
                response = hookdll_encode(url_dlls)

        # In case of PID, the client is trying to notify the creation of
        # a new process to be injected and monitored.
        elif command.startswith("PROCESS:"):
            # We acquire the process lock in order to prevent the analyzer
            # to terminate the analysis while we are operating on the new
            # process.
            PROCESS_LOCK.acquire()

            # Set the current DLL to the default one provided
            # at submission.
            dll = DEFAULT_DLL

            # We parse the process ID. Accepted payloads are "pid" or
            # "pid,tid" (or "pid,<dll path>" as a fallback).
            data = command[8:]
            process_id = thread_id = None
            if not "," in data:
                if data.isdigit():
                    process_id = int(data)
            elif len(data.split(",")) == 2:
                process_id, param = data.split(",")
                thread_id = None
                if process_id.isdigit():
                    process_id = int(process_id)
                else:
                    process_id = None

                if param.isdigit():
                    thread_id = int(param)
                else:
                    # XXX: Expect a new DLL as a message parameter?
                    if isinstance(param, str):
                        dll = param

            if process_id:
                if process_id not in (PID, PPID):
                    # We inject the process only if it's not being
                    # monitored already, otherwise we would generated
                    # polluted logs.
                    if process_id not in PROCESS_LIST:
                        # Open the process and inject the DLL.
                        # Hope it enjoys it.
                        proc = Process(pid=process_id,
                                       thread_id=thread_id)

                        filepath = proc.get_filepath()
                        filename = os.path.basename(filepath)

                        log.info("Announced process name: %s", filename)

                        if not protected_filename(filename):
                            # Add the new process ID to the list of
                            # monitored processes.
                            add_pids(process_id)

                            # If we have both pid and tid, then we can use
                            # apc to inject
                            if process_id and thread_id:
                                proc.inject(dll, apc=True)
                            else:
                                # we inject using CreateRemoteThread, this
                                # needs the waiting in order to make sure
                                # no race conditions occur
                                proc.inject(dll)
                                wait = True

                            log.info(
                                "Successfully injected process with "
                                "pid %s", proc.pid)
                else:
                    log.warning("Received request to inject Cuckoo "
                                "processes, skip")

            # Once we're done operating on the processes list, we release
            # the lock.
            PROCESS_LOCK.release()

        # In case of FILE_NEW, the client is trying to notify the creation
        # of a new file.
        elif command.startswith("FILE_NEW:"):
            # We extract the file path.
            file_path = command[9:].decode("utf-8")
            # We add the file to the list.
            add_file(file_path)

        # In case of FILE_DEL, the client is trying to notify an ongoing
        # deletion of an existing file, therefore we need to dump it
        # straight away.
        elif command.startswith("FILE_DEL:"):
            # Extract the file path.
            file_path = command[9:].decode("utf-8")
            # Dump the file straight away.
            del_file(file_path)

        elif command.startswith("FILE_MOVE:"):
            # syntax = FILE_MOVE:old_file_path::new_file_path
            if "::" in command[10:]:
                old_fname, new_fname = command[10:].split("::", 1)
                move_file(old_fname.decode("utf-8"),
                          new_fname.decode("utf-8"))

    KERNEL32.WriteFile(self.h_pipe, create_string_buffer(response),
                       len(response), byref(bytes_read), None)

    KERNEL32.CloseHandle(self.h_pipe)

    # We wait until cuckoomon reports back.
    if wait:
        proc.wait()

    if proc:
        proc.close()

    return True
def run(self):
    """Run analysis.

    Selects and imports the analysis package, starts the auxiliary
    modules, launches the sample, monitors the spawned processes until
    timeout or termination, then shuts everything down.
    @return: operation status.
    """
    self.prepare()

    log.info("Starting analyzer from: %s", os.getcwd())
    log.info("Storing results at: %s", PATHS["root"])
    log.info("Pipe server name: %s", PIPE)

    # If no analysis package was specified at submission, we try to select
    # one automatically.
    if not self.config.package:
        log.info("No analysis package specified, trying to detect "
                 "it automagically")
        # If the analysis target is a file, we choose the package according
        # to the file format.
        if self.config.category == "file":
            package = choose_package(self.config.file_type,
                                     self.config.file_name)
        # If it's an URL, we'll just use the default Internet Explorer
        # package.
        else:
            package = "ie"

        # If we weren't able to automatically determine the proper package,
        # we need to abort the analysis.
        if not package:
            raise CuckooError("No valid package available for file "
                              "type: {0}".format(self.config.file_type))

        log.info("Automatically selected analysis package \"%s\"",
                 package)
    # Otherwise just select the specified package.
    else:
        package = self.config.package

    # Generate the package path.
    package_name = "modules.packages.%s" % package

    # Try to import the analysis package.
    try:
        __import__(package_name, globals(), locals(), ["dummy"], -1)
    # If it fails, we need to abort the analysis.
    except ImportError:
        raise CuckooError("Unable to import package \"{0}\", does "
                          "not exist.".format(package_name))

    # Initialize the package parent abstract.
    Package()

    # Enumerate the abstract's subclasses.
    try:
        package_class = Package.__subclasses__()[0]
    except IndexError as e:
        raise CuckooError("Unable to select package class "
                          "(package={0}): {1}".format(package_name, e))

    # Initialize the analysis package.
    pack = package_class(self.get_options())

    # Initialize Auxiliary modules
    Auxiliary()
    prefix = auxiliary.__name__ + "."
    for loader, name, ispkg in pkgutil.iter_modules(
            auxiliary.__path__, prefix):
        if ispkg:
            continue

        # Import the auxiliary module.
        try:
            __import__(name, globals(), locals(), ["dummy"], -1)
        except ImportError as e:
            log.warning(
                "Unable to import the auxiliary module "
                "\"%s\": %s", name, e)

    # Walk through the available auxiliary modules.
    aux_enabled = []
    for module in Auxiliary.__subclasses__():
        # Try to start the auxiliary module.
        try:
            aux = module()
            aux.start()
        except (NotImplementedError, AttributeError):
            log.warning("Auxiliary module %s was not implemented",
                        aux.__class__.__name__)
            continue
        except Exception as e:
            log.warning("Cannot execute auxiliary module %s: %s",
                        aux.__class__.__name__, e)
            continue
        finally:
            # NOTE(review): this runs even when start() failed, so the
            # "Started" message can be logged for failing modules too —
            # confirm whether "else:" was intended instead of "finally:".
            log.info("Started auxiliary module %s",
                     aux.__class__.__name__)
        aux_enabled.append(aux)

    # Start analysis package. If for any reason, the execution of the
    # analysis package fails, we have to abort the analysis.
    try:
        pids = pack.start(self.target)
    except NotImplementedError:
        raise CuckooError("The package \"{0}\" doesn't contain a run "
                          "function.".format(package_name))
    except CuckooPackageError as e:
        raise CuckooError("The package \"{0}\" start function raised an "
                          "error: {1}".format(package_name, e))
    except Exception as e:
        raise CuckooError("The package \"{0}\" start function encountered "
                          "an unhandled exception: "
                          "{1}".format(package_name, e))

    # If the analysis package returned a list of process IDs, we add them
    # to the list of monitored processes and enable the process monitor.
    if pids:
        add_pids(pids)
        pid_check = True

    # If the package didn't return any process ID (for example in the case
    # where the package isn't enabling any behavioral analysis), we don't
    # enable the process monitor.
    else:
        log.info("No process IDs returned by the package, running "
                 "for the full timeout")
        pid_check = False

    # Check in the options if the user toggled the timeout enforce. If so,
    # we need to override pid_check and disable process monitor.
    if self.config.enforce_timeout:
        log.info("Enabled timeout enforce, running for the full timeout")
        pid_check = False

    time_counter = 0

    # Main monitoring loop: one iteration per second until timeout or
    # until all monitored processes have terminated.
    while True:
        time_counter += 1
        if time_counter == int(self.config.timeout):
            log.info("Analysis timeout hit, terminating analysis")
            break

        # If the process lock is locked, it means that something is
        # operating on the list of monitored processes. Therefore we cannot
        # proceed with the checks until the lock is released.
        if PROCESS_LOCK.locked():
            KERNEL32.Sleep(1000)
            continue

        try:
            # If the process monitor is enabled we start checking whether
            # the monitored processes are still alive.
            if pid_check:
                for pid in PROCESS_LIST:
                    if not Process(pid=pid).is_alive():
                        log.info("Process with pid %s has terminated", pid)
                        PROCESS_LIST.remove(pid)

                # If none of the monitored processes are still alive, we
                # can terminate the analysis.
                if len(PROCESS_LIST) == 0:
                    log.info("Process list is empty, "
                             "terminating analysis...")
                    break

                # Update the list of monitored processes available to the
                # analysis package. It could be used for internal
                # operations within the module.
                pack.set_pids(PROCESS_LIST)

            try:
                # The analysis packages are provided with a function that
                # is executed at every loop's iteration. If such function
                # returns False, it means that it requested the analysis
                # to be terminate.
                if not pack.check():
                    log.info("The analysis package requested the "
                             "termination of the analysis...")
                    break

            # If the check() function of the package raised some exception
            # we don't care, we can still proceed with the analysis but we
            # throw a warning.
            except Exception as e:
                log.warning(
                    "The package \"%s\" check function raised "
                    "an exception: %s", package_name, e)
        finally:
            # Zzz.
            KERNEL32.Sleep(1000)

    try:
        # Before shutting down the analysis, the package can perform some
        # final operations through the finish() function.
        pack.finish()
    except Exception as e:
        log.warning(
            "The package \"%s\" finish function raised an "
            "exception: %s", package_name, e)

    # Terminate the Auxiliary modules.
    for aux in aux_enabled:
        try:
            aux.stop()
        except (NotImplementedError, AttributeError):
            continue
        except Exception as e:
            log.warning("Cannot terminate auxiliary module %s: %s",
                        aux.__class__.__name__, e)

    # Try to terminate remaining active processes. We do this to make sure
    # that we clean up remaining open handles (sockets, files, etc.).
    log.info("Terminating remaining processes before shutdown...")

    for pid in PROCESS_LIST:
        proc = Process(pid=pid)
        if proc.is_alive():
            try:
                proc.terminate()
            except:
                continue

    # Let's invoke the completion procedure.
    self.complete()

    return True
def run(self):
    """Run analysis.

    Variant of the analyzer run loop that has been locally patched
    ("new code 2017_05_19"): instead of starting the sample through the
    analysis package, it copies the sample to C:\\dbg, sniffs the PE
    machine field to pick a 32/64-bit helper binary, and launches that
    helper directly via Popen.
    @return: operation status.
    """
    self.prepare()
    self.path = os.getcwd()

    log.debug("Starting analyzer from: %s", self.path)
    log.debug("Pipe server name: %s", self.config.pipe)
    log.debug("Log pipe server name: %s", self.config.logpipe)

    # If no analysis package was specified at submission, we try to select
    # one automatically.
    if not self.config.package:
        log.debug("No analysis package specified, trying to detect "
                  "it automagically.")

        # If the analysis target is a file, we choose the package according
        # to the file format.
        if self.config.category == "file":
            package = choose_package(self.config.file_type,
                                     self.config.file_name,
                                     self.config.pe_exports.split(","))
        # If it's an URL, we'll just use the default Internet Explorer
        # package.
        else:
            package = "ie"

        # If we weren't able to automatically determine the proper package,
        # we need to abort the analysis.
        if not package:
            raise CuckooError("No valid package available for file "
                              "type: {0}".format(self.config.file_type))

        log.info("Automatically selected analysis package \"%s\"",
                 package)
    # Otherwise just select the specified package.
    else:
        package = self.config.package

    # Generate the package path.
    package_name = "modules.packages.%s" % package

    # Try to import the analysis package.
    try:
        __import__(package_name, globals(), locals(), ["dummy"], -1)
    # If it fails, we need to abort the analysis.
    except ImportError:
        raise CuckooError("Unable to import package \"{0}\", does "
                          "not exist.".format(package_name))

    # Initialize the package parent abstract.
    Package()

    # Enumerate the abstract subclasses.
    try:
        package_class = Package.__subclasses__()[0]
    except IndexError as e:
        raise CuckooError("Unable to select package class "
                          "(package={0}): {1}".format(package_name, e))

    # Initialize the analysis package.
    self.package = package_class(self.config.options, analyzer=self)

    # Move the sample to the current working directory as provided by the
    # task - one is able to override the starting path of the sample.
    # E.g., for some samples it might be useful to run from %APPDATA%
    # instead of %TEMP%.
    if self.config.category == "file":
        self.target = self.package.move_curdir(self.target)

    # Initialize Auxiliary modules
    Auxiliary()
    prefix = auxiliary.__name__ + "."
    for loader, name, ispkg in pkgutil.iter_modules(
            auxiliary.__path__, prefix):
        if ispkg:
            continue

        # Import the auxiliary module.
        try:
            __import__(name, globals(), locals(), ["dummy"], -1)
        except ImportError as e:
            log.warning(
                "Unable to import the auxiliary module "
                "\"%s\": %s", name, e)

    # Walk through the available auxiliary modules.
    aux_enabled, aux_avail = [], []
    for module in Auxiliary.__subclasses__():
        # Try to start the auxiliary module.
        try:
            aux = module(options=self.config.options, analyzer=self)
            aux_avail.append(aux)
            aux.start()
        except (NotImplementedError, AttributeError):
            log.warning("Auxiliary module %s was not implemented",
                        module.__name__)
        except CuckooDisableModule:
            continue
        except Exception as e:
            log.warning("Cannot execute auxiliary module %s: %s",
                        module.__name__, e)
        else:
            log.debug("Started auxiliary module %s", module.__name__)
            aux_enabled.append(aux)

    ###########################################################
    #new code 2017_05_19
    # Copy the sample aside and create an empty marker log for the
    # helper tooling in C:\dbg.
    shutil.copyfile(self.target, 'C:\\dbg\\sample.exe')
    with open('log.txt', 'w') as creation:
        creation.write('log start')

    # Parse the PE header to decide the sample's bitness: read the
    # e_lfanew offset at 0x3C, then the COFF machine field; 332
    # (0x14c, IMAGE_FILE_MACHINE_I386) means 32-bit.
    with open(self.target, 'rb') as sample:
        s = sample.read(2)
        if s != 'MZ':
            is32bit = False
        else:
            sample.seek(60)
            s = sample.read(4)
            header_offset = struct.unpack("<L", s)[0]
            sample.seek(header_offset + 4)
            s = sample.read(2)
            machine = struct.unpack('<H', s)[0]
            is32bit = (machine == 332)

    # Replace the target with the matching pre-deployed helper binary.
    if is32bit:
        self.target = 'C:\\dbg\\Helper32.exe'
    else:
        self.target = 'C:\\dbg\\Helper64.exe'

    # NOTE(review): if Popen() raises, "pids" is never assigned and the
    # "if pids:" check below raises NameError — confirm this is the
    # intended failure mode.
    try:
        proc = Popen(self.target)
        pids = proc.pid
    except Exception as e:
        log.error('custom : fail to open process %s : %s',
                  self.target, e)
    ###########################################################
    #origin
    # Start analysis package. If for any reason, the execution of the
    # analysis package fails, we have to abort the analysis.
    # try:
    #     pids = self.package.start(self.target)
    # except NotImplementedError:
    #     raise CuckooError(
    #         "The package \"%s\" doesn't contain a run function." %
    #         package_name
    #     )
    # except CuckooPackageError as e:
    #     raise CuckooError(
    #         "The package \"%s\" start function raised an error: %s" %
    #         (package_name, e)
    #     )
    # except Exception as e:
    #     raise CuckooError(
    #         "The package \"%s\" start function encountered an unhandled "
    #         "exception: %s" % (package_name, e)
    #     )
    ###########################################################

    # If the analysis package returned a list of process identifiers, we
    # add them to the list of monitored processes and enable the process monitor.
    if pids:
        self.process_list.add_pids(pids)
        pid_check = True

    # If the package didn't return any process ID (for example in the case
    # where the package isn't enabling any behavioral analysis), we don't
    # enable the process monitor.
    else:
        log.info("No process IDs returned by the package, running "
                 "for the full timeout.")
        pid_check = False

    # Check in the options if the user toggled the timeout enforce. If so,
    # we need to override pid_check and disable process monitor.
    if self.config.enforce_timeout:
        log.info("Enabled timeout enforce, running for the full timeout.")
        pid_check = False

    # Main monitoring loop: one iteration per second until timeout,
    # termination of all processes, or an external stop request.
    while self.do_run:
        self.time_counter += 1
        if self.time_counter == int(self.config.timeout):
            log.info("Analysis timeout hit, terminating analysis.")
            break

        # If the process lock is locked, it means that something is
        # operating on the list of monitored processes. Therefore we
        # cannot proceed with the checks until the lock is released.
        if self.process_lock.locked():
            KERNEL32.Sleep(1000)
            continue

        try:
            # If the process monitor is enabled we start checking whether
            # the monitored processes are still alive.
            if pid_check:
                for pid in self.process_list.pids:
                    if not Process(pid=pid).is_alive():
                        log.info("Process with pid %s has terminated", pid)
                        self.process_list.remove_pid(pid)

                # If none of the monitored processes are still alive, we
                # can terminate the analysis.
                if not self.process_list.pids:
                    log.info("Process list is empty, "
                             "terminating analysis.")
                    break

                # Update the list of monitored processes available to the
                # analysis package. It could be used for internal
                # operations within the module.
                self.package.set_pids(self.process_list.pids)

            try:
                # The analysis packages are provided with a function that
                # is executed at every loop's iteration. If such function
                # returns False, it means that it requested the analysis
                # to be terminate.
                if not self.package.check():
                    log.info("The analysis package requested the "
                             "termination of the analysis.")
                    break

            # If the check() function of the package raised some exception
            # we don't care, we can still proceed with the analysis but we
            # throw a warning.
            except Exception as e:
                log.warning(
                    "The package \"%s\" check function raised "
                    "an exception: %s", package_name, e)
        finally:
            # Zzz.
            KERNEL32.Sleep(1000)

    if not self.do_run:
        log.debug("The analyzer has been stopped on request by an "
                  "auxiliary module.")

    # Create the shutdown mutex.
    KERNEL32.CreateMutexA(None, False, SHUTDOWN_MUTEX)

    try:
        # Before shutting down the analysis, the package can perform some
        # final operations through the finish() function.
        self.package.finish()
    except Exception as e:
        log.warning(
            "The package \"%s\" finish function raised an "
            "exception: %s", package_name, e)

    try:
        # Upload files the package created to package_files in the
        # results folder.
        for path, name in self.package.package_files() or []:
            upload_to_host(path, os.path.join("package_files", name))
    except Exception as e:
        log.warning(
            "The package \"%s\" package_files function raised an "
            "exception: %s", package_name, e)

    # Terminate the Auxiliary modules.
    for aux in aux_enabled:
        try:
            aux.stop()
        except (NotImplementedError, AttributeError):
            continue
        except Exception as e:
            log.warning("Cannot terminate auxiliary module %s: %s",
                        aux.__class__.__name__, e)

    if self.config.terminate_processes:
        # Try to terminate remaining active processes.
        log.info("Terminating remaining processes before shutdown.")

        for pid in self.process_list.pids:
            proc = Process(pid=pid)
            if proc.is_alive():
                try:
                    proc.terminate()
                except:
                    continue

    ###############################################################################
    #new code
    # Give the helper a moment to flush, then collect its log (NUL
    # bytes stripped) into our own debug log.
    time.sleep(3)
    with open('C:\\dbg\\log.txt') as f_log:
        raw = f_log.read()
    data = ''.join(raw.split('\x00'))
    log.debug('logged : \n%s', data)
    ###############################################################################

    # Run the finish callback of every available Auxiliary module.
    for aux in aux_avail:
        try:
            aux.finish()
        except (NotImplementedError, AttributeError):
            continue
        except Exception as e:
            log.warning(
                "Exception running finish callback of auxiliary "
                "module %s: %s", aux.__class__.__name__, e)

    # Let's invoke the completion procedure.
    self.complete()

    return True
def run(self):
    """Run handler for one pipe-client connection.

    Reads a single notification from the named pipe and dispatches on
    its command prefix (log levels, GETPIDS, HOOKDLLS, kernel-land
    K* commands, SERVICE, RESUME, KILL, LOADED, PROCESS, FILE_*),
    then writes the response back and closes the pipe handle.
    @return: operation status.
    """
    global MONITORED_SERVICES
    global LASTINJECT_TIME
    data = ""
    response = "OK"

    # Read the data submitted to the Pipe Server.
    while True:
        bytes_read = c_int(0)

        buf = create_string_buffer(BUFSIZE)
        success = KERNEL32.ReadFile(self.h_pipe, buf, sizeof(buf),
                                    byref(bytes_read), None)

        data += buf.value

        # Keep reading while the pipe reports a partial message.
        if not success and KERNEL32.GetLastError() == ERROR_MORE_DATA:
            continue
        # elif not success or bytes_read.value == 0:
        #    if KERNEL32.GetLastError() == ERROR_BROKEN_PIPE:
        #        pass

        break

    if data:
        command = data.strip()

        # Debug, Regular, Warning, or Critical information from CuckooMon.
        if command.startswith("DEBUG:"):
            log.debug(command[6:])
        elif command.startswith("INFO:"):
            log.info(command[5:])
        elif command.startswith("WARNING:"):
            log.warning(command[8:])
        elif command.startswith("CRITICAL:"):
            log.critical(command[9:])

        # Parse the prefix for the received notification.
        # In case of GETPIDS we're gonna return the current process ID
        # and the process ID of our parent process (agent.py).
        elif command == "GETPIDS":
            response = struct.pack("II", PID, PPID)

        # When analyzing we don't want to hook all functions, as we're
        # having some stability issues with regards to webbrowsers.
        elif command == "HOOKDLLS":
            is_url = Config(cfg="analysis.conf").category != "file"

            url_dlls = "ntdll", "kernel32"

            def hookdll_encode(names):
                # We have to encode each dll name as unicode string
                # with length 16.
                names = [name + "\x00" * (16-len(name)) for name in names]
                f = lambda s: "".join(ch + "\x00" for ch in s)
                return "".join(f(name) for name in names)

            # If this sample is not a URL, then we don't want to limit
            # any API hooks (at least for now), so we write a null-byte
            # which indicates that all DLLs should be hooked.
            if not is_url:
                response = "\x00"
            else:
                response = hookdll_encode(url_dlls)

        # remove pid from process list because we received a notification
        # from kernel land
        elif command.startswith("KTERMINATE:"):
            data = command[11:]
            process_id = int(data)
            if process_id:
                if process_id in PROCESS_LIST:
                    remove_pid(process_id)

        # same than below but we don't want to inject any DLLs because
        # it's a kernel analysis
        elif command.startswith("KPROCESS:"):
            PROCESS_LOCK.acquire()
            data = command[9:]
            process_id = int(data)
            thread_id = None
            if process_id:
                if process_id not in (PID, PPID):
                    if process_id not in PROCESS_LIST:
                        proc = Process(pid=process_id,
                                       thread_id=thread_id)
                        filepath = proc.get_filepath()
                        filename = os.path.basename(filepath)

                        if not protected_filename(filename):
                            add_pid(process_id)
                            log.info("Announce process name : %s",
                                     filename)
            PROCESS_LOCK.release()

        elif command.startswith("KERROR:"):
            error_msg = command[7:]
            log.error("Error : %s", str(error_msg))

        # if a new driver has been loaded, we stop the analysis
        elif command == "KSUBVERT":
            for pid in PROCESS_LIST:
                log.info("Process with pid %s has terminated", pid)
                PROCESS_LIST.remove(pid)

        # Handle case of a service being started by a monitored process
        # Switch the service type to own process behind its back so we
        # can monitor the service more easily with less noise
        elif command.startswith("SERVICE:"):
            servname = command[8:]
            si = subprocess.STARTUPINFO()
            si.dwFlags = subprocess.STARTF_USESHOWWINDOW
            si.wShowWindow = subprocess.SW_HIDE
            subprocess.call("sc config " + servname + " type= own",
                            startupinfo=si)
            log.info("Announced starting service \"%s\"", servname)

            if not MONITORED_SERVICES:
                # Inject into services.exe so we can monitor service creation
                # if tasklist previously failed to get the services.exe PID we'll be
                # unable to inject
                if SERVICES_PID:
                    servproc = Process(pid=SERVICES_PID,
                                       suspended=False)
                    filepath = servproc.get_filepath()
                    servproc.inject(dll=DEFAULT_DLL, interest=filepath,
                                    nosleepskip=True)
                    LASTINJECT_TIME = datetime.now()
                    servproc.close()
                    KERNEL32.Sleep(1000)
                    MONITORED_SERVICES = True
                else:
                    log.error('Unable to monitor service %s' % (servname))

        # For now all we care about is bumping up our LASTINJECT_TIME to account for long delays between
        # injection and actual resume time where the DLL would have a chance to load in the new process
        # and report back to have its pid added to the list of monitored processes
        elif command.startswith("RESUME:"):
            LASTINJECT_TIME = datetime.now()

        # Handle case of malware terminating a process -- notify the target
        # ahead of time so that it can flush its log buffer
        elif command.startswith("KILL:"):
            PROCESS_LOCK.acquire()

            process_id = int(command[5:])
            if process_id not in (PID, PPID) and \
                    process_id in PROCESS_LIST:
                # only notify processes we've hooked
                event_name = TERMINATE_EVENT + str(process_id)
                event_handle = KERNEL32.OpenEventA(EVENT_MODIFY_STATE,
                                                   False, event_name)
                if not event_handle:
                    log.warning(
                        "Unable to open termination event for pid %u.",
                        process_id)
                else:
                    # make sure process is aware of the termination
                    KERNEL32.SetEvent(event_handle)
                    KERNEL32.CloseHandle(event_handle)

            PROCESS_LOCK.release()

        # Handle notification of cuckoomon loading in a process
        elif command.startswith("LOADED:"):
            PROCESS_LOCK.acquire()
            process_id = int(command[7:])
            if process_id not in PROCESS_LIST:
                add_pids(process_id)
            PROCESS_LOCK.release()
            log.info(
                "Cuckoomon successfully loaded in process with pid %u.",
                process_id)

        # In case of PID, the client is trying to notify the creation of
        # a new process to be injected and monitored.
        elif command.startswith("PROCESS:"):
            # We acquire the process lock in order to prevent the analyzer
            # to terminate the analysis while we are operating on the new
            # process.
            PROCESS_LOCK.acquire()

            # Set the current DLL to the default one provided
            # at submission.
            dll = DEFAULT_DLL
            suspended = False

            # We parse the process ID. An optional "<flag>:" prefix marks
            # whether the announced process is suspended ("1:").
            data = command[8:]
            if len(data) > 2 and data[1] == ':':
                if data[0] == '1':
                    suspended = True
                data = command[10:]

            process_id = thread_id = None
            if "," not in data:
                if data.isdigit():
                    process_id = int(data)
            elif data.count(",") == 1:
                process_id, param = data.split(",")
                thread_id = None
                if process_id.isdigit():
                    process_id = int(process_id)
                else:
                    process_id = None

                if param.isdigit():
                    thread_id = int(param)

            if process_id:
                if process_id not in (PID, PPID):
                    # We inject the process only if it's not being
                    # monitored already, otherwise we would generate
                    # polluted logs.
                    if process_id not in PROCESS_LIST:
                        # Open the process and inject the DLL.
                        # Hope it enjoys it.
                        proc = Process(pid=process_id,
                                       thread_id=thread_id,
                                       suspended=suspended)

                        filepath = proc.get_filepath()
                        is_64bit = proc.is_64bit()
                        filename = os.path.basename(filepath)

                        log.info("Announced %s process name: %s pid: %d",
                                 "64-bit" if is_64bit else "32-bit",
                                 filename, process_id)

                        if not in_protected_path(filename):
                            res = proc.inject(dll, filepath)
                            LASTINJECT_TIME = datetime.now()
                        proc.close()
                else:
                    log.warning("Received request to inject Cuckoo "
                                "process with pid %d, skip", process_id)

            # Once we're done operating on the processes list, we release
            # the lock.
            PROCESS_LOCK.release()

        # In case of FILE_NEW, the client is trying to notify the creation
        # of a new file.
        elif command.startswith("FILE_NEW:"):
            # We extract the file path.
            file_path = unicode(command[9:].decode("utf-8"))
            # We add the file to the list.
            add_file(file_path)

        # In case of FILE_DEL, the client is trying to notify an ongoing
        # deletion of an existing file, therefore we need to dump it
        # straight away.
        elif command.startswith("FILE_DEL:"):
            # Extract the file path.
            file_path = unicode(command[9:].decode("utf-8"))
            # Dump the file straight away.
            del_file(file_path)

        elif command.startswith("FILE_MOVE:"):
            # Syntax = "FILE_MOVE:old_file_path::new_file_path".
            if "::" in command[10:]:
                old_fname, new_fname = command[10:].split("::", 1)
                move_file(unicode(old_fname.decode("utf-8")),
                          unicode(new_fname.decode("utf-8")))
        else:
            log.warning("Received unknown command from cuckoomon: %s",
                        command)

    KERNEL32.WriteFile(self.h_pipe, create_string_buffer(response),
                       len(response), byref(bytes_read), None)

    KERNEL32.CloseHandle(self.h_pipe)

    return True
def run(self):
    """Run the full analysis lifecycle.

    Selects an analysis package, starts auxiliary modules, configures the
    zer0m0n kernel driver, launches the sample and monitors its processes
    until timeout or termination, then performs cleanup.
    @return: operation status (True on completion).

    Fixes vs. original: the remaining-process terminate loop used a bare
    ``except:`` (now ``except Exception``), and the alive-check loop removed
    pids from ``process_list.pids`` while iterating it directly (now iterates
    a snapshot so no entry is skipped).
    """
    self.prepare()
    self.path = os.getcwd()

    log.debug("Starting analyzer from: %s", self.path)
    log.debug("Pipe server name: %s", self.config.pipe)
    log.debug("Log pipe server name: %s", self.config.logpipe)

    # If no analysis package was specified at submission, we try to select
    # one automatically.
    if not self.config.package:
        log.debug("No analysis package specified, trying to detect "
                  "it automagically.")

        # If the analysis target is a file, we choose the package according
        # to the file format.
        if self.config.category == "file":
            package = choose_package(self.config.file_type,
                                     self.config.file_name,
                                     self.config.pe_exports.split(","))
        # If it's an URL, we'll just use the default Internet Explorer
        # package.
        else:
            package = "ie"

        # If we weren't able to automatically determine the proper package,
        # we need to abort the analysis.
        if not package:
            raise CuckooError("No valid package available for file "
                              "type: {0}".format(self.config.file_type))

        log.info("Automatically selected analysis package \"%s\"", package)
    # Otherwise just select the specified package.
    else:
        package = self.config.package

    # Generate the package path.
    package_name = "modules.packages.%s" % package

    # Try to import the analysis package.
    try:
        __import__(package_name, globals(), locals(), ["dummy"], -1)
    # If it fails, we need to abort the analysis.
    except ImportError:
        raise CuckooError("Unable to import package \"{0}\", does "
                          "not exist.".format(package_name))

    # Initialize the package parent abstract.
    Package()

    # Enumerate the abstract subclasses.
    try:
        package_class = Package.__subclasses__()[0]
    except IndexError as e:
        raise CuckooError("Unable to select package class "
                          "(package={0}): {1}".format(package_name, e))

    # Initialize the analysis package.
    self.package = package_class(self.config.options, analyzer=self)

    # Move the sample to the current working directory as provided by the
    # task - one is able to override the starting path of the sample.
    # E.g., for some samples it might be useful to run from %APPDATA%
    # instead of %TEMP%.
    if self.config.category == "file":
        self.target = self.package.move_curdir(self.target)

    # Initialize Auxiliary modules
    Auxiliary()
    prefix = auxiliary.__name__ + "."
    for loader, name, ispkg in pkgutil.iter_modules(auxiliary.__path__, prefix):
        if ispkg:
            continue

        # Import the auxiliary module.
        try:
            __import__(name, globals(), locals(), ["dummy"], -1)
        except ImportError as e:
            log.warning("Unable to import the auxiliary module "
                        "\"%s\": %s", name, e)

    # Walk through the available auxiliary modules.
    aux_enabled, aux_avail = [], []
    for module in Auxiliary.__subclasses__():
        # Try to start the auxiliary module.
        try:
            aux = module(options=self.config.options, analyzer=self)
            aux_avail.append(aux)
            aux.init()
            aux.start()
        except (NotImplementedError, AttributeError):
            log.exception("Auxiliary module %s was not implemented",
                          module.__name__)
        except CuckooDisableModule:
            # Module asked to be skipped for this analysis.
            continue
        except Exception as e:
            log.exception("Cannot execute auxiliary module %s: %s",
                          module.__name__, e)
        else:
            log.debug("Started auxiliary module %s", module.__name__)
            aux_enabled.append(aux)

    # Inform zer0m0n of the ResultServer address.
    zer0m0n.resultserver(self.config.ip, self.config.port)

    # Forward the command pipe and logpipe names on to zer0m0n.
    zer0m0n.cmdpipe(self.config.pipe)
    zer0m0n.channel(self.config.logpipe)

    # Hide the Cuckoo Analyzer & Cuckoo Agent.
    zer0m0n.hidepid(self.pid)
    zer0m0n.hidepid(self.ppid)

    # Initialize zer0m0n with our compiled Yara rules.
    zer0m0n.yarald("bin/rules.yarac")

    # Propagate the requested dump interval, if set.
    zer0m0n.dumpint(int(self.config.options.get("dumpint", "0")))

    # Start analysis package. If for any reason, the execution of the
    # analysis package fails, we have to abort the analysis.
    pids = self.package.start(self.target)

    # If the analysis package returned a list of process identifiers, we
    # add them to the list of monitored processes and enable the process
    # monitor.
    if pids:
        self.process_list.add_pids(pids)
        pid_check = True
    # If the package didn't return any process ID (for example in the case
    # where the package isn't enabling any behavioral analysis), we don't
    # enable the process monitor.
    else:
        log.info("No process IDs returned by the package, running "
                 "for the full timeout.")
        pid_check = False

    # Check in the options if the user toggled the timeout enforce. If so,
    # we need to override pid_check and disable process monitor.
    if self.config.enforce_timeout:
        log.info("Enabled timeout enforce, running for the full timeout.")
        pid_check = False

    while self.do_run:
        self.time_counter += 1
        if self.time_counter == int(self.config.timeout):
            log.info("Analysis timeout hit, terminating analysis.")
            break

        # If the process lock is locked, it means that something is
        # operating on the list of monitored processes. Therefore we
        # cannot proceed with the checks until the lock is released.
        if self.process_lock.locked():
            KERNEL32.Sleep(1000)
            continue

        try:
            # If the process monitor is enabled we start checking whether
            # the monitored processes are still alive.
            if pid_check:
                # We also track the PIDs provided by zer0m0n.
                self.process_list.add_pids(zer0m0n.getpids())

                # Iterate over a snapshot: remove_pid() mutates the
                # underlying list and would otherwise skip entries.
                for pid in list(self.process_list.pids):
                    if not Process(pid=pid).is_alive():
                        log.info("Process with pid %s has terminated", pid)
                        self.process_list.remove_pid(pid)

                # If none of the monitored processes are still alive, we
                # can terminate the analysis.
                if not self.process_list.pids:
                    log.info("Process list is empty, "
                             "terminating analysis.")
                    break

                # Update the list of monitored processes available to the
                # analysis package. It could be used for internal
                # operations within the module.
                self.package.set_pids(self.process_list.pids)

            try:
                # The analysis packages are provided with a function that
                # is executed at every loop's iteration. If such function
                # returns False, it means that it requested the analysis
                # to be terminate.
                if not self.package.check():
                    log.info("The analysis package requested the "
                             "termination of the analysis.")
                    break
            # If the check() function of the package raised some exception
            # we don't care, we can still proceed with the analysis but we
            # throw a warning.
            except Exception as e:
                log.warning("The package \"%s\" check function raised "
                            "an exception: %s", package_name, e)
        finally:
            # Zzz.
            KERNEL32.Sleep(1000)

    if not self.do_run:
        log.debug("The analyzer has been stopped on request by an "
                  "auxiliary module.")

    # Create the shutdown mutex.
    KERNEL32.CreateMutexA(None, False, SHUTDOWN_MUTEX)

    try:
        # Before shutting down the analysis, the package can perform some
        # final operations through the finish() function.
        self.package.finish()
    except Exception as e:
        log.warning("The package \"%s\" finish function raised an "
                    "exception: %s", package_name, e)

    try:
        # Upload files the package created to package_files in the
        # results folder.
        for path, name in self.package.package_files() or []:
            upload_to_host(path, os.path.join("package_files", name))
    except Exception as e:
        log.warning("The package \"%s\" package_files function raised an "
                    "exception: %s", package_name, e)

    # Terminate the Auxiliary modules.
    for aux in aux_enabled:
        try:
            aux.stop()
        except (NotImplementedError, AttributeError):
            continue
        except Exception as e:
            log.warning("Cannot terminate auxiliary module %s: %s",
                        aux.__class__.__name__, e)

    if self.config.terminate_processes:
        # Try to terminate remaining active processes.
        log.info("Terminating remaining processes before shutdown.")

        for pid in self.process_list.pids:
            proc = Process(pid=pid)
            if proc.is_alive():
                try:
                    proc.terminate()
                # Best effort: a process may die or deny access between the
                # is_alive() check and the terminate() call.
                except Exception:
                    continue

    # Run the finish callback of every available Auxiliary module.
    for aux in aux_avail:
        try:
            aux.finish()
        except (NotImplementedError, AttributeError):
            continue
        except Exception as e:
            log.warning("Exception running finish callback of auxiliary "
                        "module %s: %s", aux.__class__.__name__, e)

    # Dump all the notified files.
    self.files.dump_files()

    # Hell yeah.
    log.info("Analysis completed.")
    return True
def _inject_process(self, process_id, thread_id, mode):
    """Helper function for injecting the monitor into a process.

    @param process_id: pid of the target process.
    @param thread_id: tid to use for APC injection, or None/0 to fall back
        to CreateRemoteThread-style injection.
    @param mode: monitor mode string forwarded to Process.inject().

    Fixes vs. original: the notrack-to-track log call was missing its
    ``process_id`` argument (the ``%d`` was never substituted), and the
    process lock was leaked when the target filename was protected.
    """
    # We acquire the process lock in order to prevent the analyzer to
    # terminate the analysis while we are operating on the new process.
    self.analyzer.process_lock.acquire()

    # Set the current DLL to the default one provided at submission.
    dll = self.analyzer.default_dll

    if process_id in (self.analyzer.pid, self.analyzer.ppid):
        if process_id not in self.ignore_list["pid"]:
            log.warning("Received request to inject Cuckoo processes, "
                        "skipping it.")
            self.ignore_list["pid"].append(process_id)
        self.analyzer.process_lock.release()
        return

    # We inject the process only if it's not being monitored already,
    # otherwise we would generated polluted logs (if it wouldn't crash
    # horribly to start with).
    if self.analyzer.process_list.has_pid(process_id):
        # This pid is already on the notrack list, move it to the
        # list of tracked pids.
        if not self.analyzer.process_list.has_pid(process_id, notrack=False):
            log.debug("Received request to inject pid=%d. It was already "
                      "on our notrack list, moving it to the track list.",
                      process_id)

            self.analyzer.process_list.remove_pid(process_id)
            self.analyzer.process_list.add_pid(process_id)
            self.ignore_list["pid"].append(process_id)
        # Spit out an error once and just ignore it further on.
        elif process_id not in self.ignore_list["pid"]:
            log.debug("Received request to inject pid=%d, but we are "
                      "already injected there.", process_id)
            self.ignore_list["pid"].append(process_id)

        # We're done operating on the processes list, release the lock.
        self.analyzer.process_lock.release()
        return

    # Open the process and inject the DLL. Hope it enjoys it.
    proc = Process(pid=process_id, tid=thread_id)

    filename = os.path.basename(proc.get_filepath())

    if not self.analyzer.files.is_protected_filename(filename):
        # Add the new process ID to the list of monitored processes.
        self.analyzer.process_list.add_pid(process_id)

        # We're done operating on the processes list,
        # release the lock. Let the injection do its thing.
        self.analyzer.process_lock.release()

        # If we have both pid and tid, then we can use APC to inject.
        if process_id and thread_id:
            proc.inject(dll, apc=True, mode="%s" % mode)
        else:
            proc.inject(dll, apc=False, mode="%s" % mode)

        log.info("Injected into process with pid %s and name %r",
                 proc.pid, filename)
    else:
        # Protected filename: nothing to inject, but the lock must still be
        # released or the analyzer would deadlock on the next check.
        self.analyzer.process_lock.release()
def start(self):
    """Inject the monitor into lsass.exe in "dumptls" mode.

    The injected process is not tracked, so it does not count towards the
    analyzer's monitored-process list.
    """
    lsass = Process(process_name="lsass.exe")
    lsass.inject(track=False, mode="dumptls")
def run(self):
    """Run analysis.

    Selects and launches the analysis package, starts a watchdog timer and
    the screenshot module, then polls the monitored processes until they
    all terminate, the package requests termination, or the timer fires.
    @return: operation status (True on completion).

    Fix vs. original: pids were removed from PROCESS_LIST while iterating
    it directly, which skips the element after each removal; the loop now
    iterates over a snapshot.
    """
    self.prepare()

    if not self.config.package:
        log.info(
            "No analysis package specified, trying to detect it automagically"
        )
        package = choose_package(self.config.file_type)
        if not package:
            raise CuckooError(
                "No valid package available for file type: %s"
                % self.config.file_type)
        else:
            log.info("Automatically selected analysis package \"%s\"" % package)
    else:
        package = self.config.package

    package_name = "packages.%s" % package

    try:
        __import__(package_name, globals(), locals(), ["dummy"], -1)
    except ImportError:
        raise CuckooError(
            "Unable to import package \"%s\", does not exist." % package_name)

    # Instantiate the abstract base so __subclasses__() is populated.
    Package()

    try:
        package_class = Package.__subclasses__()[0]
    except IndexError as e:
        raise CuckooError(
            "Unable to select package class (package=%s): %s"
            % (package_name, e))

    pack = package_class(self.get_options())

    # Watchdog: stop the analysis when the configured timeout elapses.
    timer = Timer(self.config.timeout, self.stop)
    timer.start()

    shots = Screenshots()
    shots.start()

    try:
        pids = pack.start(self.file_path)
    except NotImplementedError:
        raise CuckooError(
            "The package \"%s\" doesn't contain a run function."
            % package_name)

    add_pids(pids)

    while self.do_run:
        PROCESS_LOCK.acquire()
        try:
            # Iterate over a snapshot: removing from PROCESS_LIST while
            # iterating it directly would skip the element following each
            # removed pid.
            for pid in list(PROCESS_LIST):
                if not Process(pid=pid).is_alive():
                    log.info("Process with pid %d has terminated" % pid)
                    PROCESS_LIST.remove(pid)

            # All monitored processes are gone: end the analysis early.
            if len(PROCESS_LIST) == 0:
                timer.cancel()
                break

            try:
                # The package may request termination at each iteration.
                if not pack.check():
                    timer.cancel()
                    break
            except NotImplementedError:
                pass
        finally:
            PROCESS_LOCK.release()
            KERNEL32.Sleep(1000)

    try:
        pack.finish()
    except NotImplementedError:
        pass

    shots.stop()
    self.complete()

    return True
def run(self):
    """Run handler.

    Reads one command from the named pipe, dispatches it (GETPIDS /
    PROCESS / FILE_NEW / FILE_DEL / FILE_MOVE) and writes a response back
    before closing the pipe handle.
    @return: operation status.
    """
    data = ""
    response = "OK"
    wait = False
    proc = None

    # Read the data submitted to the Pipe Server.
    while True:
        bytes_read = c_int(0)
        buf = create_string_buffer(BUFSIZE)
        success = KERNEL32.ReadFile(self.h_pipe, buf, sizeof(buf),
                                    byref(bytes_read), None)
        data += buf.value

        # ERROR_MORE_DATA means the message did not fit in the buffer;
        # keep reading until the full message has been received.
        if not success and KERNEL32.GetLastError() == ERROR_MORE_DATA:
            continue
        #elif not success or bytes_read.value == 0:
        #    if KERNEL32.GetLastError() == ERROR_BROKEN_PIPE:
        #        pass
        break

    if data:
        command = data.strip()

        # Parse the prefix for the received notification.
        # In case of GETPIDS we're gonna return the current process ID
        # and the process ID of our parent process (agent.py).
        if command == "GETPIDS":
            # Two unsigned 32-bit ints, packed back-to-back.
            response = struct.pack("II", PID, PPID)
        # In case of PID, the client is trying to notify the creation of
        # a new process to be injected and monitored.
        elif command.startswith("PROCESS:"):
            # We acquire the process lock in order to prevent the analyzer
            # to terminate the analysis while we are operating on the new
            # process.
            PROCESS_LOCK.acquire()

            # We parse the process ID.  Payload is either "pid" or
            # "pid,tid".
            data = command[8:]
            process_id = thread_id = None
            if not "," in data:
                if data.isdigit():
                    process_id = int(data)
            elif len(data.split(",")) == 2:
                process_id, thread_id = data.split(",")
                if process_id.isdigit():
                    process_id = int(process_id)
                else:
                    process_id = None

                if thread_id.isdigit():
                    thread_id = int(thread_id)
                else:
                    thread_id = None

            if process_id:
                if process_id not in (PID, PPID):
                    # We inject the process only if it's not being monitored
                    # already, otherwise we would generated polluted logs.
                    if process_id not in PROCESS_LIST:
                        # Open the process and inject the DLL.
                        # Hope it enjoys it.
                        proc = Process(pid=process_id, thread_id=thread_id)

                        filepath = proc.get_filepath()
                        filename = os.path.basename(filepath)

                        log.info("Announced process name: %s", filename)

                        if not protected_filename(filename):
                            # Add the new process ID to the list of monitored
                            # processes.
                            add_pids(process_id)

                            # If we have both pid and tid, then we can use
                            # apc to inject
                            if process_id and thread_id:
                                proc.inject(apc=True)
                            else:
                                # we inject using CreateRemoteThread, this
                                # needs the waiting in order to make sure no
                                # race conditions occur
                                proc.inject()
                                wait = True

                            log.info("Successfully injected process with "
                                     "pid %s", proc.pid)
                else:
                    log.warning("Received request to inject Cuckoo "
                                "processes, skip")

            # Once we're done operating on the processes list, we release
            # the lock.
            PROCESS_LOCK.release()
        # In case of FILE_NEW, the client is trying to notify the creation
        # of a new file.
        elif command.startswith("FILE_NEW:"):
            # We extract the file path.
            file_path = command[9:].decode("utf-8")
            # We add the file to the list.
            add_file(file_path)
        # In case of FILE_DEL, the client is trying to notify an ongoing
        # deletion of an existing file, therefore we need to dump it
        # straight away.
        elif command.startswith("FILE_DEL:"):
            # Extract the file path.
            file_path = command[9:].decode("utf-8")
            # Dump the file straight away.
            del_file(file_path)
        elif command.startswith("FILE_MOVE:"):
            # syntax = FILE_MOVE:old_file_path::new_file_path
            if "::" in command[10:]:
                old_fname, new_fname = command[10:].split("::", 1)
                move_file(old_fname.decode("utf-8"),
                          new_fname.decode("utf-8"))

    KERNEL32.WriteFile(self.h_pipe, create_string_buffer(response),
                       len(response), byref(bytes_read), None)
    KERNEL32.CloseHandle(self.h_pipe)

    # We wait until cuckoomon reports back.
    if wait:
        proc.wait()

    if proc:
        proc.close()

    return True
def run(self):
    """Run handler.

    Reads newline-delimited logging commands from the pipe under a shared
    class-level read lock, reassembles partial lines across reads, and
    dispatches FILE_* / PROCESS / EXEC commands until the pipe reports EOF.
    @return: operation status.
    """
    data = ""
    # NOTE(review): `wait` is never set to True in this handler, so the
    # proc.wait() branch below is effectively dead code — confirm intent.
    wait = False
    proc = None

    # Read the data submitted to the Pipe Server.
    while True:
        # Retry the locked readline until it succeeds; IOError is treated
        # as transient.
        while True:
            try:
                PipeHandler.read_lock.acquire()
                data = self.h_pipe.readline()
                PipeHandler.read_lock.release()
                break
            except IOError:
                log.error(
                    "Unable to open process communication pipe, retrying.")

        # Empty string means EOF on the pipe: stop handling.
        if data == '':
            break

        if data:
            #one line = one logging command
            c = [data]
            for command in c:
                if not command.endswith(
                        '\n'):  #if we have read a partial line
                    log.info("Saving a part of a log")
                    self.part = command  #save it for later
                    continue
                if self.part != '':
                    # prepend the previously saved partial line so the
                    # command is complete again
                    log.info("Using a part of a log")
                    command = self.part + command
                    self.part = ''

                if command.startswith("FILE_ACTIVITY:"):
                    self.writeToLogFile(
                        os.path.join(PATHS["logs"], self.F_LOGFILE),
                        command[14:len(command)])
                elif command.startswith("FILE_CREATE:"):
                    self.writeToLogFile(
                        os.path.join(PATHS["logs"], self.C_LOGFILE),
                        command[12:len(command)])
                elif command.startswith("FILE_DELETE:"):
                    self.writeToLogFile(
                        os.path.join(PATHS["logs"], self.D_LOGFILE),
                        command[12:len(command)])
                elif command.startswith("FILE_WRITE:"):
                    self.writeToLogFile(
                        os.path.join(PATHS["logs"], self.W_LOGFILE),
                        command[11:len(command)])
                elif command.startswith("PROCESS:"):
                    process_id = int(command[8:len(command)])
                    if process_id not in PROCESS_LIST:
                        if psutil.pid_exists(process_id):
                            h_p = psutil.Process(process_id)
                            proc = Process(pid=process_id,
                                           h_process=h_p,
                                           thread_id=None)
                            # NOTE(review): get_filepath() returns a full
                            # path here, yet it is compared against
                            # PROTECTED_LIST below — verify whether that
                            # list holds paths or bare names.
                            filename = proc.get_filepath()
                            log.info(
                                "Announced new process name: %s with pid %d",
                                filename, process_id)
                            if not filename in PROTECTED_LIST:
                                proc.start_trace()
                                add_pids(process_id)
                elif command.startswith("EXEC:"):
                    log.info(command)
                else:
                    log.error("Invalid pipe command: %s", command)
                    continue
                    #break

    # We wait until the injected library reports back.
    if wait:
        proc.wait()

    if proc:
        proc.close()

    self.done = True
    return True
def start(self, path):
    """Install the sample as a Windows service and start it.

    Registers ``path`` (renamed to have an .exe extension if needed) as a
    demand-start own-process service via the Service Control Manager,
    injects the monitor into the services.exe process, then starts the
    service.
    @param path: path of the sample to run as a service.

    Fixes vs. original: the is_64bit if/else branches were byte-identical
    (both called inject() with the same arguments) and have been collapsed;
    the StartServiceA result is now tested for truthiness instead of
    ``== True`` (Win32 BOOL success is "nonzero", not necessarily 1); the
    exception handler no longer logs the same error five times.
    """
    try:
        # get_path() raises if sc.exe is unavailable; keep the probe even
        # though the handle itself is unused here.
        sc = self.get_path("sc.exe")
        servicename = self.options.get("servicename", "CAPEService")
        servicedesc = self.options.get("servicedesc", "CAPE Service")
        arguments = self.options.get("arguments")

        # The SCM refuses binaries without an extension; rename if needed.
        if "." not in os.path.basename(path):
            new_path = path + ".exe"
            os.rename(path, new_path)
            path = new_path

        binPath = '"{0}"'.format(path)
        if arguments:
            binPath += " {0}".format(arguments)

        scm_handle = ADVAPI32.OpenSCManagerA(None, None,
                                             SC_MANAGER_ALL_ACCESS)
        if scm_handle == 0:
            log.info("Failed to open SCManager")
            log.info(ctypes.FormatError())
            return

        service_handle = ADVAPI32.CreateServiceA(
            scm_handle,
            servicename,
            servicedesc,
            SERVICE_ALL_ACCESS,
            SERVICE_WIN32_OWN_PROCESS,
            SERVICE_DEMAND_START,
            SERVICE_ERROR_IGNORE,
            binPath,
            None,
            None,
            None,
            None,
            None,
        )
        if service_handle == 0:
            log.info("Failed to create service")
            log.info(ctypes.FormatError())
            return

        log.info("Created service (handle: 0x%x)", service_handle)

        # Inject the monitor into services.exe so service startup is traced.
        servproc = Process(options=self.options,
                           config=self.config,
                           pid=self.config.services_pid,
                           suspended=False)
        filepath = servproc.get_filepath()
        # QueueUserAPC injection is used regardless of bitness (the original
        # duplicated this call in an if/else on is_64bit()).
        servproc.inject(injectmode=INJECT_QUEUEUSERAPC,
                        interest=filepath,
                        nosleepskip=True)
        servproc.close()
        # Give the injected DLL a moment to load before starting the service.
        KERNEL32.Sleep(500)

        service_launched = ADVAPI32.StartServiceA(service_handle, 0, None)
        if service_launched:
            log.info("Successfully started service")
        else:
            log.info(ctypes.FormatError())
            log.info("Failed to start service")

        ADVAPI32.CloseServiceHandle(service_handle)
        ADVAPI32.CloseServiceHandle(scm_handle)
        return
    except Exception as e:
        # log.exception already records the type, message and traceback.
        log.exception("Failed to create or start the service: %s", e)
def run(self):
    """Run handler.

    Reads one command from the named pipe and dispatches it: log
    forwarding (DEBUG/INFO/WARNING/CRITICAL), kernel-mode notifications
    (K*), service monitoring setup (INTEROP/WMI/TASKSCHED/BITS/SERVICE),
    process lifecycle (PROCESS/LOADED/KILL/RESUME/SHUTDOWN) and file
    notifications (FILE_NEW/FILE_DEL/FILE_MOVE).
    @return: operation status.

    Fixes vs. original: KSUBVERT removed pids from PROCESS_LIST while
    iterating it directly (skipping entries); the critical-service
    injection sequence, previously copy-pasted six times, is extracted
    into the local ``inject_service`` helper.
    """
    global MONITORED_SERVICES
    global MONITORED_WMI
    global MONITORED_DCOM
    global MONITORED_TASKSCHED
    global MONITORED_BITS
    global LASTINJECT_TIME
    global NUM_INJECTED

    def inject_service(service_pid, sleep_ms=2000):
        """Inject the monitor into a critical service process: mark it
        critical, inject the default DLL, bump LASTINJECT_TIME, then
        sleep to let the DLL load."""
        global LASTINJECT_TIME
        servproc = Process(pid=service_pid, suspended=False)
        servproc.set_critical()
        filepath = servproc.get_filepath()
        servproc.inject(dll=DEFAULT_DLL, interest=filepath,
                        nosleepskip=True)
        LASTINJECT_TIME = datetime.now()
        servproc.close()
        KERNEL32.Sleep(sleep_ms)

    def hidden_startupinfo():
        """STARTUPINFO that hides the console window of helper tools."""
        si = subprocess.STARTUPINFO()
        si.dwFlags = 1      # STARTF_USESHOWWINDOW
        si.wShowWindow = 0  # SW_HIDE
        return si

    try:
        data = ""
        response = "OK"

        # Read the data submitted to the Pipe Server.
        while True:
            bytes_read = c_int(0)
            buf = create_string_buffer(BUFSIZE)
            success = KERNEL32.ReadFile(self.h_pipe, buf, sizeof(buf),
                                        byref(bytes_read), None)
            data += buf.value

            # ERROR_MORE_DATA: message larger than the buffer, keep reading.
            if not success and KERNEL32.GetLastError() == ERROR_MORE_DATA:
                continue
            break

        if data:
            command = data.strip()

            # Debug, Regular, Warning, or Critical information from
            # CuckooMon.
            if command.startswith("DEBUG:"):
                log.debug(command[6:])
            elif command.startswith("INFO:"):
                log.info(command[5:])
            elif command.startswith("WARNING:"):
                log.warning(command[8:])
            elif command.startswith("CRITICAL:"):
                log.critical(command[9:])
            # In case of GETPIDS we're gonna return the current process ID
            # and the process ID of our parent process (agent.py), plus any
            # additional pids to hide.
            elif command == "GETPIDS":
                hidepids = set()
                hidepids.update(HIDE_PIDS)
                hidepids.update([PID, PPID])
                response = struct.pack("%dI" % len(hidepids), *hidepids)
            # remove pid from process list because we received a
            # notification from kernel land
            elif command.startswith("KTERMINATE:"):
                data = command[11:]
                process_id = int(data)
                if process_id:
                    if process_id in PROCESS_LIST:
                        remove_pid(process_id)
            # same than below but we don't want to inject any DLLs because
            # it's a kernel analysis
            elif command.startswith("KPROCESS:"):
                PROCESS_LOCK.acquire()
                data = command[9:]
                process_id = int(data)
                thread_id = None
                if process_id:
                    if process_id not in (PID, PPID):
                        if process_id not in PROCESS_LIST:
                            proc = Process(pid=process_id,
                                           thread_id=thread_id)
                            filepath = proc.get_filepath()
                            filename = os.path.basename(filepath)

                            if not in_protected_path(filename):
                                add_pid(process_id)
                                log.info("Announce process name : %s",
                                         filename)
                PROCESS_LOCK.release()
            elif command.startswith("KERROR:"):
                error_msg = command[7:]
                log.error("Error : %s", str(error_msg))
            # if a new driver has been loaded, we stop the analysis
            elif command == "KSUBVERT":
                # Iterate over a snapshot: removing from PROCESS_LIST while
                # iterating it directly would skip entries.
                for pid in list(PROCESS_LIST):
                    log.info("Process with pid %s has terminated", pid)
                    PROCESS_LIST.remove(pid)
            elif command.startswith("INTEROP:"):
                if not MONITORED_DCOM:
                    MONITORED_DCOM = True
                    dcom_pid = pid_from_service_name("DcomLaunch")
                    if dcom_pid:
                        inject_service(dcom_pid)
            elif command.startswith("WMI:"):
                if not MONITORED_WMI:
                    MONITORED_WMI = True
                    si = hidden_startupinfo()
                    # Restart winmgmt as an own-process service so it can be
                    # monitored with less noise.
                    log.info("Stopping WMI Service")
                    subprocess.call(['net', 'stop', 'winmgmt', '/y'],
                                    startupinfo=si)
                    log.info("Stopped WMI Service")
                    subprocess.call("sc config winmgmt type= own",
                                    startupinfo=si)

                    # WMI relies on DcomLaunch; monitor it as well.
                    if not MONITORED_DCOM:
                        MONITORED_DCOM = True
                        dcom_pid = pid_from_service_name("DcomLaunch")
                        if dcom_pid:
                            inject_service(dcom_pid)

                    log.info("Starting WMI Service")
                    subprocess.call("net start winmgmt", startupinfo=si)
                    log.info("Started WMI Service")

                    wmi_pid = pid_from_service_name("winmgmt")
                    if wmi_pid:
                        inject_service(wmi_pid)
            elif command.startswith("TASKSCHED:"):
                if not MONITORED_TASKSCHED:
                    MONITORED_TASKSCHED = True
                    si = hidden_startupinfo()
                    log.info("Stopping Task Scheduler Service")
                    subprocess.call(['net', 'stop', 'schedule', '/y'],
                                    startupinfo=si)
                    log.info("Stopped Task Scheduler Service")
                    subprocess.call("sc config schedule type= own",
                                    startupinfo=si)
                    log.info("Starting Task Scheduler Service")
                    subprocess.call("net start schedule", startupinfo=si)
                    log.info("Started Task Scheduler Service")

                    sched_pid = pid_from_service_name("schedule")
                    if sched_pid:
                        inject_service(sched_pid)
            elif command.startswith("BITS:"):
                if not MONITORED_BITS:
                    MONITORED_BITS = True
                    si = hidden_startupinfo()
                    log.info("Stopping BITS Service")
                    subprocess.call(['net', 'stop', 'BITS', '/y'],
                                    startupinfo=si)
                    log.info("Stopped BITS Service")
                    subprocess.call("sc config BITS type= own",
                                    startupinfo=si)

                    # BITS relies on DcomLaunch; monitor it as well.
                    if not MONITORED_DCOM:
                        MONITORED_DCOM = True
                        dcom_pid = pid_from_service_name("DcomLaunch")
                        if dcom_pid:
                            inject_service(dcom_pid)

                    log.info("Starting BITS Service")
                    subprocess.call("net start BITS", startupinfo=si)
                    log.info("Started BITS Service")

                    bits_pid = pid_from_service_name("BITS")
                    if bits_pid:
                        inject_service(bits_pid)
            # Handle case of a service being started by a monitored process
            # Switch the service type to own process behind its back so we
            # can monitor the service more easily with less noise
            elif command.startswith("SERVICE:"):
                servname = command[8:]
                si = hidden_startupinfo()
                subprocess.call("sc config " + servname + " type= own",
                                startupinfo=si)
                log.info("Announced starting service \"%s\"", servname)

                if not MONITORED_SERVICES:
                    # Inject into services.exe so we can monitor service
                    # creation. If tasklist previously failed to get the
                    # services.exe PID we'll be unable to inject.
                    if SERVICES_PID:
                        inject_service(SERVICES_PID, sleep_ms=1000)
                        MONITORED_SERVICES = True
                    else:
                        log.error('Unable to monitor service %s'
                                  % (servname))
            # For now all we care about is bumping up our LASTINJECT_TIME
            # to account for long delays between injection and actual
            # resume time where the DLL would have a chance to load in the
            # new process and report back to have its pid added to the list
            # of monitored processes
            elif command.startswith("RESUME:"):
                LASTINJECT_TIME = datetime.now()
            # Handle attempted shutdowns/restarts -- flush logs for all
            # monitored processes; additional handling can be added later
            elif command.startswith("SHUTDOWN:"):
                log.info("Received shutdown request")
                PROCESS_LOCK.acquire()
                for process_id in PROCESS_LIST:
                    event_name = TERMINATE_EVENT + str(process_id)
                    event_handle = KERNEL32.OpenEventA(
                        EVENT_MODIFY_STATE, False, event_name)
                    if event_handle:
                        KERNEL32.SetEvent(event_handle)
                        KERNEL32.CloseHandle(event_handle)
                        if self.options.get("procmemdump"):
                            p = Process(pid=process_id)
                            p.dump_memory()
                        dump_files()
                PROCESS_LOCK.release()
            # Handle case of malware terminating a process -- notify the
            # target ahead of time so that it can flush its log buffer
            elif command.startswith("KILL:"):
                PROCESS_LOCK.acquire()

                process_id = int(command[5:])
                if process_id not in (PID, PPID) and \
                        process_id in PROCESS_LIST:
                    # only notify processes we've hooked
                    event_name = TERMINATE_EVENT + str(process_id)
                    event_handle = KERNEL32.OpenEventA(
                        EVENT_MODIFY_STATE, False, event_name)
                    if not event_handle:
                        log.warning("Unable to open termination event "
                                    "for pid %u.", process_id)
                    else:
                        log.info("Notified of termination of process "
                                 "with pid %u.", process_id)
                        # dump the memory of exiting processes
                        if self.options.get("procmemdump"):
                            p = Process(pid=process_id)
                            p.dump_memory()
                        # make sure process is aware of the termination
                        KERNEL32.SetEvent(event_handle)
                        KERNEL32.CloseHandle(event_handle)

                PROCESS_LOCK.release()
            # Handle notification of cuckoomon loading in a process
            elif command.startswith("LOADED:"):
                PROCESS_LOCK.acquire()
                process_id = int(command[7:])
                if process_id not in PROCESS_LIST:
                    add_pids(process_id)
                PROCESS_LOCK.release()
                NUM_INJECTED += 1
                log.info("Cuckoomon successfully loaded in process with "
                         "pid %u.", process_id)
            # In case of PID, the client is trying to notify the creation
            # of a new process to be injected and monitored.
            elif command.startswith("PROCESS:"):
                # We acquire the process lock in order to prevent the
                # analyzer to terminate the analysis while we are operating
                # on the new process.
                PROCESS_LOCK.acquire()

                # Set the current DLL to the default one provided
                # at submission.
                dll = DEFAULT_DLL
                suspended = False

                # We parse the process ID.  Payload is "pid[,tid]",
                # optionally prefixed with "0:"/"1:" for the suspended flag.
                data = command[8:]
                if len(data) > 2 and data[1] == ':':
                    if data[0] == '1':
                        suspended = True
                    data = command[10:]

                process_id = thread_id = None
                if "," not in data:
                    if data.isdigit():
                        process_id = int(data)
                elif data.count(",") == 1:
                    process_id, param = data.split(",")
                    thread_id = None
                    if process_id.isdigit():
                        process_id = int(process_id)
                    else:
                        process_id = None
                    if param.isdigit():
                        thread_id = int(param)

                if process_id:
                    if process_id not in (PID, PPID):
                        # We inject the process only if it's not being
                        # monitored already, otherwise we would generate
                        # polluted logs.
                        if process_id not in PROCESS_LIST:
                            # Open the process and inject the DLL.
                            # Hope it enjoys it.
                            proc = Process(pid=process_id,
                                           thread_id=thread_id,
                                           suspended=suspended)
                            filepath = proc.get_filepath()

                            # if it's a URL analysis, provide the URL to all
                            # processes as the "interest" -- this will allow
                            # cuckoomon to see in the child browser process
                            # that a URL analysis is occurring
                            if self.config.category == "file" or \
                                    NUM_INJECTED > 1:
                                interest = filepath
                            else:
                                interest = self.config.target

                            is_64bit = proc.is_64bit()
                            filename = os.path.basename(filepath)

                            if not in_protected_path(filename) and \
                                    proc.check_inject():
                                log.info("Announced %s process name: %s "
                                         "pid: %d",
                                         "64-bit" if is_64bit else "32-bit",
                                         filename, process_id)
                                proc.inject(dll, interest)
                                LASTINJECT_TIME = datetime.now()
                            proc.close()
                    else:
                        log.warning("Received request to inject Cuckoo "
                                    "process with pid %d, skip", process_id)

                # Once we're done operating on the processes list, we
                # release the lock.
                PROCESS_LOCK.release()
            # In case of FILE_NEW, the client is trying to notify the
            # creation of a new file.
            elif command.startswith("FILE_NEW:"):
                FILES_LIST_LOCK.acquire()
                # We extract the file path.
                file_path = unicode(command[9:].decode("utf-8"))
                # We add the file to the list.
                add_file(file_path)
                FILES_LIST_LOCK.release()
            # In case of FILE_DEL, the client is trying to notify an
            # ongoing deletion of an existing file, therefore we need to
            # dump it straight away.
            elif command.startswith("FILE_DEL:"):
                FILES_LIST_LOCK.acquire()
                # Extract the file path.
                file_path = unicode(command[9:].decode("utf-8"))
                # Dump the file straight away.
                del_file(file_path)
                FILES_LIST_LOCK.release()
            elif command.startswith("FILE_MOVE:"):
                FILES_LIST_LOCK.acquire()
                # Syntax = "FILE_MOVE:old_file_path::new_file_path".
                if "::" in command[10:]:
                    old_fname, new_fname = command[10:].split("::", 1)
                    move_file(unicode(old_fname.decode("utf-8")),
                              unicode(new_fname.decode("utf-8")))
                FILES_LIST_LOCK.release()
            else:
                log.warning("Received unknown command from cuckoomon: %s",
                            command)

        KERNEL32.WriteFile(self.h_pipe, create_string_buffer(response),
                           len(response), byref(bytes_read), None)
        KERNEL32.CloseHandle(self.h_pipe)

        return True
    except Exception as e:
        error_exc = traceback.format_exc()
        log.exception(error_exc)
        return True
def execute(self, path, args, mode=None, maximize=False, env=None,
            source=None, trigger=None):
    """Starts an executable for analysis.
    @param path: executable path
    @param args: executable arguments
    @param mode: monitor mode - which functions to instrument
    @param maximize: whether the GUI should start maximized
    @param env: additional environment variables
    @param source: parent process of our process
    @param trigger: trigger to indicate analysis start
    @return: process pid
    """
    opts = self.options

    free = opts.get("free")
    # Kernel analysis overrides the free argument.
    if opts.get("analysis") == "kernel":
        free = True

    # Explicit arguments win over the submission-time options.
    source = source or opts.get("from")
    mode = mode or opts.get("mode")

    if not trigger and opts.get("trigger"):
        if opts["trigger"] == "exefile":
            trigger = "file:%s" % path

    # Setup pre-defined registry keys.
    self.init_regkeys(self.REGKEYS)

    # check preloaded apps
    self._check_preloaded_apps()

    proc = Process()
    launched = proc.execute(
        path=path,
        args=args,
        dll=opts.get("dll"),
        free=free,
        kernel_mode=opts.get("kernelmode"),
        kernel_pipe=opts.get("kernel_logpipe", "\\\\.\\ThunderDefPipe"),
        forwarder_pipe=opts.get("forwarderpipe"),
        dispatcher_pipe=opts.get("dispatcherpipe"),
        destination=opts.get("destination", ("localhost", 1)),
        curdir=self.curdir,
        source=source,
        mode=mode,
        maximize=maximize,
        env=env,
        trigger=trigger,
        driver_options=opts.get("driver_options"),
        package=type(self).__name__,
    )
    if not launched:
        raise CuckooPackageError(
            "Unable to execute the initial process, analysis aborted.")

    return proc.pid
from lib.common.constants import PATHS
from lib.common.exceptions import CuckooError, CuckooPackageError
from lib.common.results import upload_to_host
from lib.core.config import Config
from lib.core.packages import choose_package_class
from lib.core.startup import create_folders, init_logging
from modules import auxiliary

log = logging.getLogger()

# Pid of this analyzer process and bookkeeping sets shared across the module.
PID = os.getpid()
FILES_LIST = set()
DUMPED_LIST = set()
# Pids currently monitored for liveness.
PROCESS_LIST = set()
# Every pid ever handed to add_pids(), monitored or not, so a terminated
# process is never re-added.
SEEN_LIST = set()
PPID = Process(pid=PID).get_parent_pid()


def add_pids(pids):
    """Add one or more process IDs to the monitored process list.

    @param pids: a single pid or a tuple/list/set of pids; values may be
        strings or ints and are normalized to int before tracking.
    """
    if not isinstance(pids, (tuple, list, set)):
        pids = [pids]

    for pid in pids:
        # Normalize before the membership test so "123" and 123 dedupe.
        pid = int(pid)
        if pid not in SEEN_LIST:
            # Fix: previously the "Added new process" line was logged for
            # every pid, including duplicates that were then ignored; now
            # it is only emitted when the pid is actually added.
            log.info("Added new process to list with pid: %s", pid)
            PROCESS_LIST.add(pid)
            SEEN_LIST.add(pid)
def start(self):
    """Inject into lsass.exe so its TLS master secrets can be dumped."""
    lsass = Process(process_name="lsass.exe")
    # track=False: we only piggy-back on lsass, it is not a monitored target.
    lsass.inject(track=False)
    log.info("Injected lsass for dumping TLS master secrets!")
def run(self):
    """Run analysis.
    @return: operation status.
    """
    self.prepare()

    log.debug("Starting analyzer from: %s", os.getcwd())
    log.debug("Storing results at: %s", PATHS["root"])

    # If no analysis package was specified at submission, we try to select
    # one automatically.
    """
    if not self.config.package:
        log.debug("No analysis package specified, trying to detect it automagically")

        package = "generic" if self.config.category == "file" else "wget"

        # If we weren't able to automatically determine the proper package,
        # we need to abort the analysis.
        if not package:
            raise CuckooError(f"No valid package available for file type: {self.config.file_type}")

        log.info('Automatically selected analysis package "%s"', package)
    # Otherwise just select the specified package.
    else:
        package = self.config.package

    # Generate the package path.
    package_name = f"modules.packages.{package}"

    # Try to import the analysis package.
    try:
        __import__(package_name, globals(), locals(), ["dummy"], 0)
    # If it fails, we need to abort the analysis.
    except ImportError:
        raise CuckooError('Unable to import package "{package_name}", does not exist')

    # Initialize the package parent abstract.
    Package()

    # Enumerate the abstract subclasses.
    try:
        package_class = Package.__subclasses__()[0]
    except IndexError as e:
        raise CuckooError(f"Unable to select package class (package={package_name}): {e}")
    """

    if self.config.package:
        suggestion = "ff" if self.config.package == "ie" else self.config.package
    elif self.config.category != "file":
        suggestion = "url"
    else:
        suggestion = None

    # Try to figure out what analysis package to use with this target
    kwargs = {"suggestion": suggestion}
    if self.config.category == "file":
        package_class = choose_package_class(self.config.file_type,
                                             self.config.file_name, **kwargs)
    else:
        package_class = choose_package_class(None, None, **kwargs)

    if not package_class:
        raise Exception("Could not find an appropriate analysis package")

    # Package initialization
    kwargs = {
        "options": self.config.options,
        "timeout": self.config.timeout
    }

    # Initialize the analysis package.
    # pack = package_class(self.config.get_options())
    pack = package_class(self.target, **kwargs)

    # Initialize Auxiliary modules
    Auxiliary()
    prefix = f"{auxiliary.__name__}."
    for loader, name, ispkg in pkgutil.iter_modules(
            auxiliary.__path__, prefix):
        if ispkg:
            continue

        # Import the auxiliary module.
        try:
            __import__(name, globals(), locals(), ["dummy"], 0)
        except ImportError as e:
            log.warning('Unable to import the auxiliary module "%s": %s',
                        name, e)

    # Walk through the available auxiliary modules.
    aux_enabled, aux_avail = [], []
    for module in sorted(Auxiliary.__subclasses__(),
                         key=lambda x: x.priority, reverse=True):
        # Try to start the auxiliary module.
        try:
            aux = module()
            aux_avail.append(aux)
            aux.start()
        except (NotImplementedError, AttributeError):
            # Fix: use module.__name__ here — "aux" is unbound when the
            # constructor itself raised, which previously caused NameError.
            log.warning("Auxiliary module %s was not implemented",
                        module.__name__)
            continue
        except Exception as e:
            log.warning("Cannot execute auxiliary module %s: %s",
                        module.__name__, e)
            continue
        else:
            # Fix: this used to live in a "finally" clause, so a module was
            # reported as "Started" (and appended) even when start() failed.
            log.debug("Started auxiliary module %s", aux.__class__.__name__)
            if aux:
                aux_enabled.append(aux)

    # Start analysis package. If for any reason, the execution of the
    # analysis package fails, we have to abort the analysis.
    try:
        # pids = pack.start(self.target)
        pids = pack.start()
    except NotImplementedError:
        # Fix: the message used to say "run function" although the code
        # calls start(), matching the sibling messages below.
        raise CuckooError(
            f'The package "{package_class}" doesn\'t contain a start function'
        )
    except CuckooPackageError as e:
        raise CuckooError(
            f'The package "{package_class}" start function raised an error: {e}'
        )
    except Exception as e:
        raise CuckooError(
            f'The package "{package_class}" start function encountered an unhandled exception: {e}'
        )

    # If the analysis package returned a list of process IDs, we add them
    # to the list of monitored processes and enable the process monitor.
    if pids:
        add_pids(pids)
        pid_check = True

    # If the package didn't return any process ID (for example in the case
    # where the package isn't enabling any behavioral analysis), we don't
    # enable the process monitor.
    else:
        log.info(
            "No process IDs returned by the package, running for the full timeout"
        )
        pid_check = False

    # Check in the options if the user toggled the timeout enforce. If so,
    # we need to override pid_check and disable process monitor.
    if self.config.enforce_timeout:
        log.info("Enabled timeout enforce, running for the full timeout")
        pid_check = False

    time_counter = 0

    while True:
        time_counter += 1
        if time_counter > int(self.config.timeout):
            log.info("Analysis timeout hit, terminating analysis")
            break

        try:
            # If the process monitor is enabled we start checking whether
            # the monitored processes are still alive.
            if pid_check:
                for pid in list(PROCESS_LIST):
                    if not Process(pid=pid).is_alive():
                        log.info("Process with pid %s has terminated", pid)
                        PROCESS_LIST.remove(pid)

                # ToDo
                # ask the package if it knows any new pids
                # add_pids(pack.get_pids())

                # also ask the auxiliaries
                for aux in aux_avail:
                    add_pids(aux.get_pids())

                # If none of the monitored processes are still alive, we
                # can terminate the analysis.
                if not PROCESS_LIST:
                    log.info("Process list is empty, terminating analysis")
                    break

                # Update the list of monitored processes available to the
                # analysis package. It could be used for internal
                # operations within the module.
                pack.set_pids(PROCESS_LIST)

            try:
                # The analysis packages are provided with a function that
                # is executed at every loop's iteration. If such function
                # returns False, it means that it requested the analysis
                # to be terminate.
                if not pack.check():
                    log.info(
                        "The analysis package requested the termination of the analysis"
                    )
                    break

            # If the check() function of the package raised some exception
            # we don't care, we can still proceed with the analysis but we
            # throw a warning.
            except Exception as e:
                log.warning(
                    'The package "%s" check function raised an exception: %s',
                    package_class, e)
        except Exception as e:
            log.exception("The PID watching loop raised an exception: %s", e)
        finally:
            # Zzz.
            time.sleep(1)

    try:
        # Before shutting down the analysis, the package can perform some
        # final operations through the finish() function.
        pack.finish()
    except Exception as e:
        log.warning(
            'The package "%s" finish function raised an exception: %s',
            package_class, e)

    try:
        # Upload files the package created to files in the results folder
        package_files = pack.package_files()
        if package_files is not None:
            for package in package_files:
                upload_to_host(package[0],
                               os.path.join("files", package[1]))
    except Exception as e:
        log.warning(
            'The package "%s" package_files function raised an exception: %s',
            package_class, e)

    # Terminate the Auxiliary modules.
    for aux in sorted(aux_enabled, key=lambda x: x.priority):
        try:
            aux.stop()
        except (NotImplementedError, AttributeError):
            continue
        except Exception as e:
            log.warning("Cannot terminate auxiliary module %s: %s",
                        aux.__class__.__name__, e)

    if self.config.terminate_processes:
        # Try to terminate remaining active processes. We do this to make sure
        # that we clean up remaining open handles (sockets, files, etc.).
        log.info("Terminating remaining processes before shutdown")

        for pid in PROCESS_LIST:
            proc = Process(pid=pid)
            if proc.is_alive():
                try:
                    proc.terminate()
                except Exception:
                    continue

    # Run the finish callback of every available Auxiliary module.
    for aux in aux_avail:
        try:
            aux.finish()
        except (NotImplementedError, AttributeError):
            continue
        except Exception as e:
            log.warning(
                "Exception running finish callback of auxiliary module %s: %s",
                aux.__class__.__name__, e)

    # Let's invoke the completion procedure.
    self.complete()

    return True
def dump_memory(pid):
    """Dump process memory using zer0m0n if available, otherwise fallback."""
    dumped_by_driver = zer0m0n.dumpmem(pid)
    # An explicit False means the kernel driver could not (or would not)
    # dump this process, so fall back to the user-mode implementation.
    if dumped_by_driver is False:
        Process(pid=pid).dump_memory()