def collect_metadata():
    """Build the metadata dictionary embedded in a capa result document.

    Captures the analysis timestamp, input-sample hashes and path, and
    IDA-reported file format / image base for the current IDB.
    """
    digest_md5 = get_file_md5()
    digest_sha256 = get_file_sha256()

    sample = {
        "md5": digest_md5,
        "sha1": "",  # not easily accessible
        "sha256": digest_sha256,
        "path": idaapi.get_input_file_path(),
    }

    analysis = {
        "format": idaapi.get_file_type_name(),
        "extractor": "ida",
        "base_address": idaapi.get_imagebase(),
        "layout": {
            # this is updated after capabilities have been collected.
            # will look like:
            #
            # "functions": { 0x401000: { "matched_basic_blocks": [ 0x401000, 0x401005, ... ] }, ... }
        },
    }

    return {
        "timestamp": datetime.datetime.now().isoformat(),
        # "argv" is not relevant here
        "sample": sample,
        "analysis": analysis,
        "version": capa.version.__version__,
    }
def init(self): idaapi.autoWait() #Don't try and parse functions before IDA finishes initial analysis self.rename = True self.arch = idaapi.get_file_type_name() self.functionParse() self.rename = False return idaapi.PLUGIN_KEEP
def collect_metadata():
    """Build the metadata dictionary embedded in a capa result document.

    Hash values returned by IDA may be bytes (py3) or str (py2); both are
    normalized to text before use.
    """

    def ensure_text(value):
        # normalize bytes -> str; leave existing text untouched
        if isinstance(value, six.string_types):
            return value
        return capa.features.bytes_to_str(value)

    md5 = ensure_text(idautils.GetInputFileMD5())
    sha256 = ensure_text(idaapi.retrieve_input_file_sha256())

    return {
        "timestamp": datetime.datetime.now().isoformat(),
        # "argv" is not relevant here
        "sample": {
            "md5": md5,
            "sha1": "",  # not easily accessible
            "sha256": sha256,
            "path": idaapi.get_input_file_path(),
        },
        "analysis": {
            "format": idaapi.get_file_type_name(),
            "extractor": "ida",
            "base_address": idaapi.get_imagebase(),
        },
        "version": capa.version.__version__,
    }
def load_capa_results(self):
    """ run capa analysis and render results in UI """
    logger.info("-" * 80)
    logger.info(" Using default embedded rules.")
    logger.info(" ")
    logger.info(" You can see the current default rule set here:")
    logger.info(" https://github.com/fireeye/capa-rules")
    logger.info("-" * 80)

    # rules ship alongside the plugin: two directories above this file, in "rules/"
    rules_path = os.path.join(os.path.dirname(self.file_loc), "../..", "rules")
    rules = capa.main.get_rules(rules_path)
    rules = capa.rules.RuleSet(rules)

    # per-IDB metadata (hashes, path, format, ...) for the result document
    meta = capa.ida.helpers.collect_metadata()

    # NOTE(review): third positional argument True presumably disables the
    # console progress bar -- confirm against capa.main.find_capabilities.
    capabilities, counts = capa.main.find_capabilities(rules, capa.features.extractors.ida.IdaFeatureExtractor(), True)
    meta["analysis"].update(counts)

    # support binary files specifically for x86/AMD64 shellcode
    # warn user binary file is loaded but still allow capa to process it
    # TODO: check specific architecture of binary files based on how user configured IDA processors
    if idaapi.get_file_type_name() == "Binary file":
        logger.warning("-" * 80)
        logger.warning(" Input file appears to be a binary file.")
        logger.warning(" ")
        logger.warning(" capa currently only supports analyzing binary files containing x86/AMD64 shellcode with IDA.")
        logger.warning(" This means the results may be misleading or incomplete if the binary file loaded in IDA is not x86/AMD64.")
        logger.warning(" If you don't know the input file type, you can try using the `file` utility to guess it.")
        logger.warning("-" * 80)
        capa.ida.helpers.inform_user_ida_ui("capa encountered warnings during analysis")

    if capa.main.has_file_limitation(rules, capabilities, is_standalone=False):
        capa.ida.helpers.inform_user_ida_ui("capa encountered warnings during analysis")

    logger.info("analysis completed.")

    self.doc = capa.render.convert_capabilities_to_result_document(meta, rules, capabilities)

    # render views
    self.model_data.render_capa_doc(self.doc)
    self.render_capa_doc_summary()
    self.render_capa_doc_mitre_summary()
    self.set_view_tree_default_sort_order()

    logger.info("render views completed.")
def find_current_arch():
    """
    find the architecture currently in use for this IDB.

    Returns a (ir_id, ir_module, disassembler) triple for 32-bit or 64-bit
    intel; raises RuntimeError for any other file type.
    """
    filetype = idaapi.get_file_type_name()
    if '386' in filetype:
        # parenthesized print is valid in both Python 2 and Python 3
        # (the original py2-only `print ...` statement is a syntax error on py3)
        print('Architecture: 32-bit intel.')
        return (ir.IR_INTEL_x86, ir.intel.ir_intel_x86, intel.disassembler)
    elif 'x86-64' in filetype:
        print('Architecture: 64-bit intel.')
        return (ir.IR_INTEL_x64, ir.intel.ir_intel_x64, intel.disassembler)
    raise RuntimeError("Don't know which arch to choose for %s" % (repr(filetype), ))
def run(self, arg=0):
    """Plugin entry point: validate preconditions and show the heap plugin form.

    Requires an ELF input file and an active, suspended debugger; on any
    failure a warning dialog is shown instead of raising.
    """
    try:
        if "ELF" not in idaapi.get_file_type_name():
            # fixed typo in user-facing message: "fomat" -> "format"
            raise Exception("Executable must be ELF format")
        if not idaapi.is_debugger_on() or not is_process_suspended():
            raise Exception("The debugger must be active and suspended before using this plugin")
        f = plugin_gui.HeapPluginForm()
        f.Show()
    except Exception as e:
        idaapi.warning("[%s] %s" % (PLUGNAME, str(e)))
def collect_metadata():
    """Build the metadata dictionary embedded in a capa result document."""
    raw_md5 = idautils.GetInputFileMD5()
    raw_sha256 = idaapi.retrieve_input_file_sha256()

    meta = {}
    meta["timestamp"] = datetime.datetime.now().isoformat()
    # "argv" is not relevant here
    meta["sample"] = {
        "md5": capa.features.bytes_to_str(raw_md5),
        # "sha1" not easily accessible
        "sha256": capa.features.bytes_to_str(raw_sha256),
        "path": idaapi.get_input_file_path(),
    }
    meta["analysis"] = {
        "format": idaapi.get_file_type_name(),
        "extractor": "ida",
    }
    meta["version"] = capa.version.__version__
    return meta
def is_supported_file_type():
    """Return True when IDA's reported file type is supported by capa.

    On unsupported file types, log guidance for the user and return False.
    """
    if idaapi.get_file_type_name() in SUPPORTED_FILE_TYPES:
        return True

    banner = "-" * 80
    for message in (
        banner,
        " Input file does not appear to be a PE file.",
        " ",
        " capa currently only supports analyzing PE files (or binary files containing x86/AMD64 shellcode) with IDA.",
        " If you don't know the input file type, you can try using the `file` utility to guess it.",
        banner,
    ):
        logger.error(message)
    return False
def run(self, arg=0):
    """Plugin entry point: validate preconditions and show the heap plugin form.

    Requires an ELF input file and an active, queryable debugger; on any
    failure a warning dialog is shown instead of raising.
    """
    try:
        if "ELF" not in idaapi.get_file_type_name():
            # fixed typo in user-facing message: "fomat" -> "format"
            raise Exception("Executable must be ELF format")
        if not idaapi.is_debugger_on() or not idaapi.dbg_can_query():
            raise Exception("The debugger must be active and suspended before using this plugin")
        f = HeapPluginForm()
        f.Show()
    except Exception as e:
        # BaseException.message was removed in Python 3; str(e) works on 2 and 3
        idaapi.warning("[%s] %s" % (PLUGNAME, str(e)))
def find_current_arch():
    """
    find the architecture currently in use for this IDB.

    Returns a (ir_id, ir_module, disassembler) triple for 32-bit or 64-bit
    intel; raises RuntimeError for any other file type.
    """
    filetype = idaapi.get_file_type_name()
    if '386' in filetype:
        # parenthesized print is valid in both Python 2 and Python 3
        print('Architecture: 32-bit intel.')
        # fixed attribute path: was `ir.ir.intel.ir_intel_x86`, which is
        # inconsistent with the x64 branch below (`ir.intel.ir_intel_x64`)
        return (ir.IR_INTEL_x86, ir.intel.ir_intel_x86, intel.disassembler)
    elif 'x86-64' in filetype:
        print('Architecture: 64-bit intel.')
        return (ir.IR_INTEL_x64, ir.intel.ir_intel_x64, intel.disassembler)
    raise RuntimeError("Don't know which arch to choose for %s" % (repr(filetype), ))
def getProcessInfo(self):
    """Collect basic information about the file being debugged.

    Returns a (app_name, os_type, os_arch) tuple, or None when the file
    format is not PE / Mach-O / ELF.
    """
    #Get basic information from the file being Debugged
    idainfo = idaapi.get_inf_structure()
    #Get the name of the input file we want to trace
    app_name = idc.GetInputFile()
    Print ("The input file is %s" % app_name )
    #Check to see what type of file we're tracing
    #And set up the proper debugger and input monitor
    if idainfo.filetype == idaapi.f_PE:
        Print ("Windows PE file" )
        os_type = "windows"
    elif idainfo.filetype == idaapi.f_MACHO:
        Print ("Mac OSX Macho file")
        os_type = "macosx"
        #debugger = "macosx"
        #checkInput = InputMonitor.checkMacLibs
    elif idainfo.filetype == idaapi.f_ELF:
        Print ("Linux ELF file")
        os_type = "linux"
        #debugger = "linux"
        #checkInput = InputMonitor.checkLinuxLibs
    else:
        Print ("Unknown binary, unable to debug")
        return None
    #Check the debugged executable if its 32 or 64bit
    if idainfo.is_64bit():
        Print("This binary is 64 bit")
        os_arch = "64"
    elif idainfo.is_32bit():
        Print( "This binary is 32 bit" )
        os_arch = "32"
    else:
        Print( "Bad binary. ARM processor?" )
        os_arch = "ARM"
    #Get the file type for the executable being debugger
    #For ARM libraries returns 'ELF for ARM (Shared object)
    # NOTE(review): fileType is printed but not returned -- confirm whether
    # callers need it or this is intentionally informational only.
    fileType = idaapi.get_file_type_name()
    Print( fileType )
    return (app_name,os_type,os_arch)
def _dialog_accepted(self, dialog):
    """
    Called when the save dialog is accepted by the user.

    Creates a new repository (asynchronously, via the network plugin) when
    the dialog did not select an existing one; otherwise proceeds directly
    with the selected repository and branch.

    :param dialog: the save dialog
    """
    repo, branch = dialog.get_result()

    # Create new repository if necessary
    if not repo:
        # locals renamed so they no longer shadow the builtins hash/file/type
        file_hash = idautils.GetInputFileMD5()
        file_name = idc.GetInputFile()
        file_type = idaapi.get_file_type_name()
        date_format = "%Y/%m/%d %H:%M"
        date = datetime.datetime.now().strftime(date_format)
        repo = Repository(file_hash, file_name, file_type, date)
        d = self._plugin.network.send_packet(NewRepository.Query(repo))
        d.addCallback(partial(self._on_new_repository_reply, repo, branch))
        d.addErrback(logger.exception)
    else:
        self._on_new_repository_reply(repo, branch, None)
def collect_metadata():
    """Build the metadata dictionary embedded in a capa result document."""
    digest_md5 = get_file_md5()
    digest_sha256 = get_file_sha256()

    sample = {
        "md5": digest_md5,
        "sha1": "",  # not easily accessible
        "sha256": digest_sha256,
        "path": idaapi.get_input_file_path(),
    }

    analysis = {
        "format": idaapi.get_file_type_name(),
        "extractor": "ida",
        "base_address": idaapi.get_imagebase(),
    }

    return {
        "timestamp": datetime.datetime.now().isoformat(),
        # "argv" is not relevant here
        "sample": sample,
        "analysis": analysis,
        "version": capa.version.__version__,
    }
def collect_class_info_internal():
    """Collect information about C++ classes defined in a kernelcache.

    Arm64 only. Returns a (class_info, all_vtables) pair on success, or
    None when the file type is unsupported or collection fails.
    """
    ftype = idaapi.get_file_type_name()
    if not _check_filetype(ftype):
        _log(-1, 'Bad file type "{}"', ftype)
        return None

    _log(1, 'Collecting information about OSMetaClass instances')
    metaclasses = _collect_metaclasses()
    if not metaclasses:
        _log(-1, 'Could not collect OSMetaClass instances')
        return None

    _log(1, 'Searching for virtual method tables')
    classes, vtables = _collect_vtables(metaclasses)
    if not classes:
        _log(-1, 'Could not collect virtual method tables')
        return None

    _log(1, 'Done')
    return classes, vtables
def init(self):
    # Plugin initialization: detect processor/bitness, register all menu and
    # hotkey actions, install UI/IDB/IDP hooks, and set up the Hex-Rays
    # integration when the decompiler is available.
    # Returns idaapi.PLUGIN_KEEP so the plugin stays resident.
    self.hexrays_inited = False
    self.registered_actions = []
    self.registered_hx_actions = []

    global arch
    global bits
    global is_cgc
    arch = idaapi.ph_get_id()

    info = idaapi.get_inf_structure()
    if info.is_64bit():
        bits = 64
    elif info.is_32bit():
        bits = 32
    else:
        bits = 16

    # CGC (DARPA Cyber Grand Challenge) binaries get a signature applied below
    is_cgc = "CGC" in idaapi.get_file_type_name()

    print "LazyIDA (Python Version) (v1.0.0.1) plugin has been loaded."

    # Register menu actions
    menu_actions = (
        idaapi.action_desc_t(ACTION_CONVERT[0], "Convert to string", menu_action_handler_t(ACTION_CONVERT[0]), None, None, 80),
        idaapi.action_desc_t(ACTION_CONVERT[1], "Convert to hex string", menu_action_handler_t(ACTION_CONVERT[1]), None, None, 8),
        idaapi.action_desc_t(ACTION_CONVERT[2], "Convert to C/C++ array (BYTE)", menu_action_handler_t(ACTION_CONVERT[2]), None, None, 38),
        idaapi.action_desc_t(ACTION_CONVERT[3], "Convert to C/C++ array (WORD)", menu_action_handler_t(ACTION_CONVERT[3]), None, None, 38),
        idaapi.action_desc_t(ACTION_CONVERT[4], "Convert to C/C++ array (DWORD)", menu_action_handler_t(ACTION_CONVERT[4]), None, None, 38),
        idaapi.action_desc_t(ACTION_CONVERT[5], "Convert to C/C++ array (QWORD)", menu_action_handler_t(ACTION_CONVERT[5]), None, None, 38),
        idaapi.action_desc_t(ACTION_CONVERT[6], "Convert to python list (BYTE)", menu_action_handler_t(ACTION_CONVERT[6]), None, None, 201),
        idaapi.action_desc_t(ACTION_CONVERT[7], "Convert to python list (WORD)", menu_action_handler_t(ACTION_CONVERT[7]), None, None, 201),
        idaapi.action_desc_t(ACTION_CONVERT[8], "Convert to python list (DWORD)", menu_action_handler_t(ACTION_CONVERT[8]), None, None, 201),
        idaapi.action_desc_t(ACTION_CONVERT[9], "Convert to python list (QWORD)", menu_action_handler_t(ACTION_CONVERT[9]), None, None, 201),
        idaapi.action_desc_t(ACTION_XORDATA, "Get xored data", menu_action_handler_t(ACTION_XORDATA), None, None, 9),
        idaapi.action_desc_t(ACTION_FILLNOP, "Fill with NOPs", menu_action_handler_t(ACTION_FILLNOP), None, None, 9),
        idaapi.action_desc_t(ACTION_SCANVUL, "Scan format string vulnerabilities", menu_action_handler_t(ACTION_SCANVUL), None, None, 160),
    )
    for action in menu_actions:
        idaapi.register_action(action)
        self.registered_actions.append(action.name)

    # Register hotkey actions
    hotkey_actions = (
        idaapi.action_desc_t(ACTION_COPYEA, "Copy EA", hotkey_action_handler_t(ACTION_COPYEA), "w", "Copy current EA", 0),
    )
    for action in hotkey_actions:
        idaapi.register_action(action)
        self.registered_actions.append(action.name)

    # Add ui hook
    self.ui_hook = UI_Hook()
    self.ui_hook.hook()

    # Add idb hook
    self.idb_hook = IDB_Hook()
    self.idb_hook.hook()

    # Add idp hook
    self.idp_hook = IDP_Hook()
    self.idp_hook.hook()

    # Add hexrays ui callback
    if idaapi.init_hexrays_plugin():
        hx_actions = (
            idaapi.action_desc_t(ACTION_HX_REMOVERETTYPE, "Remove return type", hexrays_action_handler_t(ACTION_HX_REMOVERETTYPE), "v"),
            idaapi.action_desc_t(ACTION_HX_COPYEA, "Copy ea", hexrays_action_handler_t(ACTION_HX_COPYEA), "w"),
            idaapi.action_desc_t(ACTION_HX_COPYNAME, "Copy name", hexrays_action_handler_t(ACTION_HX_COPYNAME), "c"),
        )
        for action in hx_actions:
            idaapi.register_action(action)
            self.registered_hx_actions.append(action.name)
        idaapi.install_hexrays_callback(hexrays_callback)
        self.hexrays_inited = True

    # Auto apply libcgc signature, but only if it is not already applied
    if is_cgc and os.path.exists(idaapi.get_sig_filename("libcgc.sig")):
        if "libcgc.sig" not in [idaapi.get_idasgn_desc(i)[0] for i in range(idaapi.get_idasgn_qty())]:
            idaapi.plan_to_apply_idasgn("libcgc.sig")

    return idaapi.PLUGIN_KEEP
def is_exe():
    '''
    is the currently loaded module a PE file?

    you can *probably* assume its for windows, if so.
    '''
    file_type = idaapi.get_file_type_name()
    return 'Portable executable' in file_type
def load_capa_results(self, use_cache=False):
    """run capa analysis and render results in UI

    note: this function must always return, exception or not, in order for
    plugin to safely close the IDA wait box

    @param use_cache: reuse the previously computed result document when True
    @return: True on success, False on failure or user cancellation
    """
    if not use_cache:
        # new analysis, new doc
        self.doc = None

    # progress counters consumed by the feature-extraction slot below
    self.process_total = 0
    self.process_count = 1

    def slot_progress_feature_extraction(text):
        """slot function to handle feature extraction progress updates"""
        update_wait_box("%s (%d of %d)" % (text, self.process_count, self.process_total))
        self.process_count += 1

    extractor = CapaExplorerFeatureExtractor()
    extractor.indicator.progress.connect(slot_progress_feature_extraction)

    update_wait_box("calculating analysis")

    try:
        self.process_total += len(tuple(extractor.get_functions()))
    except Exception as e:
        logger.error("Failed to calculate analysis (error: %s).", e)
        return False

    if ida_kernwin.user_cancelled():
        logger.info("User cancelled analysis.")
        return False

    update_wait_box("loading rules")

    if not self.load_capa_rules():
        return False

    if ida_kernwin.user_cancelled():
        logger.info("User cancelled analysis.")
        return False

    update_wait_box("extracting features")

    try:
        meta = capa.ida.helpers.collect_metadata()
        capabilities, counts = capa.main.find_capabilities(self.ruleset_cache, extractor, disable_progress=True)
        meta["analysis"].update(counts)
    except UserCancelledError:
        logger.info("User cancelled analysis.")
        return False
    except Exception as e:
        logger.error("Failed to extract capabilities from database (error: %s)", e)
        return False

    update_wait_box("checking for file limitations")

    try:
        # support binary files specifically for x86/AMD64 shellcode
        # warn user binary file is loaded but still allow capa to process it
        # TODO: check specific architecture of binary files based on how user configured IDA processors
        if idaapi.get_file_type_name() == "Binary file":
            logger.warning("-" * 80)
            logger.warning(" Input file appears to be a binary file.")
            logger.warning(" ")
            logger.warning(" capa currently only supports analyzing binary files containing x86/AMD64 shellcode with IDA.")
            logger.warning(" This means the results may be misleading or incomplete if the binary file loaded in IDA is not x86/AMD64.")
            logger.warning(" If you don't know the input file type, you can try using the `file` utility to guess it.")
            logger.warning("-" * 80)
            capa.ida.helpers.inform_user_ida_ui("capa encountered file type warnings during analysis")

        if capa.main.has_file_limitation(self.ruleset_cache, capabilities, is_standalone=False):
            capa.ida.helpers.inform_user_ida_ui("capa encountered file limitation warnings during analysis")
    except Exception as e:
        logger.error("Failed to check for file limitations (error: %s)", e)
        return False

    if ida_kernwin.user_cancelled():
        logger.info("User cancelled analysis.")
        return False

    update_wait_box("rendering results")

    try:
        self.doc = capa.render.result_document.convert_capabilities_to_result_document(meta, self.ruleset_cache, capabilities)
    except Exception as e:
        logger.error("Failed to render results (error: %s)", e)
        return False

    try:
        self.model_data.render_capa_doc(self.doc, self.view_show_results_by_function.isChecked())
        self.set_view_status_label("capa rules directory: %s (%d rules)" % (settings.user[CAPA_SETTINGS_RULE_PATH], len(self.rules_cache)))
    except Exception as e:
        logger.error("Failed to render results (error: %s)", e)
        return False

    return True
def load_capa_results(self):
    """run capa analysis and render results in UI"""
    # new analysis, new doc
    self.doc = None

    # resolve rules directory - check self and settings first, then ask user
    if not self.rule_path:
        if "rule_path" in settings and os.path.exists(settings["rule_path"]):
            self.rule_path = settings["rule_path"]
        else:
            rule_path = self.ask_user_directory()
            if not rule_path:
                capa.ida.helpers.inform_user_ida_ui(
                    "You must select a file directory containing capa rules to start analysis"
                )
                logger.warning(
                    "No rules loaded, cannot start analysis. You can download the standard collection of capa rules from https://github.com/fireeye/capa-rules."
                )
                self.set_view_status_label("No rules loaded.")
                self.disable_controls()
                return
            self.rule_path = rule_path
            settings.user["rule_path"] = rule_path

    try:
        rules = capa.main.get_rules(self.rule_path, True)
        rule_count = len(rules)
        rules = capa.rules.RuleSet(rules)
    except (IOError, capa.rules.InvalidRule, capa.rules.InvalidRuleSet) as e:
        capa.ida.helpers.inform_user_ida_ui("Failed to load capa rules from %s" % self.rule_path)
        logger.error(
            "Failed to load rules from %s (%s). Make sure your file directory contains properly formatted capa rules. You can download the standard collection of capa rules from https://github.com/fireeye/capa-rules.",
            self.rule_path,
            e,
        )
        # forget the bad path so the user is re-prompted on the next run
        self.rule_path = ""
        settings.user.del_value("rule_path")
        self.set_view_status_label("No rules loaded")
        self.disable_controls()
        return

    # per-IDB metadata (hashes, path, format, ...) for the result document
    meta = capa.ida.helpers.collect_metadata()

    capabilities, counts = capa.main.find_capabilities(rules, capa.features.extractors.ida.IdaFeatureExtractor(), True)
    meta["analysis"].update(counts)

    # support binary files specifically for x86/AMD64 shellcode
    # warn user binary file is loaded but still allow capa to process it
    # TODO: check specific architecture of binary files based on how user configured IDA processors
    if idaapi.get_file_type_name() == "Binary file":
        logger.warning("-" * 80)
        logger.warning(" Input file appears to be a binary file.")
        logger.warning(" ")
        logger.warning(" capa currently only supports analyzing binary files containing x86/AMD64 shellcode with IDA.")
        logger.warning(" This means the results may be misleading or incomplete if the binary file loaded in IDA is not x86/AMD64.")
        logger.warning(" If you don't know the input file type, you can try using the `file` utility to guess it.")
        logger.warning("-" * 80)
        capa.ida.helpers.inform_user_ida_ui("capa encountered file type warnings during analysis")

    if capa.main.has_file_limitation(rules, capabilities, is_standalone=False):
        capa.ida.helpers.inform_user_ida_ui("capa encountered file limitation warnings during analysis")

    self.doc = capa.render.convert_capabilities_to_result_document(meta, rules, capabilities)

    # render views
    self.model_data.render_capa_doc(self.doc)
    self.render_capa_doc_mitre_summary()
    self.enable_controls()
    self.set_view_status_label("Loaded %d capa rules from %s" % (rule_count, self.rule_path))
def load_capa_results(self):
    """run capa analysis and render results in UI

    note: this function must always return, exception or not, in order for
    plugin to safely close the IDA wait box

    @return: True on success, False on failure or user cancellation
    """
    # new analysis, new doc
    self.doc = None

    # progress counters consumed by the feature-extraction slot below
    self.process_total = 0
    self.process_count = 1

    def update_wait_box(text):
        """update the IDA wait box"""
        ida_kernwin.replace_wait_box("capa explorer...%s" % text)

    def slot_progress_feature_extraction(text):
        """slot function to handle feature extraction progress updates"""
        update_wait_box("%s (%d of %d)" % (text, self.process_count, self.process_total))
        self.process_count += 1

    extractor = CapaExplorerFeatureExtractor()
    extractor.indicator.progress.connect(slot_progress_feature_extraction)

    update_wait_box("calculating analysis")

    try:
        self.process_total += len(tuple(extractor.get_functions()))
    except Exception as e:
        logger.error("Failed to calculate analysis (error: %s).", e)
        return False

    if ida_kernwin.user_cancelled():
        logger.info("User cancelled analysis.")
        return False

    update_wait_box("loading rules")

    try:
        # resolve rules directory - check self and settings first, then ask user
        if not self.rule_path:
            if "rule_path" in settings and os.path.exists(settings["rule_path"]):
                self.rule_path = settings["rule_path"]
            else:
                idaapi.info("Please select a file directory containing capa rules.")
                rule_path = self.ask_user_directory()
                if not rule_path:
                    logger.warning(
                        "You must select a file directory containing capa rules before analysis can be run. The standard collection of capa rules can be downloaded from https://github.com/fireeye/capa-rules."
                    )
                    return False
                self.rule_path = rule_path
                settings.user["rule_path"] = rule_path
    except Exception as e:
        logger.error("Failed to load capa rules (error: %s).", e)
        return False

    if ida_kernwin.user_cancelled():
        logger.info("User cancelled analysis.")
        return False

    rule_path = self.rule_path
    try:
        if not os.path.exists(rule_path):
            raise IOError("rule path %s does not exist or cannot be accessed" % rule_path)

        # collect the candidate rule file paths: either a single file, or
        # every .yml found by walking the rules directory
        rule_paths = []
        if os.path.isfile(rule_path):
            rule_paths.append(rule_path)
        elif os.path.isdir(rule_path):
            for root, dirs, files in os.walk(rule_path):
                if ".github" in root:
                    # the .github directory contains CI config in capa-rules
                    # this includes some .yml files
                    # these are not rules
                    continue
                for file in files:
                    if not file.endswith(".yml"):
                        if not (file.endswith(".md") or file.endswith(".git") or file.endswith(".txt")):
                            # expect to see readme.md, format.md, and maybe a .git directory
                            # other things maybe are rules, but are mis-named.
                            logger.warning("skipping non-.yml file: %s", file)
                        continue
                    rule_path = os.path.join(root, file)
                    rule_paths.append(rule_path)

        # parse each rule file, keeping the wait box and cancellation responsive
        rules = []
        total_paths = len(rule_paths)
        for (i, rule_path) in enumerate(rule_paths):
            update_wait_box("loading capa rules from %s (%d of %d)" % (self.rule_path, i + 1, total_paths))
            if ida_kernwin.user_cancelled():
                raise UserCancelledError("user cancelled")
            try:
                rule = capa.rules.Rule.from_yaml_file(rule_path)
            except capa.rules.InvalidRule:
                raise
            else:
                rule.meta["capa/path"] = rule_path
                if capa.main.is_nursery_rule_path(rule_path):
                    rule.meta["capa/nursery"] = True
                rules.append(rule)

        rule_count = len(rules)
        rules = capa.rules.RuleSet(rules)
    except UserCancelledError:
        logger.info("User cancelled analysis.")
        return False
    except Exception as e:
        capa.ida.helpers.inform_user_ida_ui("Failed to load capa rules from %s" % self.rule_path)
        logger.error("Failed to load rules from %s (error: %s).", self.rule_path, e)
        logger.error(
            "Make sure your file directory contains properly formatted capa rules. You can download the standard collection of capa rules from https://github.com/fireeye/capa-rules."
        )
        # forget the bad path so the user is re-prompted on the next run
        self.rule_path = ""
        settings.user.del_value("rule_path")
        return False

    if ida_kernwin.user_cancelled():
        logger.info("User cancelled analysis.")
        return False

    update_wait_box("extracting features")

    try:
        meta = capa.ida.helpers.collect_metadata()
        capabilities, counts = capa.main.find_capabilities(rules, extractor, disable_progress=True)
        meta["analysis"].update(counts)
    except UserCancelledError:
        logger.info("User cancelled analysis.")
        return False
    except Exception as e:
        logger.error("Failed to extract capabilities from database (error: %s)", e)
        return False

    update_wait_box("checking for file limitations")

    try:
        # support binary files specifically for x86/AMD64 shellcode
        # warn user binary file is loaded but still allow capa to process it
        # TODO: check specific architecture of binary files based on how user configured IDA processors
        if idaapi.get_file_type_name() == "Binary file":
            logger.warning("-" * 80)
            logger.warning(" Input file appears to be a binary file.")
            logger.warning(" ")
            logger.warning(" capa currently only supports analyzing binary files containing x86/AMD64 shellcode with IDA.")
            logger.warning(" This means the results may be misleading or incomplete if the binary file loaded in IDA is not x86/AMD64.")
            logger.warning(" If you don't know the input file type, you can try using the `file` utility to guess it.")
            logger.warning("-" * 80)
            capa.ida.helpers.inform_user_ida_ui("capa encountered file type warnings during analysis")

        if capa.main.has_file_limitation(rules, capabilities, is_standalone=False):
            capa.ida.helpers.inform_user_ida_ui("capa encountered file limitation warnings during analysis")
    except Exception as e:
        logger.error("Failed to check for file limitations (error: %s)", e)
        return False

    if ida_kernwin.user_cancelled():
        logger.info("User cancelled analysis.")
        return False

    update_wait_box("rendering results")

    try:
        self.doc = capa.render.convert_capabilities_to_result_document(meta, rules, capabilities)
        self.model_data.render_capa_doc(self.doc)
        self.render_capa_doc_mitre_summary()
        self.enable_controls()
        self.set_view_status_label("capa rules directory: %s (%d rules)" % (self.rule_path, rule_count))
    except Exception as e:
        logger.error("Failed to render results (error: %s)", e)
        return False

    return True
def load_capa_results(self):
    """run capa analysis and render results in UI"""
    # resolve rules directory - check self and settings first, then ask user
    if not self.rule_path:
        if "rule_path" in settings:
            self.rule_path = settings["rule_path"]
        else:
            rule_path = self.ask_user_directory()
            if not rule_path:
                capa.ida.helpers.inform_user_ida_ui("You must select a rules directory to use for analysis.")
                logger.warning("no rules directory selected. nothing to do.")
                return
            self.rule_path = rule_path
            settings.user["rule_path"] = rule_path

    logger.debug("-" * 80)
    logger.debug(" Using rules from %s.", self.rule_path)
    logger.debug(" ")
    logger.debug(" You can see the current default rule set here:")
    logger.debug(" https://github.com/fireeye/capa-rules")
    logger.debug("-" * 80)

    try:
        rules = capa.main.get_rules(self.rule_path)
        rules = capa.rules.RuleSet(rules)
    except (IOError, capa.rules.InvalidRule, capa.rules.InvalidRuleSet) as e:
        capa.ida.helpers.inform_user_ida_ui("Failed to load rules from %s" % self.rule_path)
        logger.error("failed to load rules from %s (%s)", self.rule_path, e)
        # forget the bad path so the user is re-prompted on the next run
        self.rule_path = ""
        return

    # per-IDB metadata (hashes, path, format, ...) for the result document
    meta = capa.ida.helpers.collect_metadata()

    capabilities, counts = capa.main.find_capabilities(
        rules, capa.features.extractors.ida.IdaFeatureExtractor(), True
    )
    meta["analysis"].update(counts)

    # support binary files specifically for x86/AMD64 shellcode
    # warn user binary file is loaded but still allow capa to process it
    # TODO: check specific architecture of binary files based on how user configured IDA processors
    if idaapi.get_file_type_name() == "Binary file":
        logger.warning("-" * 80)
        logger.warning(" Input file appears to be a binary file.")
        logger.warning(" ")
        logger.warning(" capa currently only supports analyzing binary files containing x86/AMD64 shellcode with IDA.")
        logger.warning(" This means the results may be misleading or incomplete if the binary file loaded in IDA is not x86/AMD64.")
        logger.warning(" If you don't know the input file type, you can try using the `file` utility to guess it.")
        logger.warning("-" * 80)
        capa.ida.helpers.inform_user_ida_ui("capa encountered warnings during analysis")

    if capa.main.has_file_limitation(rules, capabilities, is_standalone=False):
        capa.ida.helpers.inform_user_ida_ui("capa encountered warnings during analysis")

    logger.debug("analysis completed.")

    self.doc = capa.render.convert_capabilities_to_result_document(meta, rules, capabilities)

    # render views
    self.model_data.render_capa_doc(self.doc)
    self.render_capa_doc_mitre_summary()
    self.set_view_tree_default_sort_order()

    logger.debug("render views completed.")