def patcherex_finish(proc):
    if proc.returncode != 0:
        out_str = "Patcherex failed. See attached terminal."
        idaapi.warning(out_str)
        print(out_str)
    else:
        out_str = "Patcherex completed successfully."
        idaapi.info(out_str)
        print(out_str)

def reset(self):
    """Reset UI elements, e.g. checkboxes and IDA highlighting."""
    self.ida_reset()
    logger.info("reset completed.")
    idaapi.info("%s reset completed." % PLUGIN_NAME)

def reset(self, checked):
    """Reset UI elements, e.g. checkboxes and IDA highlighting."""
    self.ida_reset()
    logger.info("reset completed.")
    idaapi.info("%s reset completed." % self.form_title)

def reload(self):
    """Reload views and re-run capa analysis."""
    self.ida_reset()
    self.range_model_proxy.invalidate()
    self.search_model_proxy.invalidate()
    self.model_data.clear()
    self.load_capa_results()
    logger.debug("%s reload completed", self.form_title)
    idaapi.info("%s reload completed." % self.form_title)

def _information(self):
    help_string = "\r\nREDB Commands:\r\n"
    help_string += "============\r\n"
    for function in CALLBACK_FUNCTIONS:
        help_string += function[1]
        help_string += "\t"
        help_string += function[0]
        help_string += "\r\n"
    idaapi.info(help_string)

def exportNames(self) -> None:
    """Exports the functions list into a file."""
    file_name, ext = QtWidgets.QFileDialog.getSaveFileName(
        None,
        "Export functions names",
        QtCore.QDir.homePath(),
        "CSV Files (*.csv);;TAG Files (*.tag)",
    )
    if file_name is not None and len(file_name) > 0:
        if not self._saveFunctionsNames(file_name, ext):
            idaapi.warning("Failed exporting functions names!")
        else:
            idaapi.info("Exported to: %s" % file_name)

def reload(self):
    """Reload views and re-run capa analysis."""
    self.ida_reset()
    self.model_proxy.invalidate()
    self.model_data.clear()
    self.view_summary.setRowCount(0)
    self.load_capa_results()
    logger.info("reload complete.")
    idaapi.info("%s reload completed." % PLUGIN_NAME)

def OnCommand(self, n, cmd_id):
    if cmd_id == self.cmd_show_reasons:
        match = self.items[n]
        reasons = match[len(match) - 1]
        msg = "\n".join(reasons)
        info(msg)
    elif cmd_id == self.cmd_import_all:
        if askyn_c(0, "HIDECANCEL\nDo you really want to import all matched functions as well as struct, union, enum and typedef definitions?") == 1:
            import_items = []
            for item in self.items:
                src_id, src_name, bin_ea = int(item[1]), item[2], int(item[3], 16)
                import_items.append([src_id, src_name, bin_ea])
            self.importer.import_items(import_items)
    elif cmd_id == self.cmd_import_selected:
        if len(self.selected_items) == 1 or askyn_c(1, "HIDECANCEL\nDo you really want to import the selected functions?") == 1:
            import_items = []
            for index in self.selected_items:
                item = self.items[index]
                src_id, src_name, bin_ea = int(item[1]), item[2], int(item[3], 16)
                import_items.append([src_id, src_name, bin_ea])
            import_definitions = askyn_c(0, "HIDECANCEL\nDo you also want to import all struct, union, enum and typedef definitions?") == 1
            self.importer.import_items(import_items, import_definitions=import_definitions)
    elif cmd_id == self.cmd_diff_c:
        html_diff = CHtmlDiff()
        item = self.items[n]
        src_id = int(item[1])
        cur = self.differ.db.cursor()
        sql = "select source from src.functions where id = ?"
        cur.execute(sql, (src_id,))
        row = cur.fetchone()
        cur.close()
        if not row:
            Warning("Cannot find the source function.")
            return False
        ea = int(item[3], 16)
        proto = self.differ.decompile_and_get(ea)
        if not proto:
            Warning("Cannot decompile function 0x%08x" % ea)
            return False
        buf1 = indent_source(row[0])
        buf2 = proto
        buf2 += "\n".join(self.differ.pseudo[ea])
        new_buf = indent_source(buf2)
        src = html_diff.make_file(new_buf.split("\n"), buf1.split("\n"))
        title = "Diff pseudo-source %s - %s" % (item[2], item[4])
        cdiffer = CHtmlViewer()
        cdiffer.Show(src, title)

def slot_change_rules_dir(self):
    """Allow the user to change the rules directory.

    The user selection is stored in settings for future runs.
    """
    path = self.ask_user_directory()
    if path:
        settings.user["rule_path"] = path
        self.rules_cache = None
        self.ruleset_cache = None
        idaapi.info("Run analysis again for your changes to take effect.")

def save_function_analysis(self):
    """Save the current rule preview to a capa rule file."""
    s = self.view_rulegen_preview.toPlainText().encode("utf-8")
    if not s:
        idaapi.info("No rule to save.")
        return
    path = self.ask_user_capa_rule_file()
    if not path:
        return
    write_file(path, s)

def export_json(self):
    """Export capa results as a JSON file."""
    if not self.doc:
        idaapi.info("No capa results to export.")
        return
    path = idaapi.ask_file(True, "*.json", "Choose file")
    # user cancelled, entered blank input, etc.
    if not path:
        return
    if os.path.exists(path) and 1 != idaapi.ask_yn(1, "File already exists. Overwrite?"):
        return
    with open(path, "wb") as export_file:
        export_file.write(
            json.dumps(self.doc, sort_keys=True, cls=capa.render.CapaJsonObjectEncoder).encode("utf-8")
        )

def save_program_analysis(self):
    """Save the capa program analysis results to a JSON file."""
    if not self.doc:
        idaapi.info("No program analysis to save.")
        return
    s = json.dumps(self.doc, sort_keys=True, cls=capa.render.json.CapaJsonObjectEncoder).encode("utf-8")
    path = self.ask_user_capa_json_file()
    if not path:
        return
    write_file(path, s)

def save_program_analysis(self):
    """Save the capa program analysis results to a JSON file."""
    if not self.doc:
        idaapi.info("No program analysis to save.")
        return
    s = json.dumps(self.doc, sort_keys=True, cls=capa.render.json.CapaJsonObjectEncoder).encode("utf-8")
    path = idaapi.ask_file(True, "*.json", "Choose file to save capa program analysis JSON")
    if not path:
        return
    write_file(path, s)

def reload(self):
    """Re-run capa analysis and reload UI controls.

    Called when the user selects plugin reload from the menu.
    """
    self.ida_reset()
    self.range_model_proxy.invalidate()
    self.search_model_proxy.invalidate()
    self.model_data.clear()
    self.load_capa_results()
    logger.debug("%s reload completed", self.form_title)
    idaapi.info("%s reload completed." % self.form_title)

def slot_export_to_tag_file(self):
    if not self.doc:
        idaapi.info("No capa results to export.")
        return
    Tag_file = self.Parse_json(self.doc)
    filename = idaapi.ask_file(
        True, os.path.splitext(ida_nalt.get_root_filename())[0] + ".tag", "Choose file")
    if not filename:
        return
    if os.path.exists(filename) and 1 != idaapi.ask_yn(1, "File already exists. Overwrite?"):
        return
    with open(filename, "w") as f:
        f.write(Tag_file)

def slot_export_pickle(self):
    """Export capa results as a pickle file."""
    if not self.doc:
        idaapi.info("No capa results to export.")
        return
    path = idaapi.ask_file(
        True, os.path.splitext(ida_nalt.get_root_filename())[0] + ".capa", "Choose file")
    if not path:
        return
    if os.path.exists(path) and 1 != idaapi.ask_yn(1, "File already exists. Overwrite?"):
        return
    with open(path, "wb") as export_file:
        pickle.dump(self.doc, export_file, -1)

def importNames(self) -> None:
    """Imports the functions list from a file."""
    file_name, ext = QtWidgets.QFileDialog.getOpenFileName(
        None,
        "Import functions names",
        QtCore.QDir.homePath(),
        "CSV Files (*.csv);;TAG Files (*.tag);;All files (*)",
    )
    if file_name is not None and len(file_name) > 0:
        names = self._loadFunctionsNames(file_name, ext)
        if names is None:
            idaapi.warning("Malformed file %s" % file_name)
            return
        (loaded, comments) = names
        if loaded == 0 and comments == 0:
            idaapi.warning("Failed importing functions names! Not matching offsets!")
        else:
            idaapi.info("Imported %d function names and %d comments" % (loaded, comments))

def update_config(self):
    try:
        config.start_tracing_at_startup = self.opt1.isChecked()
        config.stop_during_tracing = self.opt2.isChecked()
        config.detect_double_frees_and_overlaps = self.opt3.isChecked()
        config.filter_library_calls = self.opt4.isChecked()
        config.hexdump_limit = int(self.t_hexdump_limit.text())
        config.libc_offsets = self.get_offsets()
        config.save()
        idaapi.info("Config updated")
        self.parent.init_heap()
        self.parent.reload_gui_info()
    except Exception as e:
        idaapi.warning("ERROR: " + str(e))

def OnButtonUpdate(self, code=0):
    cnnInfo = {}
    cnnInfo['server'] = self.GetControlValue(self.txtServer)
    cnnInfo['protocol'] = self.GetControlValue(self.txtProtocol)
    cnnInfo['port'] = self.GetControlValue(self.txtPort)
    cnnInfo['un'] = self.GetControlValue(self.txtUser)
    cnnInfo['pw'] = self.GetControlValue(self.txtPw)
    cnnInfo['key'] = cnnInfo['un'] + " @ " + cnnInfo['protocol'] + cnnInfo['server'] + ":" + cnnInfo['port']
    self.Kconf['cnns'][cnnInfo['key']] = cnnInfo
    self.listView.UpdateItems()
    self.RefreshField(self.fvChooser)
    # Select the newly added item
    self.SetControlValue(self.fvChooser, [list(self.Kconf['cnns'].keys()).index(cnnInfo['key'])])
    self.updateDpList()
    info("Updated / added connection %s." % cnnInfo['key'])

def run(self, arg):
    s = """Memory Dumper
Enter the memory region:
begin: <:n::12::>
size: <:n::12::> (optional, fill it to ignore the end address)
or end: <:n::12::>
"""
    currea = idaapi.get_screen_ea()
    begin = idaapi.Form.NumericArgument('N', currea)
    size = idaapi.Form.NumericArgument('N', 0x0)
    end = idaapi.Form.NumericArgument('N', 0x0)
    ok = idaapi.ask_form(s, begin.arg, size.arg, end.arg)
    if ok == 1:
        print("Begin dump")
        if size.value == 0:
            if end.value <= begin.value:
                idaapi.warning("Incorrect Address!")
                return
            else:
                dumpsize = end.value - begin.value
        else:
            dumpsize = size.value
        print("begin: 0x%x, end: 0x%x" % (begin.value, begin.value + dumpsize))
        path = ida_kernwin.ask_file(True, "*", "Save dump to?")
        if not path:
            return
        print("path: %s" % path)
        if idc.savefile(path, 0, begin.value, dumpsize) != 0:
            idaapi.info("Save succeeded!")
        else:
            idaapi.warning("Failed to save dump file!")

def OnButtonUpdate(self, code=0):
    cnnInfo = {}
    cnnInfo['server'] = self.GetControlValue(self.txtServer)
    cnnInfo['protocol'] = self.GetControlValue(self.txtProtocol)
    cnnInfo['port'] = self.GetControlValue(self.txtPort)
    cnnInfo['un'] = self.GetControlValue(self.txtUser)
    cnnInfo['pw'] = self.GetControlValue(self.txtPw)
    cnnInfo['key'] = cnnInfo['un'] + " @ " + cnnInfo['protocol'] + cnnInfo['server'] + ":" + cnnInfo['port']
    self.Kconf['cnns'][cnnInfo['key']] = cnnInfo
    self.listView.UpdateItems()
    self.RefreshField(self.fvChooser)
    # Select the newly added item
    self.SetControlValue(self.fvChooser, [list(self.Kconf['cnns'].keys()).index(cnnInfo['key'])])
    self.updateDpList()
    info("Updated / added connection %s." % cnnInfo['key'])

def activate(self, ctx):
    if ".collare_projects" in ida_nalt.get_input_file_path():
        with open(os.path.join(os.path.dirname(ida_nalt.get_input_file_path()), "changes.json"), "r") as changes_file:
            changes = json.load(changes_file)
        base = changes["base"]
        if base != int(idaapi.get_imagebase()):
            base = int(idaapi.get_imagebase()) - base
        else:
            base = 0
        for function in changes["function_names"]:
            # Set function names
            function_address = int(function) + base
            idaapi.set_name(function_address, str(changes["function_names"][function]["name"]), idaapi.SN_FORCE)
        for comment in changes["comments"]:
            comment_address = int(comment, 10) + base
            currentComment = get_comment(comment_address)
            clear_comments(comment_address)
            if currentComment:
                if currentComment in changes["comments"][comment]:
                    set_cmt(comment_address, changes["comments"][comment], False)
                elif changes["comments"][comment] in currentComment:
                    set_cmt(comment_address, currentComment, False)
                else:
                    set_cmt(comment_address, currentComment + "; " + changes["comments"][comment], False)
            else:
                set_cmt(comment_address, changes["comments"][comment], False)
        print("[*] Import completed!")
        idaapi.info("CollaRE Import completed!")
    else:
        print("[!] This is not a CollaRE project!")
        idaapi.warning("This is not a CollaRE project!")
    return 1

def find_fakefast_on_click(self):
    start_addr = int(self.t_fakefast_addr.text(), 16)
    fake_chunks = self.heap.find_fakefast(start_addr)
    if len(fake_chunks) == 0:
        idaapi.info("Fakefast: 0 results")
        return
    self.tbl_fakefast.clearContents()
    self.tbl_fakefast.setRowCount(0)
    self.tbl_fakefast.setSortingEnabled(False)
    for idx, chunk in enumerate(fake_chunks):
        self.tbl_fakefast.insertRow(idx)
        self.tbl_fakefast.setItem(idx, 0, QtWidgets.QTableWidgetItem("%d" % chunk['fast_id']))
        self.tbl_fakefast.setItem(idx, 1, QtWidgets.QTableWidgetItem("0x%x" % chunk['size']))
        self.tbl_fakefast.setItem(idx, 2, QtWidgets.QTableWidgetItem("%d" % chunk['bytes_to']))
        self.tbl_fakefast.setItem(idx, 3, QtWidgets.QTableWidgetItem("0x%x" % chunk['address']))
    self.tbl_fakefast.resizeRowsToContents()
    self.tbl_fakefast.resizeColumnsToContents()
    self.tbl_fakefast.setSortingEnabled(True)

def main():
    if not is_jni_header_loaded():
        idaapi.warning('Please load jni.h first')
        load_jni_header()
    st = idc.set_ida_state(idc.IDA_STATUS_WORK)
    infos = load_methods()
    failed = []
    succ = 0
    for ea in idautils.Functions():
        fname = idc.GetFunctionName(ea)
        if fname.startswith('Java_') or fname in ['JNI_OnLoad', 'JNI_OnUnload']:
            sig = infos.get(fname)
            if sig is None:
                failed.append(fname)
            else:
                succ += 1
                apply_signature(ea, sig)
    idaapi.info('JNI functions loaded, {} success. {} failed. \n{}'.format(succ, len(failed), '\n'.join(failed)))
    idc.set_ida_state(st)

def SearchCodePathDialog(ret_only=False, extended=False):
    f1 = idaapi.choose_func("Select starting function", 0)
    if not f1:
        return
    sea = f1.startEA
    f2 = idaapi.choose_func("Select target function", idc.ScreenEA())
    if not f2:
        return
    tea = f2.startEA
    nodes = SearchCodePath(sea, tea, extended)
    if len(nodes) > 0:
        if ret_only:
            return nodes
        else:
            g = PathsBrowser(
                "Code paths from %s to %s" % (idc.GetFunctionName(sea), idc.GetFunctionName(tea)),
                nodes, sea, tea)
            g.Show()
    else:
        idaapi.info("No codepath found between %s and %s" % (idc.GetFunctionName(sea), idc.GetFunctionName(tea)))
    return nodes

def slot_export_json(self):
    """Export capa results as a JSON file."""
    if not self.doc:
        idaapi.info("No capa results to export.")
        return
    path = idaapi.ask_file(True, "*.json", "Choose file")
    # user cancelled, entered blank input, etc.
    if not path:
        return
    # check if the file exists, ask to overwrite
    if os.path.exists(path) and 1 != idaapi.ask_yn(1, "The selected file already exists. Overwrite?"):
        return
    with open(path, "wb") as export_file:
        export_file.write(
            json.dumps(self.doc, sort_keys=True, cls=capa.render.CapaJsonObjectEncoder).encode("utf-8"))

def SearchCodePathDialog(ret_only=False, extended=False):
    f1 = idaapi.choose_func("Select starting function", 0)
    if not f1:
        return
    sea = f1.startEA
    f2 = idaapi.choose_func("Select target function", idc.ScreenEA())
    if not f2:
        return
    tea = f2.startEA
    nodes = SearchCodePath(sea, tea, extended)
    if len(nodes) > 0:
        if ret_only:
            return nodes
        else:
            g = PathsBrowser(
                "Code paths from %s to %s" % (idc.GetFunctionName(sea), idc.GetFunctionName(tea)),
                nodes, sea, tea)
            g.Show()
    else:
        idaapi.info("No codepath found between %s and %s" % (idc.GetFunctionName(sea), idc.GetFunctionName(tea)))
    return nodes

def slot_export_to_r2_script(self):
    if not self.doc:
        idaapi.info("No capa results to export.")
        return
    filename = idaapi.ask_file(
        True, os.path.splitext(ida_nalt.get_root_filename())[0] + ".cutter.r2", "Choose file")
    if not filename:
        return
    if os.path.exists(filename) and 1 != idaapi.ask_yn(1, "File already exists. Overwrite?"):
        return
    Tag_file = self.Parse_json(self.doc)
    Cutter_RVA = []
    Cutter_comment = []
    Cutter_script = ""
    for k in range(0, len(Tag_file.split("\n")) - 1):
        Cutter_RVA.append(Tag_file.split("\n")[k].split(';')[0])
        Cutter_comment.append(Tag_file.split("\n")[k].split(';')[1])
        Cutter_script += "CCu base64:" + base64.b64encode(Cutter_comment[k].encode("utf-8")).decode() + " @ " + "$B+0x" + Cutter_RVA[k] + "\n"
    with open(filename, "w") as f:
        f.write(Cutter_script)

def inform_user_ida_ui(message):
    idaapi.info("%s. Please refer to IDA Output window for more information." % message)

HOST = 'localhost'
PORT = 8666

def pickle_sendz(host, port, var):
    try:
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.connect((host, port))
        d = zlib.compress(pickle.dumps(var))
        s.send(d)
        s.close()
        return None
    except Exception as e:
        return str(e)

idaapi.info("Please run the Hiew-Names-Server script and press OK")
idaapi.show_wait_box("Gathering and sending names to %s:%d" % (HOST, PORT))
info = []
for ea, name in idautils.Names():
    offs = idaapi.get_fileregion_offset(ea)
    if offs == idaapi.BADADDR:
        continue
    is_func = False if idaapi.get_func(ea) is None else True
    info.append((offs, name, is_func))
ok = pickle_sendz(HOST, PORT, info)
idaapi.hide_wait_box()

def load_capa_rules(self):
    """Load capa rules from the configured rules directory and cache the resulting ruleset."""
    self.ruleset_cache = None
    self.rules_cache = None

    try:
        # resolve rules directory - check self and settings first, then ask user
        if not os.path.exists(settings.user.get(CAPA_SETTINGS_RULE_PATH, "")):
            idaapi.info("Please select a file directory containing capa rules.")
            path = self.ask_user_directory()
            if not path:
                logger.warning(
                    "You must select a file directory containing capa rules before analysis can be run. The standard collection of capa rules can be downloaded from https://github.com/fireeye/capa-rules."
                )
                return False
            settings.user[CAPA_SETTINGS_RULE_PATH] = path
    except Exception as e:
        logger.error("Failed to load capa rules (error: %s).", e)
        return False

    if ida_kernwin.user_cancelled():
        logger.info("User cancelled analysis.")
        return False

    rule_path = settings.user[CAPA_SETTINGS_RULE_PATH]
    try:
        # TODO refactor: this first part is identical to capa.main.get_rules
        if not os.path.exists(rule_path):
            raise IOError("rule path %s does not exist or cannot be accessed" % rule_path)

        rule_paths = []
        if os.path.isfile(rule_path):
            rule_paths.append(rule_path)
        elif os.path.isdir(rule_path):
            for root, dirs, files in os.walk(rule_path):
                if ".github" in root:
                    # the .github directory contains CI config in capa-rules
                    # this includes some .yml files
                    # these are not rules
                    continue
                for file in files:
                    if not file.endswith(".yml"):
                        if not (file.startswith(".git") or file.endswith((".git", ".md", ".txt"))):
                            # expect to see .git* files, readme.md, format.md, and maybe a .git directory
                            # other things maybe are rules, but are mis-named.
                            logger.warning("skipping non-.yml file: %s", file)
                        continue
                    rule_path = os.path.join(root, file)
                    rule_paths.append(rule_path)

        rules = []
        total_paths = len(rule_paths)
        for (i, rule_path) in enumerate(rule_paths):
            update_wait_box(
                "loading capa rules from %s (%d of %d)"
                % (settings.user[CAPA_SETTINGS_RULE_PATH], i + 1, total_paths)
            )
            if ida_kernwin.user_cancelled():
                raise UserCancelledError("user cancelled")
            try:
                rule = capa.rules.Rule.from_yaml_file(rule_path)
            except capa.rules.InvalidRule:
                raise
            else:
                rule.meta["capa/path"] = rule_path
                if capa.main.is_nursery_rule_path(rule_path):
                    rule.meta["capa/nursery"] = True
                rules.append(rule)

        _rules = copy.copy(rules)
        ruleset = capa.rules.RuleSet(_rules)
    except UserCancelledError:
        logger.info("User cancelled analysis.")
        return False
    except Exception as e:
        capa.ida.helpers.inform_user_ida_ui(
            "Failed to load capa rules from %s" % settings.user[CAPA_SETTINGS_RULE_PATH])
        logger.error("Failed to load rules from %s (error: %s).", settings.user[CAPA_SETTINGS_RULE_PATH], e)
        logger.error(
            "Make sure your file directory contains properly formatted capa rules. You can download the standard collection of capa rules from https://github.com/fireeye/capa-rules."
        )
        settings.user[CAPA_SETTINGS_RULE_PATH] = ""
        return False

    self.ruleset_cache = ruleset
    self.rules_cache = rules
    return True

def load_capa_results(self):
    """run capa analysis and render results in UI

    note: this function must always return, exception or not, in order for plugin to safely close the IDA wait box
    """
    # new analysis, new doc
    self.doc = None
    self.process_total = 0
    self.process_count = 1

    def update_wait_box(text):
        """update the IDA wait box"""
        ida_kernwin.replace_wait_box("capa explorer...%s" % text)

    def slot_progress_feature_extraction(text):
        """slot function to handle feature extraction progress updates"""
        update_wait_box("%s (%d of %d)" % (text, self.process_count, self.process_total))
        self.process_count += 1

    extractor = CapaExplorerFeatureExtractor()
    extractor.indicator.progress.connect(slot_progress_feature_extraction)

    update_wait_box("calculating analysis")

    try:
        self.process_total += len(tuple(extractor.get_functions()))
    except Exception as e:
        logger.error("Failed to calculate analysis (error: %s).", e)
        return False

    if ida_kernwin.user_cancelled():
        logger.info("User cancelled analysis.")
        return False

    update_wait_box("loading rules")

    try:
        # resolve rules directory - check self and settings first, then ask user
        if not self.rule_path:
            if "rule_path" in settings and os.path.exists(settings["rule_path"]):
                self.rule_path = settings["rule_path"]
            else:
                idaapi.info("Please select a file directory containing capa rules.")
                rule_path = self.ask_user_directory()
                if not rule_path:
                    logger.warning(
                        "You must select a file directory containing capa rules before analysis can be run. The standard collection of capa rules can be downloaded from https://github.com/fireeye/capa-rules."
                    )
                    return False
                self.rule_path = rule_path
                settings.user["rule_path"] = rule_path
    except Exception as e:
        logger.error("Failed to load capa rules (error: %s).", e)
        return False

    if ida_kernwin.user_cancelled():
        logger.info("User cancelled analysis.")
        return False

    rule_path = self.rule_path
    try:
        if not os.path.exists(rule_path):
            raise IOError("rule path %s does not exist or cannot be accessed" % rule_path)

        rule_paths = []
        if os.path.isfile(rule_path):
            rule_paths.append(rule_path)
        elif os.path.isdir(rule_path):
            for root, dirs, files in os.walk(rule_path):
                if ".github" in root:
                    # the .github directory contains CI config in capa-rules
                    # this includes some .yml files
                    # these are not rules
                    continue
                for file in files:
                    if not file.endswith(".yml"):
                        if not (file.endswith(".md") or file.endswith(".git") or file.endswith(".txt")):
                            # expect to see readme.md, format.md, and maybe a .git directory
                            # other things maybe are rules, but are mis-named.
                            logger.warning("skipping non-.yml file: %s", file)
                        continue
                    rule_path = os.path.join(root, file)
                    rule_paths.append(rule_path)

        rules = []
        total_paths = len(rule_paths)
        for (i, rule_path) in enumerate(rule_paths):
            update_wait_box("loading capa rules from %s (%d of %d)" % (self.rule_path, i + 1, total_paths))
            if ida_kernwin.user_cancelled():
                raise UserCancelledError("user cancelled")
            try:
                rule = capa.rules.Rule.from_yaml_file(rule_path)
            except capa.rules.InvalidRule:
                raise
            else:
                rule.meta["capa/path"] = rule_path
                if capa.main.is_nursery_rule_path(rule_path):
                    rule.meta["capa/nursery"] = True
                rules.append(rule)

        rule_count = len(rules)
        rules = capa.rules.RuleSet(rules)
    except UserCancelledError:
        logger.info("User cancelled analysis.")
        return False
    except Exception as e:
        capa.ida.helpers.inform_user_ida_ui("Failed to load capa rules from %s" % self.rule_path)
        logger.error("Failed to load rules from %s (error: %s).", self.rule_path, e)
        logger.error(
            "Make sure your file directory contains properly formatted capa rules. You can download the standard collection of capa rules from https://github.com/fireeye/capa-rules."
        )
        self.rule_path = ""
        settings.user.del_value("rule_path")
        return False

    if ida_kernwin.user_cancelled():
        logger.info("User cancelled analysis.")
        return False

    update_wait_box("extracting features")

    try:
        meta = capa.ida.helpers.collect_metadata()
        capabilities, counts = capa.main.find_capabilities(rules, extractor, disable_progress=True)
        meta["analysis"].update(counts)
    except UserCancelledError:
        logger.info("User cancelled analysis.")
        return False
    except Exception as e:
        logger.error("Failed to extract capabilities from database (error: %s)", e)
        return False

    update_wait_box("checking for file limitations")

    try:
        # support binary files specifically for x86/AMD64 shellcode
        # warn user binary file is loaded but still allow capa to process it
        # TODO: check specific architecture of binary files based on how user configured IDA processors
        if idaapi.get_file_type_name() == "Binary file":
            logger.warning("-" * 80)
            logger.warning(" Input file appears to be a binary file.")
            logger.warning(" ")
            logger.warning(" capa currently only supports analyzing binary files containing x86/AMD64 shellcode with IDA.")
            logger.warning(" This means the results may be misleading or incomplete if the binary file loaded in IDA is not x86/AMD64.")
            logger.warning(" If you don't know the input file type, you can try using the `file` utility to guess it.")
            logger.warning("-" * 80)
            capa.ida.helpers.inform_user_ida_ui("capa encountered file type warnings during analysis")

        if capa.main.has_file_limitation(rules, capabilities, is_standalone=False):
            capa.ida.helpers.inform_user_ida_ui("capa encountered file limitation warnings during analysis")
    except Exception as e:
        logger.error("Failed to check for file limitations (error: %s)", e)
        return False

    if ida_kernwin.user_cancelled():
        logger.info("User cancelled analysis.")
        return False

    update_wait_box("rendering results")

    try:
        self.doc = capa.render.convert_capabilities_to_result_document(meta, rules, capabilities)
        self.model_data.render_capa_doc(self.doc)
        self.render_capa_doc_mitre_summary()
        self.enable_controls()
        self.set_view_status_label("capa rules directory: %s (%d rules)" % (self.rule_path, rule_count))
    except Exception as e:
        logger.error("Failed to render results (error: %s)", e)
        return False

    return True

def dump_config(self):
    idaapi.info(config.dump())