def Instrumentor(self, args):
    """Generate the lua snippet that enables the Instrumentor plugin.

    Builds the plugin's config table from the project's vuln.json (object
    size, allocation callsite) and the kernel's KASAN report addresses.

    Args:
        args: parsed CLI arguments; uses args.source and args.image.
    Returns:
        str: lua text — an ``add_plugin`` line plus its pluginsConfig table.
    Raises:
        ValueError: if no source project was provided.
    """
    if args.source is None:
        raise ValueError("no source project provided")
    kernel = Kernel(args.image)
    # Load the source project's config only to validate that it exists and
    # parses; the parsed result itself is not used here.
    self.loads(
        os.path.join(self.project_path(args.source), "project.json"))

    cfg = dict()
    cfg['workdir'] = self.getConfig("workdir")
    cfg['logLevel'] = "debug"
    cfg['check_kasan'] = True

    vuln = self.loads(self.workdir_file("vuln.json"))
    # A symbolic size means the concrete object size is unknown; 0 appears
    # to act as the "any size" sentinel — confirm against the plugin side.
    if vuln['Symbolic']:
        cfg['vul_size'] = 0
    else:
        cfg['vul_size'] = Pahole.getSize(vuln['Size'])
    cfg['allocSite'] = vuln['Callsite']
    cfg['kasan'], cfg['kasan_ret'] = kernel.getKasanReport()
    cfg['repro'] = False
    cfg['ranges'] = []

    content = "add_plugin(\"Instrumentor\")\n"
    content += "pluginsConfig.Instrumentor = %s" % lua.encode(cfg)
    return content
def addEntryToPlugin(self, content, plugin, key, value):
    """Set ``key = value`` inside *plugin*'s config table in *content*.

    If the plugin has no config block yet, one is created first and the
    insertion is retried. Returns *content* with the block replaced, or
    unchanged if the existing block could not be located verbatim.
    """
    existing = self.getPluginConfig(plugin, content, hasheader=False)
    if not existing:
        # No config block for this plugin yet: add an empty one, then
        # recurse so the normal update path handles the insertion.
        content = self.addPluginConfig(plugin, content)
        return self.addEntryToPlugin(content, plugin, key, value)

    table = lua.decode(existing)
    table[key] = value
    replacement = "pluginsConfig.%s = %s" % (plugin, lua.encode(table))

    try:
        old_block = self.getPluginConfig(plugin, content, hasheader=True)
        start = content.index(old_block)
        return content[:start] + replacement + content[start + len(old_block):]
    except ValueError:
        # Header form not found verbatim in content: best-effort, leave
        # the original text untouched.
        return content
def KernelAddressSanitizer(self, kernel):
    """Generate the lua snippet enabling the KernelAddressSanitizer plugin.

    Resolves the KASAN instrumentation entry points (and a few extra check
    routines) to their rebased entry/exit addresses via the kernel's symbol
    table. Symbols that cannot be found are reported and skipped, leaving
    gaps in the ``fun_<i>`` numbering.
    """
    instrumented = [
        'check_memory_region',
        '__asan_store1', '__asan_store2', '__asan_store4', '__asan_store8',
        '__asan_store16', '__asan_storeN',
        '__asan_load1', '__asan_load2', '__asan_load4', '__asan_load8',
        '__asan_load16', '__asan_loadN',
    ]
    extra_checks = ['csum_partial_copy_generic']

    cfg = dict()

    functions = dict()
    for idx, name in enumerate(instrumented):
        sym = kernel.find_symbol(name)
        if not sym:
            print("Cannot find symbol for %s" % name)
            continue
        functions["fun_%d" % idx] = {
            "funcName": name,
            "entry": sym.rebased_addr,
            # exit is the last byte of the function body
            "exit": sym.rebased_addr + sym.size - 1,
        }
    cfg["functions"] = functions

    checks = dict()
    for name in extra_checks:
        sym = kernel.find_symbol(name)
        if not sym:
            print("Cannot find symbol for %s" % name)
            continue
        checks[name] = sym.rebased_addr
    cfg["checks"] = checks

    cfg["kasan_report"], cfg["kasan_ret"] = kernel.getKasanReport()

    return ('\n'
            'add_plugin("KernelAddressSanitizer")\n'
            'pluginsConfig.KernelAddressSanitizer = '
            + lua.encode(cfg) + '\n')
def PcMonitor(self, args, kernel):
    """Generate the lua snippet enabling the PcMonitor plugin.

    Embeds the KASAN report address and the task_struct pid/tgid member
    offsets so the plugin can identify the traced process.
    """
    cfg = {
        "recordTrace": False,
        "trackroot": args.trackroot,
    }

    kasan_report, _ = kernel.getKasanReport()
    if kasan_report:
        cfg["kasan_report"] = kasan_report
    else:
        print("Failed to parse kasan report\n")

    cfg["pid_offset"] = kernel.getStructOffset("task_struct", "pid")
    cfg["tgid_offset"] = kernel.getStructOffset("task_struct", "tgid")
    cfg["limitcount"] = 300000
    cfg["debuginst"] = False
    cfg["hookadc"] = False
    if args.pids:
        # Comma-separated pid list from the command line.
        cfg["pids"] = [int(pid) for pid in args.pids.split(',')]

    return ('\n'
            'add_plugin("PcMonitor")\n'
            'pluginsConfig.PcMonitor = %s\n' % lua.encode(cfg))
def KernelFunctionModels(self, args, kernel):
    """Generate lua config for the KernelFunctionModels plugin.

    Reads function lists from template/functions.json ("model", "avoid",
    "skip"), resolves each function against the kernel symbol table, and
    writes the resolved tables to <project>/kernelModels.lua unless that
    file already exists. The returned lua snippet loads kernelModels.lua
    at config time via safe_load.

    Raises:
        Exception: if template/functions.json does not exist.
    """
    config = ''
    if not os.path.exists("template/functions.json"):
        raise Exception("template/functions.json does not exist")
    with open("template/functions.json", "r") as f:
        data = json.load(f)
    kernel_func = data["model"]   # functions to model
    constraints = data["avoid"]   # functions to constrain/avoid
    skipFuncs = data["skip"]      # functions to skip entirely

    config += ("\n")
    config += '''add_plugin("KernelFunctionModels")
pluginsConfig.KernelFunctionModels = {
    functions = {},
    constraints = {},
    skips = {},
}
g_KernelFunctionModels_function = {}
g_KernelFunctionModels_constraint = {}
g_KernelFunctionModels_skip = {}
safe_load('kernelModels.lua')
pluginsConfig.KernelFunctionModels.functions = g_KernelFunctionModels_function
pluginsConfig.KernelFunctionModels.constraints = g_KernelFunctionModels_constraint
pluginsConfig.KernelFunctionModels.skips = g_KernelFunctionModels_skip
'''
    modelsPath = self.project_file("kernelModels.lua")
    if os.path.exists(modelsPath):
        # Already generated for this project; reuse the existing file.
        return config

    # Resolve modeled functions: copy the template entry, add its address.
    g_KernelFunctionModels_function = dict()
    for i, func in enumerate(kernel_func):
        funCfg = dict()
        funCfg.update(func)
        symbol = kernel.find_symbol(func['funcName'])
        if not symbol:
            print("Failed to parse the function %s" % func['funcName'])
            continue
        funCfg["address"] = symbol.rebased_addr
        g_KernelFunctionModels_function["fun_%d" % i] = funCfg

    # Resolve avoided functions (template list plus per-project extras).
    g_KernelFunctionModels_constraint = dict()
    project_json = self.project_config()
    if "constraints" in project_json:
        constraints += project_json["constraints"]
    for i, c in enumerate(constraints):
        funCfg = {'funcName': c}
        symbol = kernel.find_symbol(c)
        if not symbol:
            print("Failed to parse the function %s" % c)
            continue
        funCfg["entry"] = symbol.rebased_addr
        funCfg["exit"] = symbol.rebased_addr + symbol.size - 1
        g_KernelFunctionModels_constraint["fun_%d" % i] = funCfg

    # Resolve skipped functions. (An unused leftover local that built a
    # lua fragment by hand was removed here; lua.encode does that below.)
    g_KernelFunctionModels_skip = dict()
    for i, c in enumerate(skipFuncs):
        funCfg = {'funcName': c}
        symbol = kernel.find_symbol(c)
        if not symbol:
            print("Failed to parse the function %s" % c)
            continue
        funCfg["entry"] = symbol.rebased_addr
        funCfg["exit"] = symbol.rebased_addr + symbol.size - 1
        g_KernelFunctionModels_skip["fun_%d" % i] = funCfg

    with open(modelsPath, "w") as f:
        f.write("g_KernelFunctionModels_function = %s\n"
                % lua.encode(g_KernelFunctionModels_function))
        f.write("g_KernelFunctionModels_constraint = %s\n"
                % lua.encode(g_KernelFunctionModels_constraint))
        f.write("g_KernelFunctionModels_skip = %s\n"
                % lua.encode(g_KernelFunctionModels_skip))
    return config
def AllocManager(self, args, kernel, vuln=None):
    """Generate the lua snippet enabling the AllocManager plugin.

    Describes the kernel allocation/free routines to hook (argument count,
    which argument carries the size, and a numeric type tag — presumably
    1=alloc, 2=free, 3=cache-alloc judging by the function names; confirm
    against the plugin) plus kmem_cache layout offsets.
    """
    hooked_funcs = [
        {'funcName': '__get_free_pages', 'args': 2, 'type': 1, 'sizeArg': 1},
        {'funcName': '__kmalloc', 'args': 2, 'type': 1, 'sizeArg': 0},
        {'funcName': '__kmalloc_track_caller', 'args': 3, 'type': 1, 'sizeArg': 0},
        {'funcName': 'kfree', 'args': 1, 'type': 2, 'sizeArg': 0},
        {'funcName': 'kmem_cache_free', 'args': 2, 'type': 2, 'sizeArg': 1},
        {'funcName': '__kmalloc_node_track_caller', 'args': 4, 'type': 1, 'sizeArg': 0},
        {'funcName': 'kmem_cache_alloc', 'args': 2, 'type': 3, 'sizeArg': 0},
        {'funcName': '__kmalloc_node', 'args': 3, 'type': 1, 'sizeArg': 0},
    ]

    cfg = dict()
    if vuln and not args.race:
        cfg["symbolicptr"] = True
        cfg["symbolic"] = {
            "obj_0": {
                "callsite": vuln.Callsite,
                "size": vuln.Size,
            }
        }

    functions = dict()
    for idx, entry in enumerate(hooked_funcs):
        sym = kernel.find_symbol(entry['funcName'])
        if sym:
            entry["address"] = sym.rebased_addr
        # NOTE(review): entries whose symbol was not found are still
        # emitted, just without an "address" field — confirm the plugin
        # tolerates that.
        functions["fun_%d" % idx] = entry
    cfg["functions"] = functions

    # object_size: size without metadata
    # size: size with padding/metadata
    cfg["slab_offset"] = kernel.getStructOffset("kmem_cache", "object_size")
    cfg["name_offset"] = kernel.getStructOffset("kmem_cache", "name")

    return ('\n'
            'add_plugin("AllocManager")\n'
            'pluginsConfig.AllocManager = %s\n' % lua.encode(cfg))
def KernelInstructionTracer(self, kernel, args):
    """Generate the lua snippet enabling the KernelInstructionTracer plugin.

    In mode 3 (without --check_heap) it loads or computes the vulnerability
    report, then derives write targets, exit points, path conditions and
    "spots" (instruction sites) for the plugin from Report.analyze().
    """
    config = ''
    config += ("\n")
    config += ('add_plugin("KernelInstructionTracer")\n')
    config += ('pluginsConfig.KernelInstructionTracer = ')
    reports = list()
    # if 2 <= args.mode <= 3 and not args.check_heap:
    if args.mode == 3 and not args.check_heap:
        report_file = self.workdir_file("reports.json")
        if os.path.exists(report_file):
            # Cached analysis from a previous run.
            with open(report_file) as f:
                reports = json.loads(f.read())
        else:
            print("Analyzing file %s..." % report_file)
            path = self.last_execution_file("debug.txt")
            reports = find_vulnerablility_sites(path, kernel, report_file, args.mode == 3)
    report = Report(reports, kernel)
    targets, exits, conditions, total_size = report.analyze()
    if args.mode == 2:
        # We are only interested in targets
        exits, conditions = list(), dict()
    if total_size > 4096:
        print("Too much overwritten data, need to check it?")
    data = dict()
    # targets
    data["targets"] = list()
    for each in targets:
        logger.debug("choose target 0x%x" % each[SPOT_ADDR])
        data["targets"].append(each[SPOT_ADDR])
    # exits
    data["exits"] = list()
    for each in exits:
        data["exits"].append(each)
    # conditions
    # NOTE(review): if `conditions` is a dict (as the mode==2 reset above
    # suggests), this iterates its keys, which must then be
    # (target, condition) 2-tuples — confirm Report.analyze()'s return type.
    count = 0
    conds = dict()
    for tgt, cond in conditions:
        conds["con_%d" % count] = {"target": tgt, "condition": cond}
        count += 1
    data["conditions"] = conds
    # annotation
    data["annotations"] = {"fun_0": {"pc": 0x0, "onExecute": "track"}}
    # type -- 1: store 2: memset 3: strcpy 4: memcpy
    spots = dict()
    for i, spot in enumerate(targets):
        spot_cfg = {"addr": spot[SPOT_ADDR], "type": spot[SPOT_TYPE]}
        signature = spot[SPOT_SIG]
        if signature:
            spot_cfg["signature"] = signature
        spots["spot_%d" % i] = spot_cfg
    data["spots"] = spots
    data["debug"] = args.debug
    # syscalls
    syscalls = dict()
    addr = kernel.func_start("entry_SYSCALL_64")
    if addr != 0:
        syscalls["entry_SYSCALL_64"] = addr
    else:
        print("Failed to get address of entry_SYSCALL_64\n")
    data["syscall"] = syscalls
    data["workdir"] = self.getConfig("workdir")
    if data["workdir"] is None:
        # Fall back to the project directory when no workdir is configured.
        data["workdir"] = self.project_dir
    config += lua.encode(data) + '\n'
    return config