def watchKernelIA32E(self, cpu, cell, cr3, pid, comm):
    ''' Watch kernel page directories and tables for IA32E 64-bit configurations.

    Registers page-table haps (via setPageTableHap) on:
      1. the kernel's entry in the PML4 table (self.PML4_entry),
      2. the page directory pointer table that entry references, and
      3. each mapped, supervisor, non-big-page page directory in that table.

    cpu, cell -- Simics processor and cell objects
    cr3       -- physical base address of the PML4 table for this cpu
    pid, comm -- process id and command name, passed through to the haps
    '''
    # Mask selecting the physical-address bits of a 64-bit paging entry.
    mask64 = 0x000ffffffffff000
    cell_name = self.top.getTopComponentName(cpu)
    #self.lgr.debug('watchKernelIA32E, first PML4 entry for kernel is %d on %s %d (%s)' % (self.PML4_entry, cell_name, pid, comm))
    # Physical address of the kernel's PML4 entry (8 bytes per entry).
    pg_dir_ptr_table_base_addr = (self.PML4_entry * 8) + cr3
    ''' watch the kernel entry in the PML4 table '''
    self.setPageTableHap(cpu, cell, pg_dir_ptr_table_base_addr, 8, PML4, pid, comm)
    pg_dir_ptr_table_base = SIM_read_phys_memory(cpu, pg_dir_ptr_table_base_addr, 8)
    # NOTE(review): the PML4 entry's flag bits are not masked off with mask64
    # before the value is used as a table base below -- confirm intended.
    ''' watch the kernel page directory pointer table '''
    self.setPageTableHap(cpu, cell, pg_dir_ptr_table_base, 4096, PG_DIR_PTR_TBL, pid, comm)
    #self.lgr.debug('pg_dir_ptr_table_base_addr is 0x%x, table base is 0x%x' % (pg_dir_ptr_table_base_addr, pg_dir_ptr_table_base))
    ''' Look at each entry in the page directory pointer table '''
    pg_dir_ptr_addr = pg_dir_ptr_table_base
    for i in range(512):
        pg_dir_ptr_entry = SIM_read_phys_memory(cpu, pg_dir_ptr_addr, 8)
        big_pages = memUtils.testBit(pg_dir_ptr_entry, 7)  # PS bit: maps a big page
        u_s = memUtils.testBit(pg_dir_ptr_entry, 2)        # user/supervisor bit
        if big_pages == 0:
            pg_dir_addr = pg_dir_ptr_entry & mask64
            # Only watch page directories that are mapped and supervisor-only.
            if pg_dir_addr != 0 and u_s == 0:
                #self.lgr.debug('watchKernelIA32E, pg_dir_ptr_entry: 0x%x pg_dir_addr: 0x%x big: %x u/s %x on %s %d (%s)' % (pg_dir_ptr_entry, pg_dir_addr, big_pages, u_s, cell_name, pid, comm))
                self.setPageTableHap(cpu, cell, pg_dir_addr, 4096, PG_DIR, pid, comm)
        pg_dir_ptr_addr = pg_dir_ptr_addr + 8
def watchKernelPageExtended(self, cpu, cell, cr3, pid, comm):
    ''' PAE: Watch kernel page directories and tables for PAE configurations.
    This is used for linux.  Note we ignore page tables whose s/u bit is u.
    Not clear why these page tables are updated when userland pages in an
    allocated page.

    Uses the previously captured self.dir_ptr_entry[cpu] as the kernel's page
    directory pointer entry; the cr3 parameter is accepted but not used here.
    '''
    self.lgr.debug('watchKernel, watchKernelPageExtended')
    # Mask selecting the physical-address bits of a 64-bit paging entry.
    mask64 = 0x000ffffffffff000
    # These two values from the dir-ptr entry are computed but only the masked
    # address below is used before the loop reassigns them per directory entry.
    big_pages = memUtils.testBit(self.dir_ptr_entry[cpu], 7)
    u_s = memUtils.testBit(self.dir_ptr_entry[cpu], 2)
    pg_dir_addr = self.dir_ptr_entry[cpu] & mask64
    if pg_dir_addr == 0:
        self.lgr.error('watchKernelPageExtended, got zero for pg_dir_addr')
        return
    #self.lgr.debug('watchPageExtended, dir_ptr_entry: 0x%x pg_dir_addr: 0x%x big: %x u/s %x' % (self.dir_ptr_entry[cpu],
    #     pg_dir_addr, big_pages, u_s))
    ''' watch the kernel page directory itself '''
    self.setPageTableHap(cpu, cell, pg_dir_addr, 4096, PG_DIR, pid, comm)
    # Walk all 512 directory entries, watching each mapped supervisor page table.
    for i in range(512):
        pg_dir_entry = SIM_read_phys_memory(cpu, pg_dir_addr, 8)
        big_pages = memUtils.testBit(pg_dir_entry, 7)  # PS bit
        u_s = memUtils.testBit(pg_dir_entry, 2)        # user/supervisor bit
        if big_pages == 0:
            pg_table_addr = pg_dir_entry & mask64
            if pg_table_addr != 0 and u_s == 0:
                #self.lgr.debug('watchPageExtended, pg_dir_entry: 0x%x pg_table_addr: 0x%x big: %x u/s %x' % (pg_dir_entry, pg_table_addr, big_pages, u_s))
                self.setPageTableHap(cpu, cell, pg_table_addr, 4096, PG_TBL, pid, comm)
        else:
            #self.lgr.debug('watchKernelPageExtended pg_dir_entry 0x%x is to a big page u/s %x' % (pg_dir_entry, u_s))
            pass
        pg_dir_addr = pg_dir_addr + 8
def findPageTable(cpu, addr, lgr, use_sld=None):
    '''Walk the page tables to resolve virtual address addr into a PtableInfo.

    Dispatches to the ARM walker or the IA32E walker when appropriate;
    otherwise handles traditional (non-PAE) 32-bit x86 paging inline, or
    defers to findPageTableExtended when CR4.PAE is set.

    use_sld -- optional value to use in place of the second-level entry read
               from memory (passed through to the ARM walker as well).
    Returns a PtableInfo; fields beyond the last successfully resolved level
    retain their defaults when an entry is zero.
    '''
    if cpu.architecture == 'arm':
        return findPageTableArm(cpu, addr, lgr, use_sld)
    elif isIA32E(cpu):
        lgr.debug('findPageTable is IA32E')
        return findPageTableIA32E(cpu, addr, lgr)
    else:
        #lgr.debug('findPageTable not IA32E')
        ptable_info = PtableInfo()
        reg_num = cpu.iface.int_register.get_number("cr3")
        cr3 = cpu.iface.int_register.read(reg_num)
        reg_num = cpu.iface.int_register.get_number("cr4")
        cr4 = cpu.iface.int_register.read(reg_num)
        ''' determine if PAE being used '''
        addr_extend = memUtils.testBit(cr4, 5)
        #print('addr_extend is %d' % addr_extend)
        if addr_extend == 0:
            ''' Traditional page table. '''
            # Split the virtual address: 10-bit directory, 10-bit table, 12-bit offset.
            offset = memUtils.bitRange(addr, 0, 11)
            ptable = memUtils.bitRange(addr, 12, 21)
            pdir = memUtils.bitRange(addr, 22, 31)
            #lgr.debug('traditional paging addr 0x%x pdir: 0x%x ptable: 0x%x offset 0x%x ' % (addr, pdir, ptable, offset))
            pdir_entry_addr = cr3 + (pdir * 4)
            #lgr.debug('cr3: 0x%x pdir_entry_addr: 0x%x' % (cr3, pdir_entry_addr))
            ptable_info.pdir_addr = pdir_entry_addr
            pdir_entry = SIM_read_phys_memory(cpu, pdir_entry_addr, 4)
            if pdir_entry == 0:
                # No page directory entry; return with only pdir_addr filled in.
                return ptable_info
            ptable_info.ptable_protect = memUtils.testBit(pdir_entry, 2)
            ptable_info.ptable_exists = True
            # Page table base is the top 20 bits of the directory entry.
            pdir_entry_20 = memUtils.bitRange(pdir_entry, 12, 31)
            ptable_base = pdir_entry_20 * PAGE_SIZE
            #lgr.debug('pdir_entry: 0x%x 20 0x%x ptable_base: 0x%x' % (pdir_entry, pdir_entry_20, ptable_base))
            ptable_entry_addr = ptable_base + (4 * ptable)
            ptable_info.ptable_addr = ptable_entry_addr
            if use_sld is not None:
                # Caller supplied the second-level entry; skip the memory read.
                entry = use_sld
            else:
                entry = SIM_read_phys_memory(cpu, ptable_entry_addr, 4)
            #lgr.debug('ptable_entry_addr is 0x%x, page table entry contains 0x%x' % (ptable_entry_addr, entry))
            if entry == 0:
                return ptable_info
            ptable_info.page_protect = memUtils.testBit(entry, 2)
            ptable_info.page_exists = True
            entry_20 = memUtils.bitRange(entry, 12, 31)
            page_base = entry_20 * PAGE_SIZE
            paddr = page_base + offset
            ptable_info.page_addr = paddr
            #lgr.debug('phys addr is 0x%x' % paddr)
            return ptable_info
        else:
            #lgr.debug('call findPageTableExtend')
            return findPageTableExtended(cpu, addr, lgr)
def findPageTableExtended(cpu, addr, lgr):
    '''Walk PAE (extended) page tables to resolve virtual address addr.

    Expects CR4.PAE to be set; if it is not, logs an error and returns None
    (implicitly).  Returns a PtableInfo with entry_size = 8 and the fields
    filled in up to the deepest level whose entry was nonzero.
    NOTE(review): big-page (PS-bit) directory entries get no special handling
    here -- confirm callers never hit large pages on this path.
    '''
    WORD_SIZE = 8
    # Mask selecting the physical-address bits of a 64-bit paging entry.
    mask64 = 0x000ffffffffff000
    ptable_info = PtableInfo()
    ptable_info.entry_size = WORD_SIZE
    reg_num = cpu.iface.int_register.get_number("cr3")
    cr3 = cpu.iface.int_register.read(reg_num)
    reg_num = cpu.iface.int_register.get_number("cr4")
    cr4 = cpu.iface.int_register.read(reg_num)
    ''' determine if PAE being used '''
    addr_extend = memUtils.testBit(cr4, 5)
    #print('addr_extend is %d' % addr_extend)
    if addr_extend != 0:
        ''' Extended page table. '''
        # PAE split: 2-bit dir-pointer index, 9-bit dir, 9-bit table, 12-bit offset.
        offset = memUtils.bitRange(addr, 0,11)
        ptable = memUtils.bitRange(addr, 12,20)
        pdir = memUtils.bitRange(addr, 21,29)
        pdir_pointer_table = memUtils.bitRange(addr, 30,31)
        dir_ptr_entry_addr = cr3 + WORD_SIZE * pdir_pointer_table
        dir_ptr_entry = SIM_read_phys_memory(cpu, dir_ptr_entry_addr, WORD_SIZE)
        # Reuse the variable for the page-directory base extracted from the entry.
        dir_ptr_entry_addr = dir_ptr_entry & mask64
        pdir_entry_addr = dir_ptr_entry_addr + (pdir * WORD_SIZE)
        ptable_info.pdir_addr = pdir_entry_addr
        pdir_entry = SIM_read_phys_memory(cpu, pdir_entry_addr, WORD_SIZE)
        if pdir_entry == 0:
            # No directory entry; only pdir_addr is meaningful.
            return ptable_info
        ptable_info.ptable_protect = memUtils.testBit(pdir_entry, 2)
        ptable_info.ptable_exists = True
        pdir_entry_24 = pdir_entry & mask64
        ptable_base = pdir_entry_24
        lgr.debug('pdir_entry: 0x%x 24 0x%x ptable_base: 0x%x' % (pdir_entry, pdir_entry_24, ptable_base))
        ptable_entry_addr = ptable_base + (WORD_SIZE*ptable)
        lgr.debug('ptable_entry_addr 0x%x ptable 0x%x' % (ptable_entry_addr, ptable_base))
        ptable_info.ptable_addr = ptable_entry_addr
        entry = SIM_read_phys_memory(cpu, ptable_entry_addr, WORD_SIZE)
        lgr.debug('ptable_entry_addr is 0x%x, page table entry contains 0x%x' % (ptable_entry_addr, entry))
        if entry == 0:
            return ptable_info
        ptable_info.page_protect = memUtils.testBit(entry, 2)
        ptable_info.page_exists = True
        entry_24 = entry & mask64
        page_base = entry_24
        paddr = page_base + offset
        ptable_info.page_addr = paddr
        #lgr.debug('phys addr is 0x%x' % paddr)
        return ptable_info
    else:
        lgr.error('addr_extended is zero?')
def get40(cpu, addr, lgr):
    '''Read an 8-byte paging entry at physical address addr and decode it.

    Returns a tuple (base, present, page_size):
      base      -- bits 12..50 of the entry shifted back into place
                   (the 40-bit-aligned physical base address)
      present   -- the entry's present bit (bit 0)
      page_size -- the entry's page-size (PS) bit (bit 7)
    If the physical read fails (nothing mapped at addr), logs the address
    and returns (0, 0, 0).
    '''
    try:
        value = SIM_read_phys_memory(cpu, addr, 8)
    except Exception:
        # Narrowed from a bare "except:" so SystemExit/KeyboardInterrupt
        # still propagate; any simulator read error is treated as unmapped.
        lgr.debug('nothing mapped at 0x%x' % addr)
        return 0, 0, 0
    retval = memUtils.bitRange(value, 12, 50) << 12
    page_size = memUtils.testBit(value, 7)
    present = memUtils.testBit(value, 0)
    return retval, present, page_size
def isIA32E(cpu):
    '''Return True if the cpu is running with IA-32e (64-bit) paging enabled,
    i.e. both CR4.PAE (bit 5) and IA32_EFER.LME (bit 8) are set.'''
    reg_num = cpu.iface.int_register.get_number("cr4")
    cr4 = cpu.iface.int_register.read(reg_num)
    # EFER is read via the cpu attribute; the original also did an unused
    # get_number("efer") lookup, removed here as dead code.
    efer = cpu.ia32_efer
    pae = memUtils.testBit(cr4, 5)
    lme = memUtils.testBit(efer, 8)
    #print('efer is 0x%x lme %d pae %d' % (efer, lme, pae))
    return bool(pae and lme)
def conditionalMet(self, mn):
    '''Return True if the condition of the instruction mnemonic mn is met.

    ARM conditions are delegated to armCond.condMet.  On x86, only the
    cmove/cmovne conditional moves are evaluated (against the ZF bit of
    eflags); any other cmov variant is reported as not met, and every
    non-cmov instruction is treated as unconditionally met.
    '''
    if self.cpu.architecture == 'arm':
        return armCond.condMet(self.cpu, mn)
    if not mn.startswith('cmov'):
        # Not a conditional move: always considered met.
        return True
    eflags = self.top.getReg('eflags', self.cpu)
    zero_flag = memUtils.testBit(eflags, 6)
    if mn == 'cmovne':
        return not zero_flag
    if mn == 'cmove':
        return bool(zero_flag)
    # Unhandled cmov variant.
    return False
def thinkExecuted(self, page_info):
    '''Heuristically decide whether the page described by page_info looks
    like it held executed code, based on its page-table entry bits.

    ARM: reject entries marked no-execute (bit 0) or never accessed (bit 4).
    x86: reject writable entries (bit 1) or never-accessed entries (bit 5).
    Returns True when none of the rejection conditions hold.
    '''
    entry = page_info.entry
    if self.cpu.architecture == 'arm':
        skip = memUtils.testBit(entry, 0) or not memUtils.testBit(entry, 4)
        #self.lgr.debug('thinkExecuted arm skip %r for 0x%x' % (skip, page_info.logical))
    else:
        skip = memUtils.testBit(entry, 1) or not memUtils.testBit(entry, 5)
        #self.lgr.debug('thinkExecuted x86 skip %r' % skip)
    return not skip
def __init__(self, entry, arch):
    '''Decode protection-related bits of a page table entry.

    For ARM entries, sets nx (bit 0), and derives accessed/writable from
    the AP field (bits 4-5): AP 0 or 1 means not accessed, AP 3 means
    writable.  For other architectures, writable is bit 1 and accessed is
    bit 5 of the entry.
    '''
    if arch == 'arm':
        self.nx = memUtils.testBit(entry, 0)
        ap = memUtils.bitRange(entry, 4, 5)
        self.accessed = ap not in (0, 1)
        self.writable = (ap == 3)
    else:
        self.writable = memUtils.testBit(entry, 1)
        self.accessed = memUtils.testBit(entry, 5)
def watchKernelPage(self, cpu, cell, pid, comm):
    ''' Watch kernel page tables for modification.

    Dispatches on the paging mode read from this cpu's cached CR3/CR4:
    PAE with 4-byte words uses watchKernelPageExtended, PAE with 8-byte
    words uses watchKernelIA32E, and non-PAE is handled inline by watching
    the kernel's portion of the traditional page directory and each mapped
    page table it references.
    '''
    mask32 = 0xfffff000
    cr3 = self.cr3[cpu]
    cr4 = self.cr4[cpu]
    cell_name = self.top.getTopComponentName(cpu)
    ''' determine if PAE being used '''
    addr_extend = memUtils.testBit(cr4, 5)
    if addr_extend != 0:
        if self.os_utils[cell_name].mem_utils.WORD_SIZE == 4:
            #self.lgr.debug('watchKernelPage, using PAE extended memory')
            self.watchKernelPageExtended(cpu, cell, cr3, pid, comm)
        else:
            #self.lgr.debug('watchKernelPage, using IA32E mode')
            self.watchKernelIA32E(cpu, cell, cr3, pid, comm)
    else:
        ''' Traditional page table.  Assume upper half kernel, and watch those
        directories & page tables.  Get the directory table entry that starts
        kernel base address '''
        self.lgr.debug('watchKernelPage, traditional paging')
        bottom = self.param[cell_name].kernel_base
        # Directory index of the kernel base; entries from here to 1023 are kernel.
        index = bottom >> 22
        remain = 1024 - index
        offset = index * 4
        pg_dir_addr = cr3 + offset
        #self.lgr.debug('watchKernelPage, index is %d remain %d offset %x' % (index, remain, offset))
        # watch the section of the page directory that the kernel uses for its stuff
        self.setPageTableHap(cpu, cell, pg_dir_addr, 4 * remain, PG_DIR, pid, comm)
        for i in range(remain):
            entry = SIM_read_phys_memory(cpu, pg_dir_addr, 4)
            # NOTE: pg_table_addr is recomputed identically inside the
            # big_pages==0 branch below; this first assignment is redundant.
            pg_table_addr = entry & mask32
            big_pages = memUtils.testBit(entry, 7)  # PS bit
            u_s = memUtils.testBit(entry, 2)        # user/supervisor bit
            if big_pages == 0:
                pg_table_addr = entry & mask32
                # Unlike the PAE paths, the u/s check is deliberately disabled here.
                #if pg_table_addr != 0 and u_s == 0:
                if pg_table_addr != 0:
                    #self.lgr.debug('watchKernelPage, pg_dir_entry: 0x%x pg_table_addr: 0x%x big: %x u/s %x' % (entry, pg_table_addr, big_pages, u_s))
                    self.setPageTableHap(cpu, cell, pg_table_addr, 4096, PG_TBL, pid, comm)
            else:
                #self.lgr.debug('watchKernelPage pg_dir_entry 0x%x is to a big page u/s %x' % (entry, u_s))
                pass
            pg_dir_addr = pg_dir_addr + 4
def __init__(self, param, lgr, cpu, cell, pid=0):
    '''Set execute breakpoints on contiguous runs of kernel pages and arrange
    to reverse to the most recent execution.

    Collects the kernel page bases via pageUtils.getPageBases, skips pages
    that are writable or not accessed, coalesces physically contiguous pages
    into ranges, sets a physical execute breakpoint per range, installs a
    simulation-stopped hap, and issues a 'reverse' command.
    '''
    self.pid = pid
    self.cpu = cpu
    self.lgr = lgr
    pages = pageUtils.getPageBases(cpu, lgr, param.kernel_base)
    breaks = []
    range_start = None
    prev_physical = None
    pcell = cpu.physical_memory
    for page_info in pages:
        writable = memUtils.testBit(page_info.entry, 1)
        accessed = memUtils.testBit(page_info.entry, 5)
        if writable or not accessed:
            # Only interested in read-only, accessed (likely code) pages.
            self.lgr.debug('will skip %r %r' % (writable, accessed))
            continue
        self.lgr.debug('phys: 0x%x logical: 0x%x' % (page_info.physical, page_info.logical))
        if range_start is None:
            # First qualifying page starts the current range.
            range_start = page_info.physical
            prev_physical = page_info.physical
        else:
            if page_info.physical == prev_physical + pageUtils.PAGE_SIZE:
                # Contiguous with the current range; extend it.
                prev_physical = page_info.physical
            else:
                # Gap found: close out the current range with a breakpoint
                # and start a new range at this page.
                # NOTE(review): the breakpoint length is PAGE_SIZE, not the
                # full accumulated range (prev_physical - range_start +
                # PAGE_SIZE) -- confirm whether only the first page of each
                # range is meant to be covered.
                self.lgr.debug(
                    'Page not contiguous: 0x%x range_start: 0x%x prev_physical: 0x%x'
                    % (page_info.physical, range_start, prev_physical))
                break_num = SIM_breakpoint(pcell, Sim_Break_Physical, Sim_Access_Execute,
                                           range_start, pageUtils.PAGE_SIZE, 0)
                breaks.append(break_num)
                range_start = page_info.physical
                prev_physical = page_info.physical
    # Close out the final range.
    # NOTE(review): if no page qualified, range_start is still None here and
    # this SIM_breakpoint call would receive None -- confirm pages is never
    # effectively empty on this path.
    break_num = SIM_breakpoint(pcell, Sim_Break_Physical, Sim_Access_Execute,
                               range_start, pageUtils.PAGE_SIZE, 0)
    breaks.append(break_num)
    self.lgr.debug('set %d breaks', len(breaks))
    '''
    break_num = SIM_breakpoint(cell, Sim_Break_Linear, Sim_Access_Execute, 0x100000, 0x10000000, 0)
    breaks = [break_num]
    '''
    hap_clean = hapCleaner.HapCleaner(cpu)
    stop_action = hapCleaner.StopAction(hap_clean, [break_num])
    self.stop_hap = SIM_hap_add_callback("Core_Simulation_Stopped", self.stopHap, stop_action)
    SIM_run_command('reverse')
def getPageEntrySize(cpu):
    '''Return the size in bytes of a page table entry for this cpu:
    4 for ARM and for traditional (non-PAE) x86 paging, 8 when CR4.PAE
    is set.
    TBD FIX THIS (original author's note retained).
    '''
    if cpu.architecture == 'arm':
        return 4
    # The original also read CR3 here, but the value was never used; that
    # dead lookup has been removed.
    reg_num = cpu.iface.int_register.get_number("cr4")
    cr4 = cpu.iface.int_register.read(reg_num)
    ''' determine if PAE being used '''
    addr_extend = memUtils.testBit(cr4, 5)
    return 8 if addr_extend else 4
def getPageBasesExtended(cpu, lgr, kernel_base):
    '''Enumerate mapped pages below kernel_base by walking PAE page tables.

    Walks the 4 page-directory-pointer entries at CR3, then each directory's
    512 entries, then each page table's 512 entries, collecting a
    PageAddrInfo(logical, page_base, entry) per present entry whose
    reconstructed logical address is below kernel_base.

    NOTE(review): several details look questionable for PAE and should be
    confirmed against the Intel SDM: bitRange(...,12,31) truncates 64-bit
    entry addresses to 32 bits; setBitRange(..., pdir_index, 22) uses the
    traditional-paging shift of 22 although PAE directories cover bits
    21..29; and a present/PS check on pdir_entry is absent (only != 0).
    '''
    ENTRIES_PER_TABLE = 512
    WORD_SIZE = 8
    retval = []
    reg_num = cpu.iface.int_register.get_number("cr3")
    cr3 = cpu.iface.int_register.read(reg_num)
    page_table_directory = cr3
    pdir_index = 0
    # Four page-directory-pointer entries in PAE mode.
    for pdir_table_index in range(4):
        pdir_entry_addr = SIM_read_phys_memory(cpu, page_table_directory, 8)
        for i in range(ENTRIES_PER_TABLE):
            pdir_entry = SIM_read_phys_memory(cpu, pdir_entry_addr, WORD_SIZE)
            pdir_entry_20 = memUtils.bitRange(pdir_entry, 12, 31)
            ptable_base = pdir_entry_20 * PAGE_SIZE
            if pdir_entry != 0:
                ptable_entry_addr = ptable_base
                ptable_index = 0
                for j in range(ENTRIES_PER_TABLE):
                    ptable_entry = SIM_read_phys_memory(
                        cpu, ptable_entry_addr, WORD_SIZE)
                    present = memUtils.testBit(ptable_entry, 0)
                    if present:
                        entry_20 = memUtils.bitRange(ptable_entry, 12, 31)
                        page_base = entry_20 * PAGE_SIZE
                        # Rebuild the logical address from the table indices.
                        logical = 0
                        logical = memUtils.setBitRange(logical, pdir_index, 22)
                        #lgr.debug('logical now 0x%x from index %d' % (logical, pdir_index))
                        logical = memUtils.setBitRange(logical, ptable_index, 12)
                        if logical >= kernel_base:
                            # Reached kernel space; stop scanning this page table
                            # (inner loop only) -- the directory walk continues.
                            break
                        #lgr.debug('logical now 0x%x from ptable index %d' % (logical, ptable_index))
                        addr_info = PageAddrInfo(logical, page_base, ptable_entry)
                        retval.append(addr_info)
                    ptable_entry_addr += WORD_SIZE
                    ptable_index += 1
            pdir_entry_addr += WORD_SIZE
            pdir_index += 1
        page_table_directory += WORD_SIZE
    return retval
def findPageTableArm(cpu, va, lgr, use_sld=None):
    '''Walk the ARM short-descriptor translation tables for virtual address va.

    Reads the first-level descriptor from the table at TTBR0, then the
    second-level (small page) descriptor, recording each descriptor address
    and existence flag in a PtableInfo.  If use_sld is given it replaces the
    second-level descriptor read from memory.  Returns the PtableInfo; fields
    past the first zero descriptor keep their defaults.
    '''
    info = PtableInfo()
    ttbr = cpu.translation_table_base0
    # First-level descriptor address: TTBR0[31:14] | (va[31:20] << 2).
    fld_addr = (memUtils.bitRange(ttbr, 14, 31) << 14) | (memUtils.bitRange(va, 20, 31) << 2)
    info.pdir_addr = fld_addr
    #lgr.debug('findPageTableArm fld_addr 0x%x' % fld_addr)
    fld = SIM_read_phys_memory(cpu, fld_addr, 4)
    if fld == 0:
        return info
    info.ptable_exists = True
    # Second-level table base is fld[31:10]; index with va[19:12].
    sld_addr = (memUtils.bitRange(fld, 10, 31) << 10) | (memUtils.bitRange(va, 12, 19) << 2)
    info.ptable_addr = sld_addr
    sld = SIM_read_phys_memory(cpu, sld_addr, 4)
    #print('sld 0x%x sld_addr 0x%x' % (sld, sld_addr))
    if use_sld is not None:
        # Caller-supplied descriptor overrides what was read from memory.
        sld = use_sld
    elif sld == 0:
        return info
    info.page_exists = True
    # Small page base address from sld[31:12].
    info.page_addr = memUtils.bitRange(sld, 12, 31) << 12
    info.nx = memUtils.testBit(sld, 0)
    info.entry = sld
    return info
def getPageBases(cpu, lgr, kernel_base):
    '''Enumerate mapped pages below kernel_base by walking traditional
    (non-PAE) 32-bit x86 page tables rooted at CR3.

    For ARM, delegates to getPageBasesArm.  Otherwise walks all 1024 page
    directory entries and, for each nonzero one, all 1024 page table
    entries, collecting a PageAddrInfo(logical, page_base, entry) for each
    present entry whose reconstructed logical address is below kernel_base.
    '''
    if cpu.architecture == 'arm':
        return getPageBasesArm(cpu, lgr, kernel_base)
    ENTRIES_PER_TABLE = 1024
    retval = []
    reg_num = cpu.iface.int_register.get_number("cr3")
    cr3 = cpu.iface.int_register.read(reg_num)
    pdir_entry_addr = cr3
    pdir_index = 0
    for i in range(ENTRIES_PER_TABLE):
        pdir_entry = SIM_read_phys_memory(cpu, pdir_entry_addr, 4)
        # Page table base is the top 20 bits of the directory entry.
        pdir_entry_20 = memUtils.bitRange(pdir_entry, 12, 31)
        ptable_base = pdir_entry_20 * PAGE_SIZE
        if pdir_entry != 0:
            ptable_entry_addr = ptable_base
            ptable_index = 0
            for j in range(ENTRIES_PER_TABLE):
                ptable_entry = SIM_read_phys_memory(cpu, ptable_entry_addr, 4)
                present = memUtils.testBit(ptable_entry, 0)
                if present:
                    entry_20 = memUtils.bitRange(ptable_entry, 12, 31)
                    page_base = entry_20 * PAGE_SIZE
                    # Rebuild the logical address from the two table indices.
                    logical = 0
                    logical = memUtils.setBitRange(logical, pdir_index, 22)
                    #lgr.debug('logical now 0x%x from index %d' % (logical, pdir_index))
                    logical = memUtils.setBitRange(logical, ptable_index, 12)
                    if logical >= kernel_base:
                        # Reached kernel space: stop scanning this page table
                        # (inner loop only); the directory walk continues.
                        break
                    #lgr.debug('logical now 0x%x from ptable index %d' % (logical, ptable_index))
                    addr_info = PageAddrInfo(logical, page_base, ptable_entry)
                    retval.append(addr_info)
                ptable_entry_addr += 4
                ptable_index += 1
        pdir_entry_addr += 4
        pdir_index += 1
    return retval
def pageFaultHap(self, compat32, third, forth, memory):
    '''Hap callback invoked on a page fault.

    Determines the faulting address (CR2 on x86; the combined data fault
    address register on ARM data aborts, or the exception EIP for ARM
    prefetch aborts), dedupes per-pid already-seen fault addresses, resolves
    the page-table state for the address, records a pending Prec for the
    pid, ensures a Core_Mode_Change hap is installed, and defers the rest of
    the handling to pageFaultHapAlone via SIM_run_alone.

    compat32     -- flag passed through to pageFaultHapAlone
    third, forth -- unused hap parameters (kept for the hap signature)
    memory       -- hap memory-operation parameter (unused here)
    '''
    if self.fault_hap is None:
        # Hap was removed; ignore stale callbacks.
        return
    #self.lgr.debug('pageFaultHap')
    #cpu, comm, pid = self.task_utils.curProc()
    #self.lgr.debug('pageFaultHap pid:%d third: %s forth: %s' % (pid, str(third), str(forth)))
    #cpu = SIM_current_processor()
    #if cpu != hap_cpu:
    #    self.lgr.debug('pageFaultHap, wrong cpu %s %s' % (cpu.name, hap_cpu.name))
    #    return
    #use_cell = self.cell
    #if self.debugging_pid is not None:
    #    use_cell = self.context_manager.getRESimContext()
    cpu, comm, pid = self.task_utils.curProc()
    if not self.context_manager.watchingThis():
        #self.lgr.debug('pageFaultHap pid:%d, contextManager says not watching' % pid)
        return
    eip = self.exception_eip
    cur_pc = self.mem_utils.getRegValue(cpu, 'pc')
    access_type = None
    if self.cpu.architecture == 'arm':
        if cur_pc == self.param.page_fault:
            ''' prefetch abort '''
            # No data fault register to read; fall back to the exception eip.
            reg_num = None
        else:
            # Data abort: faulting address and status come from the combined
            # fault registers.
            reg_num = self.cpu.iface.int_register.get_number(
                "combined_data_far")
            data_fault_reg = self.cpu.iface.int_register.get_number(
                "combined_data_fsr")
            fault = self.cpu.iface.int_register.read(data_fault_reg)
            access_type = memUtils.testBit(fault, 11)
            self.lgr.debug(
                'data fault pid:%d reg value 0x%x violation type: %d' %
                (pid, fault, access_type))
    else:
        # x86: CR2 holds the faulting linear address.
        reg_num = self.cpu.iface.int_register.get_number("cr2")
    if reg_num is not None:
        cr2 = self.cpu.iface.int_register.read(reg_num)
        #self.lgr.debug('cr2 read is 0x%x' % cr2)
    else:
        cr2 = eip
    if pid not in self.faulted_pages:
        self.faulted_pages[pid] = []
    if cr2 in self.faulted_pages[pid]:
        # Already handled this faulting address for this pid.
        #self.lgr.debug('pageFaultHap, addr 0x%x already handled for pid:%d cur_pc: 0x%x' % (cr2, pid, cur_pc))
        return
    self.faulted_pages[pid].append(cr2)
    #self.lgr.debug('pageFaultHapAlone for %d (%s) faulting address: 0x%x' % (pid, comm, cr2))
    #self.lgr.debug('pageFaultHap for %d (%s) at 0x%x faulting address: 0x%x' % (pid, comm, eip, cr2))
    #self.lgr.debug('len of faulted pages is now %d' % len(self.faulted_pages))
    # Resolve the page table state for the faulting address.
    if cpu.architecture == 'arm':
        page_info = pageUtils.findPageTableArm(self.cpu, cr2, self.lgr)
    elif pageUtils.isIA32E(cpu):
        page_info = pageUtils.findPageTableIA32E(self.cpu, cr2, self.lgr)
    else:
        page_info = pageUtils.findPageTable(self.cpu, cr2, self.lgr)
    prec = Prec(self.cpu, comm, pid, cr2, eip)
    if pid not in self.pending_faults:
        self.pending_faults[pid] = prec
        #self.lgr.debug('pageFaultHap add pending fault for %d addr 0x%x cycle 0x%x' % (pid, prec.cr2, prec.cycles))
    if self.mode_hap is None:
        #self.lgr.debug('pageFaultGen adding mode hap')
        self.mode_hap = SIM_hap_add_callback_obj(
            "Core_Mode_Change", cpu, 0, self.modeChanged, pid)
    hack_rec = (compat32, page_info, prec)
    # Defer heavier processing outside the hap context.
    SIM_run_alone(self.pageFaultHapAlone, hack_rec)
def __init__(self, top, param, cell_config, master_config, hap_manager,
             os_utils, kernel_info, page_size, unx_regions, cr3, cr4, lgr):
    ''' cr3 and cr4 per-cpu dictionaries.

    Initializes kernel watching for every configured cell: optionally sets a
    no-execute breakpoint covering kernel space minus the kernel (and cgc)
    text sections, and initializes per-cpu bookkeeping for kernel return /
    unexpected-execution / page-table tracking, selecting PAE-extended or
    IA32E page directory initialization from CR4.PAE and the cell word size.

    NOTE(review): per-cpu containers such as self.exempt_returns,
    self.kernel_ret_counts, self.kernel_pt_hap, etc. are indexed here without
    visible dict creation -- presumably they are initialized elsewhere in the
    class (or this is a partial view); confirm before refactoring.
    '''
    self.haps_added = 0
    self.haps_removed = 0
    self.lgr = lgr
    self.top = top
    self.cell_config = cell_config
    self.master_config = master_config
    self.param = param
    # note these are os_p_utils
    self.os_utils = os_utils
    self.kernel_info = kernel_info
    self.page_size = page_size
    self.rop_phys = False
    self.unx_regions = unx_regions
    self.cr3 = cr3
    self.cr4 = cr4
    self.PML4_entry = None
    #signal.signal(signal.SIGINT, self.signal_handler)
    self.record_profile = False
    if master_config.rop_profile_record:
        self.record_profile = True
    for cell_name in self.cell_config.cells:
        self.lgr.debug('watchKernel init for cell %s' % cell_name)
        obj = SIM_get_object(cell_name)
        cell = obj.cell_context
        #cmd = '%s.get-processor-list' % cell_name
        #proclist = SIM_run_command(cmd)
        #cpu = SIM_get_object(proclist[0])
        if master_config.kernelNoX(cell_name):
            self.lgr.debug('watchKernel nox for %s' % cell_name)
            # set the entire kernel space
            nox_break = SIM_breakpoint(cell, Sim_Break_Linear, Sim_Access_Execute,
                                       master_config.ps_strings + 1, 0xffffffff, 0)
            # remove kernel text section from break area
            SIM_breakpoint_remove(
                nox_break, Sim_Access_Execute,
                master_config.kernel_text[cell_name],
                master_config.kernel_text_size[cell_name])
            if cell_name in master_config.kernel_text2 and master_config.kernel_text2[
                    cell_name] is not None:
                SIM_breakpoint_remove(
                    nox_break, Sim_Access_Execute,
                    master_config.kernel_text2[cell_name],
                    master_config.kernel_text_size2[cell_name])
            # remove cgc text sections from break area
            if cell_name in cell_config.cell_cgc_address and cell_config.cell_cgc_address[
                    cell_name] is not None:
                SIM_breakpoint_remove(
                    nox_break, Sim_Access_Execute,
                    cell_config.cell_cgc_address[cell_name],
                    master_config.cgc_text_size)
            #master_config.text, master_config.text_size, 0)
            hap_manager.addBreak(cell_name, None, nox_break, None)
            for cpu in self.cell_config.cell_cpu_list[cell_name]:
                self.haps_added += 1
                nox_hap = SIM_hap_add_callback_index(
                    "Core_Breakpoint_Memop", self.nox_callback, cpu, nox_break)
                hap_manager.addHap(cpu, cell_name, None, nox_hap, None)
        # TBD perhaps optimize to only allocate if needed, for now these config values are per process type
        # not per cell_name
        if True or master_config.kernelRop(
                cell_name) or master_config.kernelUnx(cell_name):
            #self.lgr.debug('watchKernel ropcop for %s' % cell_name)
            for cpu in self.cell_config.cell_cpu_list[cell_name]:
                self.exempt_returns[cpu] = []
                #self.loadProfiles(cell_name, cpu)
                self.kernel_ret_counts[cpu] = {}
                self.kernel_ret_break[cpu] = []
                self.kernel_ret_hap[cpu] = []
                self.kernel_unx_break[cpu] = []
                self.kernel_unx_hap[cpu] = []
                self.ret_hits[cpu] = 0
                self.total_ret_hits[cpu] = 0
                if True or master_config.kernelPageTable(cell_name):
                    #self.lgr.debug('watchKernel kernelPageTable for %s' % cell_name)
                    self.kernel_pt_hap[cpu] = []
                    self.kernel_pt_break[cpu] = []
                    # Choose the paging-mode-specific page-dir initialization.
                    addr_extend = memUtils.testBit(cr4[cpu], 5)
                    if addr_extend != 0:
                        if self.os_utils[cell_name].mem_utils.WORD_SIZE == 4:
                            self.initPageDirAddressExtended(cpu, cr3[cpu])
                            self.lgr.debug('watchKernel using Extended')
                        else:
                            self.initPageDirIA32E(cpu, cr3[cpu])
                            self.lgr.debug('watchKernel using IA32E paging')