def alloc_page_table(old, pid, frm, index, to, perm, from_type, to_type):
    cond = z3.And(
        # The to argument is a valid page and is marked as free
        is_pn_valid(to),
        old.pages[to].type == dt.page_type.PAGE_TYPE_FREE,

        # The pid is valid and is either the current running process or a child embryo
        is_pid_valid(pid),
        z3.Or(pid == old.current,
              z3.And(
                  old.procs[pid].ppid == old.current,
                  old.procs[pid].state == dt.proc_state.PROC_EMBRYO)),

        # The frm parameter is valid, has type from_type and is owned by pid
        is_pn_valid(frm),
        old.pages[frm].owner == pid,
        old.pages[frm].type == from_type,

        # Index is a valid page index
        z3.ULT(index, 512),

        # perm has no unsafe bits set and it is present
        perm & (dt.MAX_INT64 ^ dt.PTE_PERM_MASK) == 0,
        perm & dt.PTE_P != 0,

        # The entry at index in frm does not have the P bit set
        old.pages[frm].data(index) & dt.PTE_P == 0,
    )

    new = old.copy()

    new.pages[to].owner = pid
    new.pages[to].type = to_type

    new.pages[frm].data[index] = (
        (z3.UDiv(new.pages_ptr_to_int, util.i64(dt.PAGE_SIZE)) + to) << dt.PTE_PFN_SHIFT) | perm

    # Zero out the new page
    new.pages[to].data = util.i64(0)

    # Maintain the "shadow" pgtable
    new.pages[frm].pgtable_pn[index] = to
    new.pages[to].pgtable_reverse_pn = frm
    new.pages[to].pgtable_reverse_idx = index

    new.pages[frm].pgtable_perm[index] = perm
    new.pages[frm].pgtable_type[index] = dt.PGTYPE_PAGE

    new.pages[to].pgtable_pn = util.i64(0)
    new.pages[to].pgtable_perm = util.i64(0)
    new.pages[to].pgtable_type = dt.PGTYPE_NONE

    new.procs[pid].nr_pages[to] += 1

    new.flush_tlb(pid)

    return cond, util.If(cond, new, old)

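# Illustrative sketch only: the per-level page-table allocation syscalls can be
# expressed as thin wrappers that fix from_type/to_type of alloc_page_table to a
# specific pair of page types. The wrapper name below is hypothetical and the real
# specification may define and name these wrappers differently; it only shows how
# the (cond, new-state) pair returned above is meant to be produced by a caller.
def sys_alloc_frame_sketch(old, pid, frm, index, to, perm):
    # Allocate a frame (to) and map it into a last-level page table (frm).
    return alloc_page_table(old, pid, frm, index, to, perm,
                            dt.page_type.PAGE_TYPE_X86_PT,
                            dt.page_type.PAGE_TYPE_FRAME)
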
def memset(ctx, ptr, val, size):
    val = z3.Extract(7, 0, val)
    size = size.as_long()

    # If we're passed a bitcasted pointer we just check that the write size is a
    # multiple of the underlying type's write size; then we can ignore the bitcast.
    if isinstance(ptr, BitcastPointer):
        ptr = ptr._ptr

    inner = ptr.type().deref()

    # We are memsetting an array whose inner type matches the size of the val
    assert inner.is_array()
    assert inner.deref().size() % val.size() == 0
    val = z3.Concat(*([val] * (inner.deref().size() // val.size())))

    if inner.deref().is_int():
        array_len = ptr.type().deref().length()

        dst_start = ptr.getelementptr(ctx, util.i64(0), util.i64(0))
        dst_end = ptr.getelementptr(ctx, util.i64(0), util.i64(array_len - 1))

        dst_start_path = dst_start.canonical_path()
        dst_end_path = dst_end.canonical_path()

        dst_tup, dst_start_args = ptr._ref.build_field_tuple_and_path(
            ctx, dst_start_path)
        _, dst_end_args = ptr._ref.build_field_tuple_and_path(
            ctx, dst_end_path)

        dstfn = ctx['references'][ptr._ref._name][dst_tup]

        def newf(*args):
            assert len(args) == len(dst_end_args)
            cond = []

            for a, b in zip(args[:-1], dst_start_args[:-1]):
                cond.append(a == b)

            cond.append(z3.UGE(args[-1], dst_start_args[-1]))
            cond.append(z3.ULE(args[-1], dst_end_args[-1]))

            cond = z3.And(*cond)

            return util.If(cond, val, dstfn(*args))

        ctx['references'][ptr._ref._name][dst_tup] = newf

        return ptr
    else:
        raise NotImplementedError(
            "Don't know how to memset {!r}".format(inner))

def pgentry2pfn(ks, off, perm, type):
    res = util.i64(0)
    res = util.If(type == dt.PGTYPE_PCIPAGE, util.i64(dt.PCI_START), res)
    res = util.If(type == dt.PGTYPE_IOMMU_FRAME, ks.dmapages_ptr_to_int, res)
    res = util.If(type == dt.PGTYPE_DEVICES, ks.devices_ptr_to_int, res)
    res = util.If(type == dt.PGTYPE_FILE_TABLE, ks.file_table_ptr_to_int, res)
    res = util.If(type == dt.PGTYPE_PAGE_DESC, ks.page_desc_table_ptr_to_int, res)
    res = util.If(type == dt.PGTYPE_PROC, ks.proc_table_ptr_to_int, res)
    res = util.If(type == dt.PGTYPE_PAGE, ks.pages_ptr_to_int, res)
    return ((z3.UDiv(res, util.i64(dt.PAGE_SIZE)) + off) << dt.PTE_PFN_SHIFT) | perm

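# Illustrative sanity sketch, not part of the proof scripts: for the PGTYPE_PAGE
# case, pgentry2pfn should produce exactly the PTE value that alloc_page_table
# writes, i.e. ((pages_base / PAGE_SIZE) + off) << PTE_PFN_SHIFT, OR'd with perm.
# The helper name below is hypothetical; it just poses that equality to Z3.
def _pgentry2pfn_page_case_check(ks):
    off = util.FreshBitVec('off', 64)
    perm = util.FreshBitVec('perm', 64)
    lhs = pgentry2pfn(ks, off, perm, dt.PGTYPE_PAGE)
    rhs = ((z3.UDiv(ks.pages_ptr_to_int, util.i64(dt.PAGE_SIZE)) + off)
           << dt.PTE_PFN_SHIFT) | perm
    s = z3.Solver()
    # The two terms should be equal for all off/perm; ask Z3 for a counterexample.
    s.add(lhs != rhs)
    return s.check() == z3.unsat
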
def sys_map_pcipage(old, pt, index, pcipn, perm):
    cond = z3.And(
        # pt is a valid PT page owned by current
        is_pn_valid(pt),
        old.pages[pt].type == dt.page_type.PAGE_TYPE_X86_PT,
        old.pages[pt].owner == old.current,

        # Index is a valid page index
        z3.ULT(index, 512),

        # pcipn is a valid pci page owned by current
        is_pcipn_valid(pcipn),
        old.pcipages[pcipn].valid,
        old.pci[old.pcipages[pcipn].owner].owner == old.current,

        # perm has no unsafe bits set and it is present
        perm & (dt.MAX_INT64 ^ dt.PTE_PERM_MASK) == 0,
        perm & dt.PTE_P != 0,

        # The slot at index should be empty
        old.pages[pt].data(index) & dt.PTE_P == 0,
    )

    new = old.copy()

    new.pages[pt].data[index] = ((z3.UDiv(
        dt.PCI_START, util.i64(dt.PAGE_SIZE)) + pcipn) << dt.PTE_PFN_SHIFT) | perm

    # Maintain the "shadow" pgtable
    new.pages[pt].pgtable_pn[index] = pcipn
    new.pages[pt].pgtable_perm[index] = perm
    new.pages[pt].pgtable_type[index] = dt.PGTYPE_PCIPAGE

    new.flush_tlb(old.current)

    return cond, util.If(cond, new, old)

def pages_equiv(conj, ctx, kernelstate):
    pn = util.FreshBitVec('pn', dt.pn_t)
    idx = util.FreshBitVec('page_index', 64)

    conj.append(
        z3.ForAll([pn, idx],
                  z3.Implies(
                      z3.And(is_pn_valid(pn), z3.ULT(idx, 512)),
                      util.global_to_uf_dict(ctx, '@pages')[()](
                          util.i64(0), pn, idx) ==
                      kernelstate.pages[pn].data(idx))))

    conj.append(
        z3.ForAll([pn],
                  z3.Implies(
                      is_pn_valid(pn),
                      util.global_field_element(
                          ctx, '@page_desc_table', 'pid', pn) ==
                      kernelstate.pages[pn].owner)))

    conj.append(
        z3.ForAll([pn],
                  z3.Implies(
                      is_pn_valid(pn),
                      util.global_field_element(
                          ctx, '@page_desc_table', 'type', pn) ==
                      kernelstate.pages[pn].type)))

def io_equiv(conj, ctx, kernelstate):
    port = util.FreshBitVec('port', dt.uint16_t)

    conj.append(
        z3.ForAll([port],
                  util.global_to_uf_dict(ctx, '@io_table')[()](
                      util.i64(0),
                      z3.ZeroExt(64 - port.size(), port)) ==
                  kernelstate.io[port].owner))

def vectors_equiv(conj, ctx, kernelstate):
    vector = util.FreshBitVec('vector', dt.uint8_t)

    conj.append(
        z3.ForAll([vector],
                  util.global_to_uf_dict(ctx, '@vector_table')[()](
                      util.i64(0),
                      z3.ZeroExt(64 - vector.size(), vector)) ==
                  kernelstate.vectors[vector].owner))

def pci_equiv(conj, ctx, kernelstate):
    devid = util.FreshBitVec('devid', dt.devid_t)

    conj.append(
        z3.ForAll([devid],
                  util.global_to_uf_dict(ctx, '@pci_table')[()](
                      util.i64(0),
                      z3.ZeroExt(64 - devid.size(), devid)) ==
                  kernelstate.pci[devid].owner))

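# Illustrative sketch: each of the *_equiv helpers above appends ForAll-quantified
# equalities between the implementation-level globals (looked up through ctx) and
# the specification state. A minimal way to combine them into a single equivalence
# formula is shown below; the function name is hypothetical and the real proof
# harness may include additional equivalence clauses.
def state_equiv_sketch(ctx, kernelstate):
    conj = []
    pages_equiv(conj, ctx, kernelstate)
    io_equiv(conj, ctx, kernelstate)
    vectors_equiv(conj, ctx, kernelstate)
    pci_equiv(conj, ctx, kernelstate)
    return z3.And(*conj)
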
def sys_map_page(old, pid, frm, index, pa, perm, from_type):
    pfn = z3.UDiv(pa, util.i64(dt.PAGE_SIZE))
    n = pfn - z3.UDiv(old.page_desc_table_ptr_to_int, util.i64(dt.PAGE_SIZE))

    cond = z3.And(
        is_pid_valid(pid),

        # The pid is either current or an embryo belonging to current
        z3.Or(pid == old.current,
              z3.And(
                  old.procs[pid].ppid == old.current,
                  old.procs[pid].state == dt.proc_state.PROC_EMBRYO)),

        # frm is a valid pn of type from_type whose owner is pid
        is_pn_valid(frm),
        old.pages[frm].type == from_type,
        old.pages[frm].owner == pid,

        # Index is a valid page index
        z3.ULT(index, 512),

        # perm has no unsafe bits set and it is present
        perm & (dt.MAX_INT64 ^ dt.PTE_PERM_MASK) == 0,
        perm & dt.PTE_P != 0,

        # The entry at index in frm does not have the P bit set
        old.pages[frm].data(index) & dt.PTE_P == 0,
    )

    new = old.copy()

    new.pages[frm].data[index] = ((z3.UDiv(
        new.page_desc_table_ptr_to_int, util.i64(dt.PAGE_SIZE)) + n) << dt.PTE_PFN_SHIFT) | perm

    # Maintain the "shadow" pgtable
    new.pages[frm].pgtable_pn[index] = n
    new.pages[frm].pgtable_perm[index] = perm
    new.pages[frm].pgtable_type[index] = dt.PGTYPE_PAGE_DESC

    new.flush_tlb(pid)

    return cond, util.If(cond, new, old)

def sys_protect_frame(old, pt, index, frame, perm):
    cond = z3.And(
        # pt is a valid PT page owned by current
        is_pn_valid(pt),
        old.pages[pt].type == dt.page_type.PAGE_TYPE_X86_PT,
        old.pages[pt].owner == old.current,

        # Index is a valid page index
        z3.ULT(index, 512),

        # frame is a valid frame owned by current
        is_pn_valid(frame),
        old.pages[frame].type == dt.page_type.PAGE_TYPE_FRAME,
        old.pages[frame].owner == old.current,

        # The entry at index must be present
        old.pages[pt].data(index) & dt.PTE_P != 0,

        # The entry in the pt must map the frame
        z3.Extract(63, 40, z3.UDiv(old.pages_ptr_to_int,
                                   util.i64(dt.PAGE_SIZE)) + frame) == z3.BitVecVal(0, 24),
        z3.Extract(39, 0, z3.UDiv(old.pages_ptr_to_int, util.i64(
            dt.PAGE_SIZE)) + frame) == z3.Extract(51, 12, old.pages[pt].data(index)),

        # No unsafe bits in perm are set
        perm & (dt.MAX_INT64 ^ dt.PTE_PERM_MASK) == 0,
        # The P bit is set in perm
        perm & dt.PTE_P != 0,
    )

    new = old.copy()

    new.pages[pt].data[index] = (
        (z3.UDiv(new.pages_ptr_to_int, util.i64(dt.PAGE_SIZE)) + frame) << dt.PTE_PFN_SHIFT) | perm

    # The only thing that changed is the permission.
    new.pages[pt].pgtable_perm[index] = perm

    new.flush_tlb(old.current)

    return cond, util.If(cond, new, old)

def sys_alloc_io_bitmap(old, pn1, pn2, pn3):
    cond = z3.And(
        # The three pages must be consecutive
        pn1 + 1 == pn2,
        pn2 + 1 == pn3,

        # current must not already use an I/O bitmap
        z3.Not(old.procs[old.current].use_io_bitmap),

        # All three pages are valid and free
        is_pn_valid(pn1),
        old.pages[pn1].type == dt.page_type.PAGE_TYPE_FREE,

        is_pn_valid(pn2),
        old.pages[pn2].type == dt.page_type.PAGE_TYPE_FREE,

        is_pn_valid(pn3),
        old.pages[pn3].type == dt.page_type.PAGE_TYPE_FREE,
    )

    new = old.copy()

    new.pages[pn1].owner = old.current
    new.pages[pn1].type = dt.page_type.PAGE_TYPE_PROC_DATA
    new.pages[pn1].data = util.i64(0xffffffffffffffff)
    new.procs[old.current].nr_pages[pn1] += 1

    new.pages[pn2].owner = old.current
    new.pages[pn2].type = dt.page_type.PAGE_TYPE_PROC_DATA
    new.pages[pn2].data = util.i64(0xffffffffffffffff)
    new.procs[old.current].nr_pages[pn2] += 1

    new.pages[pn3].owner = old.current
    new.pages[pn3].type = dt.page_type.PAGE_TYPE_PROC_DATA
    new.pages[pn3].data = util.i64(0xffffffffffffffff)
    new.procs[old.current].nr_pages[pn3] += 1

    new.procs[old.current].io_bitmap_a = pn1
    new.procs[old.current].io_bitmap_b = pn2
    new.procs[old.current].use_io_bitmap = z3.BoolVal(True)

    return cond, util.If(cond, new, old)

def free_page_table_page(old, frm, index, to, from_type, to_type):
    cond = z3.And(
        # The frm pn has the correct type and is owned by current
        is_pn_valid(frm),
        old.pages[frm].type == from_type,
        old.pages[frm].owner == old.current,

        # Index is a valid page index
        z3.ULT(index, 512),

        # The to pn has the correct type and is owned by current
        is_pn_valid(to),
        old.pages[to].type == to_type,
        old.pages[to].owner == old.current,

        # The entry at index in frm has the P bit set
        old.pages[frm].data(index) & dt.PTE_P != 0,

        # The current pgtable entry matches to
        z3.Extract(63, 40, z3.UDiv(old.pages_ptr_to_int,
                                   util.i64(dt.PAGE_SIZE)) + to) == z3.BitVecVal(0, 24),
        z3.Extract(39, 0, z3.UDiv(old.pages_ptr_to_int, util.i64(
            dt.PAGE_SIZE)) + to) == z3.Extract(51, 12, old.pages[frm].data(index)),
    )

    new = old.copy()

    new.pages[frm].data[index] = util.i64(0)

    new.pages[to].owner = z3.BitVecVal(0, dt.pid_t)
    new.pages[to].type = dt.page_type.PAGE_TYPE_FREE

    new.procs[old.current].nr_pages[to] -= 1

    new.flush_tlb(old.current)

    return cond, util.If(cond, new, old)

def sys_map_pml4(old, pid, index, perm):
    cond = z3.And(
        is_pid_valid(pid),

        # The pid is either current or an embryo belonging to current
        z3.Or(pid == old.current,
              z3.And(
                  old.procs[pid].ppid == old.current,
                  old.procs[pid].state == dt.proc_state.PROC_EMBRYO)),

        # Index is a valid page index
        z3.ULT(index, 512),

        # perm has no unsafe bits set, is present and is non-writable
        perm & (dt.MAX_INT64 ^ dt.PTE_PERM_MASK) == 0,
        perm & dt.PTE_P != 0,
        perm & dt.PTE_W == 0,

        # The entry at index in pid's page table root does not have the P bit set
        old.pages[old.procs[pid].page_table_root].data(
            index) & dt.PTE_P == 0,
    )

    new = old.copy()

    frm = old.procs[pid].page_table_root

    new.pages[frm].data[index] = (
        (z3.UDiv(new.pages_ptr_to_int, util.i64(dt.PAGE_SIZE)) + frm) << dt.PTE_PFN_SHIFT) | perm

    # Maintain the "shadow" pgtable
    new.pages[frm].pgtable_pn[index] = frm
    new.pages[frm].pgtable_perm[index] = perm
    new.pages[frm].pgtable_type[index] = dt.PGTYPE_PAGE

    new.pages[frm].pgtable_reverse_pn = frm
    new.pages[frm].pgtable_reverse_idx = index

    new.flush_tlb(pid)

    return cond, util.If(cond, new, old)

def sys_alloc_iommu_root(old, devid, pn):
    cond = z3.And(
        # The device is not owned by anyone
        old.pci[devid].owner == 0,

        # pn is a valid free page
        is_pn_valid(pn),
        old.pages[pn].type == dt.page_type.PAGE_TYPE_FREE,
    )

    new = old.copy()

    new.pci[devid].owner = old.current
    new.pci[devid].page_table_root = pn

    new.pages[pn].owner = old.current
    new.pages[pn].type = dt.page_type.PAGE_TYPE_IOMMU_PML4

    # bzero the page
    new.pages[pn].data = util.i64(0)

    new.procs[old.current].nr_pages[pn] += 1
    new.procs[old.current].nr_devs[devid] += 1

    new.flush_iotlb()

    return cond, util.If(cond, new, old)

def alloc_iommu_page_table_page(old, frm, index, to, perm, from_type, to_type):
    cond = z3.And(
        # The to page is valid and free
        is_pn_valid(to),
        old.pages[to].type == dt.page_type.PAGE_TYPE_FREE,

        # The frm page is valid, has the correct type and is owned by current
        is_pn_valid(frm),
        old.pages[frm].type == from_type,
        old.pages[frm].owner == old.current,

        # Index is a valid page index
        z3.ULT(index, 512),

        # perm may only contain the DMAR read/write bits
        perm & (dt.MAX_INT64 ^ (dt.DMAR_PTE_R | dt.DMAR_PTE_W)) == 0,

        # The entry at index is empty
        old.pages[frm].data(index) == 0,
    )

    new = old.copy()

    # IOMMU page-table entries hold the physical address of the target page
    new.pages[frm].data[index] = (new.pages_ptr_to_int + to * dt.PAGE_SIZE) | perm

    new.pages[frm].pgtable_pn[index] = to
    new.pages[frm].pgtable_perm[index] = perm

    new.pages[to].type = to_type
    new.pages[to].owner = old.current
    new.pages[to].data = util.i64(0)

    new.procs[old.current].nr_pages[to] += 1

    new.flush_iotlb()

    return cond, util.If(cond, new, old)

def sys_map_iommu_frame(old, pt, index, to, perm):
    cond = z3.And(
        # to is a valid IOMMU_FRAME owned by current
        is_dmapn_valid(to),
        old.dmapages[to].type == dt.page_type.PAGE_TYPE_IOMMU_FRAME,
        old.dmapages[to].owner == old.current,

        # pt is a valid X86_PT page owned by current
        is_pn_valid(pt),
        old.pages[pt].type == dt.page_type.PAGE_TYPE_X86_PT,
        old.pages[pt].owner == old.current,

        # Index is a valid page index
        z3.ULT(index, 512),

        # perm contains no unsafe bits and it is present
        perm & (dt.MAX_INT64 ^ dt.PTE_PERM_MASK) == 0,
        perm & dt.PTE_P != 0,

        # The slot at index is unused in pt
        old.pages[pt].data(index) & dt.PTE_P == 0,
    )

    new = old.copy()

    new.pages[pt].data[index] = (
        (z3.UDiv(new.dmapages_ptr_to_int, util.i64(dt.PAGE_SIZE)) + to) << dt.PTE_PFN_SHIFT) | perm

    # Maintain the "shadow" pgtable
    new.pages[pt].pgtable_pn[index] = to
    new.pages[pt].pgtable_perm[index] = perm
    new.pages[pt].pgtable_type[index] = dt.PGTYPE_IOMMU_FRAME

    new.flush_tlb(old.current)

    return cond, util.If(cond, new, old)

def spec_invariants(kernelstate):
    conj = []

    pid = util.FreshBitVec('pid', dt.pid_t)
    pn = util.FreshBitVec('pn', dt.pn_t)

    ##
    # procs' page table, hvm and stack are
    #
    # 1) valid
    conj.append(z3.ForAll([pid], z3.Implies(is_pid_valid(pid),
        z3.And(
            is_pn_valid(kernelstate.procs[pid].page_table_root),
            is_pn_valid(kernelstate.procs[pid].hvm),
            is_pn_valid(kernelstate.procs[pid].stack)))))

    # 2) owned by that proc
    conj.append(z3.ForAll([pid], z3.Implies(is_pid_valid(pid),
        z3.Implies(
            is_status_live(kernelstate.procs[pid].state),
            z3.And(
                kernelstate.pages[kernelstate.procs[pid].page_table_root].owner == pid,
                kernelstate.pages[kernelstate.procs[pid].hvm].owner == pid,
                kernelstate.pages[kernelstate.procs[pid].stack].owner == pid)))))

    # 3) have the correct type
    conj.append(z3.ForAll([pid], z3.Implies(is_pid_valid(pid),
        z3.Implies(
            is_status_live(kernelstate.procs[pid].state),
            z3.And(
                kernelstate.pages[kernelstate.procs[pid].page_table_root].type ==
                    dt.page_type.PAGE_TYPE_X86_PML4,
                kernelstate.pages[kernelstate.procs[pid].hvm].type ==
                    dt.page_type.PAGE_TYPE_PROC_DATA,
                kernelstate.pages[kernelstate.procs[pid].stack].type ==
                    dt.page_type.PAGE_TYPE_PROC_DATA)))))

    ##
    # A sleeping proc's ipc_page is a frame owned by that pid
    conj.append(z3.ForAll([pid], z3.Implies(is_pid_valid(pid),
        z3.Implies(
            kernelstate.procs[pid].state == dt.proc_state.PROC_SLEEPING,
            z3.And(
                is_pn_valid(kernelstate.procs[pid].ipc_page),
                kernelstate.pages[kernelstate.procs[pid].ipc_page].type ==
                    dt.page_type.PAGE_TYPE_FRAME,
                kernelstate.pages[kernelstate.procs[pid].ipc_page].owner == pid)))))

    ## Non-zombie procs with use_io_bitmap own their (valid) bitmap pages
    conj.append(z3.ForAll([pid], z3.Implies(
        z3.And(
            is_pid_valid(pid),
            kernelstate.procs[pid].use_io_bitmap,
            kernelstate.procs[pid].state != dt.proc_state.PROC_ZOMBIE),
        z3.And(
            is_pn_valid(kernelstate.procs[pid].io_bitmap_a),
            is_pn_valid(kernelstate.procs[pid].io_bitmap_b),
            kernelstate.pages[kernelstate.procs[pid].io_bitmap_a].owner == pid,
            kernelstate.pages[kernelstate.procs[pid].io_bitmap_b].owner == pid,
            kernelstate.pages[kernelstate.procs[pid].io_bitmap_a].type ==
                dt.page_type.PAGE_TYPE_PROC_DATA,
            kernelstate.pages[kernelstate.procs[pid].io_bitmap_b].type ==
                dt.page_type.PAGE_TYPE_PROC_DATA))))

    # A page has an owner <=> the page is not free
    conj.append(z3.ForAll([pn], z3.Implies(is_pn_valid(pn),
        is_pid_valid(kernelstate.pages[pn].owner) ==
            (kernelstate.pages[pn].type != dt.page_type.PAGE_TYPE_FREE))))

    conj.append(z3.ForAll([pn], z3.Implies(is_pn_valid(pn),
        z3.Implies(kernelstate.pages[pn].type == dt.page_type.PAGE_TYPE_FREE,
                   z3.Not(is_pid_valid(kernelstate.pages[pn].owner))))))

    # A sleeping proc's ipc_fd is either invalid or empty
    conj.append(z3.ForAll([pid], z3.Implies(
        z3.And(
            is_pid_valid(pid),
            kernelstate.procs[pid].state == dt.proc_state.PROC_SLEEPING),
        z3.Or(
            z3.Not(is_fd_valid(kernelstate.procs[pid].ipc_fd)),
            z3.Not(is_fn_valid(kernelstate.procs[pid].ofile(
                kernelstate.procs[pid].ipc_fd)))))))

    ##############
    # Unused procs' refcount is all zero
    # conj.append(z3.ForAll([pid], z3.Implies(is_pid_valid(pid),
    #     z3.Implies(kernelstate.procs[pid].state == dt.proc_state.PROC_UNUSED,
    #         z3.And(
    #             kernelstate.procs[pid].nr_pages(dt.NPAGE - 1) == z3.BitVecVal(0, dt.size_t))))))
    #             kernelstate.procs[pid].nr_children(dt.NPROC - 1) == z3.BitVecVal(0, dt.size_t),
    #             kernelstate.procs[pid].nr_fds(dt.NOFILE - 1) == z3.BitVecVal(0, dt.size_t),
    #             kernelstate.procs[pid].nr_devs(dt.NPCIDEV - 1) == z3.BitVecVal(0, dt.size_t))))))

    # # Unused procs don't have a parent
    # conj.append(z3.ForAll([pid], z3.Implies(
    #     z3.And(
    #         is_pid_valid(pid),
    #         kernelstate.procs[pid].state == dt.proc_state.PROC_UNUSED),
    #     kernelstate.procs[pid].ppid == z3.BitVecVal(0, dt.pid_t))))

    # # Unused procs don't have fds
    # conj.append(z3.ForAll([pid, fd], z3.Implies(
    #     z3.And(
    #         is_pid_valid(pid),
    #         kernelstate.procs[pid].state == dt.proc_state.PROC_UNUSED),
    #     z3.Not(is_fn_valid(kernelstate.procs[pid].ofile(fd))))))

    # # An unused fn has refcount == 0
    # conj.append(z3.ForAll([fn], z3.Implies(is_fn_valid(fn),
    #     z3.Implies(kernelstate.files[fn].type == dt.file_type.FD_NONE,
    #         kernelstate.files[fn].refcnt(
    #             z3.Concat(
    #                 z3.BitVecVal(dt.NPROC - 1, dt.pid_t),
    #                 z3.BitVecVal(dt.NOFILE - 1, dt.fd_t))) == z3.BitVecVal(0, dt.size_t)))))
    ##############

    # Disjointness of memory regions
    conj.append(z3.And(
        z3.Extract(63, 40, z3.UDiv(kernelstate.pages_ptr_to_int,
                                   util.i64(4096)) + dt.NPAGES_PAGES) == z3.BitVecVal(0, 24),
        z3.Extract(63, 40, z3.UDiv(kernelstate.proc_table_ptr_to_int,
                                   util.i64(4096)) + dt.NPAGES_PROC_TABLE) == z3.BitVecVal(0, 24),
        z3.Extract(63, 40, z3.UDiv(kernelstate.page_desc_table_ptr_to_int,
                                   util.i64(4096)) + dt.NPAGES_PAGE_DESC_TABLE) == z3.BitVecVal(0, 24),
        z3.Extract(63, 40, z3.UDiv(kernelstate.file_table_ptr_to_int,
                                   util.i64(4096)) + dt.NPAGES_FILE_TABLE) == z3.BitVecVal(0, 24),
        z3.Extract(63, 40, z3.UDiv(kernelstate.devices_ptr_to_int,
                                   util.i64(4096)) + dt.NPAGES_DEVICES) == z3.BitVecVal(0, 24),
        z3.Extract(63, 40, z3.UDiv(kernelstate.dmapages_ptr_to_int,
                                   util.i64(4096)) + dt.NDMAPAGE) == z3.BitVecVal(0, 24),

        z3.Extract(63, 40, z3.UDiv(kernelstate.pages_ptr_to_int,
                                   util.i64(4096))) == z3.BitVecVal(0, 24),
        z3.Extract(63, 40, z3.UDiv(kernelstate.proc_table_ptr_to_int,
                                   util.i64(4096))) == z3.BitVecVal(0, 24),
        z3.Extract(63, 40, z3.UDiv(kernelstate.page_desc_table_ptr_to_int,
                                   util.i64(4096))) == z3.BitVecVal(0, 24),
        z3.Extract(63, 40, z3.UDiv(kernelstate.file_table_ptr_to_int,
                                   util.i64(4096))) == z3.BitVecVal(0, 24),
        z3.Extract(63, 40, z3.UDiv(kernelstate.devices_ptr_to_int,
                                   util.i64(4096))) == z3.BitVecVal(0, 24),
        z3.Extract(63, 40, z3.UDiv(kernelstate.dmapages_ptr_to_int,
                                   util.i64(4096))) == z3.BitVecVal(0, 24),

        z3.ULT(z3.UDiv(kernelstate.pages_ptr_to_int, util.i64(4096)) + dt.NPAGES_PAGES,
               z3.UDiv(kernelstate.proc_table_ptr_to_int, util.i64(4096))),
        z3.ULT(z3.UDiv(kernelstate.proc_table_ptr_to_int, util.i64(4096)) + dt.NPAGES_PROC_TABLE,
               z3.UDiv(kernelstate.page_desc_table_ptr_to_int, util.i64(4096))),
        z3.ULT(z3.UDiv(kernelstate.page_desc_table_ptr_to_int, util.i64(4096)) + dt.NPAGES_PAGE_DESC_TABLE,
               z3.UDiv(kernelstate.file_table_ptr_to_int, util.i64(4096))),
        z3.ULT(z3.UDiv(kernelstate.file_table_ptr_to_int, util.i64(4096)) + dt.NPAGES_FILE_TABLE,
               z3.UDiv(kernelstate.devices_ptr_to_int, util.i64(4096))),
        z3.ULT(z3.UDiv(kernelstate.devices_ptr_to_int, util.i64(4096)) + dt.NPCIDEV,
               z3.UDiv(kernelstate.dmapages_ptr_to_int, util.i64(4096))),
        z3.ULT(z3.UDiv(kernelstate.dmapages_ptr_to_int, util.i64(4096)) + dt.NDMAPAGE,
               z3.UDiv(dt.PCI_START, util.i64(4096))),
    ))

    # Current is a valid pid
    conj.append(is_pid_valid(kernelstate.current))

    # Current is always running
    conj.append(kernelstate.procs[kernelstate.current].state == dt.proc_state.PROC_RUNNING)

    # A running proc must be current
    conj.append(z3.ForAll([pid], z3.Implies(is_pid_valid(pid),
        z3.Implies(kernelstate.procs[pid].state == dt.proc_state.PROC_RUNNING,
                   pid == kernelstate.current))))

    return z3.And(*conj)

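# Illustrative sketch, not part of the proof scripts: how an invariant-preservation
# check for one of the specifications above could be posed to Z3. `mk_fresh_state`
# stands in for whatever builds a fully symbolic kernel state in the real harness;
# the function and argument names here are hypothetical.
def check_sys_protect_frame_preserves_invariants(mk_fresh_state):
    old = mk_fresh_state()
    pt = util.FreshBitVec('pt', dt.pn_t)
    index = util.FreshBitVec('index', 64)
    frame = util.FreshBitVec('frame', dt.pn_t)
    perm = util.FreshBitVec('perm', 64)

    # The spec returns (precondition, resulting state); the resulting state already
    # collapses to `old` when the precondition does not hold.
    cond, new = sys_protect_frame(old, pt, index, frame, perm)

    s = z3.Solver()
    # Look for a state where the invariants held before the call but not after.
    s.add(spec_invariants(old))
    s.add(z3.Not(spec_invariants(new)))
    return s.check() == z3.unsat
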
def pdb(ctx, *args):
    from ipdb import set_trace
    set_trace()
    return util.i64(0)

def bzero(ctx, ptr, size):
    size = size.as_long()

    # If we're passed a bitcasted pointer we just check that the write size is a
    # multiple of the underlying type's write size; then we can ignore the bitcast.
    if isinstance(ptr, BitcastPointer):
        ptr = ptr._ptr

    inner = ptr.type().deref()

    if inner.is_int():
        assert size * 8 <= 64
        ptr.write(ctx, z3.BitVecVal(0, size * 8))
    elif inner.is_struct():
        assert inner.size() // 8 == size, \
            "Can not partially bzero a struct: {} v {}".format(inner.size() // 8, size)
        for i, field in enumerate(inner.fields()):
            subptr = ptr.getelementptr(ctx, util.i64(0), util.i64(i),
                                       type=itypes.PointerType(field))
            bzero(ctx, subptr, z3.simplify(z3.BitVecVal(field.size() // 8, 64)))
    elif inner.is_array():
        write_size = inner.deref().size()
        if inner.deref().is_int():
            array_len = ptr.type().deref().length()

            dst_start = ptr.getelementptr(ctx, util.i64(0), util.i64(0))
            dst_end = ptr.getelementptr(ctx, util.i64(0), util.i64(array_len - 1))

            dst_start_path = dst_start.canonical_path()
            dst_end_path = dst_end.canonical_path()

            dst_tup, dst_start_args = ptr._ref.build_field_tuple_and_path(
                ctx, dst_start_path)
            _, dst_end_args = ptr._ref.build_field_tuple_and_path(
                ctx, dst_end_path)

            dstfn = ctx['references'][ptr._ref._name][dst_tup]

            def newf(*args):
                assert len(args) == len(dst_end_args)
                cond = []

                for a, b in zip(args[:-1], dst_start_args[:-1]):
                    cond.append(a == b)

                cond.append(z3.UGE(args[-1], dst_start_args[-1]))
                cond.append(z3.ULE(args[-1], dst_end_args[-1]))

                cond = z3.And(*cond)

                return util.If(cond, z3.BitVecVal(0, write_size), dstfn(*args))

            ctx['references'][ptr._ref._name][dst_tup] = newf
        else:
            raise NotImplementedError(
                "Don't know how to bzero {!r}".format(inner))
    else:
        raise NotImplementedError("Don't know how to bzero {!r}".format(inner))

def memcpy(ctx, dst, src, size):
    if isinstance(dst, BitcastPointer):
        dst = dst._ptr
    if isinstance(src, BitcastPointer):
        src = src._ptr

    # Same paranoid checks
    assert dst.type().is_pointer()
    assert src.type().is_pointer()

    assert dst.type().deref().is_array()
    assert src.type().deref().is_array()

    assert dst.type().deref().size() == src.type().deref().size()
    assert dst.type().deref().length() == src.type().deref().length()

    dst_start = dst.getelementptr(ctx, util.i64(0), util.i64(0))
    src_start = src.getelementptr(ctx, util.i64(0), util.i64(0))

    dst_end = dst.getelementptr(ctx, util.i64(0), util.i64(0))
    src_end = src.getelementptr(ctx, util.i64(0), util.i64(0))

    dst_start_path = dst_start.canonical_path()
    src_start_path = src_start.canonical_path()

    dst_end_path = dst_end.canonical_path()
    src_end_path = src_end.canonical_path()

    assert dst_start_path[-1].as_long() == src_start_path[-1].as_long()
    assert dst_end_path[-1].as_long() == src_end_path[-1].as_long()

    dst_tup, dst_start_args = dst._ref.build_field_tuple_and_path(
        ctx, dst_start_path)
    src_tup, src_start_args = src._ref.build_field_tuple_and_path(
        ctx, src_start_path)

    _, dst_end_args = dst._ref.build_field_tuple_and_path(ctx, dst_end_path)
    _, src_end_args = src._ref.build_field_tuple_and_path(ctx, src_end_path)

    dst_end_args[-1] += size
    src_end_args[-1] += size

    assert len(dst_start_args) == len(dst_end_args)
    assert len(dst_end_args) == len(src_end_args)

    dstfn = ctx['references'][dst._ref._name][dst_tup]
    srcfn = ctx['references'][src._ref._name][src_tup]

    # At this point we know that the src and dst are same-sized arrays.
    # They are both indexed starting from 0 up to length - 1.
    # So we just update the uf using an ite of the form
    # arg1 == dst_arg1, arg2 == dst_arg2, .., dst_argn_start <= argn < dst_argn_end
    def newf(*args):
        assert len(args) == len(dst_end_args)
        cond = []

        for a, b in zip(args[:-1], dst_start_args[:-1]):
            cond.append(a == b)

        cond.append(z3.UGE(args[-1], dst_start_args[-1]))
        cond.append(z3.ULT(args[-1], dst_end_args[-1]))

        cond = z3.And(*cond)

        srcargs = src_start_args[:-1] + [args[-1]]

        return util.If(cond, srcfn(*srcargs), dstfn(*args))

    ctx['references'][dst._ref._name][dst_tup] = newf

    return dst

def sys_clone(old, pid, pml4, stack, hvm):
    cond = z3.And(
        is_pid_valid(pid),
        old.procs[pid].state == dt.proc_state.PROC_UNUSED,

        is_pn_valid(pml4),
        old.pages[pml4].type == dt.page_type.PAGE_TYPE_FREE,

        is_pn_valid(stack),
        old.pages[stack].type == dt.page_type.PAGE_TYPE_FREE,

        is_pn_valid(hvm),
        old.pages[hvm].type == dt.page_type.PAGE_TYPE_FREE,

        z3.Distinct(pml4, stack, hvm),
    )

    new = old.copy()

    # Initialize the proc
    new.procs[pid].ppid = new.current
    new.procs[pid].state = dt.proc_state.PROC_EMBRYO
    new.procs[pid].killed = z3.BoolVal(False)

    new.procs[pid].ipc_from = z3.BitVecVal(0, dt.pid_t)
    new.procs[pid].ipc_val = z3.BitVecVal(0, dt.uint64_t)
    new.procs[pid].ipc_page = z3.BitVecVal(0, dt.pn_t)
    new.procs[pid].ipc_size = z3.BitVecVal(0, dt.size_t)
    new.procs[pid].ipc_fd = z3.BitVecVal(0, dt.fd_t)

    new.procs[pid].use_io_bitmap = z3.BoolVal(False)
    new.procs[pid].io_bitmap_a = z3.BitVecVal(0, dt.pn_t)
    new.procs[pid].io_bitmap_b = z3.BitVecVal(0, dt.pn_t)

    # All refcounts should be zero at this point (according to the invariants):
    # no need to zero them.
    # new.proc_nr_pages = 0
    # new.proc_nr_children = 0
    # new.procs.nr_fds = 0
    # new.proc_nr_devs = 0

    new.procs[pid].ofile = z3.BitVecVal(0, dt.fn_t)
    new.procs[pid].intr = z3.BitVecVal(0, 64)

    # Maintain the "shadow" pgtable
    new.pages[pml4].pgtable_pn = util.i64(0)
    new.pages[pml4].pgtable_perm = util.i64(0)
    new.pages[pml4].pgtable_type = dt.PGTYPE_NONE

    # Claim the root pml4, the stack and hvm pages.
    # We need to do four things to claim a page:
    # 1) change the type from free to something else
    # 2) change the owner
    # 3) zero the page contents
    # 4) bump the refcount for the owner
    new.pages[pml4].type = dt.page_type.PAGE_TYPE_X86_PML4
    new.pages[pml4].owner = pid
    new.pages[pml4].data = util.i64(0)
    new.procs[pid].nr_pages[pml4] += 1

    new.pages[stack].type = dt.page_type.PAGE_TYPE_PROC_DATA
    new.pages[stack].owner = pid
    new.pages[stack].data = util.i64(0)
    new.procs[pid].nr_pages[stack] += 1

    new.pages[hvm].type = dt.page_type.PAGE_TYPE_PROC_DATA
    new.pages[hvm].owner = pid
    new.pages[hvm].data = util.i64(0)
    new.procs[pid].nr_pages[hvm] += 1

    new.procs[pid].page_table_root = pml4
    new.procs[pid].stack = stack
    new.procs[pid].hvm = hvm

    new.procs[new.current].nr_children[pid] += 1

    # Copy parent's hvm to child's hvm
    new.pages.data = lambda pn, idx, oldfn: \
        util.If(pn == hvm,
                oldfn(new.procs[new.current].hvm, idx),
                oldfn(pn, idx))

    # Copy parent's stack to child's stack
    new.pages.data = lambda pn, idx, oldfn: \
        util.If(pn == stack,
                oldfn(new.procs[new.current].stack, idx),
                oldfn(pn, idx))

    return cond, util.If(cond, new, old)