def state_equiv(kernelstate, userstate):
    conj = []

    pid = util.FreshBitVec('pid.eq', kdt.pid_t)
    idx1 = util.FreshBitVec('idx1.eq', kdt.size_t)
    idx2 = util.FreshBitVec('idx2.eq', kdt.size_t)
    idx3 = util.FreshBitVec('idx3.eq', kdt.size_t)
    idx4 = util.FreshBitVec('idx4.eq', kdt.size_t)

    conj.append(z3.ForAll([pid, idx1, idx2, idx3, idx4], z3.Implies(
        z3.And(
            kspec.is_pid_valid(pid),
            kspec.is_status_live(kernelstate.procs[pid].state),
            z3.ULT(idx1, 512),
            z3.ULT(idx2, 512),
            z3.ULT(idx3, 512),
            z3.ULT(idx4, 512),
        ),
        pgwalk_rw(kernelstate, pid, idx1, idx2, idx3, idx4) ==
        userstate.writable(pid, idx1, idx2, idx3, idx4))))

    return z3.And(*conj)
def run_test_execution(self):
    for upper_bound in (0, 1, 50):
        x, y = z3.BitVec("x", 4), z3.BitVec("y", 8)
        formula: z3.BoolRef = z3.ULT(z3.ZeroExt(4, x) + y, z3.BitVecVal(upper_bound, 8))
        self.assert_eamp_edge_scheduler_execution(
            100, 2, 0.99, FormulaParamsZ3(formula=formula, variables=[x, y]))

    x, y = z3.BitVec("x", 8), z3.BitVec("y", 8)
    formula: z3.BoolRef = z3.ULT(y, 100)
    model_count = (2 ** 8) * 100

    for q in (1, 2, 3, 4) if RUN_MINI_BENCHMARK else (1,):
        t0 = perf_counter()
        self.assert_eamp_edge_scheduler_execution(
            100, q, 0.99,
            FormulaParamsZ3(formula=formula, variables=[x, y]),
            model_count)
        print(f"Mini Benchmark {self.get_eamp_edge_scheduler_class()} q={q} took {perf_counter() - t0:.2f}")

    zs = [z3.BitVec(f"z{idx}", 1) for idx in range(50)]
    formula: z3.BoolRef = z3.And([zs[idx] == 1 for idx in range(25)])
    model_count = 2 ** 25
    self.assert_eamp_edge_scheduler_execution(
        100, 1, 0.99,
        FormulaParamsZ3(formula=formula, variables=zs),
        model_count)
def mmap_impl(old, current, va, perm):
    new = old

    idx1, idx2, idx3, idx4 = va

    # Pick a few pages -- we don't care how they are picked.
    pml4 = old.procs[current].page_table_root
    pdptpn = util.FreshBitVec('pdptpn', kdt.pn_t)
    pdpn = util.FreshBitVec('pdpn', kdt.pn_t)
    ptpn = util.FreshBitVec('ptpn', kdt.pn_t)
    framepn = util.FreshBitVec('framepn', kdt.pn_t)

    condpdpt, new = kspec.sys_alloc_pdpt(new, current, pml4, idx1, pdptpn, perm)
    condpdpt = z3.Or(
        condpdpt,
        z3.And(
            z3.ULT(idx1, 512),
            new.pages[pml4].pgtable_pn(idx1) == pdptpn,
            (new.pages[pml4].data(idx1) & kdt.PTE_P) != z3.BitVecVal(0, kdt.size_t),
            new.pages[pml4].pgtable_perm(idx1) == perm,
            # The above implies this
            # new.pages[pml4].data(idx1) == (((z3.UDiv(new.pages_ptr_to_int, util.i64(4096)) + pdptpn) << kdt.PTE_PFN_SHIFT) | perm),
        ))

    condpd, new = kspec.sys_alloc_pd(new, current, pdptpn, idx2, pdpn, perm)
    condpd = z3.Or(
        condpd,
        z3.And(
            z3.ULT(idx2, 512),
            new.pages[pdptpn].pgtable_pn(idx2) == pdpn,
            (new.pages[pdptpn].data(idx2) & kdt.PTE_P) != z3.BitVecVal(0, kdt.size_t),
            new.pages[pdptpn].pgtable_perm(idx2) == perm,
        ))

    condpt, new = kspec.sys_alloc_pt(new, current, pdpn, idx3, ptpn, perm)
    condpt = z3.Or(
        condpt,
        z3.And(
            z3.ULT(idx3, 512),
            new.pages[pdpn].pgtable_pn(idx3) == ptpn,
            (new.pages[pdpn].data(idx3) & kdt.PTE_P) != z3.BitVecVal(0, kdt.size_t),
            new.pages[pdpn].pgtable_perm(idx3) == perm,
        ))

    condframe, new = kspec.sys_alloc_frame(new, current, ptpn, idx4, framepn, perm)

    cond = z3.And(condpdpt, condpd, condpt, condframe)

    return cond, util.If(cond, new, old)
def do_O(op, stack, state):
    bit, = pop_values(stack, state)
    old = prepare(state.esil["old"])
    cur = prepare(state.esil["cur"])

    m = [genmask(bit & 0x3f), genmask((bit + 0x3f) & 0x3f)]
    c_in = z3.If(z3.ULT((cur & m[0]), (old & m[0])), ONE, ZERO)
    c_out = z3.If(z3.ULT((cur & m[1]), (old & m[1])), ONE, ZERO)
    of = ((c_in ^ c_out) == 1)

    stack.append(z3.If(of, ONE, ZERO))
def do_O(op, stack, state):
    bit, = pop_values(stack, state)
    old = state.esil["old"]
    cur = state.esil["cur"]

    m = [genmask(bit & 0x3f), genmask((bit + 0x3f) & 0x3f)]
    c_in = z3.If(z3.ULT((cur & m[0]), (old & m[0])), ONE, ZERO)
    c_out = z3.If(z3.ULT((cur & m[1]), (old & m[1])), ONE, ZERO)
    # print(z3.simplify(c_in))
    # print(z3.simplify(c_out))
    of = ((c_in ^ c_out) == 1)

    stack.append(z3.If(of, ONE, ZERO))
def invariants(self):
    constraint = []

    if self._is_x_pow2:
        ix = self.fabric.cols.bit_length()
        constraint.append(z3.Extract(ix, ix, self.x) == 0)
    else:
        constraint.append(z3.ULT(self.x, self.fabric.cols))

    if self._is_y_pow2:
        iy = self.fabric.rows.bit_length()
        constraint.append(z3.Extract(iy, iy, self.y) == 0)
    else:
        constraint.append(z3.ULT(self.y, self.fabric.rows))

    return z3.And(constraint)
def get_antialias_constraint(self, address, register="sp"):
    register = self.get_reg_before(self.arch.registers[register][0])
    num_bytes = self.arch.bits // 8  # integer division; a float would break BitVec arithmetic
    return z3.And(
        # Don't allow the address to overlap the register
        z3.Or(z3.ULT(address, register - num_bytes),
              z3.UGT(address, register + num_bytes)),
        # Don't allow the address or register to wrap around
        z3.ULT(address, address + num_bytes),
        z3.UGT(address, address - num_bytes),
        z3.ULT(register, register + num_bytes),
        z3.UGT(register, register - num_bytes),
    )
def test_array_of_structs(self):
    ctx = newctx()
    points = it.ArrayType(10, it.StructType(
        'Point', [it.IntType(64), it.IntType(64)]))
    p = dt.fresh_ptr(ctx, util.fresh_name('p'), points)
    y = util.FreshBitVec('y', 64)
    x = util.FreshBitVec('x', 64)
    # x and y are within bounds
    ctx['solver'].add(z3.ULT(x, 10))
    ctx['solver'].add(z3.ULT(y, 10))
    p.getelementptr(ctx, x, util.i64(0)).write(ctx, x)
    s = z3.Solver()
    s.add(z3.Not(z3.Implies(x == y, p.getelementptr(
        ctx, y, util.i64(0)).read(ctx) == y)))
    self.assertEqual(s.check(), z3.unsat)
def do_CF(op, stack, state):
    bits, = pop_values(stack, state)
    mask = genmask(bits & 0x3f)
    old = state.esil["old"]
    cur = state.esil["cur"]

    cf = z3.ULT((cur & mask), (old & mask))
    stack.append(z3.If(cf, ONE, ZERO))
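# Hedged, self-contained aside (not from the original file): the z3.ULT test
# above is the classic carry-flag idiom. If `cur` was produced by an addition
# that wrapped, the truncated result is unsigned-less-than either operand.
# The demo below checks the full-width case of that idiom against a widened
# reference sum.
import z3

def _demo_carry_flag():
    old = z3.BitVec('old', 8)
    addend = z3.BitVec('addend', 8)
    cur = old + addend                          # 8-bit addition, wraps on carry
    wide = z3.ZeroExt(1, old) + z3.ZeroExt(1, addend)
    carry_ref = z3.Extract(8, 8, wide) == 1     # reference: 9th bit of the true sum
    carry_ult = z3.ULT(cur, old)                # the idiom used by do_CF
    s = z3.Solver()
    s.add(carry_ref != carry_ult)
    assert s.check() == z3.unsat                # the two definitions agree on all inputs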
def pages_equiv(conj, ctx, kernelstate):
    pn = util.FreshBitVec('pn', dt.pn_t)
    idx = util.FreshBitVec('page_index', 64)

    conj.append(
        z3.ForAll([pn, idx], z3.Implies(
            z3.And(is_pn_valid(pn), z3.ULT(idx, 512)),
            util.global_to_uf_dict(ctx, '@pages')[()](
                util.i64(0), pn, idx) == kernelstate.pages[pn].data(idx))))

    conj.append(
        z3.ForAll([pn], z3.Implies(
            is_pn_valid(pn),
            util.global_field_element(ctx, '@page_desc_table', 'pid', pn) ==
            kernelstate.pages[pn].owner)))

    conj.append(
        z3.ForAll([pn], z3.Implies(
            is_pn_valid(pn),
            util.global_field_element(ctx, '@page_desc_table', 'type', pn) ==
            kernelstate.pages[pn].type)))
def ULT(self, other):
    if isinstance(other, int):
        other = BVV(other, self.size)
    else:
        assert isinstance(other, BV)
        assert self.size == other.size
    return BoolExpr(z3.ULT(self.z3obj, other.z3obj))
def build_field_tuple_and_path(self, ctx, path):
    typ = self._type
    fields = []
    newpath = []
    while len(path) > 0:
        if typ.is_int():
            assert False, "Can't have base type if there is more left in path"
        elif typ.is_array() or typ.is_pointer():
            if typ.is_array():
                if not util.path_condition_implies(
                        ctx, z3.ULT(path[0], typ.length()), print_model=True):
                    util.print_stacktrace(ctx)
                    raise IndexError(
                        "Can not prove index %s is within array bounds %s" %
                        (path[0], typ.length()))
            if typ.is_pointer():
                if not util.path_condition_implies(ctx, path[0] == 0):
                    util.print_stacktrace(ctx)
                    raise RuntimeError("Pointer arithmetic not supported")
            typ = typ.deref()
            newpath.append(path[0])
        elif typ.is_struct():
            field = util.simplify(path[0]).as_long()
            fields.append(field)
            typ = typ.field(field)
        else:
            assert False, "Unhandled case"
        path = path[1:]
    return tuple(fields), newpath
def sys_map_pcipage(old, pt, index, pcipn, perm):
    cond = z3.And(
        # pt is a valid PT page owned by current
        is_pn_valid(pt),
        old.pages[pt].type == dt.page_type.PAGE_TYPE_X86_PT,
        old.pages[pt].owner == old.current,

        # index is a valid page index
        z3.ULT(index, 512),

        # pcipn is a valid pci page owned by current
        is_pcipn_valid(pcipn),
        old.pcipages[pcipn].valid,
        old.pci[old.pcipages[pcipn].owner].owner == old.current,

        # perm has no unsafe bits on it and it is present
        perm & (dt.MAX_INT64 ^ dt.PTE_PERM_MASK) == 0,
        perm & dt.PTE_P != 0,

        # slot should be empty
        old.pages[pt].data(index) & dt.PTE_P == 0,
    )

    new = old.copy()

    new.pages[pt].data[index] = ((z3.UDiv(
        dt.PCI_START, util.i64(dt.PAGE_SIZE)) + pcipn) << dt.PTE_PFN_SHIFT) | perm

    # maintain the "shadow" pgtable
    new.pages[pt].pgtable_pn[index] = pcipn
    new.pages[pt].pgtable_perm[index] = perm
    new.pages[pt].pgtable_type[index] = dt.PGTYPE_PCIPAGE

    new.flush_tlb(old.current)

    return cond, util.If(cond, new, old)
def sys_alloc_iommu_frame(old, frm, index, to, perm):
    cond = z3.And(
        # to page is valid and free
        is_dmapn_valid(to),
        old.dmapages[to].type == dt.page_type.PAGE_TYPE_FREE,

        # from page is a valid page with correct type
        is_pn_valid(frm),
        old.pages[frm].type == dt.page_type.PAGE_TYPE_IOMMU_PT,
        old.pages[frm].owner == old.current,

        # index is a valid page index
        z3.ULT(index, 512),

        # permission bits check
        perm & (dt.MAX_INT64 ^ (dt.DMAR_PTE_R | dt.DMAR_PTE_W)) == 0,

        old.pages[frm].data(index) == 0,
    )

    new = old.copy()

    new.pages[frm].data[index] = (new.dmapages_ptr_to_int + to * dt.PAGE_SIZE) | perm
    new.pages[frm].pgtable_pn[index] = to
    new.pages[frm].pgtable_perm[index] = perm

    new.dmapages[to].type = dt.page_type.PAGE_TYPE_IOMMU_FRAME
    new.dmapages[to].owner = new.current
    new.procs[new.current].nr_dmapages[to] += 1

    new.flush_iotlb()

    return cond, util.If(cond, new, old)
def do_B(op, stack, state):
    bits, = pop_values(stack, state)
    mask = genmask(bits & 0x3f)
    old = prepare(state.esil["old"])
    cur = prepare(state.esil["cur"])

    bf = z3.ULT((old & mask), (cur & mask))
    stack.append(z3.If(bf, ONE, ZERO))
def ashr(self, ctx, return_type, a, atype, b, btype, nuw=False, nsw=False):
    assert atype == return_type
    assert atype == btype
    assert not nuw and not nsw
    return util.partial_eval(
        ctx,
        util.If(z3.ULT(b, z3.BitVecVal(btype.size(), btype.size())),
                a >> b,
                self.get_poison(btype)))
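# Illustrative sketch (plain z3; poison is modeled as a placeholder value, an
# assumption of this demo): LLVM's ashr yields poison when the shift amount is
# not less than the bit width, which is exactly what the z3.ULT guard above
# encodes.
import z3

def _demo_ashr_guard():
    a, b = z3.BitVecs('a b', 8)
    POISON = z3.BitVecVal(0, 8)                 # stand-in for the verifier's poison value
    guarded = z3.If(z3.ULT(b, z3.BitVecVal(8, 8)), a >> b, POISON)
    s = z3.Solver()
    s.add(b == 9, guarded != POISON)
    assert s.check() == z3.unsat                # an out-of-range shift always hits the poison arm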
def ULT(a: BitVec, b: BitVec) -> Bool:
    """Create an unsigned less-than expression.

    :param a: the left-hand operand
    :param b: the right-hand operand
    :return: a Bool expression that is true iff a is unsigned-less-than b
    """
    annotations = a.annotations + b.annotations
    return Bool(z3.ULT(a.raw, b.raw), annotations)
def alloc_page_table(old, pid, frm, index, to, perm, from_type, to_type):
    cond = z3.And(
        # The to argument is a valid page and is marked as free
        is_pn_valid(to),
        old.pages[to].type == dt.page_type.PAGE_TYPE_FREE,

        # The pid is valid and is either the current running process or a child embryo
        is_pid_valid(pid),
        z3.Or(pid == old.current,
              z3.And(
                  old.procs[pid].ppid == old.current,
                  old.procs[pid].state == dt.proc_state.PROC_EMBRYO)),

        # The from parameter is valid, of the given from_type, and owned by pid
        is_pn_valid(frm),
        old.pages[frm].owner == pid,
        old.pages[frm].type == from_type,

        # Index is a valid page index
        z3.ULT(index, 512),

        # perm has no unsafe bits on it and it is present
        perm & (dt.MAX_INT64 ^ dt.PTE_PERM_MASK) == 0,
        perm & dt.PTE_P != 0,

        # index does not have the P bit set in the from page
        old.pages[frm].data(index) & dt.PTE_P == 0,
    )

    new = old.copy()

    new.pages[to].owner = pid
    new.pages[to].type = to_type

    new.pages[frm].data[index] = (
        (z3.UDiv(new.pages_ptr_to_int, util.i64(dt.PAGE_SIZE)) + to)
        << dt.PTE_PFN_SHIFT) | perm

    # Zero out the new page
    new.pages[to].data = util.i64(0)

    # Maintain the "shadow" pgtable
    new.pages[frm].pgtable_pn[index] = to
    new.pages[to].pgtable_reverse_pn = frm
    new.pages[to].pgtable_reverse_idx = index
    new.pages[frm].pgtable_perm[index] = perm
    new.pages[frm].pgtable_type[index] = dt.PGTYPE_PAGE

    new.pages[to].pgtable_pn = util.i64(0)
    new.pages[to].pgtable_perm = util.i64(0)
    new.pages[to].pgtable_type = dt.PGTYPE_NONE

    new.procs[pid].nr_pages[to] += 1

    new.flush_tlb(pid)

    return cond, util.If(cond, new, old)
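# Small side note (constants here are illustrative; 12 is the usual x86-64 PTE
# PFN shift and is assumed to match dt.PTE_PFN_SHIFT): the data[index] update
# above encodes a PTE by placing the target page frame number in the high bits
# and the permission bits in the low 12. A quick concrete check:
import z3

def _demo_pte_encode():
    PTE_PFN_SHIFT = 12                          # assumed value of dt.PTE_PFN_SHIFT
    pfn = z3.BitVecVal(0x1A2B3, 64)
    perm = z3.BitVecVal(0x7, 64)                # e.g. P | W | U
    pte = (pfn << PTE_PFN_SHIFT) | perm
    assert z3.simplify(z3.LShR(pte, PTE_PFN_SHIFT)).as_long() == 0x1A2B3
    assert z3.simplify(pte & 0xFFF).as_long() == 0x7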
def do_B(op, stack, state):
    bits, = pop_values(stack, state)
    mask = genmask(bits & 0x3f)
    old = state.esil["old"]
    cur = state.esil["cur"]

    bf = z3.ULT((old & mask), (cur & mask))
    # print(bits, mask, z3.simplify(bf))
    stack.append(z3.If(bf, ONE, ZERO))
def abs_diff(pos1, pos2, fab_dims):
    '''
    abs_diff :: z3.BitVec[a] -> z3.BitVec[a] -> (int, int) -> z3.BitVec[b]

    Takes two z3 BitVecs and returns the absolute value of their difference,
    zero-extended to fab_dims[0] * fab_dims[1] bits.
    '''
    # Zero-extend to avoid overflow
    n = fab_dims[0] * fab_dims[1] - pos1.size()
    return z3.If(z3.ULT(pos1, pos2),
                 z3.ZeroExt(n, pos2) - z3.ZeroExt(n, pos1),
                 z3.ZeroExt(n, pos1) - z3.ZeroExt(n, pos2))
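# A hypothetical usage sketch (not from the original file): abs_diff widens its
# result to fab_dims[0] * fab_dims[1] bits, so a 4x4 fabric turns 8-bit
# positions into a 16-bit distance.
import z3

def _demo_abs_diff():
    pos1 = z3.BitVec('pos1', 8)
    pos2 = z3.BitVec('pos2', 8)
    d = abs_diff(pos1, pos2, (4, 4))            # 16-bit |pos1 - pos2|
    s = z3.Solver()
    s.add(pos1 == 3, pos2 == 250)
    assert s.check() == z3.sat
    assert s.model().eval(d).as_long() == 247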
def sys_map_file(old, pid, frm, index, n, perm):
    cond = z3.And(
        z3.ULT(n, dt.NPAGES_FILE_TABLE),

        is_pid_valid(pid),

        # the pid is either current or an embryo belonging to current
        z3.Or(pid == old.current,
              z3.And(
                  old.procs[pid].ppid == old.current,
                  old.procs[pid].state == dt.proc_state.PROC_EMBRYO)),

        # frm is a valid pn of type PT whose owner is pid
        is_pn_valid(frm),
        old.pages[frm].type == dt.page_type.PAGE_TYPE_X86_PT,
        old.pages[frm].owner == pid,

        # Index is a valid page index
        z3.ULT(index, 512),

        # perm has no unsafe bits on it and it is present and non-writable
        perm & (dt.MAX_INT64 ^ dt.PTE_PERM_MASK) == 0,
        perm & dt.PTE_P != 0,
        perm & dt.PTE_W == 0,

        # index does not have the P bit in the from page
        old.pages[frm].data(index) & dt.PTE_P == 0,
    )

    new = old.copy()

    new.pages[frm].data[index] = (
        (z3.UDiv(new.file_table_ptr_to_int, util.i64(dt.PAGE_SIZE)) + n)
        << dt.PTE_PFN_SHIFT) | perm

    # maintain the "shadow" pgtable
    new.pages[frm].pgtable_pn[index] = n
    new.pages[frm].pgtable_perm[index] = perm
    new.pages[frm].pgtable_type[index] = dt.PGTYPE_FILE_TABLE

    new.flush_tlb(pid)

    return cond, util.If(cond, new, old)
def _touches_address(self, op_name, address):
    "Return a boolean indicating whether `op_name` touches `address`"
    # Check for overflow, which is defined to wrap around
    upper_bound = self.base_address[op_name] + self.width_bytes[op_name]
    overflow = z3.ULT(upper_bound, self.base_address[op_name])
    return z3.If(
        overflow,
        # If overflow, account for wraparound
        z3.Or(
            z3.UGE(address, self.base_address[op_name]),
            z3.ULT(address, upper_bound),
        ),
        # If no overflow, the address should be in [base, base + offset)
        z3.And(
            z3.UGE(address, self.base_address[op_name]),
            z3.ULT(address, upper_bound),
        ),
    )
def BVS(self, ast, result=None):  # pylint:disable=unused-argument
    name, mn, mx, stride, _ = ast.args
    size = ast.size()
    expr = z3.BitVec(name, size, ctx=self._context)
    if mn is not None:
        expr = z3.If(z3.ULT(expr, mn), mn, expr, ctx=self._context)
    if mx is not None:
        expr = z3.If(z3.UGT(expr, mx), mx, expr, ctx=self._context)
    if stride is not None:
        expr = (expr / stride) * stride
    return expr
def _overlap(self, a, b):
    "Return a boolean indicating that `a` and `b` overlap"
    base_a = self.base_address[a]
    base_b = self.base_address[b]
    max_a = base_a + self.width_bytes[a]
    max_b = base_b + self.width_bytes[b]
    overflow_a = z3.ULT(max_a, base_a)
    overflow_b = z3.ULT(max_b, base_b)
    # The non-overflow case is:
    #   z3.And(z3.ULT(base_a, max_b), z3.ULT(base_b, max_a))
    #
    # If max_b overflows, then base_a < max_b is effectively true, and vice
    # versa. Therefore, logical-or the overflow conditions in too.
    return z3.And(
        z3.Or(overflow_b, z3.ULT(base_a, max_b)),
        z3.Or(overflow_a, z3.ULT(base_b, max_a)),
    )
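# Self-contained check (names are illustrative, not the original class state):
# the wraparound-aware predicate above treats two ranges that meet exactly
# end-to-start as non-overlapping, even near the top of the address space.
import z3

def _demo_overlap():
    base_a, base_b = z3.BitVecs('base_a base_b', 32)
    width = z3.BitVecVal(4, 32)
    max_a, max_b = base_a + width, base_b + width
    overflow_a, overflow_b = z3.ULT(max_a, base_a), z3.ULT(max_b, base_b)
    overlap = z3.And(
        z3.Or(overflow_b, z3.ULT(base_a, max_b)),
        z3.Or(overflow_a, z3.ULT(base_b, max_a)),
    )
    s = z3.Solver()
    s.add(base_b == base_a + width)             # b starts exactly where a ends
    s.add(overlap)
    assert s.check() == z3.unsat                # adjacent ranges never overlap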
def _(term, smt):
    x = smt.eval(term.x)
    y = smt.eval(term.y)
    z = smt._conditional_value([z3.ULT(y, y.size())], op(x, y), term.name)

    for f in poisons:
        if smt.has_analysis(f, term):
            smt.add_nonpoison(
                z3.Implies(smt.get_analysis(f, term), poisons[f](x, y, z)))
        elif f in term.flags:
            smt.add_nonpoison(poisons[f](x, y, z))

    return z
def __init__(self, meta, opcode, src1, src2, target):
    self.meta = meta
    self.opcode = opcode
    self.src1 = src1
    self.src2 = src2
    self.target = target
    self.op = {
        "beq": lambda a, b: a == b,
        "bne": lambda a, b: a != b,
        "blt": lambda a, b: a < b,
        "bltu": lambda a, b: z3.ULT(a, b),
        "bge": lambda a, b: a >= b,
        "bgeu": lambda a, b: z3.UGE(a, b),
    }[opcode.lower()]
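# Hedged aside (separate from the class above): on z3 BitVecs, Python's `<`
# and `>=` build *signed* comparisons, so the table needs z3.ULT/z3.UGE for
# the unsigned branch variants. The divergence shows up once the sign bit is
# set:
import z3

def _demo_signed_vs_unsigned():
    a = z3.BitVecVal(0xFF, 8)                      # -1 signed, 255 unsigned
    b = z3.BitVecVal(0x01, 8)
    assert z3.is_true(z3.simplify(a < b))          # blt: -1 < 1 holds (signed)
    assert z3.is_false(z3.simplify(z3.ULT(a, b)))  # bltu: 255 < 1 fails (unsigned)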
def newf(*args):
    assert len(args) == len(dst_end_args)
    cond = []
    for a, b in zip(args[:-1], dst_start_args[:-1]):
        cond.append(a == b)
    cond.append(z3.UGE(args[-1], dst_start_args[-1]))
    cond.append(z3.ULT(args[-1], dst_end_args[-1]))
    cond = z3.And(*cond)
    srcargs = src_start_args[:-1] + [args[-1]]
    return util.If(cond, srcfn(*srcargs), dstfn(*args))
def sys_alloc_port(old, port):
    cond = z3.And(
        old.io[port].owner == 0,
        old.procs[old.current].use_io_bitmap,
    )

    new = old.copy()

    new.io[port].owner = old.current
    new.procs[old.current].nr_ports[port] += 1

    page = util.If(z3.ULT(port, 0x8000),
                   new.procs[new.current].io_bitmap_a,
                   new.procs[new.current].io_bitmap_b)
    port = z3.ZeroExt(64 - port.size(),
                      util.If(z3.ULT(port, 0x8000), port, port - 0x8000))
    idx = z3.UDiv(port, 64)
    mask = 1 << (port % 64)
    new.pages[page].data[idx] = new.pages[page].data(idx) & ~mask

    return cond, util.If(cond, new, old)
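# Minimal sketch (plain z3, detached from the kernel state above) of the
# bitmap arithmetic: the port selects word port/64 of the bitmap, and clearing
# bit port%64 of that word marks the port as allowed.
import z3

def _demo_io_bitmap_clear():
    port = z3.BitVecVal(0x1234, 64)
    word = z3.BitVecVal(0xFFFFFFFFFFFFFFFF, 64)  # all ports denied
    idx = z3.UDiv(port, 64)
    mask = 1 << (port % 64)
    cleared = word & ~mask
    assert z3.simplify(idx).as_long() == 0x1234 // 64
    assert z3.simplify((cleared >> (port % 64)) & 1).as_long() == 0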
def sys_send(old, pid, val, pn, size, fd):
    cond = z3.And(
        is_pid_valid(pid),
        old.procs[pid].state == dt.proc_state.PROC_SLEEPING,
        is_pn_valid(pn),
        old.pages[pn].owner == old.current,
        z3.ULE(size, dt.PAGE_SIZE),
        z3.Implies(is_fd_valid(fd),
                   is_fn_valid(old.procs[old.current].ofile(fd))),
    )

    new = old.copy()

    new.procs[pid].ipc_from = old.current
    new.procs[pid].ipc_val = val
    new.procs[pid].ipc_size = size

    # memcpy
    new.pages.data = lambda pn0, idx0, oldfn: \
        util.If(z3.And(pn0 == old.procs[pid].ipc_page, z3.ULT(idx0, size)),
                oldfn(pn, idx0),
                oldfn(pn0, idx0))

    ########

    new2 = new.copy()

    cond2 = z3.And(is_fd_valid(fd), is_fd_valid(new2.procs[pid].ipc_fd))

    fn = old.procs[old.current].ofile(fd)
    fd = old.procs[pid].ipc_fd

    new2.procs[pid].ofile[fd] = fn
    # bump proc nr_fds
    new2.procs[pid].nr_fds[fd] += 1
    # bump file refcnt
    new2.files[fn].refcnt[(pid, fd)] += 1

    new3 = util.If(cond2, new2, new)

    new3.procs[pid].state = dt.proc_state.PROC_RUNNING
    new3.procs[old.current].state = dt.proc_state.PROC_RUNNABLE
    new3.current = pid

    return cond, util.If(cond, new3, old)
def symbolic_keccak(svm, data):
    sha_constraints = []
    sha_func, sha_func_inv = constraints.get_sha_functions(data.size())
    hash_vector = sha_func(data)
    sha_constraints.append(sha_func_inv(sha_func(data)) == data)
    hash_vector_features = extract_index_features(hash_vector)

    data_concrete = svm_utils.is_bv_concrete(data)
    if data_concrete:
        concrete_data = svm_utils.get_concrete_int(data)
        data_bytes = ethereum.utils.zpad(
            ethereum.utils.int_to_bytes(concrete_data), data.size() // 8)
        hash_value = int.from_bytes(ethereum.utils.sha3_256(data_bytes), 'big')

    SIZE_PER_SHA_LEN = 2 ** 100
    limit_left = 1024 + SIZE_PER_SHA_LEN * data.size()
    limit_right = limit_left + SIZE_PER_SHA_LEN

    if not data_concrete:
        sha_constraints.append(z3.ULT(limit_left, hash_vector))
        sha_constraints.append(z3.ULT(hash_vector, limit_right))
        # last 4 bits are 0 => hashes are 16 words apart from each other
        sha_constraints.append(z3.Extract(3, 0, hash_vector) == 0)
    elif data_concrete:
        storage_range = limit_right - limit_left
        scaled_hash_value = limit_left + int(
            (hash_value / svm_utils.TT256M1) * storage_range)
        scaled_hash_value = scaled_hash_value // 16 * 16
        sha_constraints.append(
            hash_vector == z3.BitVecVal(scaled_hash_value, VECTOR_LEN))
    # elif storage_node == svm.storage_root and data_concrete:
    #     hash_value = hash_value // 16 * 16
    #     sha_constraints.append(hash_vector == z3.BitVecVal(hash_value, VECTOR_LEN))

    return sha_constraints, hash_vector
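# Hedged sketch (plain z3; assumes get_sha_functions returns an uninterpreted
# function plus its inverse): asserting inv(f(x)) == x forces f to behave
# injectively, which is the trick symbolic_keccak uses to model keccak without
# computing real hashes.
import z3

def _demo_uf_injective_hash():
    f = z3.Function('sha', z3.BitVecSort(256), z3.BitVecSort(256))
    f_inv = z3.Function('sha_inv', z3.BitVecSort(256), z3.BitVecSort(256))
    x = z3.BitVec('x', 256)
    s = z3.Solver()
    s.add(z3.ForAll([x], f_inv(f(x)) == x))     # makes f injective
    a, b = z3.BitVecs('a b', 256)
    s.add(f(a) == f(b), a != b)                 # demand a collision
    assert s.check() == z3.unsat                # no collision can exist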