def count_cells(
    symbol_grid: SymbolGrid,
    start: Point,
    direction: Direction,
    count: Callable[[ArithRef], ArithRef] = lambda c: IntVal(1),
    stop: Callable[[ArithRef], BoolRef] = lambda c: BoolVal(False)
) -> ArithRef:
    """Returns a count of cells along a sightline through a grid.

    Args:
      symbol_grid (grilops.grids.SymbolGrid): The grid to check against.
      start (grilops.geometry.Point): The location of the cell where the
        sightline should begin. This is the first cell checked.
      direction (grilops.geometry.Direction): The direction to advance to
        reach the next cell in the sightline.
      count (Callable[[ArithRef], ArithRef]): A function that accepts a symbol
        as an argument and returns the integer value to add to the count when
        this symbol is encountered. By default, each symbol will count with a
        value of one.
      stop (Callable[[ArithRef], BoolRef]): A function that accepts a symbol
        as an argument and returns True if we should stop following the
        sightline when this symbol is encountered. By default, the sightline
        will continue to the edge of the grid.

    Returns:
      An `ArithRef` for the count of cells along the sightline through the
      grid.
    """
    return reduce_cells(
        symbol_grid, start, direction,
        cast(ArithRef, IntVal(0)),
        lambda a, c: a + count(c),
        lambda a, c: stop(c))
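# Illustrative usage sketch (not from the grilops source above): assumes `sg`
# is an existing grilops SymbolGrid, `direction` is one of its lattice's
# directions, and WALL is the integer index of a "wall" symbol. The sightline
# starts in the top-left cell, counts non-wall cells, and stops at the first
# wall it meets.
from z3 import If, IntVal

visible = count_cells(
    sg, Point(0, 0), direction,
    count=lambda c: If(c == IntVal(WALL), IntVal(0), IntVal(1)),
    stop=lambda c: c == IntVal(WALL))
sg.solver.add(visible == IntVal(3))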
def get_quadratic_splines_from_polynomial(self, frame_id, state_id, polynomial,
                                          low_val, high_val, integer_var):
    # Assumes that polynomial is relative inductive to frame_id!
    state_valuation = self.state_graph.get_state_valuation(state_id)

    low_delta = z3_evaluate_polynomial_at_point(polynomial,
                                                integer_var.variable, low_val)
    high_delta = z3_evaluate_polynomial_at_point(polynomial,
                                                 integer_var.variable, high_val)

    # Add relative inductive polynomials only.
    state_args = [
        eq_no_coerce(var.variable, val) if var != integer_var
        else ge_no_coerce(var.variable, low_val)
        for var, val in state_valuation.items()
    ] + [integer_var.variable <= high_val]

    if z3_values_check_eq(low_val, high_val):
        return [(state_args, low_delta)]

    if self.settings.int_to_real:
        intermediate_val = low_val + z3_real_floored_division(
            high_val - low_val, IntVal(2))
    else:
        intermediate_val = low_val + z3_integer_division(
            high_val - low_val, IntVal(2))
    intermediate_delta = z3_evaluate_polynomial_at_point(
        polynomial, integer_var.variable, intermediate_val)

    # Try a quadratic spline through the three points; if it is relative
    # inductive, we can stop recursing.
    data_points = [(low_val, low_delta), (intermediate_val, intermediate_delta),
                   (high_val, high_delta)]
    quadratic_spline = self._interpolator.get_interpolating_polynomial(
        data_points, integer_var.variable)

    rel_ind_result = self.p_solver.is_relative_inductive(
        frame_id, state_args, quadratic_spline)
    if rel_ind_result is True:
        return [(state_args, quadratic_spline)]
    else:
        # Otherwise, split the interval at the midpoint and recurse on each half.
        res_1 = self.get_quadratic_splines_from_polynomial(
            frame_id, state_id, polynomial, low_val, intermediate_val,
            integer_var)
        res_2 = self.get_quadratic_splines_from_polynomial(
            frame_id, state_id, polynomial, intermediate_val, high_val,
            integer_var)
        return res_1 + res_2
def as_formula(self):
    # Encode this interval box as a z3 formula over its variables.
    if self.is_top():
        return BoolVal(True)
    if self.is_bottom():
        return BoolVal(False)
    constraints = []
    for var, interval in self.dict.items():
        if not interval.is_high_inf():
            constraints.append(Int(var) <= IntVal(interval.high.n))
        if not interval.is_low_minf():
            constraints.append(Int(var) >= IntVal(interval.low.n))
    return And(constraints)
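# Illustrative only: for an interval box with x in [0, 5] and y in (-inf, 3],
# as_formula() produces a conjunction equivalent to the constraint built
# directly with z3 below.
from z3 import And, Int, IntVal

box_formula = And(Int('x') <= IntVal(5),
                  Int('x') >= IntVal(0),
                  Int('y') <= IntVal(3))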
def __add_shape_instance_constraints(self):  # pylint: disable=R0914
    # Cache z3 integer constants for instance ids and shape type indices.
    int_vals = {}
    for i in range(max(len(self.__lattice.points), len(self.__variants))):
        int_vals[i] = IntVal(i)

    # Index per-point (in)equality expressions in a quadtree for fast lookup.
    quadtree = ExpressionQuadTree(self.__lattice.points)
    for instance_id in [
            self.__lattice.point_to_index(p) for p in self.__lattice.points
    ]:
        quadtree.add_expr(
            (HAS_INSTANCE_ID, instance_id),
            lambda p, i=instance_id: fast_eq(self.__shape_instance_grid[p],
                                             int_vals[i]))
        quadtree.add_expr(
            (NOT_HAS_INSTANCE_ID, instance_id),
            lambda p, i=instance_id: fast_ne(self.__shape_instance_grid[p],
                                             int_vals[i]))
    for shape_index in range(len(self.__variants)):
        quadtree.add_expr(
            (HAS_SHAPE_TYPE, shape_index),
            lambda p, i=shape_index: fast_eq(self.__shape_type_grid[p],
                                             int_vals[i]))

    # For each shape variant and candidate root point, build the conjunction
    # of constraints that places that variant with its root at that point.
    root_options = defaultdict(list)
    for shape_index, variants in enumerate(self.__variants):  # pylint: disable=R1702
        for variant in variants:
            for root_point in self.__lattice.points:
                instance_id = self.__lattice.point_to_index(root_point)
                point_payload_tuples = []
                for offset_vector, payload in variant.offsets_with_payloads:
                    point = root_point.translate(offset_vector)
                    if point not in self.__shape_instance_grid:
                        point_payload_tuples = None
                        break
                    point_payload_tuples.append((point, payload))
                if point_payload_tuples:
                    and_terms = []
                    for point, payload in point_payload_tuples:
                        and_terms.append(
                            quadtree.get_point_expr(
                                (HAS_INSTANCE_ID, instance_id), point))
                        and_terms.append(
                            quadtree.get_point_expr(
                                (HAS_SHAPE_TYPE, shape_index), point))
                        if self.__shape_payload_grid:
                            and_terms.append(
                                self.__shape_payload_grid[point] == payload)
                    and_terms.append(
                        quadtree.get_other_points_expr(
                            (NOT_HAS_INSTANCE_ID, instance_id),
                            [t[0] for t in point_payload_tuples]))
                    root_options[root_point].append(fast_and(*and_terms))

    # Each instance id is either used by a placement rooted at its point, or
    # not used anywhere in the grid.
    for p in self.__lattice.points:
        instance_id = self.__lattice.point_to_index(p)
        not_has_instance_id_expr = quadtree.get_other_points_expr(
            (NOT_HAS_INSTANCE_ID, instance_id), [])
        or_terms = root_options[p]
        if or_terms:
            or_terms.append(not_has_instance_id_expr)
            self.__solver.add(Or(*or_terms))
        else:
            self.__solver.add(not_has_instance_id_expr)
def GetValFromType(typ, raw_val):
    srt = GetSortFromType(typ)
    if srt == IntSort():
        return IntVal(raw_val)
    elif srt == BoolSort():
        return BoolVal(raw_val)
    elif is_bv_sort(srt):
        sz = srt.size()
        return BitVecVal(raw_val, sz)
    else:
        raise SynthException('Unknown sort')
def __init__(self, file_name):
    self.solver = Optimize()
    inputs = [Int(f'model_{i}') for i in range(14)]
    self.solver.add([i >= 1 for i in inputs])
    self.solver.add([i <= 9 for i in inputs])

    # Please don't ask me to explain this. There's a common pattern in the
    # input code that treats z like a base-26 number, and the operations are
    # either a right shift or a left shift on that number plus/minus some value.
    self.solver.add(inputs[0] + 6 - 6 == inputs[13])
    self.solver.add(inputs[1] + 11 - 6 == inputs[12])
    self.solver.add(inputs[2] + 5 - 13 == inputs[11])
    self.solver.add(inputs[3] + 6 - 8 == inputs[8])
    self.solver.add(inputs[4] + 8 - 1 == inputs[5])
    self.solver.add(inputs[6] + 9 - 16 == inputs[7])
    self.solver.add(inputs[9] + 13 - 16 == inputs[10])

    # Fold the 14 digits into a single decimal number.
    my_sum = IntVal(0)
    for digit in inputs:
        my_sum = (my_sum * 10) + digit

    self.value = Int('value')
    self.solver.add(my_sum == self.value)
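# Hypothetical usage of the solver above (the class name Day24Solver and the
# input file name are assumptions, not from the source): maximizing the folded
# 14-digit value yields the largest model number accepted by the constraints.
from z3 import sat

day24 = Day24Solver('input.txt')
day24.solver.maximize(day24.value)
if day24.solver.check() == sat:
    print('Largest model number:', day24.solver.model()[day24.value])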
def work_its_hours(worker):
    sum_of_hours = sum(variables_for_worker(worker))
    number_of_hours_to_work = IntVal(worker.hours_per_week)
    return sum_of_hours == number_of_hours_to_work
def __init__(self, val: Any, lineno: int, ctx: Context):
    super().__init__(expr=z3_val(val, ctx), kind=kind(val),
                     line=IntVal(lineno, ctx))
def __init__(self, id: int, kind: Kind, lineno: int, ctx: Context):
    super().__init__(expr=z3.Const(f'const_{id}', kind.to_z3_sort(ctx)),
                     kind=kind, line=IntVal(lineno, ctx))
from opcodes import AND, SHL, SHR
from rule import Rule
from z3 import BitVec, BitVecVal, If, Int2BV, IntVal, UGT, ULT

rule = Rule()

n_bits = 64

# Input vars
X = BitVec('X', n_bits)
A = BitVec('A', n_bits)
B = BitVec('B', n_bits)

# Constants
BitWidth = BitVecVal(n_bits, n_bits)

# Requirements
rule.require(ULT(A, BitWidth))
rule.require(ULT(B, BitWidth))

# Non optimized result
nonopt = SHR(B, SHL(A, X))

# Optimized result
Mask = SHR(B, SHL(A, Int2BV(IntVal(-1), n_bits)))
opt = If(
    UGT(A, B),
    AND(SHL(A - B, X), Mask),
    If(
        UGT(B, A),
        AND(SHR(B - A, X), Mask),
        AND(X, Mask)))

rule.check(nonopt, opt)
def to_term(self, times=None):
    return IntVal(self.value)
from opcodes import SHL
from rule import Rule
from z3 import BitVec, BV2Int, Int2BV, IntVal

"""
Shift left workaround that Solidity implements due to a bug in Boost.
"""

rule = Rule()

n_bits = 8
bigint_bits = 16

# Input vars
X = BitVec('X', n_bits)
A = BitVec('A', n_bits)
B = BitVec('B', bigint_bits)

# Compute workaround
workaround = Int2BV(
    BV2Int(
        (Int2BV(BV2Int(X), bigint_bits) << Int2BV(BV2Int(A), bigint_bits)) &
        Int2BV(BV2Int(Int2BV(IntVal(-1), n_bits)), bigint_bits)),
    n_bits)

rule.check(workaround, SHL(A, X))
def get_generalization_polynomials(self, frame_id, state_id, low_val, low_delta,
                                   high_val, high_delta, input_variable,
                                   splits=0):
    data_points = []

    if self.settings.use_states_of_same_kind:
        same_kind_id = self._state_of_the_same_kind_cache.get_first_state_of_this_kind(
            state_id, input_variable)
        if same_kind_id != -1:
            same_kind_valuation = self.state_graph.get_state_valuation(
                same_kind_id)[input_variable]
            # If the same-kind valuation sits between low_val and high_val
            if z3_values_check_neq(same_kind_valuation, low_val) and z3_values_check_lt(
                    same_kind_valuation, high_val):
                data_points.append(
                    (same_kind_valuation, RepushingObligationQueue.
                     smallest_probability_for_state[same_kind_id]))

        same_kind_id = self._state_of_the_same_kind_cache.get_last_state_of_this_kind(
            state_id, input_variable)
        if same_kind_id != -1:
            same_kind_valuation = self.state_graph.get_state_valuation(
                same_kind_id)[input_variable]
            # If the same-kind valuation sits between low_val and high_val
            if z3_values_check_neq(same_kind_valuation, low_val) and z3_values_check_lt(
                    same_kind_valuation, high_val):
                data_points.append(
                    (same_kind_valuation, RepushingObligationQueue.
                     smallest_probability_for_state[same_kind_id]))

    data_points = data_points + [(low_val, low_delta), (high_val, high_delta)]
    state_valuation = self.state_graph.get_state_valuation(state_id)

    polynomial = self._interpolator.get_interpolating_polynomial(
        data_points, input_variable.variable)

    state_args = [
        eq_no_coerce(var.variable, val) if var != input_variable
        else ge_no_coerce(var.variable, low_val)
        for var, val in state_valuation.items()
    ] + [input_variable.variable <= high_val]

    rel_ind_result = self.p_solver.is_relative_inductive(
        frame_id, state_args, polynomial)

    if rel_ind_result is True:
        # print("We were able to generalize! (inputvar: %s)" % input_variable.name)
        # print('Polynomial for %s in [%s, %s]: %s (for frame %s)' % (
        #     input_variable.name, low_val, high_val, polynomial, frame_id + 1))
        if self.is_poly_probability(polynomial, low_val, high_val,
                                    input_variable.variable):
            return [(low_val, low_delta, high_val, high_delta, polynomial)]
        else:
            return []
    else:
        # Refine the interpolation using counterexamples to generalization (CTGs).
        i = 1
        while i <= self.settings.max_num_ctgs:
            # if len(data_points) == 4:
            #     del data_points[3]
            value_of_input_variable_from_ctg = rel_ind_result[
                input_variable.variable]
            # print('CTG: Value of input var %s (CTG %s): %s' % (
            #     input_variable.name, i, value_of_input_variable_from_ctg))
            approx_phi_value = self.approximate_phi_value_for_state(
                frame_id, [
                    eq_no_coerce(var.variable, val) if var != input_variable
                    else eq_no_coerce(var.variable,
                                      value_of_input_variable_from_ctg)
                    for var, val in state_valuation.items()
                ])
            data_points = self.replace_or_add(
                data_points, value_of_input_variable_from_ctg, approx_phi_value)
            polynomial = self._interpolator.get_interpolating_polynomial(
                data_points, input_variable.variable)
            # print('Current values for input var: %s' % [x[0] for x in data_points])
            # print('Current polynomial: %s' % polynomial)
            # print('Data points after interpolation: %s' % data_points)
            rel_ind_result = self.p_solver.is_relative_inductive(
                frame_id, state_args, polynomial)
            if rel_ind_result is True:
                if self.is_poly_probability(polynomial, low_val, high_val,
                                            input_variable.variable):
                    return [(low_val, low_delta, high_val, high_delta,
                             polynomial)]
                else:
                    return []
            i = i + 1

        # CTG budget exhausted: split the interval at the midpoint and recurse.
        if self.settings.int_to_real:
            intermediate_val = low_val + z3_real_floored_division(
                high_val - low_val, IntVal(2))
        else:
            intermediate_val = low_val + z3_integer_division(
                high_val - low_val, IntVal(2))
        # intermediate_val = IntVal(2)
        intermediate_delta = self.approximate_phi_value_for_state(
            frame_id, [
                eq_no_coerce(var.variable, val) if var != input_variable
                else eq_no_coerce(var.variable, intermediate_val)
                for var, val in state_valuation.items()
            ])
        splits = splits + 1
        if splits <= Generalizer.split_limit:
            res_1 = self.get_generalization_polynomials(
                frame_id, state_id, low_val, low_delta, intermediate_val,
                intermediate_delta, input_variable, splits)
            res_2 = self.get_generalization_polynomials(
                frame_id, state_id, intermediate_val, intermediate_delta,
                high_val, high_delta, input_variable, splits)
            return res_1 + res_2
        else:
            return []