def vexop_to_simop(op, extended=True, fp=True):
    """
    Resolve a VEX operation name to its SimIROp.

    :param op:       the VEX operation name (e.g. ``Iop_Add32``)
    :param extended: if the op is not in the precomputed table, attempt to
                     build a SimIROp from the parsed name attributes
    :param fp:       whether floating-point operations are permitted
    :raises UnsupportedIROpError: if the op cannot be resolved, or if it is a
                                  floating-point op and ``fp`` is False
    """
    simop = operations.get(op)

    # Not precomputed: lazily construct it from the parsed op name, if allowed.
    if simop is None and extended:
        parsed = op_attrs(op)
        if parsed is None:
            raise UnsupportedIROpError("Operation not implemented")
        simop = SimIROp(op, **parsed)

    if simop is None:
        raise UnsupportedIROpError("Operation not implemented")
    if simop._float and not fp:
        raise UnsupportedIROpError("Floating point support disabled")

    return simop
def __init__(self, name: str, **attrs):
    """
    Build a SimIROp for the VEX operation ``name``.

    ``attrs`` are the fields parsed out of the op name (generic_name,
    from/to size/side/type/signedness, vector attributes, rounding mode);
    each is stored on ``self`` as ``_<attr>``.  The constructor then
    determines the output size from pyvex, categorizes the op (float vs.
    int, conversion, etc.), and selects the implementation function
    ``self._calculate``.

    :raises SimOperationError:    if the declared output size disagrees with
                                  pyvex's reported return type size
    :raises UnsupportedIROpError: for BCD ops, or when no implementation can
                                  be identified
    """
    self.name = name
    self.op_attrs = attrs

    # default every parsed-attribute slot to None so the elif chain below
    # can test them without hasattr checks
    self._generic_name = None
    self._from_size = None
    self._from_side = None
    self._from_type = None
    self._from_signed = None
    self._to_size = None
    self._to_type = None
    self._to_signed = None
    self._conversion = None
    self._vector_size = None
    self._vector_signed = None
    self._vector_type = None
    self._vector_zero = None
    self._vector_count = None
    self._rounding_mode = None

    # copy the parsed attrs onto self; size/count attrs arrive as strings
    # (regex captures, presumably) and are coerced to int
    for k, v in self.op_attrs.items():
        if v is not None and ('size' in k or 'count' in k):
            v = int(v)
        setattr(self, '_%s' % k, v)

    # determine the output size
    #pylint:disable=no-member
    self._output_type = pyvex.get_op_retty(name)
    #pylint:enable=no-member
    self._output_size_bits = pyvex.const.get_type_size(self._output_type)

    # sanity-check the size parsed from the name against pyvex's return type;
    # DivMod ops produce a quotient:remainder pair, hence double the _to_size
    size_check = self._to_size is None or (
        self._to_size * 2 if self._generic_name == 'DivMod' else self._to_size) == self._output_size_bits
    if not size_check:
        raise SimOperationError(
            "VEX output size doesn't match detected output size")

    #
    # Some categorization
    #

    # record this op in the module-level bookkeeping collections
    generic_names.add(self._generic_name)
    if self._conversion is not None:
        conversions[(self._from_type, self._from_signed, self._to_type,
                     self._to_signed)].append(self)

    # 'F' = float, 'D' = BCD/decimal; any of vector/from/to type being one
    # of those marks the whole op as floating-point
    if len({self._vector_type, self._from_type, self._to_type} & {'F', 'D'}) != 0:
        self._float = True

        if len({self._vector_type, self._from_type, self._to_type} & {'D'}) != 0:
            # fp_ops.add(self.name)
            raise UnsupportedIROpError("BCD ops aren't supported")
    else:
        self._float = False

    #
    # Now determine the operation
    #

    self._calculate = None

    # is it explicitly implemented?
    if hasattr(self, '_op_' + name):
        self._calculate = getattr(self, '_op_' + name)
    # if the generic name is None and there's a conversion present, this is a standard
    # widening or narrowing or sign-extension
    elif self._generic_name is None and self._conversion:
        # convert int to float
        if self._float and self._from_type == 'I':
            self._calculate = self._op_int_to_fp

        # convert float to differently-sized float
        elif self._from_type == 'F' and self._to_type == 'F':
            self._calculate = self._op_fp_to_fp

        elif self._from_type == 'F' and self._to_type == 'I':
            self._calculate = self._op_fp_to_int

        # this concatenates the args into the high and low halves of the result
        elif self._from_side == 'HL':
            self._calculate = self._op_concat

        # this just returns the high half of the first arg
        elif self._from_size > self._to_size and self._from_side == 'HI':
            self._calculate = self._op_hi_half

        # this just returns the low half of the first arg
        elif self._from_size > self._to_size and self._from_side in ('L', 'LO'):
            self._calculate = self._op_lo_half

        # narrowing with no side specified: generic extract
        elif self._from_size > self._to_size and self._from_side is None:
            self._calculate = self._op_extract

        elif self._from_size < self._to_size and self.is_signed:
            self._calculate = self._op_sign_extend

        elif self._from_size < self._to_size and not self.is_signed:
            self._calculate = self._op_zero_extend

        else:
            l.error(
                "%s is an unexpected conversion operation configuration",
                self)
            assert False

    elif self._float and self._vector_zero:
        # /* --- lowest-lane-only scalar FP --- */
        # prefer the claripy fp builtin, curried with the default rounding mode
        f = getattr(claripy, 'fp' + self._generic_name, None)
        if f is not None:
            f = partial(f, claripy.fp.RM.default())  # always? really?
        # otherwise fall back to our own _op_fgeneric_* implementation
        f = f if f is not None else getattr(
            self, '_op_fgeneric_' + self._generic_name, None)
        if f is None:
            raise SimOperationError(
                "no fp implementation found for operation {}".format(
                    self._generic_name))

        self._calculate = partial(self._auto_vectorize, f)

    # other conversions
    elif self._conversion and self._generic_name not in {
            'Round', 'Reinterp'
    }:
        if self._generic_name == "DivMod":
            self._calculate = self._op_divmod
        else:
            # tracked but unimplemented; counted for diagnostics
            unsupported_conversions.append(self.name)
            common_unsupported_generics[self._generic_name] += 1

    # generic bitwise
    elif self._generic_name in bitwise_operation_map:
        assert self._from_side is None
        self._calculate = self._op_mapped

    # generic mapping operations
    elif self._generic_name in arithmetic_operation_map or \
            self._generic_name in shift_operation_map:
        assert self._from_side is None

        # pick the mapped-op variant by float-ness and vector-ness
        if self._float and self._vector_count is None:
            self._calculate = self._op_float_mapped
        elif not self._float and self._vector_count is not None:
            self._calculate = self._op_vector_mapped
        elif self._float and self._vector_count is not None:
            self._calculate = self._op_vector_float_mapped
        else:
            self._calculate = self._op_mapped

    # TODO: clean up this mess
    # specifically-implemented generics
    elif self._float and hasattr(self, '_op_fgeneric_%s' % self._generic_name):
        calculate = getattr(self, '_op_fgeneric_%s' % self._generic_name)
        if self._vector_size is not None and \
                not hasattr(calculate, 'supports_vector'):
            # NOTE: originally this branch just marked the op as unsupported but I think we can do better
            # "marking unsupported" seems to include adding the op to the vector_operations list? why
            self._calculate = partial(self._auto_vectorize, calculate)
        else:
            self._calculate = calculate

    elif not self._float and hasattr(
            self, '_op_generic_%s' % self._generic_name):
        calculate = getattr(self, '_op_generic_%s' % self._generic_name)
        if self._vector_size is not None and \
                not hasattr(calculate, 'supports_vector'):
            # NOTE: same as above
            self._calculate = partial(self._auto_vectorize, calculate)
        else:
            self._calculate = calculate

    else:
        common_unsupported_generics[self._generic_name] += 1
        other_operations.append(name)

    # if we're here and calculate is None, we don't support this
    if self._calculate is None:
        raise UnsupportedIROpError(
            "no calculate function identified for %s" % self.name)