def code_py_value_as_const(
    self, env: Environment, py_value) -> Union[_NotImplementedType, _ir.Value]:
  """Materializes a Python literal as an IR constant value.

  Args:
    env: The compilation environment; provides the import context `ic` and
      the target (for implementation int/float types).
    py_value: An arbitrary Python value to encode.

  Returns:
    The encoded `_ir.Value`, or `NotImplemented` when `py_value` is of a
    type this coder does not handle.
  """
  ic = env.ic
  with ic.loc, ic.ip:
    # `is True` / `is False` are identity tests so bools are claimed here,
    # before the `isinstance(..., int)` check (bool is an int subclass).
    if py_value is True:
      return basicpy_ops.BoolConstantOp(ic.bool_type, ic.i1_true).result
    if py_value is False:
      return basicpy_ops.BoolConstantOp(ic.bool_type, ic.i1_false).result
    if py_value is None:
      return basicpy_ops.SingletonOp(ic.none_type).result
    if isinstance(py_value, int):
      int_type = env.target.impl_int_type
      int_attr = _ir.IntegerAttr.get(int_type, py_value)
      return std_ops.ConstantOp(int_type, int_attr).result
    if isinstance(py_value, float):
      float_type = env.target.impl_float_type
      float_attr = _ir.FloatAttr.get(float_type, py_value)
      return std_ops.ConstantOp(float_type, float_attr).result
    if isinstance(py_value, str):
      return basicpy_ops.StrConstantOp(ic.str_type,
                                       _ir.StringAttr.get(py_value)).result
    if isinstance(py_value, bytes):
      return basicpy_ops.BytesConstantOp(ic.bytes_type,
                                         _ir.StringAttr.get(py_value)).result
    if isinstance(py_value, type(...)):
      return basicpy_ops.SingletonOp(ic.ellipsis_type).result
  return NotImplemented
def constant(self, type_var_name: str, value: Any) -> Value:
  """Emits a std.constant of the type bound to a type variable.

  Args:
    type_var_name: Name of a type variable bound in `self.type_mapping`.
    value: Python value for the constant; coerced with `float()`/`int()`
      to match the bound type.

  Returns:
    The result Value of the emitted `std.ConstantOp`.

  Raises:
    ValueError: If the type variable is unbound, or the value cannot be
      coerced to the bound type.
    NotImplementedError: If the bound type is neither floating point nor
      integer.
  """
  try:
    bound_type = self.type_mapping[type_var_name]
  except KeyError as e:
    # BUG FIX: the message previously read `self.type_mappings` (no such
    # attribute), so the unbound-variable path raised AttributeError
    # instead of the intended ValueError; also closes the unbalanced '('.
    raise ValueError(f"Unbound type variable '{type_var_name}' ("
                     f"expected one of {self.type_mapping.keys()})") from e
  try:
    if _is_floating_point_type(bound_type):
      return std.ConstantOp(bound_type,
                            FloatAttr.get(bound_type, float(value))).result
    if _is_integer_type(bound_type):
      return std.ConstantOp(bound_type,
                            IntegerAttr.get(bound_type, int(value))).result
  except ValueError as e:
    raise ValueError(
        f"Unable to cast value {value} to type {bound_type}") from e
  raise NotImplementedError(f"Unimplemented constant type {bound_type}")
def fill_tensor(out):
  """Returns `out` filled with the constant 0.0."""
  cst_zero = std.ConstantOp(value=FloatAttr.get(f32, 0.), result=f32).result
  fill_op = linalg.FillOp(output=out, value=cst_zero)
  # TODO: FillOp.result is None. When len(results) == 1 we expect it to
  # be results[0] as per _linalg_ops_gen.py. This seems like an
  # orthogonal bug in the generator of _linalg_ops_gen.py.
  return fill_op.results[0]
def expression(self, expr: ScalarExpression) -> Value:
  """Recursively emits IR for one scalar body expression.

  Dispatches on whichever of the expression's variant fields is set:
  scalar_arg (block argument lookup), scalar_const (parsed attribute
  constant), scalar_index (linalg.index), scalar_apply (named body
  function applied to recursively-emitted operands), or symbolic_cast.

  Args:
    expr: The scalar expression to emit.

  Returns:
    The emitted Value.

  Raises:
    ValueError: If a scalar_arg is not bound or a scalar_apply names an
      unknown body function.
    NotImplementedError: If no known variant field is set.
  """
  if expr.scalar_arg:
    try:
      return self.block_arg_mapping[expr.scalar_arg.arg]
    except KeyError as e:
      # Chain explicitly so the originating KeyError is preserved as the
      # __cause__ of the user-facing error.
      raise ValueError(f"Argument {expr.scalar_arg.arg} is not bound for "
                       f"this structured op.") from e
  elif expr.scalar_const:
    value_attr = Attribute.parse(expr.scalar_const.value)
    return std.ConstantOp(value_attr.type, value_attr).result
  elif expr.scalar_index:
    dim_attr = IntegerAttr.get(IntegerType.get_signless(64),
                               expr.scalar_index.dim)
    return linalg.IndexOp(IndexType.get(), dim_attr).result
  elif expr.scalar_apply:
    try:
      fn = getattr(self, f"_eval_{expr.scalar_apply.fn_name}")
    except AttributeError as e:
      raise ValueError(
          f"Function '{expr.scalar_apply.fn_name}' is not a known "
          "scalar body function") from e
    operand_values = [
        self.expression(operand) for operand in expr.scalar_apply.operands
    ]
    return fn(*operand_values)
  elif expr.symbolic_cast:
    operand_value = self.expression(expr.symbolic_cast.operand)
    return self.cast(expr.symbolic_cast.to_type.name, operand_value)
  raise NotImplementedError(f"Unimplemented scalar body expression: {expr}")
def emit_index(index):
  """Emits `index` as an index-typed constant; None becomes the singleton."""
  if index is None:
    return basicpy_ops.SingletonOp(ic.none_type, loc=ic.loc, ip=ic.ip).result
  index_attr = _ir.IntegerAttr.get(ic.index_type, int(index))
  return std_ops.ConstantOp(ic.index_type, index_attr, loc=ic.loc,
                            ip=ic.ip).result
def matmul_on_tensors(*outer_args):
  """Applies `op` to all but the last arg, accumulating into a zero-filled
  copy of the last (output) tensor."""
  # TODO: in the future, should be writeable more concisely as:
  #   zero = std.constant(0.0, elem_type)
  #   tmp = linalg.fill(out, zero)
  #   linalg.matmul(lhs, rhs, tmp)
  zero = std.ConstantOp(value=FloatAttr.get(return_elem_type, 0.),
                        result=return_elem_type).result
  out_init = linalg.FillOp(output=outer_args[-1], value=zero).results[0]
  return op(*outer_args[:-1], outs=[out_init])
def _get_external_array_value(self, external_array):
  """Imports an ndarray as a dense constant, memoizing by object identity.

  Args:
    external_array: The numpy ndarray to import.

  Returns:
    The constant `_ir.Value` for the array (cached across calls).

  Raises:
    TracingError: If `external_array` is not an ndarray.
  """
  ic = self._ic
  if not isinstance(external_array, np.ndarray):
    raise TracingError("Expected ndarray but got: %r" % (external_array,))
  cache_key = id(external_array)
  cached = self._external_arrays.get(cache_key)
  if cached:
    return cached[1]
  # Import it.
  dense_attr = _ir.DenseElementsAttr.get(external_array, context=ic.context)
  const_value = std_ops.ConstantOp(dense_attr.type, dense_attr, loc=ic.loc,
                                   ip=ic.ip).result
  # The ndarray itself is retained in the cache entry so that its id()
  # remains unique for the lifetime of the mapping.
  self._external_arrays[cache_key] = (external_array, const_value)
  return const_value
def _emit_slice_value(self, slice_element): ic = self._ic if slice_element == None: return basicpy_ops.SingletonOp(ic.none_type, loc=ic.loc, ip=ic.ip).result elif slice_element == Ellipsis: return basicpy_ops.SingletonOp(ic.ellipsis_type, loc=ic.loc, ip=ic.ip).result elif isinstance(slice_element, int): return std_ops.ConstantOp(ic.index_type, _ir.IntegerAttr.get(ic.index_type, slice_element), loc=ic.loc, ip=ic.ip).result elif isinstance(slice_element, slice): return self._emit_slice_object(slice_element) else: # Assume array convertible. raise NotImplementedError( "TODO: Slicing with generic arrays not yet implemented")
def code_py_value_as_const(
    self, env: Environment, py_value) -> Union[_NotImplementedType, _ir.Value]:
  """Materializes a numpy ndarray as a constant IR array value.

  Args:
    env: The compilation environment; provides the import context `ic`.
    py_value: An arbitrary Python value; only ndarrays are handled here.

  Returns:
    The resulting array `_ir.Value`, or `NotImplemented` for any
    non-ndarray input.
  """
  # TODO: Query for ndarray compat (for duck typed and such)
  # TODO: Have a higher level name resolution signal which indicates const
  ic = env.ic
  if not isinstance(py_value, np.ndarray):
    return NotImplemented
  dense_attr = _ir.DenseElementsAttr.get(py_value, context=ic.context)
  tensor_value = std_ops.ConstantOp(dense_attr.type, dense_attr, loc=ic.loc,
                                    ip=ic.ip).result
  ndarray_type = _cext.shaped_to_ndarray_type(dense_attr.type)
  return numpy_ops.CreateArrayFromTensorOp(ndarray_type, tensor_value,
                                           loc=ic.loc, ip=ic.ip).result
def fill_buffer(out):
  """Fills the buffer `out` with 0.0 in place; returns nothing."""
  cst_zero = std.ConstantOp(value=FloatAttr.get(f32, 0.), result=f32).result
  linalg.FillOp(output=out, value=cst_zero)