def from_backend_value(self, v, t):
    """Translate a backend result `v` into an intermediate value described by `t`."""
    if isinstance(t, abstract.AbstractScalar):
        return self.to_scalar(v)
    if isinstance(t, abstract.AbstractArray):
        # Backend tensors come back as torch tensors; move them to numpy first.
        arr = self.to_numpy(v)
        elem_xtype = t.element.xtype()
        if elem_xtype and elem_xtype not in _type_map:
            # Probably u16, u32 or u64. Let's cast.
            arr = arr.astype(type_to_np_dtype(elem_xtype))
        return arr
    if isinstance(t, abstract.AbstractTuple):
        # Convert element-wise, pairing each value with its abstract type.
        return tuple(self.from_backend_value(item, item_t)
                     for item, item_t in zip(v, t.elements))
    if isinstance(t, abstract.AbstractTaggedUnion):
        inner = self.from_backend_value(v.value, t.options.get(v.tag))
        return TaggedValue(v.tag, inner)
    if isinstance(t, abstract.AbstractRandomState):
        return RandomStateWrapper(self.to_numpy(v))
    if isinstance(t, abstract.AbstractType):
        if isinstance(t.element, abstract.AbstractHandle):
            return HandleInstance
        myia_type = t.element.xtype()
        if myia_type in _type_map:
            # Known scalar type: surface the matching numpy dtype class.
            return getattr(np, type_to_np_dtype(myia_type))
        return v
    raise NotImplementedError(f"Don't know what to do for {t}")
def test_type_conversions():
    """Check the numpy-dtype-name / myia-type mapping, including rejections."""
    # NOTE(review): an identical definition of this test (differing only in
    # quote style) appears later in this file and shadows this one at import
    # time — consider deleting one of the two copies.
    assert np_dtype_to_type('float32') is Float[32]
    with pytest.raises(TypeError):
        np_dtype_to_type('float80')
    assert type_to_np_dtype(Float[16]) == 'float16'
    with pytest.raises(TypeError):
        type_to_np_dtype(Object)
def test_type_conversions():
    """Round-trip dtype names to myia types and verify bad inputs raise."""
    # Successful conversions in both directions.
    assert np_dtype_to_type("float32") is Float[32]
    assert type_to_np_dtype(Float[16]) == "float16"
    # Unknown dtype name and unmapped myia type must raise TypeError.
    with pytest.raises(TypeError):
        np_dtype_to_type("float80")
    with pytest.raises(TypeError):
        type_to_np_dtype(Object)
def python_scalar_to_array(c, x, t):
    """Implementation for primitive scalar_to_array (emits Python source)."""
    assert t.is_constant(AbstractArray)
    elem = t.value.element
    if elem is ANYTHING or elem.xtype() is ANYTHING:
        # No concrete element type is known: let numpy infer the dtype.
        return f"np.array({c.ref(x)})"
    np_name = type_to_np_dtype(elem.xtype())
    return f"np.array({c.ref(x)}, dtype='{np_name}')"
def relay_take_grad_inp(c, _nb_indices, _indices, _values):
    """Build the Relay graph accumulating `take` gradients per output row."""
    assert _nb_indices.is_constant(int)
    dout = c.ref(_values)
    # Append a trailing axis of size 1 so the index tensor broadcasts
    # against the gradient values.
    idx = relay.reshape(c.ref(_indices),
                        tuple(_indices.abstract.xshape()) + (1, ))
    row_count = _nb_indices.value
    width = _values.abstract.xshape()[-1]
    idx_dtype = type_to_np_dtype(_indices.abstract.element.xtype())
    val_dtype = type_to_np_dtype(_values.abstract.element.xtype())

    def _row(i):
        # Mask the positions whose index equals row i, then sum their
        # gradient contributions into a single (1, width) row.
        mask = relay.cast(relay.equal(idx, relay.const(i, idx_dtype)),
                          val_dtype)
        picked = relay.reshape(relay.multiply(mask, dout), (-1, width))
        return relay.reshape(relay.sum(picked, 0), (1, width))

    rows = [_row(i) for i in range(row_count)]
    return relay.concatenate(rows, 0)
def convert_type(self, v, t):
    """Map the AbstractType `t` to the Python/numpy object that stands for it."""
    if isinstance(t.element, AbstractHandle):
        return HandleInstance
    myia_type = t.element.xtype()
    if myia_type is Tuple:
        return tuple
    return getattr(np, type_to_np_dtype(myia_type))
def pytorch_scalar_cast(op):
    """Implementation of scalar_cast."""
    value_node = op.inputs[1]
    target = op.inputs[2]
    assert target.is_constant()
    np_name = type_to_np_dtype(target.value.xtype())

    def _impl(v):
        # Cast and wrap in a 1-tuple, matching the backend's output shape.
        return (v.astype(np_name), )

    return _impl, (value_node, )
def convert_scalar(self, v, t):
    """Render scalar `v` of myia type `t` as a raw value or numpy source snippet."""
    dtype_name = type_to_np_dtype(t)
    # These dtypes map directly onto Python builtins, so emit the raw value.
    native_casts = {
        "bool": bool,
        "int64": int,
        "uint64": int,
        "float64": float,
    }
    caster = native_casts.get(dtype_name)
    if caster is not None:
        return caster(v)
    # Anything else becomes a numpy-constructor source string.
    return f"np.{dtype_name}({v})"
def to_relay_type(self, a: AbstractScalar):
    """Convert a myia abstract to a Relay type."""
    tp = a.xtype()
    if issubclass(tp, Bool):
        return relay.ty.scalar_type("bool")
    if issubclass(tp, Nil):
        # Nil is represented as the empty tuple type.
        return relay.ty.TupleType([])
    if issubclass(tp, EnvType):
        return env_type()
    if issubclass(tp, UniverseType):
        # The universe token also lowers to an empty tuple.
        return relay.ty.TupleType([])
    return relay.ty.scalar_type(type_to_np_dtype(tp))
def convert_scalar(self, v, t):
    """Convert the scalar to a TVM array."""
    np_ctor = getattr(np, type_to_np_dtype(t))
    return tvm.runtime.ndarray.array(np_ctor(v), self.context)
def convert_bool(self, v, t):
    """Embed the boolean `v` as a Relay constant of the dtype matching `t`."""
    dtype_name = type_to_np_dtype(t)
    return relay.const(v, dtype_name)
def relay_cast(c, v, t):
    """Implementation of scalar_cast/array_cast for Relay."""
    # Resolve the value reference first, preserving the original call order.
    node = c.ref(v)
    assert t.is_constant()
    target_dtype = type_to_np_dtype(t.value.xtype())
    return relay.cast(node, target_dtype)
def to_relay_type(self, a: AbstractArray):
    """Convert a myia array abstract to a Relay TensorType."""
    elem_xtype = a.element.xtype()
    return relay.ty.TensorType(a.xshape(), type_to_np_dtype(elem_xtype))
def python_scalar_cast(c, x, t):
    """Implementation for primitive scalar_cast (emits Python source)."""
    assert t.is_constant()
    np_name = type_to_np_dtype(t.value.xtype())
    expr = c.ref(x)
    return "np.{}({})".format(np_name, expr)
def python_array_cast(c, x, t):
    """Implementation for primitive array_cast (emits Python source)."""
    assert t.is_constant()
    target = type_to_np_dtype(t.value.xtype())
    expr = c.ref(x)
    return "{}.astype('{}')".format(expr, target)
def from_scalar(self, s, t):
    """Convert scalar `s` into a numpy array of the dtype matching `t`.

    None passes through unchanged.
    """
    if s is None:
        return None
    return np.asarray(s, dtype=type_to_np_dtype(t))