def _get_func(self):
    """
    Return a function that makes a value from an integer.

    The integer value is assumed to be a valid pointer for the
    type and no check is done to ensure that.
    """
    from aesara.scalar import get_scalar_type

    if self._fn is None:
        with change_flags(compute_test_value="off"):
            v = get_scalar_type("int64")()
            self._fn = aesara.function(
                [v],
                _make_cdata(self)(v),
                mode=aesara.Mode(optimizer=None),
                profile=False,
            )
    return self._fn
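# A minimal, standalone sketch of the pattern `_get_func` relies on (the graph
# below is illustrative and not part of this module): compile an Aesara
# function whose single input is an int64 scalar variable, with graph
# optimizations disabled, then call it with a plain Python integer.
#
#     import aesara
#     from aesara.scalar import get_scalar_type
#
#     v = get_scalar_type("int64")()
#     fn = aesara.function(
#         [v], v + 1, mode=aesara.Mode(optimizer=None), profile=False
#     )
#     assert fn(41) == 42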
def c_code_cache_version(self):
    scalar_version = aes.get_scalar_type(self.dtype).c_code_cache_version()
    if scalar_version:
        return (11,) + scalar_version
    else:
        return ()

def c_init_code(self, **kwargs):
    return aes.get_scalar_type(self.dtype).c_init_code(**kwargs)

def c_compile_args(self, **kwargs):
    return aes.get_scalar_type(self.dtype).c_compile_args(**kwargs)

def c_libraries(self, **kwargs):
    return aes.get_scalar_type(self.dtype).c_libraries(**kwargs)

def c_headers(self, **kwargs):
    return aes.get_scalar_type(self.dtype).c_headers(**kwargs)
def to_scalar_type(self):
    return aes.get_scalar_type(dtype=self.dtype)
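# Illustrative sketch (assumes the enclosing class is Aesara's TensorType;
# the variable names are hypothetical): the C-interface methods above all
# defer to the ScalarType carrying the same dtype, which `to_scalar_type`
# exposes directly.
#
#     import aesara.tensor as at
#
#     tt = at.dvector("v").type      # TensorType with dtype "float64"
#     st = tt.to_scalar_type()       # ScalarType with the same dtype
#     assert st.dtype == tt.dtype == "float64"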
def safe_new(x, tag="", dtype=None):
    """
    Internal function that constructs a new variable from x with the same
    type, but with a different name (old name + tag). This function is used
    by gradient, or the R-op to construct new variables for the inputs of
    the inner graph such that there is no interference between the original
    graph and the newly constructed graph.

    """
    if hasattr(x, "name") and x.name is not None:
        nw_name = x.name + tag
    else:
        nw_name = None

    if isinstance(x, aesara.Constant):
        if dtype and x.dtype != dtype:
            casted_x = x.astype(dtype)
            nwx = x.__class__(casted_x.type, x.data, x.name)
            nwx.tag = copy.copy(x.tag)
            return nwx
        else:
            return x.clone()
    # Note, as_tensor_variable will convert the Scalar into a
    # TensorScalar that will require a ScalarFromTensor op,
    # making the pushout optimization fail
    elif isinstance(x, scalar.ScalarVariable):
        if dtype:
            nw_x = scalar.get_scalar_type(dtype=dtype)()
        else:
            nw_x = x.type()
        nw_x.name = nw_name
        if aesara.config.compute_test_value != "off":
            # Copy test value, cast it if necessary
            try:
                x_test_value = gof.op.get_test_value(x)
            except TestValueError:
                pass
            else:
                # This clause is executed if no exception was raised
                nw_x.tag.test_value = nw_x.type.filter(x_test_value)
        return nw_x
    else:
        try:
            x = tensor.as_tensor_variable(x)
        except TypeError:
            # This could happen for example for random states
            pass

        # Cast x if needed. If x has a test value, this will also cast it.
        if dtype and x.dtype != dtype:
            x = x.astype(dtype)

        nw_x = x.type()
        nw_x.name = nw_name
        # Preserve test values so that the 'compute_test_value' option can be used.
        # The test value is deep-copied to ensure there can be no interactions
        # between test values, due to inplace operations for instance. This may
        # not be the most efficient memory-wise, though.
        if aesara.config.compute_test_value != "off":
            try:
                nw_x.tag.test_value = copy.deepcopy(gof.op.get_test_value(x))
            except TestValueError:
                pass

    return nw_x
def safe_new(
    x: Variable, tag: str = "", dtype: Optional[Union[str, np.dtype]] = None
) -> Variable:
    """Clone variables.

    Internal function that constructs a new variable from `x` with the same
    type, but with a different name (old name + tag). This function is used
    by `gradient`, or the R-op to construct new variables for the inputs of
    the inner graph such that there is no interference between the original
    graph and the newly constructed graph.

    """
    if hasattr(x, "name") and x.name is not None:
        nw_name = x.name + tag
    else:
        nw_name = None

    if isinstance(x, Constant):
        # TODO: Do something better about this
        assert isinstance(x.type, HasDataType)

        if dtype and x.type.dtype != dtype:
            casted_x = cast(x, dtype)
            nwx = type(x)(casted_x.type, x.data, x.name)
            nwx.tag = copy.copy(x.tag)
            return nwx
        else:
            return x
    # Note, `as_tensor_variable` will convert the `ScalarType` into a
    # `TensorScalar` that will require a `ScalarFromTensor` `Op`, making the
    # push-out optimization fail
    elif isinstance(x, aes.ScalarVariable):
        if dtype:
            nw_x = aes.get_scalar_type(dtype=dtype)()
        else:
            nw_x = x.type()

        nw_x.name = nw_name

        if config.compute_test_value != "off":
            # Copy test value, cast it if necessary
            try:
                x_test_value = get_test_value(x)
            except TestValueError:
                pass
            else:
                # This clause is executed if no exception was raised
                nw_x.tag.test_value = nw_x.type.filter(x_test_value)

        return nw_x
    else:
        try:
            x = at.as_tensor_variable(x)
        except TypeError:
            # This could happen for example for random states
            pass

        # Cast `x` if needed. If `x` has a test value, this will also cast it.
        if dtype:
            # TODO: Do something better about this
            assert isinstance(x.type, HasDataType)

            if x.type.dtype != dtype:
                x = cast(x, dtype)

        nw_x = x.type()
        nw_x.name = nw_name

        # Preserve test values so that the `compute_test_value` option can be
        # used. The test value is deep-copied to ensure there can be no
        # interactions between test values, due to inplace operations for
        # instance. This may not be the most efficient memory-wise, though.
        if config.compute_test_value != "off":
            try:
                nw_x.tag.test_value = copy.deepcopy(get_test_value(x))
            except TestValueError:
                pass

    return nw_x
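# Minimal usage sketch of `safe_new` (hypothetical variable names; assumes an
# environment where `aesara.tensor` is importable): the clone keeps the same
# type but appends the tag to the name, so it can stand in for the original
# input inside an inner graph without aliasing it.
#
#     import aesara.tensor as at
#
#     x = at.vector("x")
#     x_new = safe_new(x, tag="_copy")
#     assert x_new.type == x.type
#     assert x_new.name == "x_copy"
#
#     # Passing `dtype` additionally casts the clone:
#     x64 = safe_new(x, tag="_f64", dtype="float64")
#     assert x64.dtype == "float64"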