def c_code_cache_version(self):
    """Return the cache-version tuple for this type's generated C code.

    Combines a local version number with the underlying scalar type's
    version; an empty tuple disables caching.
    """
    version = scal.get_scalar_type(self.dtype).c_code_cache_version()
    # An empty/falsy scalar version means "do not cache" — propagate it.
    if not version:
        return ()
    return (11,) + version
def _get_func(self):
    """Compile (once) and return a Theano function taking an int64 input.

    The compiled function is cached on ``self._fn`` so subsequent calls
    reuse it instead of recompiling.
    """
    from theano.scalar import get_scalar_type

    # Lazy, one-time compilation.
    if self._fn is None:
        inp = get_scalar_type('int64')()
        self._fn = theano.function([inp],
                                   _make_cdata(self)(inp),
                                   profile=False)
    return self._fn
def safe_new(x, tag='', dtype=None):
    """
    Internal function that constructs a new variable from ``x`` with the same
    type, but with a different name (old name + tag).

    This function is used by gradient, or the R-op to construct new variables
    for the inputs of the inner graph such that there is no interference
    between the original graph and the newly constructed graph.

    Parameters
    ----------
    x
        Variable (or value convertible by ``tensor.as_tensor_variable``)
        to clone.
    tag : str
        Suffix appended to ``x.name`` (when present) to form the new name.
    dtype : str, optional
        When given and different from ``x.dtype``, the clone is cast to it.

    Returns
    -------
    A fresh variable of the same (possibly re-typed) type as ``x``.
    """
    if hasattr(x, 'name') and x.name is not None:
        nw_name = x.name + tag
    else:
        nw_name = None

    if isinstance(x, theano.Constant):
        if dtype and x.dtype != dtype:
            casted_x = x.astype(dtype)
            nwx = x.__class__(casted_x.type, x.data, x.name)
            # BUG FIX: `copy` here is the module (see `copy.deepcopy`
            # below), so the previous bare call ``copy(x.tag)`` would
            # raise TypeError. Use ``copy.copy`` for a shallow tag copy.
            nwx.tag = copy.copy(x.tag)
            return nwx
        else:
            return x.clone()
    # Note, as_tensor_variable will convert the Scalar into a
    # TensorScalar that will require a ScalarFromTensor op,
    # making the pushout optimization fail
    elif isinstance(x, scalar.ScalarVariable):
        if dtype:
            nw_x = scalar.get_scalar_type(dtype=dtype)()
        else:
            nw_x = x.type()
        nw_x.name = nw_name
        return nw_x
    else:
        try:
            x = tensor.as_tensor_variable(x)
        except TypeError:
            # This could happen for example for random states, and I really
            # want to avoid the convoluted logic that checks for cuda
            # ndarrays
            pass
        nw_x = x.type()
        if dtype and nw_x.dtype != dtype:
            nw_x = nw_x.astype(dtype).type()
        nw_x.name = nw_name
        # Preserve test values so that the 'compute_test_value' option can
        # be used. The test value is deep-copied to ensure there can be no
        # interactions between test values, due to inplace operations for
        # instance. This may not be the most efficient memory-wise, though.
        if theano.config.compute_test_value != 'off':
            try:
                nw_x.tag.test_value = copy.deepcopy(gof.op.get_test_value(x))
            except AttributeError:
                # This means `x` has no test value.
                pass
        return nw_x
def __new__(self, *types):
    """
    Upgrade any int types to float32 or float64 to avoid losing precision.
    """
    # Narrow ints (8/16-bit) are representable in float32; wider ints
    # (32/64-bit) need float64 to avoid precision loss.
    promote = {
        scalar.int8: scalar.float32,
        scalar.int16: scalar.float32,
        scalar.int32: scalar.float64,
        scalar.int64: scalar.float64,
        scalar.uint8: scalar.float32,
        scalar.uint16: scalar.float32,
        scalar.uint32: scalar.float64,
        scalar.uint64: scalar.float64,
    }
    upgraded = []
    for t in types:
        upcast_dtype = scalar.Scalar.upcast(promote.get(t, t))
        upgraded.append(scalar.get_scalar_type(upcast_dtype))
    return upgraded
def _get_func(self):
    """
    Return a function that makes a value from an integer.

    The integer value is assumed to be a valid pointer for the
    type and no check is done to ensure that.
    """
    from theano.scalar import get_scalar_type

    # Compile at most once; cache the compiled function on the instance.
    if self._fn is None:
        # The dummy int64 input carries no test value, so turn the
        # compute_test_value machinery off while building the graph.
        with change_flags(compute_test_value='off'):
            inp = get_scalar_type('int64')()
            self._fn = theano.function([inp],
                                       _make_cdata(self)(inp),
                                       profile=False)
    return self._fn
def _get_func(self):
    """
    Return a function that makes a value from an integer.

    The integer value is assumed to be a valid pointer for the
    type and no check is done to ensure that.
    """
    from theano.scalar import get_scalar_type

    # Compile at most once; cache the compiled function on the instance.
    if self._fn is None:
        # The dummy int64 input carries no test value, so turn the
        # compute_test_value machinery off while building the graph.
        with change_flags(compute_test_value='off'):
            inp = get_scalar_type('int64')()
            # Graph optimization is disabled (optimizer=None) for this
            # trivial one-op function.
            self._fn = theano.function([inp],
                                       _make_cdata(self)(inp),
                                       mode=theano.Mode(optimizer=None),
                                       profile=False)
    return self._fn
def to_scalar_type(self):
    """Return the scalar type (from ``theano.scalar``) matching this dtype."""
    # Look the type up in the scalar-type registry keyed by dtype name.
    return scal.get_scalar_type(dtype=self.dtype)
def c_init_code(self):
    """Return C initialization code, delegated to the scalar counterpart."""
    scalar_type = scal.get_scalar_type(self.dtype)
    return scalar_type.c_init_code()
def c_support_code(self):
    """Override `CLinkerObject.c_support_code`.

    Support code is supplied by the scalar type matching this dtype.
    """
    scalar_type = scal.get_scalar_type(self.dtype)
    return scalar_type.c_support_code()
def c_compile_args(self):
    """Return extra C compiler arguments, delegated to the scalar type."""
    scalar_type = scal.get_scalar_type(self.dtype)
    return scalar_type.c_compile_args()
def c_libraries(self):
    """Return C libraries to link against, delegated to the scalar type."""
    scalar_type = scal.get_scalar_type(self.dtype)
    return scalar_type.c_libraries()
def c_headers(self, c_compiler):
    """
    Override `CLinkerObject.c_headers`.

    The header list comes from the scalar type matching this dtype;
    the compiler object is forwarded unchanged.
    """
    scalar_type = scal.get_scalar_type(self.dtype)
    return scalar_type.c_headers(c_compiler)
# Code: from __future__ import print_function import numpy as np import theano from theano import scalar as scal from theano import printing from theano.printing import pprint from theano.scalar import get_scalar_type, neg, sqr from theano.tensor import elemwise # ------------------------------------------------------------------------ # Types int8 = get_scalar_type('int8') int16 = get_scalar_type('int16') int32 = get_scalar_type('int32') int64 = get_scalar_type('int64') uint8 = get_scalar_type('uint8') uint16 = get_scalar_type('uint16') uint32 = get_scalar_type('uint32') uint64 = get_scalar_type('uint64') float32 = get_scalar_type('float32') float64 = get_scalar_type('float64') complex64 = get_scalar_type('complex64') complex128 = get_scalar_type('complex128') int_types = int8, int16, int32, int64 uint_types = uint8, uint16, uint32, uint64 float_types = float32, float64
def safe_new(x, tag="", dtype=None):
    """
    Internal function that constructs a new variable from x with the same
    type, but with a different name (old name + tag). This function is used
    by gradient, or the R-op to construct new variables for the inputs of
    the inner graph such that there is no interference between the original
    graph and the newly constructed graph.
    """
    # Derive the clone's name from the original's, when it has one.
    if hasattr(x, "name") and x.name is not None:
        nw_name = x.name + tag
    else:
        nw_name = None

    if isinstance(x, theano.Constant):
        # Constants are either cast-and-rebuilt (dtype change requested)
        # or cloned as-is.
        if dtype and x.dtype != dtype:
            casted_x = x.astype(dtype)
            nwx = x.__class__(casted_x.type, x.data, x.name)
            # Shallow-copy the tag so the clone does not share it.
            nwx.tag = copy.copy(x.tag)
            return nwx
        else:
            return x.clone()
    # Note, as_tensor_variable will convert the Scalar into a
    # TensorScalar that will require a ScalarFromTensor op,
    # making the pushout optimization fail
    elif isinstance(x, scalar.ScalarVariable):
        if dtype:
            nw_x = scalar.get_scalar_type(dtype=dtype)()
        else:
            nw_x = x.type()
        nw_x.name = nw_name
        if theano.config.compute_test_value != "off":
            # Copy test value, cast it if necessary
            try:
                x_test_value = gof.op.get_test_value(x)
            except TestValueError:
                pass
            else:
                # This clause is executed if no exception was raised
                # type.filter casts/validates the value for the new type.
                nw_x.tag.test_value = nw_x.type.filter(x_test_value)
        return nw_x
    else:
        try:
            x = tensor.as_tensor_variable(x)
        except TypeError:
            # This could happen for example for random states
            pass

        # Cast x if needed. If x has a test value, this will also cast it.
        if dtype and x.dtype != dtype:
            x = x.astype(dtype)

        nw_x = x.type()
        nw_x.name = nw_name
        # Preserve test values so that the 'compute_test_value' option can be used.
        # The test value is deep-copied to ensure there can be no interactions
        # between test values, due to inplace operations for instance. This may
        # not be the most efficient memory-wise, though.
        if theano.config.compute_test_value != "off":
            try:
                nw_x.tag.test_value = copy.deepcopy(gof.op.get_test_value(x))
            except TestValueError:
                pass

        return nw_x
def c_libraries(self, c_compiler):
    """Return C libraries to link against, delegated to the scalar type.

    The compiler object is forwarded unchanged.
    """
    scalar_type = scal.get_scalar_type(self.dtype)
    return scalar_type.c_libraries(c_compiler)
def c_headers(self):
    """Override `CLinkerObject.c_headers`.

    The header list comes from the scalar type matching this dtype.
    """
    scalar_type = scal.get_scalar_type(self.dtype)
    return scalar_type.c_headers()
def c_code_cache_version(self):
    """Return the cache-version tuple for this type's generated C code.

    A local version number is prepended to the scalar type's version;
    an empty tuple (no caching) from the scalar type is propagated.
    """
    underlying = scal.get_scalar_type(self.dtype).c_code_cache_version()
    return (11,) + underlying if underlying else ()
# Code: from __future__ import print_function import numpy import theano from theano import scalar as scal from theano import printing from theano.printing import pprint from theano.scalar import get_scalar_type, neg, sqr from theano.tensor import elemwise # ------------------------------------------------------------------------ # Types int8 = get_scalar_type('int8') int16 = get_scalar_type('int16') int32 = get_scalar_type('int32') int64 = get_scalar_type('int64') uint8 = get_scalar_type('uint8') uint16 = get_scalar_type('uint16') uint32 = get_scalar_type('uint32') uint64 = get_scalar_type('uint64') float32 = get_scalar_type('float32') float64 = get_scalar_type('float64') complex64 = get_scalar_type('complex64') complex128 = get_scalar_type('complex128') int_types = int8, int16, int32, int64 uint_types = uint8, uint16, uint32, uint64 float_types = float32, float64