Ejemplo n.º 1
0
 def c_code_cache_version(self):
     """Cache version for the generated C code: (11,) combined with the
     underlying scalar type's version tuple."""
     scalar_version = scal.get_scalar_type(self.dtype).c_code_cache_version()
     # An empty scalar version means "do not cache"; propagate that.
     return (11,) + scalar_version if scalar_version else ()
Ejemplo n.º 2
0
Archivo: type.py Proyecto: intel/theano
    def _get_func(self):
        """Lazily build and cache a theano function mapping an int64 scalar
        to a cdata value of this type."""
        from theano.scalar import get_scalar_type

        if self._fn is None:
            inp = get_scalar_type('int64')()
            out = _make_cdata(self)(inp)
            self._fn = theano.function([inp], out, profile=False)
        return self._fn
Ejemplo n.º 3
0
    def _get_func(self):
        """Return (building on first use) the theano function that turns an
        int64 scalar into a cdata value."""
        from theano.scalar import get_scalar_type

        if self._fn is None:
            int_var = get_scalar_type('int64')()
            cdata_var = _make_cdata(self)(int_var)
            self._fn = theano.function([int_var], cdata_var, profile=False)
        return self._fn
Ejemplo n.º 4
0
def safe_new(x, tag='', dtype=None):
    """
    Internal function that constructs a new variable from x with the same
    type, but with a different name (old name + tag). This function is used
    by gradient, or the R-op to construct new variables for the inputs of
    the inner graph such that there is no interference between the original
    graph and the newly constructed graph.

    Parameters
    ----------
    x : Variable or Constant
        Variable to make a fresh, independent copy of.
    tag : str
        Suffix appended to ``x.name`` (when present) for the new variable.
    dtype : str, optional
        When given and different from ``x.dtype``, the new variable is cast
        to this dtype.
    """
    if hasattr(x, 'name') and x.name is not None:
        nw_name = x.name + tag
    else:
        nw_name = None

    if isinstance(x, theano.Constant):
        if dtype and x.dtype != dtype:
            casted_x = x.astype(dtype)
            nwx = x.__class__(casted_x.type, x.data, x.name)
            # BUG FIX: `copy` is the module here (see `copy.deepcopy` below),
            # so calling it directly (`copy(x.tag)`) raised TypeError.
            nwx.tag = copy.copy(x.tag)
            return nwx
        else:
            return x.clone()
    # Note, as_tensor_variable will convert the Scalar into a
    # TensorScalar that will require a ScalarFromTensor op,
    # making the pushout optimization fail
    elif isinstance(x, scalar.ScalarVariable):
        if dtype:
            nw_x = scalar.get_scalar_type(dtype=dtype)()
        else:
            nw_x = x.type()
        nw_x.name = nw_name
        return nw_x
    else:
        try:
            x = tensor.as_tensor_variable(x)
        except TypeError:
            # This could happen for example for random states, and I really
            # want to avoid the convoluted logic that checks for cuda
            # ndarrays
            pass
    nw_x = x.type()
    if dtype and nw_x.dtype != dtype:
        nw_x = nw_x.astype(dtype).type()
    nw_x.name = nw_name

    # Preserve test values so that the 'compute_test_value' option can be used.
    # The test value is deep-copied to ensure there can be no interactions
    # between test values, due to inplace operations for instance. This may
    # not be the most efficient memory-wise, though.
    if theano.config.compute_test_value != 'off':
        try:
            nw_x.tag.test_value = copy.deepcopy(gof.op.get_test_value(x))
        except AttributeError:
            # This means `x` has no test value.
            pass

    return nw_x
Ejemplo n.º 5
0
def safe_new(x, tag='', dtype=None):
    """
    Internal function that constructs a new variable from x with the same
    type, but with a different name (old name + tag). This function is used
    by gradient, or the R-op to construct new variables for the inputs of
    the inner graph such that there is no interference between the original
    graph and the newly constructed graph.

    Parameters
    ----------
    x : Variable or Constant
        Variable to make a fresh, independent copy of.
    tag : str
        Suffix appended to ``x.name`` (when present) for the new variable.
    dtype : str, optional
        When given and different from ``x.dtype``, the new variable is cast
        to this dtype.
    """
    if hasattr(x, 'name') and x.name is not None:
        nw_name = x.name + tag
    else:
        nw_name = None

    if isinstance(x, theano.Constant):
        if dtype and x.dtype != dtype:
            casted_x = x.astype(dtype)
            nwx = x.__class__(casted_x.type, x.data, x.name)
            # BUG FIX: `copy` is the module here (see `copy.deepcopy` below),
            # so calling it directly (`copy(x.tag)`) raised TypeError.
            nwx.tag = copy.copy(x.tag)
            return nwx
        else:
            return x.clone()
    # Note, as_tensor_variable will convert the Scalar into a
    # TensorScalar that will require a ScalarFromTensor op,
    # making the pushout optimization fail
    elif isinstance(x, scalar.ScalarVariable):
        if dtype:
            nw_x = scalar.get_scalar_type(dtype=dtype)()
        else:
            nw_x = x.type()
        nw_x.name = nw_name
        return nw_x
    else:
        try:
            x = tensor.as_tensor_variable(x)
        except TypeError:
            # This could happen for example for random states, and I really
            # want to avoid the convoluted logic that checks for cuda
            # ndarrays
            pass
    nw_x = x.type()
    if dtype and nw_x.dtype != dtype:
        nw_x = nw_x.astype(dtype).type()
    nw_x.name = nw_name

    # Preserve test values so that the 'compute_test_value' option can be used.
    # The test value is deep-copied to ensure there can be no interactions
    # between test values, due to inplace operations for instance. This may
    # not be the most efficient memory-wise, though.
    if theano.config.compute_test_value != 'off':
        try:
            nw_x.tag.test_value = copy.deepcopy(gof.op.get_test_value(x))
        except AttributeError:
            # This means `x` has no test value.
            pass

    return nw_x
Ejemplo n.º 6
0
 def __new__(self, *types):
     """
     Upgrade any int types to float32 or float64 to avoid losing precision.
     """
     # Integers narrower than 32 bits fit exactly in float32; 32/64-bit
     # integers need float64 to avoid precision loss.
     narrow = (scalar.int8, scalar.int16, scalar.uint8, scalar.uint16)
     wide = (scalar.int32, scalar.int64, scalar.uint32, scalar.uint64)
     conv = dict.fromkeys(narrow, scalar.float32)
     conv.update(dict.fromkeys(wide, scalar.float64))
     upgraded = []
     for t in types:
         upgraded.append(
             scalar.get_scalar_type(scalar.Scalar.upcast(conv.get(t, t))))
     return upgraded
Ejemplo n.º 7
0
    def _get_func(self):
        """
        Return a function that makes a value from an integer.

        The integer value is assumed to be a valid pointer for the
        type and no check is done to ensure that.
        """
        from theano.scalar import get_scalar_type

        if self._fn is None:
            # Test values make no sense for raw pointer integers, so they
            # are disabled while building the function.
            with change_flags(compute_test_value='off'):
                ptr = get_scalar_type('int64')()
                self._fn = theano.function(
                    [ptr], _make_cdata(self)(ptr), profile=False)
        return self._fn
Ejemplo n.º 8
0
    def _get_func(self):
        """
        Return a function that makes a value from an integer.

        The integer value is assumed to be a valid pointer for the
        type and no check is done to ensure that.
        """
        from theano.scalar import get_scalar_type

        if self._fn is None:
            # Disable test values (meaningless for raw pointers) and skip
            # graph optimization for this trivial one-op function.
            with change_flags(compute_test_value='off'):
                ptr = get_scalar_type('int64')()
                self._fn = theano.function(
                    [ptr], _make_cdata(self)(ptr),
                    mode=theano.Mode(optimizer=None),
                    profile=False)
        return self._fn
Ejemplo n.º 9
0
 def to_scalar_type(self):
     """Return the scalar Type corresponding to this type's dtype."""
     scalar_dtype = self.dtype
     return scal.get_scalar_type(dtype=scalar_dtype)
Ejemplo n.º 10
0
 def c_init_code(self):
     """Delegate C init code generation to the matching scalar type."""
     scalar_type = scal.get_scalar_type(self.dtype)
     return scalar_type.c_init_code()
Ejemplo n.º 11
0
 def c_support_code(self):
     """Override `CLinkerObject.c_support_code`; delegates to the scalar type."""
     scalar_type = scal.get_scalar_type(self.dtype)
     return scalar_type.c_support_code()
Ejemplo n.º 12
0
 def c_compile_args(self):
     """Compiler arguments come from the matching scalar type."""
     scalar_type = scal.get_scalar_type(self.dtype)
     return scalar_type.c_compile_args()
Ejemplo n.º 13
0
 def c_libraries(self):
     """Libraries to link against are those of the matching scalar type."""
     scalar_type = scal.get_scalar_type(self.dtype)
     return scalar_type.c_libraries()
Ejemplo n.º 14
0
    def c_headers(self, c_compiler):
        """
        Override `CLinkerObject.c_headers`.

        Delegates to the scalar type matching this type's dtype.
        """
        scalar_type = scal.get_scalar_type(self.dtype)
        return scalar_type.c_headers(c_compiler)
Ejemplo n.º 15
0
 def c_compile_args(self):
     """Forward compile-argument lookup to the scalar type for this dtype."""
     scalar_type = scal.get_scalar_type(self.dtype)
     return scalar_type.c_compile_args()
Ejemplo n.º 16
0
# Code:

from __future__ import print_function

import numpy as np
import theano
from theano import scalar as scal
from theano import printing
from theano.printing import pprint
from theano.scalar import get_scalar_type, neg, sqr
from theano.tensor import elemwise

# ------------------------------------------------------------------------
# Types

# One concrete scalar type singleton per supported numpy dtype.
(int8, int16, int32, int64,
 uint8, uint16, uint32, uint64) = [
    get_scalar_type('%sint%d' % (sign, bits))
    for sign in ('', 'u') for bits in (8, 16, 32, 64)]
float32, float64 = [get_scalar_type('float%d' % bits) for bits in (32, 64)]
complex64, complex128 = [
    get_scalar_type('complex%d' % bits) for bits in (64, 128)]

int_types = (int8, int16, int32, int64)
uint_types = (uint8, uint16, uint32, uint64)
float_types = (float32, float64)
Ejemplo n.º 17
0
 def c_init_code(self):
     """Forward C init code generation to the scalar type for this dtype."""
     scalar_type = scal.get_scalar_type(self.dtype)
     return scalar_type.c_init_code()
Ejemplo n.º 18
0
def safe_new(x, tag="", dtype=None):
    """
    Internal function that constructs a new variable from x with the same
    type, but with a different name (old name + tag). This function is used
    by gradient, or the R-op to construct new variables for the inputs of
    the inner graph such that there is no interference between the original
    graph and the newly constructed graph.

    Parameters
    ----------
    x : Variable or Constant
        Variable to make a fresh, independent copy of.
    tag : str
        Suffix appended to ``x.name`` (when present) for the new variable.
    dtype : str, optional
        When given and different from ``x.dtype``, the new variable is cast
        to this dtype.

    """
    if hasattr(x, "name") and x.name is not None:
        nw_name = x.name + tag
    else:
        nw_name = None

    if isinstance(x, theano.Constant):
        if dtype and x.dtype != dtype:
            # Rebuild the constant around the casted type so the clone
            # carries the requested dtype.
            casted_x = x.astype(dtype)
            nwx = x.__class__(casted_x.type, x.data, x.name)
            nwx.tag = copy.copy(x.tag)
            return nwx
        else:
            return x.clone()
    # Note, as_tensor_variable will convert the Scalar into a
    # TensorScalar that will require a ScalarFromTensor op,
    # making the pushout optimization fail
    elif isinstance(x, scalar.ScalarVariable):
        if dtype:
            nw_x = scalar.get_scalar_type(dtype=dtype)()
        else:
            nw_x = x.type()
        nw_x.name = nw_name
        if theano.config.compute_test_value != "off":
            # Copy test value, cast it if necessary
            try:
                x_test_value = gof.op.get_test_value(x)
            except TestValueError:
                pass
            else:
                # This clause is executed if no exception was raised
                nw_x.tag.test_value = nw_x.type.filter(x_test_value)
        return nw_x
    else:
        try:
            x = tensor.as_tensor_variable(x)
        except TypeError:
            # This could happen for example for random states
            pass

    # Cast x if needed. If x has a test value, this will also cast it.
    if dtype and x.dtype != dtype:
        x = x.astype(dtype)

    nw_x = x.type()
    nw_x.name = nw_name
    # Preserve test values so that the 'compute_test_value' option can be used.
    # The test value is deep-copied to ensure there can be no interactions
    # between test values, due to inplace operations for instance. This may
    # not be the most efficient memory-wise, though.
    if theano.config.compute_test_value != "off":
        try:
            nw_x.tag.test_value = copy.deepcopy(gof.op.get_test_value(x))
        except TestValueError:
            pass

    return nw_x
Ejemplo n.º 19
0
 def c_libraries(self, c_compiler):
     """Libraries to link against, as reported by the scalar type."""
     scalar_type = scal.get_scalar_type(self.dtype)
     return scalar_type.c_libraries(c_compiler)
Ejemplo n.º 20
0
    def c_headers(self, c_compiler):
        """
        Override `CLinkerObject.c_headers`.

        Headers are those required by the scalar type of this dtype.
        """
        scalar_type = scal.get_scalar_type(self.dtype)
        return scalar_type.c_headers(c_compiler)
Ejemplo n.º 21
0
 def c_headers(self):
     """Override `CLinkerObject.c_headers`; delegates to the scalar type."""
     scalar_type = scal.get_scalar_type(self.dtype)
     return scalar_type.c_headers()
Ejemplo n.º 22
0
 def to_scalar_type(self):
     """Return the scalar Type that shares this type's dtype."""
     own_dtype = self.dtype
     return scal.get_scalar_type(dtype=own_dtype)
Ejemplo n.º 23
0
 def c_libraries(self):
     """Forward the library list to the scalar type for this dtype."""
     scalar_type = scal.get_scalar_type(self.dtype)
     return scalar_type.c_libraries()
Ejemplo n.º 24
0
 def c_headers(self):
     """Override `CLinkerObject.c_headers` via the matching scalar type."""
     scalar_type = scal.get_scalar_type(self.dtype)
     return scalar_type.c_headers()
Ejemplo n.º 25
0
 def c_support_code(self):
     """Override `CLinkerObject.c_support_code` via the matching scalar type."""
     scalar_type = scal.get_scalar_type(self.dtype)
     return scalar_type.c_support_code()
Ejemplo n.º 26
0
 def c_libraries(self, c_compiler):
     """Return the link libraries reported by the scalar type."""
     scalar_type = scal.get_scalar_type(self.dtype)
     return scalar_type.c_libraries(c_compiler)
Ejemplo n.º 27
0
 def c_code_cache_version(self):
     """Version tuple for the C code cache, derived from the scalar type."""
     version = scal.get_scalar_type(self.dtype).c_code_cache_version()
     if not version:
         # The scalar type opted out of caching; this op must too.
         return ()
     return (11,) + version
Ejemplo n.º 28
0
# Code:

from __future__ import print_function

import numpy
import theano
from theano import scalar as scal
from theano import printing
from theano.printing import pprint
from theano.scalar import get_scalar_type, neg, sqr
from theano.tensor import elemwise

# ------------------------------------------------------------------------
# Types

# One concrete scalar type singleton per supported numpy dtype.
(int8, int16, int32, int64,
 uint8, uint16, uint32, uint64) = [
    get_scalar_type('%sint%d' % (sign, bits))
    for sign in ('', 'u') for bits in (8, 16, 32, 64)]
float32, float64 = [get_scalar_type('float%d' % bits) for bits in (32, 64)]
complex64, complex128 = [
    get_scalar_type('complex%d' % bits) for bits in (64, 128)]

int_types = (int8, int16, int32, int64)
uint_types = (uint8, uint16, uint32, uint64)
float_types = (float32, float64)