def float32_shared_constructor(value, name=None, strict=False,
                               allow_downcast=None, borrow=False,
                               broadcastable=None):
    """SharedVariable Constructor for CudaNdarrayType from a
    numpy.ndarray or a CudaNdarray.

    Parameters
    ----------
    value : numpy.ndarray or CudaNdarray
        Initial value. An ndarray must already be float32
        (``CudaNdarrayType.typenum``); no downcasting is performed here.
    name : str, optional
        Name of the shared variable.
    strict : bool
        Passed through to ``CudaNdarraySharedVariable``.
    allow_downcast
        Accepted for interface compatibility; not used by this constructor.
    borrow : bool
        When ``value`` is already a CudaNdarray, reuse it directly instead
        of copying. Ignored for ndarray input (the device transfer always
        makes a fresh buffer).
    broadcastable : tuple of bool, optional
        Broadcast pattern; defaults to all-False with ``value``'s ndim.

    Returns
    -------
    CudaNdarraySharedVariable

    Raises
    ------
    TypeError
        If ``value`` is not an ndarray/CudaNdarray, or is an ndarray of a
        dtype other than float32.
    """
    # if value isn't a float32 ndarray, or a CudaNdarray then raise
    if not isinstance(value,
                      (numpy.ndarray, theano.sandbox.cuda.CudaNdarray)):
        raise TypeError('ndarray or CudaNdarray required')
    if (isinstance(value, numpy.ndarray) and
            value.dtype.num != CudaNdarrayType.typenum):
        raise TypeError('float32 ndarray required')

    if broadcastable is None:
        broadcastable = (False,) * len(value.shape)
    type = CudaNdarrayType(broadcastable=broadcastable)

    get_value_return_ndarray = True
    if isinstance(value, theano.sandbox.cuda.CudaNdarray):
        get_value_return_ndarray = False
        if borrow:
            deviceval = value
        else:
            deviceval = value.copy()
    else:
        # type.broadcastable is guaranteed to be a tuple, which this next
        # function requires
        deviceval = type_support_filter(value, type.broadcastable, False,
                                        None)

    try:
        rval = CudaNdarraySharedVariable(type=type, value=deviceval,
                                         name=name, strict=strict)
    # BUGFIX: was py2-only `except Exception, e:` / `print` statement,
    # which is a syntax error under Python 3 (used elsewhere in this file).
    except Exception as e:
        print("ERROR", e)
        raise

    # BUGFIX: the original never stored the flag nor returned rval, so a
    # successful call returned None. NOTE(review): the flag appears to be
    # consumed by get_value() elsewhere — not visible from this chunk.
    rval.get_value_return_ndarray = get_value_return_ndarray
    return rval
def float32_shared_constructor(value, name=None, strict=False,
                               allow_downcast=None, borrow=False,
                               broadcastable=None, target='gpu'):
    """
    SharedVariable Constructor for CudaNdarrayType from numpy.ndarray
    or CudaNdarray.
    """
    if target != 'gpu':
        raise TypeError('not for gpu')

    # First use of the device: initialize it, but leave every global
    # "move things to the GPU" option switched off.
    if theano.sandbox.cuda.use.device_number is None:
        theano.sandbox.cuda.use(
            "gpu",
            force=True,
            default_to_move_computation_to_gpu=False,
            move_shared_float32_to_gpu=False,
            enable_cuda=False)

    # Only a float32 ndarray or a CudaNdarray may seed the shared var.
    already_on_device = isinstance(value, theano.sandbox.cuda.CudaNdarray)
    if not (already_on_device or isinstance(value, numpy.ndarray)):
        raise TypeError('ndarray or CudaNdarray required')
    if (isinstance(value, numpy.ndarray) and
            value.dtype.num != CudaNdarrayType.typenum):
        raise TypeError('float32 ndarray required')

    if broadcastable is None:
        broadcastable = (False,) * len(value.shape)
    type = CudaNdarrayType(broadcastable=broadcastable)

    # Host ndarrays are transferred; device arrays are reused (borrow)
    # or duplicated. The flag records which kind of value came in.
    get_value_return_ndarray = not already_on_device
    if already_on_device:
        deviceval = value if borrow else value.copy()
    else:
        # type.broadcastable is guaranteed to be a tuple, which
        # type_support_filter requires
        deviceval = type_support_filter(value, type.broadcastable,
                                        False, None)

    try:
        rval = CudaNdarraySharedVariable(type=type, value=deviceval,
                                         name=name, strict=strict)
    except Exception as e:
        print("ERROR", e)
        raise

    rval.get_value_return_ndarray = get_value_return_ndarray
    return rval
def float32_shared_constructor(value, name=None, strict=False,
                               allow_downcast=None, borrow=False,
                               broadcastable=None, target='gpu'):
    """
    SharedVariable Constructor for CudaNdarrayType from numpy.ndarray
    or CudaNdarray.
    """
    if target != 'gpu':
        raise TypeError('not for gpu')
    # Lazily bring the GPU up on first use without changing any of the
    # global defaults that move computation or shared values to it.
    if theano.sandbox.cuda.use.device_number is None:
        theano.sandbox.cuda.use("gpu",
                                force=True,
                                default_to_move_computation_to_gpu=False,
                                move_shared_float32_to_gpu=False,
                                enable_cuda=False)

    # Reject everything except a float32 ndarray or a CudaNdarray.
    if not isinstance(value,
                      (numpy.ndarray, theano.sandbox.cuda.CudaNdarray)):
        raise TypeError('ndarray or CudaNdarray required')
    if isinstance(value, numpy.ndarray):
        if value.dtype.num != CudaNdarrayType.typenum:
            raise TypeError('float32 ndarray required')

    if broadcastable is None:
        broadcastable = (False,) * len(value.shape)
    type = CudaNdarrayType(broadcastable=broadcastable)

    if isinstance(value, theano.sandbox.cuda.CudaNdarray):
        # Value is already on the device: reuse it when borrowing,
        # otherwise keep our own copy.
        get_value_return_ndarray = False
        if borrow:
            deviceval = value
        else:
            deviceval = value.copy()
    else:
        get_value_return_ndarray = True
        # type.broadcastable is guaranteed to be a tuple, which this next
        # function requires
        deviceval = type_support_filter(value, type.broadcastable,
                                        False, None)

    try:
        rval = CudaNdarraySharedVariable(type=type, value=deviceval,
                                         name=name, strict=strict)
    except Exception as e:
        print("ERROR", e)
        raise
    # NOTE(review): flag looks like it tells get_value() whether to hand
    # back an ndarray — consumer not visible in this chunk.
    rval.get_value_return_ndarray = get_value_return_ndarray
    return rval
def float32_shared_constructor(value, name=None, strict=False,
                               allow_downcast=None, borrow=False,
                               broadcastable=None):
    """SharedVariable Constructor for CudaNdarrayType from a
    numpy.ndarray or a CudaNdarray.

    Parameters
    ----------
    value : numpy.ndarray or CudaNdarray
        Initial value; an ndarray must already be float32
        (``CudaNdarrayType.typenum``) — nothing is downcast here.
    name : str, optional
        Name for the shared variable.
    strict : bool
        Forwarded to ``CudaNdarraySharedVariable``.
    allow_downcast
        Accepted for interface compatibility; unused here.
    borrow : bool
        Reuse a CudaNdarray ``value`` directly instead of copying it.
        Has no effect for ndarray input.
    broadcastable : tuple of bool, optional
        Broadcast pattern; defaults to all-False with ``value``'s ndim.

    Returns
    -------
    CudaNdarraySharedVariable

    Raises
    ------
    TypeError
        For a non-ndarray/CudaNdarray value, or a non-float32 ndarray.
    """
    # if value isn't a float32 ndarray, or a CudaNdarray then raise
    if not isinstance(value,
                      (numpy.ndarray, theano.sandbox.cuda.CudaNdarray)):
        raise TypeError('ndarray or CudaNdarray required')
    if (isinstance(value, numpy.ndarray) and
            value.dtype.num != CudaNdarrayType.typenum):
        raise TypeError('float32 ndarray required')

    if broadcastable is None:
        broadcastable = (False,) * len(value.shape)
    type = CudaNdarrayType(broadcastable=broadcastable)

    get_value_return_ndarray = True
    if isinstance(value, theano.sandbox.cuda.CudaNdarray):
        get_value_return_ndarray = False
        if borrow:
            deviceval = value
        else:
            deviceval = value.copy()
    else:
        # type.broadcastable is guaranteed to be a tuple, which this next
        # function requires
        deviceval = type_support_filter(value, type.broadcastable, False,
                                        None)

    try:
        rval = CudaNdarraySharedVariable(type=type, value=deviceval,
                                         name=name, strict=strict)
    # BUGFIX: was py2-only `except Exception, e:` / `print` statement —
    # a syntax error under Python 3, which other variants in this file use.
    except Exception as e:
        print("ERROR", e)
        raise

    # BUGFIX: the original dropped the flag and fell off the end, so a
    # successful call returned None instead of the shared variable.
    # NOTE(review): flag is presumably read by get_value() — consumer is
    # outside this chunk.
    rval.get_value_return_ndarray = get_value_return_ndarray
    return rval