def test_true_div(self):
    """Check that int8/int8 true division matches floatX/floatX division.

    ``true_div``'s upcast policy is not exactly ``upgrade_to_float``, so
    this test is a little different from the generic upcast tests: the
    integer-input graph is compared directly against a float-input
    reference graph over a grid of values.
    """
    # Zero is excluded from the denominators to avoid division by zero.
    numerators = list(range(-127, 128))
    denominators = [d for d in range(-127, 127) if d != 0]

    int_x = int8('xi')
    int_y = int8('yi')
    float_type = Scalar(theano.config.floatX)
    flt_x = float_type('xf')
    flt_y = float_type('yf')

    # Compile one function dividing the int8 inputs and a reference one
    # dividing floatX inputs.
    div_int = theano.function([int_x, int_y], true_div(int_x, int_y))
    div_flt = theano.function([flt_x, flt_y], true_div(flt_x, flt_y))

    for num in numerators:
        for den in denominators:
            out_int = div_int(num, den)
            out_flt = div_flt(num, den)
            assert out_int.dtype == out_flt.dtype, 'incorrect dtype'
            assert np.allclose(out_int, out_flt), 'insufficient precision'
def local_gpu_elemwise(node, context_name):
    """Lift a CPU ``Elemwise`` node to its ``GpuElemwise`` counterpart.

    For most scalar ops the GPU op itself is returned and applied by the
    surrounding lifter machinery.  ``Pow`` is special-cased because only
    the (float, float)->float and (double, double)->double kernels are
    implemented at the moment: its inputs are cast to the output dtype on
    the GPU and the result is explicitly moved back to the host.
    """
    op = node.op
    scalar_op = op.scalar_op

    gpu_name = op.name
    if gpu_name:
        gpu_name = 'Gpu' + gpu_name

    # Multiple-output elemwise ops are not handled; give up on the lift.
    if len(node.outputs) > 1:
        return

    gpu_op = GpuElemwise(scalar_op, name=gpu_name,
                         inplace_pattern=copy.copy(op.inplace_pattern),
                         nfunc_spec=op.nfunc_spec)

    if not isinstance(op.scalar_op, Pow):
        return gpu_op

    # Only transfer the Pow computation to the GPU when the output dtype
    # is floating point.  Otherwise, give up on the transfer to the GPU.
    out_dtype = node.outputs[0].dtype
    if out_dtype not in ('float16', 'float32', 'float64'):
        return

    # Move the inputs to the GPU, casting each one to the output dtype
    # when it does not already have it.
    gpu_inputs = []
    for inp in node.inputs:
        gpu_inp = as_gpuarray_variable(inp, context_name)
        if inp.dtype != out_dtype:
            gpu_inp = GpuElemwise(Cast(Scalar(out_dtype)))(gpu_inp)
        gpu_inputs.append(gpu_inp)

    # Perform the exponent on the GPU and transfer the output back to
    # the CPU.
    return [host_from_gpu(gpu_op(*gpu_inputs))]
def test_filter_float_subclass():
    """Make sure `Scalar.filter` can handle `float` subclasses."""
    # A 0-d value pulled from a float64 array is an np.float64, which the
    # first assert confirms is a subclass of the builtin ``float``.
    with config.change_flags(floatX="float64"):
        scalar_type = Scalar("float64")
        value = np.array([np.nan], dtype="float64")[0]
        assert isinstance(value, float)
        assert isinstance(scalar_type.filter(value), float)

    # Try again, except this time the value isn't a ``float``: np.float32
    # derives only from np.floating.
    with config.change_flags(floatX="float32"):
        scalar_type = Scalar("float32")
        value = np.array([np.nan], dtype="float32")[0]
        assert isinstance(value, np.floating)
        assert isinstance(scalar_type.filter(value), np.floating)