def test_composite_clone_float32(self):
    """clone_float32 on a Composite must eliminate every float16 variable.

    Builds composites that mix float16 with other dtypes, verifies
    float16 is present, clones to float32, and verifies it is gone.
    """
    # Fix: `has_f16` was referenced but never defined in this scope
    # (NameError at runtime); define it locally as in the sibling variant.
    def has_f16(comp):
        # True iff any variable in the composite's fgraph is float16.
        return any(v.type == float16 for v in comp.fgraph.variables)

    w = int8()
    x = float16()
    y = float32()
    # Inner composite whose output involves a float16 cast.
    cz = Composite([x, y], [tanh(x + cast(y, "float16"))])
    c = Composite(
        [w, x, y],
        [
            cz(x, y)
            - cz(x, y) ** 2
            + cast(x, "int16")
            + cast(x, "float32")
            + cast(w, "float16")
            - constant(np.float16(1.0))
        ],
    )
    assert has_f16(c)
    nc = c.clone_float32()
    assert not has_f16(nc)

    # Second composite: all-float16 data inputs selected by a uint8 switch.
    v = uint8()
    w = float16()
    x = float16()
    y = float16()
    z = float16()
    c = Composite([v, w, x, y, z], [switch(v, mul(w, x, y), z)])
    assert has_f16(c)
    nc = c.clone_float32()
    assert not has_f16(nc)
def test_composite_clone_float32(self):
    """clone_float32 on a Composite must eliminate every float16 variable.

    Mixes float16 with int/float32 inputs inside nested composites,
    confirms float16 appears, then confirms the float32 clone drops it.
    """
    def has_f16(comp):
        # True iff any variable in the composite's fgraph is float16.
        return any(v.type == float16 for v in comp.fgraph.variables)

    w = int8()
    x = float16()
    y = float32()
    # Inner composite whose output involves a float16 cast.
    inner = Composite([x, y], [tanh(x + cast(y, "float16"))])
    expr = (
        inner(x, y)
        - inner(x, y) ** 2
        + cast(x, "int16")
        + cast(x, "float32")
        + cast(w, "float16")
        - constant(np.float16(1.0))
    )
    comp = Composite([w, x, y], [expr])
    assert has_f16(comp)
    assert not has_f16(comp.clone_float32())

    # Second composite: all-float16 data inputs selected by a uint8 switch.
    v = uint8()
    w = float16()
    x = float16()
    y = float16()
    z = float16()
    comp = Composite([v, w, x, y, z], [switch(v, mul(w, x, y), z)])
    assert has_f16(comp)
    assert not has_f16(comp.clone_float32())
def _test_binary(binary_op, x_range, y_range):
    """Check binary_op agrees (dtype and value) on int8 vs float32 inputs.

    Compiles the op once for each dtype pair, then sweeps the cartesian
    product of x_range and y_range comparing the two outputs.
    """
    int_a = int8('xi')
    int_b = int8('yi')
    flt_a = float32('xf')
    flt_b = float32('yf')
    int_fn = theano.function([int_a, int_b], binary_op(int_a, int_b))
    flt_fn = theano.function([flt_a, flt_b], binary_op(flt_a, flt_b))
    for xv in x_range:
        for yv in y_range:
            out_int = int_fn(xv, yv)
            out_flt = flt_fn(xv, yv)
            assert out_int.dtype == out_flt.dtype, 'incorrect dtype'
            assert np.allclose(out_int, out_flt), 'insufficient precision'
def test_composite_clone_float32(self):
    """clone_float32 on a Composite must eliminate every float16 variable."""
    # Fix: `has_f16` was referenced but never defined in this scope
    # (NameError at runtime); define it locally as in the sibling variant.
    def has_f16(comp):
        # True iff any variable in the composite's fgraph is float16.
        return any(v.type == float16 for v in comp.fgraph.variables)

    w = int8()
    x = float16()
    y = float32()
    # Inner composite whose output involves a float16 cast.
    cz = Composite([x, y], [tanh(x + cast(y, 'float16'))])
    c = Composite([w, x, y], [
        cz(x, y) - cz(x, y)**2 + cast(x, 'int16') +
        cast(x, 'float32') + cast(w, 'float16') -
        constant(np.float16(1.0))
    ])
    assert has_f16(c)
    nc = c.clone_float32()
    assert not has_f16(nc)
def test_composite_clone_float32(self):
    """clone_float32 on a Composite must eliminate every float16 variable."""
    # Fix: `has_f16` was referenced but never defined in this scope
    # (NameError at runtime); define it locally as in the sibling variant.
    def has_f16(comp):
        # True iff any variable in the composite's fgraph is float16.
        return any(v.type == float16 for v in comp.fgraph.variables)

    w = int8()
    x = float16()
    y = float32()
    # Inner composite whose output involves a float16 cast.
    cz = Composite([x, y], [tanh(x + cast(y, 'float16'))])
    c = Composite(
        [w, x, y],
        [cz(x, y) - cz(x, y)**2 + cast(x, 'int16') +
         cast(x, 'float32') + cast(w, 'float16') -
         constant(np.float16(1.0))])
    assert has_f16(c)
    nc = c.clone_float32()
    assert not has_f16(nc)
def test_true_div(self):
    """Check true_div agrees (dtype and value) on int8 vs floatX inputs."""
    # true_div's upcast policy is not exactly "upgrade_to_float",
    # so the test is a little bit different
    x_range = list(range(-127, 128))
    # All int8-ish divisors except zero: -127..-1 and 1..126.
    y_range = [v for v in range(-127, 127) if v != 0]
    int_x = int8('xi')
    int_y = int8('yi')
    flt_x = Scalar(theano.config.floatX)('xf')
    flt_y = Scalar(theano.config.floatX)('yf')
    div_int = theano.function([int_x, int_y], true_div(int_x, int_y))
    div_flt = theano.function([flt_x, flt_y], true_div(flt_x, flt_y))
    for xv in x_range:
        for yv in y_range:
            res_i = div_int(xv, yv)
            res_f = div_flt(xv, yv)
            assert res_i.dtype == res_f.dtype, 'incorrect dtype'
            assert np.allclose(res_i, res_f), 'insufficient precision'
def _test_unary(unary_op, x_range):
    """Check unary_op agrees (dtype and value) on int8 vs float32 input.

    Compiles the op once per dtype, then sweeps x_range comparing outputs.
    """
    int_in = int8('xi')
    flt_in = float32('xf')
    int_fn = theano.function([int_in], unary_op(int_in))
    flt_fn = theano.function([flt_in], unary_op(flt_in))
    for xv in x_range:
        out_int = int_fn(xv)
        out_flt = flt_fn(xv)
        assert out_int.dtype == out_flt.dtype, 'incorrect dtype'
        assert np.allclose(out_int, out_flt), 'insufficient precision'
def test_0(self):
    """Division operator dispatch: // between ints builds IntDiv; / with any
    float or complex operand builds TrueDiv."""
    a = int8()
    b = int32()
    c = complex64()
    d = float64()
    f = float32()
    # Integer floor division in both argument orders.
    for expr in (a // b, b // a):
        assert isinstance(expr.owner.op, IntDiv)
    # True division whenever a float/complex operand participates.
    for expr in (b / d, b / f, f / a, d / b, d / f, f / c, a / c):
        assert isinstance(expr.owner.op, TrueDiv)