def grad(self, inp, grads):
    (x,) = inp
    (gz,) = grads
    res = true_div(-1.0, expm1(-x))
    # Correct gradient at 0.0 to be -inf
    res = switch(isinf(res), -np.inf, res)
    return [gz * res]
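# Editorial sketch (not part of the original file): a NumPy check of the
# closed form used in `grad` above. Assuming this op computes
# log1mexp(x) = log(1 - exp(x)) for x <= 0, its derivative
# -exp(x) / (1 - exp(x)) is algebraically equal to -1.0 / expm1(-x).
def _sketch_grad_matches_finite_difference():
    import numpy as np

    xs = np.array([-3.0, -1.0, -0.1])
    analytic = -1.0 / np.expm1(-xs)

    # Central finite difference of f(x) = log(1 - exp(x)) = log1p(-exp(x)).
    eps = 1e-6

    def f(v):
        return np.log1p(-np.exp(v))

    numeric = (f(xs + eps) - f(xs - eps)) / (2 * eps)
    assert np.allclose(analytic, numeric, rtol=1e-4)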
def test_straightforward(self):
    x, y, z = floats("xyz")
    e = mul(add(x, y), true_div(x, y))
    C = Composite([x, y], [e])
    c = C.make_node(x, y)
    # print c.c_code(['x', 'y'], ['z'], dict(id = 0))
    g = FunctionGraph([x, y], [c.out])
    fn = DualLinker().accept(g).make_function()
    assert fn(1.0, 2.0) == 1.5
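# Editorial sketch (assumed public API, not part of the original file): what
# the test above exercises. Composite fuses a scalar sub-graph into a single
# op, so compiling the fused op directly should give the same result as
# running the graph through the DualLinker:
def _sketch_composite_fuses_scalar_graph():
    import aesara
    from aesara.scalar import Composite, add, float64, mul, true_div

    x = float64("x")
    y = float64("y")
    comp = Composite([x, y], [mul(add(x, y), true_div(x, y))])
    fn = aesara.function([x, y], comp(x, y))
    # (1.0 + 2.0) * (1.0 / 2.0) == 1.5, matching the assertion above.
    assert fn(1.0, 2.0) == 1.5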
def test_with_constants(self):
    x, y, z = floats("xyz")
    e = mul(add(70.0, y), true_div(x, y))
    C = Composite([x, y], [e])
    c = C.make_node(x, y)
    assert "70.0" in c.op.c_code(c, "dummy", ["x", "y"], ["z"], dict(id=0))
    # print c.c_code(['x', 'y'], ['z'], dict(id = 0))
    g = FunctionGraph([x, y], [c.out])
    fn = DualLinker().accept(g).make_function()
    assert fn(1.0, 2.0) == 36.0
def test_true_div(self):
    # true_div's upcast policy is not exactly "upgrade_to_float",
    # so the test is a little bit different
    x_range = list(range(-127, 128))
    y_range = list(range(-127, 0)) + list(range(1, 127))

    xi = int8("xi")
    yi = int8("yi")
    xf = Scalar(aesara.config.floatX)("xf")
    yf = Scalar(aesara.config.floatX)("yf")

    ei = true_div(xi, yi)
    fi = aesara.function([xi, yi], ei)

    ef = true_div(xf, yf)
    ff = aesara.function([xf, yf], ef)

    for x_val in x_range:
        for y_val in y_range:
            outi = fi(x_val, y_val)
            outf = ff(x_val, y_val)

            assert outi.dtype == outf.dtype, "incorrect dtype"
            assert np.allclose(outi, outf), "insufficient precision"
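# Editorial sketch (NumPy analogue, not part of the original file): the upcast
# behaviour the comment above refers to. Under true division, integer inputs
# promote to a float result, which is why the int8 path is compared against a
# separately compiled float path instead of going through "upgrade_to_float":
def _sketch_true_div_upcasts_ints():
    import numpy as np

    out = np.true_divide(np.int8(1), np.int8(2))
    assert np.issubdtype(out.dtype, np.floating)
    assert out == 0.5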
def test_mul_add_true():
    x, y, z = floats("xyz")
    e = mul(add(x, y), true_div(x, y))
    g = FunctionGraph([x, y], [e])
    fn = DualLinker().accept(g).make_function()
    assert fn(1.0, 2.0) == 1.5