):
    # NOTE(review): chunk starts mid-function — the enclosing test's signature
    # and the construction of `c` and `a` are outside this view.
    ga = aesara.grad(c, a)
    # This only works when "specialize" options are included
    mode = aesara.compile.get_default_mode().including("fast_run")
    fga = aesara.function([a], ga, mode=mode)
    # Gradient at a constant 4-D input should be exactly zero.
    utt.assert_allclose(
        fga(np.array([[[[30.0]]]], dtype=config.floatX)),
        np.zeros((1, 1, 1, 1), dtype=config.floatX),
    )


# Broadcast test-suite for softsign: x / (1 + |x|).
TestSoftsign = makeBroadcastTester(
    op=softsign,
    expected=upcast_int8_nfunc(
        lambda inputs: check_floatX(inputs, inputs / (1.0 + np.fabs(inputs)))
    ),
    good=_good_broadcast_unary_normal_float_no_complex,
    name="SoftsignTester",
)


class TestSigmoidBinaryCrossentropy:
    # Tests for the fused sigmoid + binary cross-entropy op.

    def _get_test_inputs(self, n=50):
        # Returns [pred, target] where target is squashed into (0, 1).
        pred, target = np.random.randn(2, n).astype(config.floatX)
        # apply sigmoid to target, but not pred
        return [pred, 1 / (1 + np.exp(-target))]

    def test_matches_binary_crossentropy(self):
        # Test sigmoid_binary_crossentropy(p, t) ==
        # binary_crossentropy(sigmoid(p), t).
simplify_mul,
)


class TestSigmoid:
    # Basic gradient check for the sigmoid elemwise op.

    def setup_method(self):
        utt.seed_rng()

    def test_elemwise(self):
        utt.verify_grad(sigmoid, [np.random.rand(3, 4)])


# Broadcast test-suite comparing sigmoid against the exact logistic function.
TestSigmoidBroadcast = makeBroadcastTester(
    op=sigmoid,
    expected=upcast_int8_nfunc(
        lambda inputs: check_floatX(inputs, 1 / (1 + np.exp(-inputs)))
    ),
    good=copymod(_good_broadcast_unary_normal_no_complex, without=["uint16"]),
    # The reason that 'uint16' is excluded is that
    # theano works well but numpy overflows resulting
    # in an assertion error.
    # grad=_grad_broadcast_unary_normal,
    name="SigmoidTester",
    eps=1e-8,
)


TestUltraFastSigmoidBroadcast = makeBroadcastTester(
    op=ultra_fast_sigmoid,
    expected=upcast_int8_nfunc(
        lambda inputs: check_floatX(inputs, 1 / (1 + np.exp(-inputs)))
op=sgn_inplace,
    expected=np.sign,
    good=_good_broadcast_unary_normal_no_complex,
    inplace=True,
)


# In-place |x|, checked against numpy's abs.
TestAbsInplaceBroadcast = makeBroadcastTester(
    op=abs__inplace,
    expected=lambda x: np.abs(x),
    good=_good_broadcast_unary_normal_abs,
    inplace=True,
)


# In-place integer (floor) division, checked against numpy's //.
TestIntDivInplaceBroadcast = makeBroadcastTester(
    op=int_div_inplace,
    expected=lambda x, y: check_floatX((x, y), x // y),
    good=_good_broadcast_div_mod_normal_float_inplace,
    # I don't test the grad as the output is always an integer
    # (this is not a continuous output).
    # grad=_grad_broadcast_div_mod_normal,
    inplace=True,
)


# In-place ceil; integer dtypes are excluded from the "good" cases below.
TestCeilInplaceBroadcast = makeBroadcastTester(
    op=ceil_inplace,
    expected=upcast_float16_ufunc(np.ceil),
    good=copymod(
        _good_broadcast_unary_normal_no_complex,
        without=["integers", "int8", "uint8", "uint16"],
    ),
    # corner cases includes a lot of integers: points where Ceil is not
expected_gammaln = [] expected_psi = [] expected_tri_gamma = [] expected_chi2sf = [] expected_gammainc = [] expected_gammaincc = [] expected_gammau = [] expected_gammal = [] expected_j0 = [] expected_j1 = [] expected_jv = [] expected_i0 = [] expected_i1 = [] expected_iv = [] expected_sigmoid = (upcast_int8_nfunc( lambda inputs: check_floatX(inputs, np.log1p(np.exp(inputs)))), ) skip_scipy = "scipy is not present" TestErfBroadcast = makeBroadcastTester( op=aet.erf, expected=expected_erf, good=_good_broadcast_unary_normal, grad=_grad_broadcast_unary_normal, eps=2e-10, mode=mode_no_scipy, skip=skip_scipy, ) TestErfInplaceBroadcast = makeBroadcastTester( op=inplace.erf_inplace, expected=expected_erf, good=_good_broadcast_unary_normal_float,
def expected_log1mexp(x):
    """Reference value for log(1 - exp(x)), cast through ``check_floatX``."""
    one_minus_exp = -np.expm1(x)
    return check_floatX(x, np.log(one_minus_exp))
from aesara.graph.opt import check_stack_trace
from aesara.tensor.math import clip, sigmoid
from aesara.tensor.nnet.sigm import hard_sigmoid, ultra_fast_sigmoid
from aesara.tensor.type import matrix
from tests.tensor.utils import (
    _good_broadcast_unary_normal_no_complex,
    check_floatX,
    copymod,
    makeBroadcastTester,
    upcast_int8_nfunc,
)


# Fast sigmoid approximation, compared against the exact logistic function —
# hence the loose `eps` below.
TestUltraFastSigmoidBroadcast = makeBroadcastTester(
    op=ultra_fast_sigmoid,
    expected=upcast_int8_nfunc(
        lambda inputs: check_floatX(inputs, 1 / (1 + np.exp(-inputs)))
    ),
    # numpy reference function overflows with uint16.
    good=copymod(_good_broadcast_unary_normal_no_complex, without=["uint16"]),
    # grad=_grad_broadcast_unary_normal,
    name="UltraFastSigmoidTester",
    # This is an approx of the sigmoid. That is why we raise eps
    eps=5e-2,
)


# Piecewise-linear sigmoid approximation, also compared to the exact logistic.
TestHardSigmoidBroadcast = makeBroadcastTester(
    op=hard_sigmoid,
    expected=upcast_int8_nfunc(
        lambda inputs: check_floatX(inputs, 1 / (1 + np.exp(-inputs)))
    ),
    # numpy reference function overflows with uint16.
    good=copymod(_good_broadcast_unary_normal_no_complex, without=["uint16"]),
    # grad=_grad_broadcast_unary_normal,