def test_binary_func():
    """Check autograd gradients of elementwise binary ops: add, mul, and a composition."""
    x = nd.uniform(shape=(4, 5))
    y = nd.uniform(shape=(4, 5))
    # (forward function, analytic gradient w.r.t. each input) pairs
    cases = [
        (lambda a, b: a + b,
         lambda a, b: [nd.ones(a.shape), nd.ones(b.shape)]),
        (lambda a, b: a * b,
         lambda a, b: [b, a]),
        (lambda a, b: a + a * b,
         lambda a, b: [nd.ones(a.shape) + b, a]),
    ]
    for forward, backward in cases:
        autograd_assert(x, y, func=forward, grad_func=backward)
def test_binary_func():
    """Validate gradients of binary NDArray operations against closed-form results."""
    lhs = nd.uniform(shape=(4, 5))
    rhs = nd.uniform(shape=(4, 5))

    # a + b : gradient is ones for both operands
    autograd_assert(lhs, rhs,
                    func=lambda a, b: a + b,
                    grad_func=lambda a, b: [nd.ones(a.shape), nd.ones(b.shape)])
    # a * b : gradient is the other operand
    autograd_assert(lhs, rhs,
                    func=lambda a, b: a * b,
                    grad_func=lambda a, b: [b, a])
    # a + a*b : d/da = 1 + b, d/db = a
    autograd_assert(lhs, rhs,
                    func=lambda a, b: a + a * b,
                    grad_func=lambda a, b: [nd.ones(a.shape) + b, a])
def test_operator_with_state():
    """Smoke-test grad_and_loss through a stateful operator (FullyConnected)."""
    def f_fc(a, b, weight, bias):
        # Elementwise product feeds a dense layer with 32 hidden units.
        return nd.FullyConnected(a * b, weight, bias, num_hidden=32)

    a = nd.uniform(shape=(64, 50))
    b = nd.uniform(shape=(64, 50))
    weight = nd.uniform(shape=(32, 50))
    bias = nd.uniform(shape=(32,))
    # Only checks that gradient computation runs; values are not asserted here.
    grad_vals, outputs = grad_and_loss(f_fc)(a, b, weight, bias)
def test_operator_with_state():
    """Ensure grad_and_loss can differentiate through an operator holding state."""
    def f_fc(a, b, weight, bias):
        x = a * b
        fc = nd.FullyConnected(x, weight, bias, num_hidden=32)
        return fc

    inputs = (
        nd.uniform(shape=(64, 50)),   # a
        nd.uniform(shape=(64, 50)),   # b
        nd.uniform(shape=(32, 50)),   # weight
        nd.uniform(shape=(32,)),      # bias
    )
    grad_func = grad_and_loss(f_fc)
    # Exercising the backward pass is the test; no value assertions are made.
    grad_vals, outputs = grad_func(*inputs)
def test_argnum():
    """Check gradients when argnum restricts differentiation to a subset of args."""
    def f_with_mode(a, b, mode):
        # `mode` selects the operation and is itself non-differentiable.
        return a + b if mode else a * b

    x = nd.uniform(shape=(3, 2))
    y = nd.uniform(shape=(3, 2))
    add_grad = lambda a, b, mode: [nd.ones(a.shape), nd.ones(b.shape)]
    mul_grad = lambda a, b, mode: [b, a]
    autograd_assert(x, y, True, argnum=[0, 1],
                    func=f_with_mode, grad_func=add_grad)
    autograd_assert(x, y, False, argnum=[0, 1],
                    func=f_with_mode, grad_func=mul_grad)
def test_argnum():
    """Gradients must only be taken w.r.t. the arguments listed in argnum."""
    def f_with_mode(a, b, mode):
        if mode:
            return a + b
        return a * b

    arr_a = nd.uniform(shape=(3, 2))
    arr_b = nd.uniform(shape=(3, 2))
    # Expected gradients for each branch; `mode` gets no gradient.
    expected = {
        True: lambda a, b, mode: [nd.ones(a.shape), nd.ones(b.shape)],
        False: lambda a, b, mode: [b, a],
    }
    for mode in (True, False):
        autograd_assert(arr_a, arr_b, mode, argnum=[0, 1],
                        func=f_with_mode, grad_func=expected[mode])
def test_binary_func():
    """Check binary-op gradients across all storage-type combinations."""
    def check_binary_func(x, y):
        # (forward, analytic gradient) pairs exercised for the given operands.
        cases = [
            (lambda a, b: a + b,
             lambda a, b: [nd.ones(a.shape), nd.ones(b.shape)]),
            (lambda a, b: a * b,
             lambda a, b: [b, a]),
            (lambda a, b: a + a * b,
             lambda a, b: [nd.ones(a.shape) + b, a]),
        ]
        for forward, backward in cases:
            autograd_assert(x, y, func=forward, grad_func=backward)

    uniform_x = nd.uniform(shape=(4, 5))
    uniform_y = nd.uniform(shape=(4, 5))
    stypes = ['row_sparse', 'csr', 'default']
    # Every (stype_x, stype_y) pairing, including mixed sparse/dense.
    for stype_x in stypes:
        for stype_y in stypes:
            check_binary_func(uniform_x.tostype(stype_x),
                              uniform_y.tostype(stype_y))
def test_unary_func():
    """Check autograd gradients of unary ops: exp, halving, and squaring."""
    x = nd.uniform(shape=(4, 5))
    cases = [
        (lambda a: nd.exp(a), lambda a: [nd.exp(a)]),        # d/dx exp(x) = exp(x)
        (lambda a: a / 2, lambda a: [nd.ones(a.shape) * 0.5]),  # d/dx x/2 = 0.5
        (lambda a: a ** 2, lambda a: [2 * a]),               # d/dx x^2 = 2x
    ]
    for forward, backward in cases:
        autograd_assert(x, func=forward, grad_func=backward)
def test_unary_func():
    """Validate gradients of elementwise unary NDArray operations."""
    data = nd.uniform(shape=(4, 5))

    # exp: gradient equals the function itself
    autograd_assert(data,
                    func=lambda a: nd.exp(a),
                    grad_func=lambda a: [nd.exp(a)])
    # x/2: constant gradient of one half
    autograd_assert(data,
                    func=lambda a: a / 2,
                    grad_func=lambda a: [nd.ones(a.shape) * 0.5])
    # x**2: gradient 2x
    autograd_assert(data,
                    func=lambda a: a ** 2,
                    grad_func=lambda a: [2 * a])
def test_binary_func():
    """Check binary-op gradients over all storage types, silencing fallback logs."""
    def check_binary_func(x, y):
        autograd_assert(x, y,
                        func=lambda a, b: a + b,
                        grad_func=lambda a, b: [nd.ones(a.shape),
                                                nd.ones(b.shape)])
        autograd_assert(x, y,
                        func=lambda a, b: a * b,
                        grad_func=lambda a, b: [b, a])
        autograd_assert(x, y,
                        func=lambda a, b: a + a * b,
                        grad_func=lambda a, b: [nd.ones(a.shape) + b, a])

    uniform_x = nd.uniform(shape=(4, 5))
    uniform_y = nd.uniform(shape=(4, 5))
    stypes = ['default', 'row_sparse', 'csr']
    # Sparse/dense mixes trigger storage fallback; keep its logging quiet.
    with EnvManager('MXNET_STORAGE_FALLBACK_LOG_VERBOSE', '0'):
        for stype_x in stypes:
            for stype_y in stypes:
                check_binary_func(uniform_x.tostype(stype_x),
                                  uniform_y.tostype(stype_y))
def test_binary_func():
    """Gradient checks for +, *, and a+a*b across every stype pairing."""
    def check_binary_func(x, y):
        # Each entry: forward lambda paired with its analytic gradient list.
        ops = (
            (lambda a, b: a + b,
             lambda a, b: [nd.ones(a.shape), nd.ones(b.shape)]),
            (lambda a, b: a * b,
             lambda a, b: [b, a]),
            (lambda a, b: a + a * b,
             lambda a, b: [nd.ones(a.shape) + b, a]),
        )
        for fwd, bwd in ops:
            autograd_assert(x, y, func=fwd, grad_func=bwd)

    base_x = nd.uniform(shape=(4, 5))
    base_y = nd.uniform(shape=(4, 5))
    stypes = ['default', 'row_sparse', 'csr']
    # Suppress storage-fallback log spam while mixed stypes are exercised.
    with EnvManager('MXNET_STORAGE_FALLBACK_LOG_VERBOSE', '0'):
        for sx in stypes:
            for sy in stypes:
                check_binary_func(base_x.tostype(sx), base_y.tostype(sy))
def test_unary_func():
    """Check unary-op gradients for each NDArray storage type."""
    def check_unary_func(x):
        cases = [
            (lambda a: nd.exp(a), lambda a: [nd.exp(a)]),
            (lambda a: a / 2, lambda a: [nd.ones(a.shape) * 0.5]),
            (lambda a: a ** 2, lambda a: [2 * a]),
        ]
        for forward, backward in cases:
            autograd_assert(x, func=forward, grad_func=backward)

    uniform = nd.uniform(shape=(4, 5))
    # Run the same checks on sparse and dense representations of the data.
    for stype in ('row_sparse', 'csr', 'default'):
        check_unary_func(uniform.tostype(stype))
def test_unary_func():
    """Unary-op gradient checks per storage type, with fallback logging disabled."""
    def check_unary_func(x):
        autograd_assert(x,
                        func=lambda a: nd.exp(a),
                        grad_func=lambda a: [nd.exp(a)])
        autograd_assert(x,
                        func=lambda a: a / 2,
                        grad_func=lambda a: [nd.ones(a.shape) * 0.5])
        autograd_assert(x,
                        func=lambda a: a ** 2,
                        grad_func=lambda a: [2 * a])

    uniform = nd.uniform(shape=(4, 5))
    # Sparse stypes may fall back to dense kernels; keep that logging quiet.
    with EnvManager('MXNET_STORAGE_FALLBACK_LOG_VERBOSE', '0'):
        for stype in ('default', 'row_sparse', 'csr'):
            check_unary_func(uniform.tostype(stype))
def test_unary_func():
    """Verify exp, halving, and squaring gradients on every storage type."""
    def check_unary_func(x):
        # Pair each forward op with its closed-form gradient.
        table = (
            (lambda a: nd.exp(a), lambda a: [nd.exp(a)]),
            (lambda a: a / 2, lambda a: [nd.ones(a.shape) * 0.5]),
            (lambda a: a ** 2, lambda a: [2 * a]),
        )
        for fwd, bwd in table:
            autograd_assert(x, func=fwd, grad_func=bwd)

    sample = nd.uniform(shape=(4, 5))
    stypes = ['default', 'row_sparse', 'csr']
    # Silence storage-fallback warnings while iterating sparse formats.
    with EnvManager('MXNET_STORAGE_FALLBACK_LOG_VERBOSE', '0'):
        for stype in stypes:
            check_unary_func(sample.tostype(stype))