import numpy as np

import nnvm
import nnvm.symbol as sym
from nnvm.compiler import graph_util
# check_function is assumed to come from NNVM's testing helpers.
from nnvm.testing.check_computation import check_function


def test_block_grad():
    x = sym.Variable("x")
    y = sym.block_grad(x)

    def forward(x):
        return x

    def backward(head_grads, x):
        # block_grad stops gradient flow, so the expected gradient is zero
        return [np.zeros_like(head_grads)]

    shape = {'x': (3, 4, 5)}
    # Numerical grad checking would fail for this function
    check_function(y, forward, backward, shape=shape, numerical_grads=False)
def test_multi_loss_graph_gradients():
    # input data
    shape1 = (1000, 100)
    data1 = sym.Variable('data1', shape=(1000, 100), dtype=0)

    # fake non-sparse label
    label = sym.full(fill_value=3)

    # square loss
    sub1 = sym.elemwise_sub(data1, label, name="sub1")
    square_loss = sym.sum(data=sub1**2, axis=1, name="square_loss")

    # fake loss1
    shape2 = (1000,)
    data2 = sym.Variable('data2', shape=shape2, dtype=0)
    loss1 = sym.sqrt(data2, name="loss1")

    # fake loss2
    loss2 = sym.relu(data1, name='loss2')

    # block loss1
    total_loss = sym.elemwise_sum(
        sym.block_grad(loss1),
        square_loss,
        num_args=2,
        name="total_loss")

    # grad_g.symbol.list_output_names()
    # >> ['loss1_grad_0_output', 'grad_sum_output']
    grad_g = graph_util.get_gradient_graph([total_loss, loss2],
                                           total_loss.list_input_variables())

    # infer shape
    in_shapes, out_shapes = graph_util.infer_shape(grad_g)
    assert out_shapes == [list(shape2), list(shape1)]

    # grad_data1 is elemwise_sum of grad_loss2, grad_square_loss
    grad_data1 = grad_g.symbol[1]
    assert grad_data1.list_attr()['num_args'] == '2'

    # block grad should return zero grad
    grad_data2 = grad_g.symbol[0]
    assert 'zeros_like' in grad_g.ir()

    # test reverse infer shape for label
    assert grad_g.apply('InferShape').json_attr('shape_num_unknown_nodes') == 0

    # infer type
    in_dtypes, out_dtypes = graph_util.infer_dtype(grad_g)
    assert out_dtypes == ['float32', 'float32']

    # test reverse infer type for label
    assert grad_g.apply('InferType').json_attr('dtype_num_unknown_nodes') == 0
def test_check_function():
    # test the testing function

    x = sym.Variable("x")
    y = sym.Variable("y")

    # different styles of returning gradients from the backward function
    check_function(x + 2*y, lambda x, y: x + 2*y,
                   lambda x, y, head_grads: [head_grads, 2*head_grads],
                   shape={'x': (1, 2), y: (1, 2)}, dtype='float32')
    check_function(x + 2*y, lambda x, y: x + 2*y,
                   lambda x, y, head_grads: (head_grads, 2*head_grads),
                   shape={'x': (1, 2), y: (1, 2)}, dtype='float32')
    check_function(x + 2*y, lambda x, y: x + 2*y,
                   lambda x, y, head_grads: {'x': head_grads, 'y': 2*head_grads},
                   shape={'x': (1, 2), y: (1, 2)}, dtype='float32')
    check_function(x + 2*y, lambda x, y: x + 2*y,
                   lambda x, y, head_grads: {'y': 2*head_grads},
                   shape={'x': (1, 2), y: (1, 2)}, dtype='float32')
    check_function(x + 2*y, lambda x, y: x + 2*y,
                   lambda x, y, head_grads: [2*head_grads],
                   grad_input_vars=[y],
                   shape={'x': (1, 2), y: (1, 2)}, dtype='float32')
    check_function(x + 2*y, lambda x, y: x + 2*y,
                   lambda x, y, head_grads: 2*head_grads,
                   grad_input_vars=[y],
                   shape={'x': (1, 2), y: (1, 2)}, dtype='float32')
    check_function(x + 2*y, lambda x, y: x + 2*y,
                   lambda x, y, head_grads: 2*head_grads,
                   grad_input_vars=[y],
                   shape={'x': (1, 2), y: (1, 2)}, dtype='float64')

    # test just numerical gradients
    # different styles of shape and dtype passing
    check_function(x + 2*y, shape={'x': (1, 2), y: (1, 2)},
                   numerical_grads=True)
    check_function(x + 2*y, shape={'x': (1, 2), y: (1, 2)}, dtype='float32',
                   numerical_grads=True)
    check_function(x + 2*y, shape={'x': (1, 2), y: (1, 2)},
                   dtype={x: 'float32', 'y': 'float32'},
                   numerical_grads=True)
    check_function(x + 2*y, shape=(1, 2), dtype='float32',
                   numerical_grads=True)

    # specifying variable attributes on variable creation
    # (in this case type codes must be used)
    x = sym.Variable("x", dtype=0, shape=(1, 2))
    check_function(x + 2*y, shape={y: (1, 2)}, dtype={'y': 'float32'},
                   numerical_grads=True)
    y = sym.Variable("y", dtype=0, shape=(1, 2))

    # shape overriding
    def _fwd1(x, y):
        assert x.shape == (1, 1)
        assert y.shape == (1, 2)
        return x + 2*y
    check_function(x + 2*y, _fwd1, shape={x: (1, 1)})

    # in_range
    def _fwd2(x, y):
        assert x.shape == (100,)
        assert (x <= 0.9).all()
        assert (x >= 0.8).all()
        return x + 2*y
    check_function(x + 2*y, _fwd2, shape=(100,), in_range=(0.8, 0.9),
                   numerical_grads=False)
    check_function(x + 2*y, _fwd2, shape=(100,), in_range={'x': (0.8, 0.9)},
                   numerical_grads=False)
    check_function(x + 2*y, backward=lambda x, y, head_grads: [1.0, 2.0],
                   in_range={'head_grads_0': (1.0, 1.0)})
    # explicit passing of values
    check_function(x + 2*y, backward=lambda x, y, head_grads: [1.0, 2.0],
                   values={'head_grads_0': np.full((1, 2), 1.0)})

    # check that the function reports errors
    def _check_function_must_fail(*args, **kwargs):
        error = AssertionError
        if 'error' in kwargs:
            error = kwargs['error']
            del kwargs['error']
        try:
            check_function(*args, quiet=True, **kwargs)
        except error:
            pass
        else:
            raise AssertionError("check_function didn't raise an exception")

    _check_function_must_fail(x + 2*y, error=ValueError)
    _check_function_must_fail(x + 2*y, lambda x, y: x + y)
    _check_function_must_fail(x + 2*y,
                              backward=lambda x, y, head_grads: [1.0, 2.0])
    _check_function_must_fail(sym.block_grad(x + 2*y), numerical_grads=True)
    _check_function_must_fail(x*x, numerical_grads=True,
                              numerical_grads_params={'atol': 0.0, 'rtol': 0.0})
    _check_function_must_fail(sym.log(-x*x), numerical_grads=True,
                              error=ValueError)

    # different styles of returning results from the forward function
    check_function(x + 2*y, lambda x, y: [x + 2*y], numerical_grads=False)
    _check_function_must_fail(x + 2*y, lambda x, y: [x + 2*y, x],
                              numerical_grads=False, error=ValueError)
    _check_function_must_fail(x + 2*y, lambda x, y: [],
                              numerical_grads=False, error=ValueError)

    # multiple outputs
    z = sym.Group([2*x + y, x + 2*y])
    check_function(z, lambda x, y: [2*x + y, x + 2*y])
    check_function(z, lambda x, y: (2*x + y, x + 2*y))

    check_function(z, backward=lambda x, y, head_grads:
                   [2*head_grads[0] + head_grads[1],
                    head_grads[0] + 2*head_grads[1]])
    _check_function_must_fail(z, backward=lambda x, y, head_grads:
                              [2*head_grads[0], 2*head_grads[1]])
    check_function(z, backward=lambda x, y, head_grads:
                   [head_grads[1], 2*head_grads[1]],
                   in_range={'head_grads_0': (0, 0)})

    check_function(z, numerical_grads=True)

    z = sym.Group([sym.block_grad(2*x + y), x + 2*y])
    check_function(z, lambda x, y: [2*x + y, x + 2*y], numerical_grads=False)
    _check_function_must_fail(z, lambda x, y: [2*x + y, x + 2*y])
    _check_function_must_fail(z, numerical_grads=True)

    z = sym.Group([2*x + y, sym.block_grad(x + 2*y)])
    _check_function_must_fail(z, numerical_grads=True)

    z = sym.Group([2*x + y, x + 2*y, x, y, sym.sum(x)])
    check_function(z, lambda x, y: [2*x + y, x + 2*y, x, y, np.sum(x)])

    # passing additional parameters to forward and backward
    def _fwd3(x, p):
        assert p == 'v'
        return x + 1

    def _bwd3(x, p, head_grads):
        assert p == 'v'
        return head_grads
    check_function(x + 1, _fwd3, _bwd3, additional_params={'p': 'v'})

    # implicitly created variables and shape/dtype inference for inputs
    x = sym.Variable("x", shape=(2, 3), dtype=0)
    b = sym.Variable("b")
    y = sym.dense(data=x, bias=b, units=4)
    # Don't check gradients on cuda because it doesn't yet support ewise after reduce
    check_function(y, exclude_targets={'cuda'}, numerical_grads=True)
    check_function(y, shape={'x': (3, 4)}, exclude_targets={'cuda'},
                   numerical_grads=True)
    check_function(y, dtype={'x': 'float64'}, exclude_targets={'cuda'},
                   numerical_grads=True)

    x = sym.Variable("x")
    b = sym.Variable("b")
    w = sym.Variable("w")
    y = sym.dense(data=x, bias=b, weight=w, units=4)

    def _fwd_dense(x, w, b):
        return np.dot(x, w.T) + b
    check_function(y, _fwd_dense, shape={'x': (1, 2)}, dtype={'x': 'float32'},
                   numerical_grads=False)
    check_function(y, _fwd_dense, shape={'x': (1, 2)}, dtype={'w': 'float64'},
                   numerical_grads=False)
    _check_function_must_fail(y, _fwd_dense, shape={'x': (1, 2)},
                              dtype={'w': 'float64', 'b': 'float32'},
                              numerical_grads=False,
                              error=nnvm._base.NNVMError)
    # fails because no shape
    _check_function_must_fail(y, _fwd_dense, numerical_grads=False,
                              error=ValueError)
    # ok because type is float32 by default
    check_function(y, _fwd_dense, shape={'x': (1, 2)}, numerical_grads=False)
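

# Entry point so the file can also be run as a plain script (an assumed
# convenience, mirroring the usual NNVM test-script pattern; pytest does
# not need it).
if __name__ == "__main__":
    test_block_grad()
    test_multi_loss_graph_gradients()
    test_check_function()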