def unittest_helper(root_node, forward_input, expected_forward,
                    expected_backward, device_id=-1, precision="float"):
    """Evaluate *root_node* and compare forward (and optionally backward)
    results against expected values.

    Args:
        root_node: CNTK ``Function`` to evaluate.
        forward_input: dict mapping input variables to input data.
        expected_forward: sequence of expected forward outputs, one per
            result row.
        expected_backward: dict of expected gradients keyed by input
            variable, or ``None`` to skip the backward pass.
        device_id: device to run on; -1 selects the CPU.
        precision: 'float' or 'double'.
    """
    assert isinstance(root_node, Function)

    backward_pass = expected_backward is not None
    forward, backward = cntk_eval(root_node, forward_input, precision,
                                  cntk_device(device_id), backward_pass,
                                  expected_backward)

    # for forward we always expect only one result
    assert len(forward) == 1
    forward = list(forward.values())[0]
    forward = np.atleast_1d(forward)

    # zip() would silently drop unmatched trailing entries, which could hide
    # missing or extra expected results — fail loudly on a length mismatch.
    assert len(forward) == len(expected_forward)
    for res, exp in zip(forward, expected_forward):
        assert res.shape == AA(exp).shape
        assert np.allclose(res, exp, atol=TOLERANCE_ABSOLUTE)

    if expected_backward:
        for key in expected_backward:
            res, exp = backward[key], expected_backward[key]
            if isinstance(res, list):
                # sequence results: compare element-wise, lengths must match
                assert len(res) == len(exp)
                for res_seq, exp_seq in zip(res, exp):
                    assert res_seq.shape == AA(exp_seq).shape
                    assert np.allclose(res_seq, exp_seq,
                                       atol=TOLERANCE_ABSOLUTE)
            elif isinstance(res, np.ndarray):
                assert res.shape == AA(exp).shape
                assert np.allclose(res, exp, atol=TOLERANCE_ABSOLUTE)
def test_op_dropout(shape, dropout_rate, device_id, precision):
    """Dropout is stochastic: run it several times, aggregate the non-zero
    counts, and check the survival ratio is near (1 - dropout_rate)."""
    from cntk import dropout, input

    num_runs = 10
    nonzero_total = 0
    dtype = PRECISION_TO_TYPE[precision]

    for _ in range(num_runs):
        value = np.ones(shape=shape, dtype=dtype)
        a = input(shape=value.shape,
                  dtype=sanitize_dtype_cntk(dtype),
                  needs_gradient=True,
                  name='a')
        dropout_node = dropout(a, dropout_rate=dropout_rate)

        value.shape = (1,) + value.shape
        forward, backward = cntk_eval(dropout_node, {a: value}, precision,
                                      cntk_device(device_id),
                                      backward_pass=True)
        nonzero_total += np.count_nonzero(forward[dropout_node.output])

    average_nonzeros = nonzero_total / num_runs
    num_elements = np.multiply.reduce(shape)
    expected_nonzeros = num_elements * (1 - dropout_rate)
    tolerance = 0.2 * num_elements
    assert abs(average_nonzeros - expected_nonzeros) < tolerance
def test_op_dropout_with_explicit_seed(device_id, precision):
    """Identical seeds must yield identical dropout masks; a different seed,
    or no explicit seed, must yield a different mask."""
    from cntk import combine, dropout, input

    dtype = PRECISION_TO_TYPE[precision]
    value = np.ones(shape=(10, 10), dtype=dtype)
    a = input(shape=value.shape,
              dtype=sanitize_dtype_cntk(dtype),
              needs_gradient=True,
              name='a')

    seed = 123
    dropout_nodes = [
        dropout(a, dropout_rate=0.5, seed=seed),
        dropout(a, dropout_rate=0.5, seed=seed),
        dropout(a, dropout_rate=0.5, seed=seed + 1),
        dropout(a, dropout_rate=0.5),
    ]

    value.shape = (1, 1) + value.shape
    forward_input = {a: value}

    results = []
    for node in dropout_nodes:
        forward, backward = cntk_eval(node, forward_input, precision,
                                      cntk_device(device_id),
                                      backward_pass=True)
        results.append(forward[node.output])

    # same seed -> same mask
    assert np.allclose(results[0], results[1])
    # different seed / auto seed -> different mask
    assert not np.allclose(results[0], results[2])
    assert not np.allclose(results[0], results[3])
def test_changing_dropout_rate():
    """Setting the 'dropoutRate' attribute on an existing dropout node must
    take effect on subsequent evaluations."""
    from cntk import dropout, input

    shape = (100, 100)
    dtype = np.float32

    value = np.ones(shape=shape, dtype=dtype)
    a = input(shape=shape, needs_gradient=True, dtype=dtype)
    dropout_node = dropout(a, dropout_rate=0.1)

    value.shape = (1,) + value.shape
    for rate in [0.0, 0.25, 0.5, 0.78, 0.99999]:
        dropout_node.set_attribute('dropoutRate', rate)
        forward, _ = cntk_eval(dropout_node, {a: value}, dtype,
                               backward_pass=True)
        non_zeros = np.count_nonzero(forward[dropout_node.output])

        if rate == 0:
            # nothing may be dropped at rate 0
            assert non_zeros == value.size
        # survival ratio should track (1 - rate)
        assert np.isclose(1 - rate, non_zeros * 1.0 / value.size, atol=0.01)
def test_op_dropout_with_explicit_seed(device_id, precision):
    """Seeded dropout: equal seeds agree, unequal (or missing) seeds differ,
    and a cloned node reproduces its original's mask."""
    from cntk import combine, dropout

    dtype = PRECISION_TO_TYPE[precision]
    value = np.ones(shape=(100, 100), dtype=dtype)
    a = C.input_variable(shape=value.shape,
                         dtype=sanitize_dtype_cntk(dtype),
                         needs_gradient=True,
                         name='a')

    seed = 123
    dropout_nodes = [
        dropout(a, dropout_rate=0.5, seed=seed),
        dropout(a, dropout_rate=0.5, seed=seed),
        dropout(a, dropout_rate=0.5, seed=seed + 1),
        dropout(a, dropout_rate=0.5),
    ]
    cloned_nodes = [node.clone('clone') for node in dropout_nodes]

    value.shape = (1, 1) + value.shape

    results = []
    for node in dropout_nodes + cloned_nodes:
        # a clone has its own input variable, so look it up per node
        forward, backward = cntk_eval(node, {node.inputs[0]: value},
                                      precision, cntk_device(device_id),
                                      backward_pass=True)
        results.append(forward[node.output])

    assert np.allclose(results[0], results[1])
    assert not np.allclose(results[0], results[2])
    assert not np.allclose(results[0], results[3])

    # each clone must reproduce its original's output
    clones = results[len(dropout_nodes):]
    for original, clone in zip(results, clones):
        assert np.allclose(original, clone)
def test_op_dropout_with_explicit_seed(device_id, precision):
    """Verify seed behavior of dropout, including that clones keep the seed
    (and therefore the mask) of the node they were cloned from."""
    from cntk import combine, dropout

    dtype = PRECISION_TO_TYPE[precision]
    value = np.ones(shape=(100, 100), dtype=dtype)
    a = C.input_variable(shape=value.shape,
                         dtype=sanitize_dtype_cntk(dtype),
                         needs_gradient=True,
                         name='a')

    seed = 123
    dropout_nodes = [
        dropout(a, dropout_rate=0.5, seed=seed),
        dropout(a, dropout_rate=0.5, seed=seed),
        dropout(a, dropout_rate=0.5, seed=seed + 1),
        dropout(a, dropout_rate=0.5),
    ]
    cloned_nodes = [node.clone('clone') for node in dropout_nodes]

    value.shape = (1, 1) + value.shape

    results = []
    for node in dropout_nodes + cloned_nodes:
        # clones own distinct input variables; bind data via node.inputs[0]
        forward, backward = cntk_eval(node, {node.inputs[0]: value},
                                      precision, cntk_device(device_id),
                                      backward_pass=True)
        results.append(forward[node.output])

    # equal seeds -> equal masks; different or auto seed -> different masks
    assert np.allclose(results[0], results[1])
    assert not np.allclose(results[0], results[2])
    assert not np.allclose(results[0], results[3])

    clones = results[len(dropout_nodes):]
    for index, clone_result in enumerate(clones):
        assert np.allclose(results[index], clone_result)
def test_op_combine(left_operand, right_operand, operations,
                    expected_results, device_id, precision):
    """combine() over several binary ops must produce one output per op,
    each matching that op's expected result."""
    dt = PRECISION_TO_TYPE[precision]
    from .. import combine

    left_value = AA(left_operand, dtype=dt)
    right_value = AA(right_operand, dtype=dt)

    a = C.input_variable(shape=left_value.shape,
                         dtype=sanitize_dtype_cntk(precision),
                         needs_gradient=True,
                         name='a')
    b = C.input_variable(shape=right_value.shape,
                         dtype=sanitize_dtype_cntk(precision),
                         needs_gradient=True,
                         name='b')

    left_value.shape = (1, 1) + left_value.shape
    right_value.shape = (1, 1) + right_value.shape
    forward_input = {a: left_value, b: right_value}

    combine_node = combine([op(a, b) for op in operations])

    expected_forward_results = [np.asarray([[r]], dtype=dt)
                                for r in expected_results]

    forward_results, _ = cntk_eval(combine_node, forward_input, precision,
                                   cntk_device(device_id))
    actual_results = list(forward_results.values())
    assert compare_lists_of_np_arrays(actual_results,
                                      expected_forward_results)
def test_changing_dropout_rate():
    """The 'dropoutRate' attribute of a dropout node is mutable and must be
    honored by later evaluations of the same node."""
    from cntk import dropout, input

    shape = (100, 100)
    dtype = np.float32

    value = np.ones(shape=shape, dtype=dtype)
    a = input(shape=shape, needs_gradient=True, dtype=dtype)
    dropout_node = dropout(a, dropout_rate=0.1)

    value.shape = (1,) + value.shape
    for current_rate in [0.0, 0.25, 0.5, 0.78, 0.99999]:
        dropout_node.set_attribute('dropoutRate', current_rate)
        forward, _ = cntk_eval(dropout_node, {a: value}, dtype,
                               backward_pass=True)
        count_kept = np.count_nonzero(forward[dropout_node.output])

        if current_rate == 0:
            # with rate 0 every element must survive
            assert count_kept == value.size
        # fraction of survivors tracks (1 - rate) within 1%
        assert np.isclose(1 - current_rate,
                          count_kept * 1.0 / value.size,
                          atol=0.01)
def test_op_combine(left_operand, right_operand, operations,
                    expected_results, device_id, precision):
    """Evaluate a combine() of several binary ops on the same operands and
    check each output against its expected value."""
    dt = PRECISION_TO_TYPE[precision]
    from .. import combine

    left_value = AA(left_operand, dtype=dt)
    right_value = AA(right_operand, dtype=dt)

    a = C.input_variable(shape=left_value.shape,
                         dtype=sanitize_dtype_cntk(precision),
                         needs_gradient=True,
                         name='a')
    b = C.input_variable(shape=right_value.shape,
                         dtype=sanitize_dtype_cntk(precision),
                         needs_gradient=True,
                         name='b')

    # prepend batch and sequence axes
    left_value.shape = (1, 1) + left_value.shape
    right_value.shape = (1, 1) + right_value.shape
    forward_input = {a: left_value, b: right_value}

    combined = combine([operation(a, b) for operation in operations])

    expected_arrays = [np.asarray([[value]], dtype=dt)
                       for value in expected_results]

    forward_results, _ = cntk_eval(combined, forward_input, precision,
                                   cntk_device(device_id))
    assert compare_lists_of_np_arrays(list(forward_results.values()),
                                      expected_arrays)
def test_op_dropout(shape, dropout_rate, device_id, precision):
    """Because dropout is stochastic, aggregate non-zero counts over several
    evaluations and verify the survival ratio approximates 1 - dropout_rate."""
    from cntk import dropout

    num_runs = 10
    nonzero_total = 0
    dtype = PRECISION_TO_TYPE[precision]

    for _ in range(num_runs):
        value = np.ones(shape=shape, dtype=dtype)
        a = C.input_variable(shape=value.shape,
                             dtype=sanitize_dtype_cntk(dtype),
                             needs_gradient=True,
                             name='a')
        dropout_node = dropout(a, dropout_rate=dropout_rate)

        value.shape = (1,) + value.shape
        forward, backward = cntk_eval(dropout_node, {a: value}, precision,
                                      cntk_device(device_id),
                                      backward_pass=True)
        nonzero_total += np.count_nonzero(forward[dropout_node.output])

    average_nonzeros = nonzero_total / num_runs
    num_elements = np.multiply.reduce(shape)
    expected_nonzeros = num_elements * (1 - dropout_rate)
    tolerance = 0.2 * num_elements
    assert abs(average_nonzeros - expected_nonzeros) < tolerance